From 2d83847eb9e4ea6baea22ec7d7326cf885fb64ce Mon Sep 17 00:00:00 2001 From: CharlieC3 <2747302+CharlieC3@users.noreply.github.com> Date: Fri, 19 Jul 2024 12:07:42 -0400 Subject: [PATCH 001/910] chore: update bitcoin peer and ports in example config files --- docs/profiling.md | 2 +- testnet/stacks-node/conf/mainnet-follower-conf.toml | 6 +++--- testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 6 +++--- testnet/stacks-node/conf/regtest-follower-conf.toml | 6 +++--- testnet/stacks-node/conf/testnet-follower-conf.toml | 10 +++++----- testnet/stacks-node/conf/testnet-miner-conf.toml | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/profiling.md b/docs/profiling.md index 832b3d44572..3e43cf9b633 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -28,7 +28,7 @@ Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: $ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml -DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } +DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoin.hiro.so"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: 
Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 2ecbc806862..02379c65d96 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -7,9 +7,9 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoind.stacks.co" -username = "blockstack" -password = "blockstacksystem" +peer_host = "bitcoin.hiro.so" +username = "hirosystems" +password = "hirosystems" rpc_port = 8332 peer_port = 8333 diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index aed3e9874c2..e3c93bfd2bf 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -9,9 +9,9 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoind.stacks.co" -username = "blockstack" -password = "blockstacksystem" +peer_host = "bitcoin.hiro.so" +username = "hirosystems" +password = "hirosystems" rpc_port = 8332 peer_port = 8333 burn_fee_cap = 1 diff --git a/testnet/stacks-node/conf/regtest-follower-conf.toml b/testnet/stacks-node/conf/regtest-follower-conf.toml index a2a71c8acb9..5677551264a 100644 --- a/testnet/stacks-node/conf/regtest-follower-conf.toml +++ b/testnet/stacks-node/conf/regtest-follower-conf.toml @@ -8,9 +8,9 @@ wait_time_for_microblocks = 10000 [burnchain] chain = "bitcoin" mode = "krypton" -peer_host = "bitcoind.regtest.stacks.co" -username = "blockstack" -password = "blockstacksystem" +peer_host = "bitcoin.regtest.hiro.so" +username = "hirosystems" +password = "hirosystems" rpc_port = 18443 peer_port = 18444 diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index cb23477b27d..46c70a01985 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -8,11 +8,11 @@ wait_time_for_microblocks = 10000 [burnchain] chain = "bitcoin" mode = "xenon" -peer_host = "bitcoind.testnet.stacks.co" -username = "blockstack" -password = "blockstacksystem" -rpc_port = 18332 -peer_port = 18333 +peer_host = "bitcoin.regtest.hiro.so" +username = "hirosystems" +password = "hirosystems" +rpc_port = 18443 +peer_port = 18444 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] diff --git 
a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index ca52b33a23e..7e1ce1bf5ed 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -14,8 +14,8 @@ mode = "xenon" peer_host = "127.0.0.1" username = "" password = "" -rpc_port = 18332 -peer_port = 18333 +rpc_port = 18443 +peer_port = 18444 [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" From 09ddd9ef73f685a21c675fd3429b9e56e2e04ffb Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Fri, 19 Jul 2024 19:16:34 +0300 Subject: [PATCH 002/910] move changelog and release-process docs from `libsigner` to `stacks-signer` --- {libsigner => stacks-signer}/CHANGELOG.md | 0 {libsigner => stacks-signer}/release-process.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {libsigner => stacks-signer}/CHANGELOG.md (100%) rename {libsigner => stacks-signer}/release-process.md (100%) diff --git a/libsigner/CHANGELOG.md b/stacks-signer/CHANGELOG.md similarity index 100% rename from libsigner/CHANGELOG.md rename to stacks-signer/CHANGELOG.md diff --git a/libsigner/release-process.md b/stacks-signer/release-process.md similarity index 100% rename from libsigner/release-process.md rename to stacks-signer/release-process.md From 592a31b4df5d73d6a5647bfc481bd47b45a3e0bb Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 19 Jul 2024 13:52:31 -0700 Subject: [PATCH 003/910] Updating docs removing several old docs files that are no longer relevant correcting other docs that were out of date --- docs/SIPS.md | 2 +- docs/ci-release.md | 355 ------------------ docs/ci-workflow.md | 281 ++++++++++++++ docs/community.md | 23 -- docs/init.md | 5 +- docs/mining.md | 16 +- docs/release-process.md | 146 ++++--- .../stacks-node/conf/local-follower-conf.toml | 47 --- .../stacks-node/conf/local-leader-conf.toml | 44 --- .../conf/mainnet-follower-conf.toml | 4 +- .../stacks-node/conf/mainnet-miner-conf.toml | 19 +- .../conf/mainnet-mockminer-conf.toml | 3 +- .../conf/mocknet-follower-conf.toml | 33 -- .../stacks-node/conf/mocknet-miner-conf.toml | 32 -- testnet/stacks-node/conf/prometheus.yml | 13 - .../conf/regtest-follower-conf.toml | 37 -- .../conf/testnet-follower-conf.toml | 50 ++- .../stacks-node/conf/testnet-miner-conf.toml | 34 -- 18 files changed, 418 insertions(+), 726 deletions(-) delete mode 100644 docs/ci-release.md create mode 100644 docs/ci-workflow.md delete mode 100644 docs/community.md delete mode 100644 testnet/stacks-node/conf/local-follower-conf.toml delete mode 100644 testnet/stacks-node/conf/local-leader-conf.toml delete mode 100644 testnet/stacks-node/conf/mocknet-follower-conf.toml delete mode 100644 testnet/stacks-node/conf/mocknet-miner-conf.toml delete mode 100644 testnet/stacks-node/conf/prometheus.yml delete mode 100644 testnet/stacks-node/conf/regtest-follower-conf.toml delete mode 100644 testnet/stacks-node/conf/testnet-miner-conf.toml diff --git a/docs/SIPS.md b/docs/SIPS.md index abce8c220cc..0930f5d51e7 100644 --- a/docs/SIPS.md +++ b/docs/SIPS.md @@ -4,4 +4,4 @@ Stacks improvement proposals (SIPs) are aimed at describing the implementation o See [SIP 000](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md) for more details. 
-The SIPs now located in the [stacksgov/sips](https://github.com/stacksgov/sips) repository as part of the [Stacks Community Governance organization](https://github.com/stacksgov). +The SIPs are located in the [stacksgov/sips](https://github.com/stacksgov/sips) repository as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/docs/ci-release.md b/docs/ci-release.md deleted file mode 100644 index f7881ba675e..00000000000 --- a/docs/ci-release.md +++ /dev/null @@ -1,355 +0,0 @@ -# Releases - -All releases are built via a Github Actions workflow named `CI` ([ci.yml](../.github/workflows/ci.yml)), and is responsible for: - -- Verifying code is formatted correctly -- Building binary archives and checksums -- Docker images -- Triggering tests conditionally (different tests run for a release vs a PR) - -1. Releases are only created if a tag is **manually** provided when the [CI workflow](../.github/workflows/ci.yml) is triggered. -2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. tests can be retried quickly since the cache will persist until the cleanup job is run. -3. [nextest](https://nexte.st/) is used to run the tests from an archived file that is cached (using commit sha as a key)) - - Two [archives](https://nexte.st/book/reusing-builds.html) are created, one for genesis tests and one for generic tests (it is done this way to reduce the time spent building) - - Unit-tests are [partitioned](https://nexte.st/book/partitioning.html) and multi-threaded to speed up execution time - -## TL;DR - -- Pushing a feature branch will not trigger a workflow -- An open/re-opened/synchronized PR will produce a single image built from source on Debian with glibc with 2 tags: - - `stacks-core:` - - `stacks-core:` -- A merged PR into `default-branch` from `develop` will produce a single image built from source on Debian with glibc: - - `stacks-core:` -- An untagged build of any branch will produce a single image built from source on Debian with glibc: - - `stacks-core:` -- A tagged release on a non-default branch will produce: - - Docker Alpine image for several architectures tagged with: - - `stacks-core:` - - Docker Debian image for several architectures tagged with: - - `stacks-core:` -- A tagged release on the default branch will produce: - - Github Release of the specified tag with: - - Binary archives for several architectures - - Docker Alpine image for several architectures tagged with: - - `stacks-core:` - - `stacks-core:` - - Docker Debian image for several architectures tagged with: - - `stacks-core:` - - `stacks-core:` - -## Release workflow - -1. Create a feature branch: `feat/fix-something` -2. PR `feat/fix-something` to the `develop` branch where the PR is numbered `112` - 1. Docker image tagged with the **branch name** and **PR number** - - ex: - - `stacks-core:feat-fix-something` - - `stacks-core:pr-112` - 2. CI tests are run -3. PR `develop` to the default branch where the PR is numbered `112` - 1. Docker image tagged with the **branch name** and **PR number** - - ex: - - `stacks-core:feat-fix-something` - - `stacks-core:pr-112` - 2. CI tests are run -4. Merge `develop` branch to the default branch - 1. Docker image is tagged with the **default branch** `master` - - ex: - - `stacks-core:master` - 2. CI tests are run -5. 
CI workflow is manually triggered on **non-default branch** with a version, i.e. `2.1.0.0.0-rc0` - 1. No Docker images/binaries are created - 2. All release tests are run -6. CI workflow is manually triggered on **default branch** with a version, i.e. `2.1.0.0.0` - 1. Github release for the manually input version is created with binaries - 2. All release tests are run - 3. Docker image pushed with tags of the **input version** and **latest** - - ex: - - `stacks-core:2.1.0.0.0-debian` - - `stacks-core:latest-debian` - - `stacks-core:2.1.0.0.0` - - `stacks-core:latest` - -## Tests - -Tests are separated into several different workflows, with the intention that they can be _conditionally_ run depending upon the triggering operation. For example, on a PR synchronize we don't want to run some identified "slow" tests, but we do want to run the [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) and [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml). - -There are also 2 different methods in use with regard to running tests: - -1. [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs) -2. [nextest partitioning](https://nexte.st/book/partitioning.html) - -A matrix is used when there are several known tests that need to be run. Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). - -There is also a workflow designed to run tests that are manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). -This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. - -Files: - -- [Standalone Tests](../.github/workflows/standalone-tests.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) - -### Adding/changing tests - -With the exception of `unit-tests` in [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml), adding/removing a test requires a change to the workflow matrix. Example from [Atlas Tests](../.github/workflows/atlas-tests.yml): - -```yaml -atlas-tests: - name: Atlas Test - runs-on: ubuntu-latest - strategy: - ## Continue with the test matrix even if we've had a failure - fail-fast: false - ## Run a maximum of 2 concurrent tests from the test matrix - max-parallel: 2 - matrix: - test-name: - - tests::neon_integrations::atlas_integration_test - - tests::neon_integrations::atlas_stress_integration_test -``` - -Example of adding a new test `tests::neon_integrations::atlas_new_test`: - -```yaml - ... - matrix: - test-name: - - tests::neon_integrations::atlas_integration_test - - tests::neon_integrations::atlas_stress_integration_test - - tests::neon_integrations::atlas_new_test -``` - -The separation of tests (outside of [Slow Tests](../.github/workflows/slow-tests.yml)) is performed by creating a separate workflow for each _type_ of test that is being run. Using the example above, to add/remove any tests from being run - the `matrix` will need to be adjusted. 
- -ex: - -- `Atlas Tests`: Tests related to Atlas -- `Bitcoin Tests`: Tests relating to burnchain operations -- `Epoch Tests`: Tests related to epoch changes -- `Slow Tests`: These tests have been identified as taking longer than others. The threshold used is if a test takes longer than `10 minutes` to complete successfully (or times out semi-regularly), it should be added here. -- `Stacks Blockchain Tests`: - - `full-genesis`: Tests related to full genesis - -### Checking the result of multiple tests at once - -You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in 1 job. -If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. - -If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. - -In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. -If any of the 11 jobs are failing, the `check-tests` job will also fail. - -```yaml -check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - full-genesis - - unit-tests - - open-api-validation - - core-contracts-clarinet-test - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" -``` - -## Triggering a workflow - -### PR a branch to develop - -ex: Branch is named `feat/fix-something` and the PR is numbered `112` - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags -- ex: - - `stacks-core:feat-fix-something` - - `stacks-core:pr-112` - ---- - -### Merging a branch to develop - -Nothing is triggered automatically - ---- - -### PR develop to master branches - -ex: Branch is named `develop` and the PR is numbered `113` - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags -- ex: - - `stacks-core:develop` - - `stacks-core:pr-113` - ---- - -### Merging a PR from develop to master - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag -- ex: - - `stacks-core:master` - ---- - -### Manually triggering workflow without tag (any branch) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain 
Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag -- ex: - - `stacks-core:` - ---- - -### Manually triggering workflow with tag on a non-default branch (i.e. tag of `2.1.0.0.0-rc0`) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) - ---- - -### Manually triggering workflow with tag on default branch (i.e. tag of `2.1.0.0.0`) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) -- [Binaries built for specified architectures](../.github/workflows/create-source-binary.yml) - - Archive and checksum files added to github release -- [Github release](../.github/workflows/github-release.yml) (with artifacts/checksum) is created using the manually input tag -- [Docker image](../.github/workflows/image-build-binary.yml) built from binaries on debian/alpine distributions and pushed with the provided input tag and `latest` -- ex: - - `stacks-core:2.1.0.0.0-debian` - - `stacks-core:latest-debian` - - `stacks-core:2.1.0.0.0` - - `stacks-core:latest` - -## Mutation Testing - -When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR. -It checks the new and altered functions through mutation testing. -Mutation testing involves making small changes (mutations) to the code to check if the tests can detect these changes. - -The mutations are run with or without a [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs). -The matrix is used when there is a large number of mutations to run ([check doc specific cases](https://github.com/stacks-network/actions/blob/main/stacks-core/mutation-testing/check-packages-and-shards/README.md#outputs)). -We utilize a matrix strategy with shards to enable parallel execution in GitHub Actions. -This approach allows for the concurrent execution of multiple jobs across various runners. -The total workload is divided across all shards, effectively reducing the overall duration of a workflow because the time taken is approximately the total time divided by the number of shards (+ initial build & test time). -This is particularly advantageous for large packages that have significant build and test times, as it enhances efficiency and speeds up the process. - -Since mutation testing is directly correlated to the written tests, there are slower packages (due to the quantity or time it takes to run the tests) like `stackslib` or `stacks-node`. -These mutations are run separately from the others, with one or more parallel jobs, depending on the amount of mutations found. 
- -Once all the jobs have finished testing mutants, the last job collects all the tested mutations from the previous jobs, combines them and outputs them to the `Summary` section of the workflow, at the bottom of the page. -There, you can find all mutants on categories, with links to the function they tested, and a short description on how to fix the issue. -The PR should only be approved/merged after all the mutants tested are in the `Caught` category. - -### Time required to run the workflow based on mutants outcome and packages' size - -- Small packages typically completed in under 30 minutes, aided by the use of shards. -- Large packages like stackslib and stacks-node initially required about 20-25 minutes for build and test processes. - - Each "missed" and "caught" mutant took approximately 15 minutes. Using shards, this meant about 50-55 minutes for processing around 32 mutants (10-16 functions modified). Every additional 8 mutants added another 15 minutes to the runtime. - - "Unviable" mutants, which are functions lacking a Default implementation for their returned struct type, took less than a minute each. - - "Timeout" mutants typically required more time. However, these should be marked to be skipped (by adding a skip flag to their header) since they indicate functions unable to proceed in their test workflow with mutated values, as opposed to the original implementations. - -File: - -- [PR Differences Mutants](../.github/workflows/pr-differences-mutants.yml) - -### Mutant Outcomes - -- caught — A test failed with this mutant applied. -This is a good sign about test coverage. - -- missed — No test failed with this mutation applied, which seems to indicate a gap in test coverage. -Or, it may be that the mutant is undistinguishable from the correct code. -In any case, you may wish to add a better test. - -- unviable — The attempted mutation doesn't compile. -This is inconclusive about test coverage, since the function's return structure may not implement `Default::default()` (one of the mutations applied), hence causing the compile to fail. -It is recommended to add `Default` implementation for the return structures of these functions, only mark that the function should be skipped as a last resort. - -- timeout — The mutation caused the test suite to run for a long time, until it was eventually killed. -You might want to investigate the cause and only mark the function to be skipped if necessary. - -### Skipping Mutations - -Some functions may be inherently hard to cover with tests, for example if: - -- Generated mutants cause tests to hang. -- You've chosen to test the functionality by human inspection or some higher-level integration tests. -- The function has side effects or performance characteristics that are hard to test. -- You've decided that the function is not important to test. - -To mark functions as skipped, so they are not mutated: - -- Add a Cargo dependency of the [mutants](https://crates.io/crates/mutants) crate, version `0.0.3` or later (this must be a regular `dependency`, not a `dev-dependency`, because the annotation will be on non-test code) and mark functions with `#[mutants::skip]`, or - -- You can avoid adding the dependency by using the slightly longer `#[cfg_attr(test, mutants::skip)]`. 
-
-### Example
-
-```rust
-use std::time::{Duration, Instant};
-
-/// Returns true if the program should stop
-#[cfg_attr(test, mutants::skip)] // Returning false would cause a hang
-fn should_stop() -> bool {
-    true
-}
-
-pub fn controlled_loop() {
-    let start = Instant::now();
-    for i in 0.. {
-        println!("{}", i);
-        if should_stop() {
-            break;
-        }
-        if start.elapsed() > Duration::from_secs(60 * 5) {
-            panic!("timed out");
-        }
-    }
-}
-
-mod test {
-    #[test]
-    fn controlled_loop_terminates() {
-        super::controlled_loop()
-    }
-}
-```
-
----
diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md
new file mode 100644
index 00000000000..df63ee8fa04
--- /dev/null
+++ b/docs/ci-workflow.md
@@ -0,0 +1,281 @@
+# CI Workflows
+
+All releases are built via a Github Actions workflow named [`CI`](../.github/workflows/ci.yml), which is responsible for:
+
+- Verifying code is formatted correctly
+- Integration tests
+- [Mutation tests](https://en.wikipedia.org/wiki/Mutation_testing)
+- Creating releases
+  - Building binary archives and calculating checksums
+  - Publishing Docker images
+
+1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`).
+2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha.
+   Tests can be retried quickly since the cache will persist until the cleanup job is run.
+3. [Nextest](https://nexte.st/) is used to run the tests from a cached build archive file (using commit sha as the cache key).
+   - Two [test archives](https://nexte.st/docs/ci-features/archiving/) are created, one for genesis tests and one for generic tests.
+   - Unit-tests are [partitioned](https://nexte.st/docs/ci-features/partitioning/) and parallelized to speed up execution time.
+4. Most workflow steps are called from a separate actions repo to reduce duplication.
+
+## TL;DR
+
+- Pushing a new branch will not trigger a workflow
+- An open/re-opened/synchronized PR will produce a docker image built from source on Debian with glibc with the following tags:
+  - `stacks-core:<branch-name>`
+  - `stacks-core:<pr-number>`
+- An untagged build of any branch will produce a single image built from source on Debian with glibc:
+  - `stacks-core:<branch-name>`
+- Running the [CI workflow](../.github/workflows/ci.yml) on a `release/X.Y.Z.A.n` branch will produce:
+  - Github Release of the branch with:
+    - Binary archives for several architectures
+    - Checksum file containing hashes for each archive
+    - Tag of the `release/X.Y.Z.A.n` version, in the format of: `X.Y.Z.A.n`
+  - Docker Debian images for several architectures tagged with:
+    - `stacks-core:latest`
+    - `stacks-core:X.Y.Z.A.n`
+    - `stacks-core:X.Y.Z.A.n-debian`
+  - Docker Alpine images for several architectures tagged with:
+    - `stacks-core:X.Y.Z.A.n-alpine`
+
+## Release workflow
+
+The process to build and tag a release is defined [here](./release-process.md).
+
+## Tests
+
+Tests are separated into several different workflows, with the intention that they can be _conditionally_ run depending upon the triggering operation. For example, when a PR is opened we don't want to run some identified "slow" tests, but we do want to run the [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) and [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml).
+
+There are also 2 different methods in use with regard to running tests:
+
+1. 
[Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs) +2. [nextest partitioning](https://nexte.st/book/partitioning.html) + +A matrix is used when there are several known tests that need to be run in parallel. +Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). + +There is also a workflow designed to run tests that is manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). +This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. +For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). +Likewise, selecting `Release Tests` will run the same tests as a release workflow. + +### Adding/changing tests + +With the exception of `unit-tests` in [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml), adding/removing a test requires a change to the workflow matrix. Example from [Atlas Tests](../.github/workflows/atlas-tests.yml): + +```yaml +atlas-tests: + name: Atlas Test + ... + matrix: + test-name: + - tests::neon_integrations::atlas_integration_test + - tests::neon_integrations::atlas_stress_integration_test +``` + +Example of adding a new test `tests::neon_integrations::atlas_new_test`: + +```yaml +atlas-tests: + name: Atlas Test + ... + matrix: + test-name: + - tests::neon_integrations::atlas_integration_test + - tests::neon_integrations::atlas_stress_integration_test + - tests::neon_integrations::atlas_new_test +``` + +The separation of tests (outside of [Slow Tests](../.github/workflows/slow-tests.yml)) is performed by creating a separate workflow for each _type_ of test that is being run. +Using the example above, to add/remove any tests from being run - the workflow `matrix` will need to be adjusted. + +ex: + +- `Atlas Tests`: Tests related to Atlas +- `Bitcoin Tests`: Tests relating to burnchain operations +- `Epoch Tests`: Tests related to epoch changes +- `Slow Tests`: These tests have been identified as taking longer than others. The threshold used is if a test takes longer than `10 minutes` to complete successfully (or even times out intermittently), it should be added here. +- `Stacks Core Tests`: + - `full-genesis`: Tests related to full genesis + - `core-contracts`: Tests related to boot contracts + +### Checking the result of multiple tests at once + +You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in a workflow job. +If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. + +If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. + +In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. +If any of the jobs are failing, the `check-tests` job will also fail. 
+
+```yaml
+check-tests:
+  name: Check Tests
+  runs-on: ubuntu-latest
+  if: always()
+  needs:
+    - full-genesis
+    - unit-tests
+    - open-api-validation
+    - core-contracts-clarinet-test
+  steps:
+    - name: Check Tests Status
+      id: check_tests_status
+      uses: stacks-network/actions/check-jobs-status@main
+      with:
+        jobs: ${{ toJson(needs) }}
+        summary_print: "true"
+```
+
+## Triggering a workflow
+
+### Opening/Updating a PR
+
+- [Rust format](../.github/workflows/ci.yml)
+- [Create Test Cache](../.github/workflows/create-cache.yml)
+- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml)
+- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml)
+- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags
+  - Creates the following images (where branch is named `feat/fix-something` and the PR is numbered `5446`):
+    - `stacks-core:feat-fix-something`
+    - `stacks-core:pr-5446`
+
+---
+
+### Merging a branch to develop
+
+Once a PR is added to the merge queue, the target branch is merged into the source branch.
+Then, the same workflows are triggered as in the [previous step](#openingupdating-a-pr).
+
+---
+
+### Manually triggering CI workflow (any branch not named `release/X.Y.Z.A.n`)
+
+- [Rust format](../.github/workflows/ci.yml)
+- [Create Test Cache](../.github/workflows/create-cache.yml)
+- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml)
+- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml)
+- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag
+  - Creates the following images:
+    - `stacks-core:<branch-name>`
+
+---
+
+### Manually triggering CI workflow with tag on a release branch
+
+ex: running the [`CI`](../.github/workflows/ci.yml) on a branch named `release/X.Y.Z.A.n`
+
+- [Rust format](../.github/workflows/ci.yml)
+- [Create Test Cache](../.github/workflows/create-cache.yml)
+- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml)
+- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml)
+- [Atlas Tests](../.github/workflows/atlas-tests.yml)
+- [Epoch Tests](../.github/workflows/epoch-tests.yml)
+- [Slow Tests](../.github/workflows/slow-tests.yml)
+- [Github release](../.github/workflows/github-release.yml) (with artifacts/checksum) is created using the manually input tag
+- [Binaries built for specified architectures](../.github/workflows/create-source-binary.yml)
+  - Archive and checksum files will be uploaded to the versioned github release.
+- [Docker Image (Binary)](../.github/workflows/image-build-binary.yml)
+  - Built from binaries on debian/alpine distributions and pushed with the version and `latest` tags.
+  - Creates the following images:
+    - `stacks-core:X.Y.Z.A.n`
+    - `stacks-core:X.Y.Z.A.n-alpine`
+    - `stacks-core:latest`
+    - `stacks-core:latest-alpine`
+
+## Mutation Testing
+
+When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR.
+It checks the new and altered functions through mutation testing.
+Mutation testing involves making small changes (mutations) to the code to check if the tests can detect these changes.
+
+The mutations are run with or without a [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs).
+The matrix is used when there is a large number of mutations to run ([check doc specific cases](https://github.com/stacks-network/actions/blob/main/stacks-core/mutation-testing/check-packages-and-shards/README.md#outputs)).
+We utilize a matrix strategy with shards to enable parallel execution in GitHub Actions.
+This approach allows for the concurrent execution of multiple jobs across various runners.
+The total workload is divided across all shards, effectively reducing the overall duration of a workflow because the time taken is approximately the total time divided by the number of shards (+ initial build & test time).
+This is particularly advantageous for large packages that have significant build and test times, as it enhances efficiency and speeds up the process.
+
+Since mutation testing is directly correlated to the written tests, there are slower packages (due to the quantity or time it takes to run the tests) like `stackslib` or `stacks-node`.
+These mutations are run separately from the others, with one or more parallel jobs, depending on the number of mutations found.
+
+Once all the jobs have finished testing mutants, the last job collects all the tested mutations from the previous jobs, combines them and outputs them to the `Summary` section of the workflow, at the bottom of the page.
+There, you can find all mutants grouped by category, with links to the function they tested, and a short description of how to fix the issue.
+The PR should only be approved/merged after all the mutants tested are in the `Caught` category.
+
+### Time required to run the workflow based on mutants outcome and packages' size
+
+- Small packages typically completed in under 30 minutes, aided by the use of shards.
+- Large packages like stackslib and stacks-node initially required about 20-25 minutes for build and test processes.
+  - Each "missed" and "caught" mutant took approximately 15 minutes. Using shards, this meant about 50-55 minutes for processing around 32 mutants (10-16 functions modified). Every additional 8 mutants added another 15 minutes to the runtime.
+  - "Unviable" mutants, which are functions lacking a Default implementation for their returned struct type, took less than a minute each.
+  - "Timeout" mutants typically required more time. However, these should be marked to be skipped (by adding a skip flag to their header) since they indicate functions unable to proceed in their test workflow with mutated values, as opposed to the original implementations.
+
+File:
+
+- [PR Differences Mutants](../.github/workflows/pr-differences-mutants.yml)
+
+### Mutant Outcomes
+
+- caught — A test failed with this mutant applied.
+  This is a good sign about test coverage.
+
+- missed — No test failed with this mutation applied, which seems to indicate a gap in test coverage.
+  Or, it may be that the mutant is indistinguishable from the correct code.
+  In any case, you may wish to add a better test.
+
+- unviable — The attempted mutation doesn't compile.
+  This is inconclusive about test coverage, since the function's return structure may not implement `Default::default()` (one of the mutations applied), hence causing the compile to fail.
+  It is recommended to add a `Default` implementation for the return structures of these functions, and only mark the function to be skipped as a last resort.

+- timeout — The mutation caused the test suite to run for a long time, until it was eventually killed.
+  You might want to investigate the cause and only mark the function to be skipped if necessary.
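+
+To make these outcomes concrete, here is a small hypothetical sketch; the function and test below are invented for illustration and are not taken from this codebase:
+
+```rust
+/// Hypothetical helper, used only to illustrate mutant outcomes.
+fn is_positive(n: i64) -> bool {
+    n > 0
+}
+
+#[cfg(test)]
+mod tests {
+    // A mutant replacing the body of `is_positive` with `true` or `false`
+    // is *caught*: one of these assertions fails. A mutant changing `>` to
+    // `>=` would be *missed*, because 0, the only distinguishing input,
+    // is never exercised here.
+    #[test]
+    fn detects_sign() {
+        assert!(super::is_positive(1));
+        assert!(!super::is_positive(-1));
+    }
+}
+```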
+ +### Skipping Mutations + +Some functions may be inherently hard to cover with tests, for example if: + +- Generated mutants cause tests to hang. +- You've chosen to test the functionality by human inspection or some higher-level integration tests. +- The function has side effects or performance characteristics that are hard to test. +- You've decided that the function is not important to test. + +To mark functions as skipped, so they are not mutated: + +- Add a Cargo dependency of the [mutants](https://crates.io/crates/mutants) crate, version `0.0.3` or later (this must be a regular `dependency`, not a `dev-dependency`, because the annotation will be on non-test code) and mark functions with `#[mutants::skip]`, or + +- You can avoid adding the dependency by using the slightly longer `#[cfg_attr(test, mutants::skip)]`. + +### Example + +```rust +use std::time::{Duration, Instant}; + +/// Returns true if the program should stop +#[cfg_attr(test, mutants::skip)] // Returning false would cause a hang +fn should_stop() -> bool { + true +} + +pub fn controlled_loop() { + let start = Instant::now(); + for i in 0.. { + println!("{}", i); + if should_stop() { + break; + } + if start.elapsed() > Duration::from_secs(60 * 5) { + panic!("timed out"); + } + } +} + +mod test { + #[test] + fn controlled_loop_terminates() { + super::controlled_loop() + } +} +``` + +--- diff --git a/docs/community.md b/docs/community.md deleted file mode 100644 index ca842151f2d..00000000000 --- a/docs/community.md +++ /dev/null @@ -1,23 +0,0 @@ -# Community - -Beyond this Github project, -Stacks maintains a public [forum](https://forum.stacks.org) and an -open [Discord](https://discord.com/invite/XYdRyhf) channel. In addition, the project -maintains a [mailing list](https://newsletter.stacks.org/) which sends out -community announcements. - -- [Forum](https://forum.stacks.org) -- [Discord](https://discord.com/invite/XYdRyhf) -- [Telegram](https://t.me/StacksChat) -- [Newsletter](https://newsletter.stacks.org/) - -The greater Stacks community regularly hosts in-person -[meetups](https://www.meetup.com/topics/blockstack/) as well as a [calendar of Stacks ecosystem events](https://community.stacks.org/events#calendar). The project's -[YouTube channel](https://www.youtube.com/channel/UC3J2iHnyt2JtOvtGVf_jpHQ) includes -videos from some of these meetups, as well as video tutorials to help new -users get started and help developers wrap their heads around the system's -design. - -- [Meetups](https://www.meetup.com/topics/blockstack/) -- [Events Calender](https://community.stacks.org/events#calendar) -- [YouTube channel](https://www.youtube.com/channel/UC3J2iHnyt2JtOvtGVf_jpHQ) diff --git a/docs/init.md b/docs/init.md index f3b98076c65..5bf157e7211 100644 --- a/docs/init.md +++ b/docs/init.md @@ -14,9 +14,8 @@ The MacOS configuration assumes stacks-blockchain will be set up for the current ## Configuration -For an example configuration file that describes the configuration settings, -see [mainnet-follower-conf.toml](../testnet/stacks-node/conf/mainnet-follower-conf.toml). -Available configuration options are documented here: https://docs.stacks.co/references/stacks-node-configuration +For an example configuration file that describes the configuration settings, see [mainnet-follower-conf.toml](../testnet/stacks-node/conf/mainnet-follower-conf.toml). +Available configuration options are [documented here](https://docs.stacks.co/stacks-in-depth/nodes-and-miners/stacks-node-configuration). 
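+
+For quick orientation, the snippet below is an illustrative sketch of the core fields such a follower configuration sets; the values mirror the example file referenced above and are placeholders rather than recommendations:
+
+```toml
+# Values below mirror mainnet-follower-conf.toml; adjust for your own node.
+[node]
+rpc_bind = "0.0.0.0:20443"
+p2p_bind = "0.0.0.0:20444"
+
+[burnchain]
+chain = "bitcoin"
+mode = "mainnet"
+peer_host = "bitcoin.hiro.so"
+rpc_port = 8332
+peer_port = 8333
+```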
 ## Paths
diff --git a/docs/mining.md b/docs/mining.md
index e113f12d933..2a59f051a9e 100644
--- a/docs/mining.md
+++ b/docs/mining.md
@@ -9,8 +9,8 @@ you should make sure to add the following config fields to your config file:
 miner = True
 # Bitcoin private key to spend
 seed = "YOUR PRIVATE KEY"
-# How long to wait for microblocks to arrive before mining a block to confirm them (in milliseconds)
-wait_time_for_microblocks = 10000
+# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561)
+mine_microblocks = false
 # Run as a mock-miner, to test mining without spending BTC. Needs miner=True.
 #mock_mining = True
 
@@ -23,10 +23,18 @@ first_attempt_time_ms = 1000
 # Time to spend on subsequent attempts to make a block, in milliseconds.
 # This can be bigger -- new block-commits will be RBF'ed.
 subsequent_attempt_time_ms = 60000
-# Time to spend mining a microblock, in milliseconds.
-microblock_attempt_time_ms = 30000
 # Time to spend mining a Nakamoto block, in milliseconds.
 nakamoto_attempt_time_ms = 20000
+
+[burnchain]
+# Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election
+burn_fee_cap = 20000
+# Amount (in sats) per byte - Used to calculate the transaction fees
+satoshis_per_byte = 25
+# Amount of sats to add when RBF'ing bitcoin tx (default: 5)
+rbf_fee_increment = 5
+# Maximum percentage to RBF bitcoin tx (default: 150% of sats/vB)
+max_rbf = 150
 ```
 
 You can verify that your node is operating as a miner by checking its log output
diff --git a/docs/release-process.md b/docs/release-process.md
index 5e2be08b5d1..d7dfb1ea527 100644
--- a/docs/release-process.md
+++ b/docs/release-process.md
@@ -11,18 +11,16 @@
 | Linux ARMv7 | _builds are provided but not tested_ |
 | Linux ARM64 | _builds are provided but not tested_ |
 
-
 ## Release Schedule and Hotfixes
 
-Normal releases in this repository that add features such as improved RPC endpoints, improved boot-up time, new event
-observer fields or event types, etc., are released on a monthly schedule. The currently staged changes for such releases
-are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run
-a `stacks-node` from that branch, though it has received less rigorous testing than release tags. If bugs are found in
-the `develop` branch, please do report them as issues on this repository.
+Normal releases in this repository that add new features are released on a monthly schedule.
+The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop).
+It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags.
+If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository.
 
-For fixes that impact the correct functioning or liveness of the network, _hotfixes_ may be issued. These are patches
-to the main branch which are backported to the develop branch after merging. These hotfixes are categorized by priority
-according to the following rubric:
+For fixes that impact the correct functioning or liveness of the network, _hotfixes_ may be issued.
+These are patches to the main branch which are backported to the develop branch after merging.
+These hotfixes are categorized by priority according to the following rubric:
 
 - **High Priority**. 
Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. Any fix for an issue that could cause honest miners to produce invalid blocks. - **Medium Priority**. Any fix for an issue that could cause miners to waste funds. @@ -30,90 +28,72 @@ according to the following rubric: ## Versioning -This repository uses a 5 part version number. +This repository uses a 5 part version number: ``` X.Y.Z.A.n -X = 2 and does not change in practice unless there’s another Stacks 2.0 type event +X major version - does not change in practice unless there’s another Stacks 2.0 type event Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) n increments on patches and hot-fixes (akin to semantic PATCH) ``` -For example, a node operator running version `2.0.10.0.0` would not need to wipe and refresh their chainstate -to upgrade to `2.0.10.1.0` or `2.0.10.0.1`. However, upgrading to `2.0.11.0.0` would require a new chainstate. +Optionally, an extra pre-release field may be appended to the version to specify a release candidate in the format `-rc[0-9]` ## Non-Consensus Breaking Release Process -For non-consensus breaking releases, this project uses the following release process: - -1. The release must be timed so that it does not interfere with a _prepare - phase_. The timing of the next Stacking cycle can be found - [here](https://stx.eco/dao/tools?tool=2). A release should happen - at least 24 hours before the start of a new cycle, to avoid interfering - with the prepare phase. So, start by being aware of when the release can - happen. - -1. Before creating the release, the release manager must determine the _version - number_ for this release, and create a release branch in the format: `release/X.Y.Z.A.n`. - The factors that determine the version number are - discussed in [Versioning](#versioning). We assume, in this section, - that the change is not consensus-breaking. So, the release manager must first - determine whether there are any "non-consensus-breaking changes that require a - fresh chainstate". This means, in other words, that the database schema has - changed, but an automatic migration was not implemented. Then, the release manager - should determine whether this is a feature release, as opposed to a hotfix or a - patch. Given the answers to these questions, the version number can be computed. - -1. The release manager enumerates the PRs or issues that would _block_ - the release. A label should be applied to each such issue/PR as - `X.Y.Z.A.n-blocker`. The release manager should ping these - issue/PR owners for updates on whether or not those issues/PRs have - any blockers or are waiting on feedback. - -1. The release manager must update the `CHANGELOG.md` file with summaries what - was `Added`, `Changed`, and `Fixed`. The pull requests merged into `develop` - can be found - [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). Note, however, that GitHub apparently does not allow sorting by - _merge time_, so, when sorting by some proxy criterion, some care should - be used to understand which PR's were _merged_ after the last release. - -1. 
Once the blocker PRs have merged, the release manager will create a new tag
-   by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml)
-   against the `release/X.Y.Z.A.n` branch.
-
-1. Once the release candidate has been built, and docker images, etc. are available,
-   the release manager will notify various ecosystem participants to test the release
-   candidate on various staging infrastructure:
-
-   1. Stacks Foundation staging environments.
-   1. Hiro PBC testnet network.
-   1. Hiro PBC mainnet mock miner.
-
-   The release candidate should be announced in the `#stacks-core-devs` channel in the
-   Stacks Discord. For coordinating rollouts on specific infrastructure, the release
-   manager should contact the above participants directly either through e-mail or
-   Discord DM. The release manager should also confirm that the built release on the
-   [Github releases](https://github.com/stacks-network/stacks-core/releases/)
-   page is marked as `Pre-Release`.
-
-1. The release manager will test that the release candidate successfully syncs with
-   the current chain from genesis both in testnet and mainnet. This requires starting
-   the release candidate with an empty chainstate and confirming that it synchronizes
-   with the current chain tip.
-
-1. If bugs or issues emerge from the rollout on staging infrastructure, the release
-   will be delayed until those regressions are resolved. As regressions are resolved,
-   additional release candidates should be tagged. The release manager is responsible
-   for updating the `develop -> master` PR with information about the discovered issues,
-   even if other community members and developers may be addressing the discovered
-   issues.
-
-1. Once the final release candidate has rolled out successfully without issue on staging
-   infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page.
-   Announcements will then be shared in the `#stacks-core-devs` channel in the
-   Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce).
-
-1. Finally, the release branch `release/X.Y.Z.A.n` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened.
+The release must be timed so that it does not interfere with a _prepare phase_.
+The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2).
+A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase.
+
+1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning).
+
+   1. First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate".
+      - In other words, the database schema has changed, but an automatic migration was not implemented.
+      - Determine whether this is a feature release, as opposed to a hotfix or a patch.
+   2. A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`.
+
+2. Enumerate PRs and/or issues that would _block_ the release.
+
+   1. A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`.
+
+3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick commits into the release branch.
+
+   1. 
Create a feature branch from `release/X.Y.Z.A.n`, ex: `feat/X.Y.Z.A.n-pr_number`.
+   2. Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch.
+   3. Merge `feat/X.Y.Z.A.n-pr_number` back into `release/X.Y.Z.A.n`.
+
+4. Open a PR to update the `CHANGELOG.md` file in the `release/X.Y.Z.A.n` branch.
+
+   1. Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`.
+   2. Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections.
+      - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc).
+      - **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release.
+
+5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch.
+
+6. Once the release has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure.
+
+7. The release candidate will be tested to verify that it successfully syncs with the current chain from genesis both in testnet and mainnet.
+
+8. If bugs or issues emerge from the rollout on staging infrastructure, the release will be delayed until those regressions are resolved.
+
+   - As regressions are resolved, additional release candidates should be tagged.
+   - Repeat steps 3-7 as necessary.
+
+9. Once the final release candidate has rolled out successfully without issue on staging infrastructure, the tagged release shall no longer be marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page.
+   Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce).
+
+10. Finally, the following merges will happen to complete the release process:
+    1. Release branch `release/X.Y.Z.A.n` will be merged into the `master` branch.
+    2. Then, `master` will be merged back into `develop`.
+
+## Consensus Breaking Release Process
+
+Consensus breaking releases shall follow the same overall process as a non-consensus release, with the following considerations:
+
+- The release must be timed so that sufficient time is given to perform a genesis sync.
+- The release must take into account the activation height at which the new consensus rules will take effect.
+  Generally, a few weeks of lead time is required for consensus breaking changes.
diff --git a/testnet/stacks-node/conf/local-follower-conf.toml b/testnet/stacks-node/conf/local-follower-conf.toml deleted file mode 100644 index c828c183730..00000000000 --- a/testnet/stacks-node/conf/local-follower-conf.toml +++ /dev/null @@ -1,47 +0,0 @@ -[node] -rpc_bind = "127.0.0.1:30443" -p2p_bind = "127.0.0.1:30444" -bootstrap_node = "04ee0b1602eb18fef7986887a7e8769a30c9df981d33c8380d255edef003abdcd243a0eb74afdf6740e6c423e62aec631519a24cf5b1d62bf8a3e06ddc695dcb77@127.0.0.1:20444" -pox_sync_sample_secs = 10 -wait_time_for_microblocks = 0 - -[burnchain] -chain = "bitcoin" -mode = "krypton" -peer_host = "127.0.0.1" -rpc_port = 18443 -peer_port = 18444 - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] - -[[ustx_balance]] -# "mnemonic": "point approve language letter cargo rough similar wrap focus edge polar task olympic tobacco cinnamon drop lawn boring sort trade senior screen tiger climb", -# "privateKey": "539e35c740079b79f931036651ad01f76d8fe1496dbd840ba9e62c7e7b355db001", -# "btcAddress": "n1htkoYKuLXzPbkn9avC2DJxt7X85qVNCK", -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "laugh capital express view pull vehicle cluster embark service clerk roast glance lumber glove purity project layer lyrics limb junior reduce apple method pear", -# "privateKey": "075754fb099a55e351fe87c68a73951836343865cd52c78ae4c0f6f48e234f3601", -# "btcAddress": "n2ZGZ7Zau2Ca8CLHGh11YRnLw93b4ufsDR", -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "level garlic bean design maximum inhale daring alert case worry gift frequent floor utility crowd twenty burger place time fashion slow produce column prepare", -# "privateKey": "374b6734eaff979818c5f1367331c685459b03b1a2053310906d1408dc928a0001", -# "btcAddress": "mhY4cbHAFoXNYvXdt82yobvVuvR6PHeghf", -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "drop guess similar uphold alarm remove fossil riot leaf badge lobster ability mesh parent lawn today student olympic model assault syrup end scorpion lab", -# "privateKey": "26f235698d02803955b7418842affbee600fc308936a7ca48bf5778d1ceef9df01", -# "btcAddress": "mkEDDqbELrKYGUmUbTAyQnmBAEz4V1MAro", -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/local-leader-conf.toml b/testnet/stacks-node/conf/local-leader-conf.toml deleted file mode 100644 index 8e10f179d63..00000000000 --- a/testnet/stacks-node/conf/local-leader-conf.toml +++ /dev/null @@ -1,44 +0,0 @@ -[node] -rpc_bind = "127.0.0.1:20443" -p2p_bind = "127.0.0.1:20444" -seed = "0000000000000000000000000000000000000000000000000000000000000000" -local_peer_seed = "0000000000000000000000000000000000000000000000000000000000000000" -miner = true -prometheus_bind = "127.0.0.1:4000" -pox_sync_sample_secs = 10 -wait_time_for_microblocks = 0 - -[burnchain] -chain = "bitcoin" -mode = "krypton" -peer_host = "127.0.0.1" -rpc_port = 18443 -peer_port = 18444 - -[[ustx_balance]] -# "mnemonic": "point approve language letter cargo rough similar wrap focus edge polar task olympic tobacco cinnamon drop lawn boring sort trade senior screen tiger climb", -# "privateKey": "539e35c740079b79f931036651ad01f76d8fe1496dbd840ba9e62c7e7b355db001", -# "btcAddress": "n1htkoYKuLXzPbkn9avC2DJxt7X85qVNCK", -address = 
"ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "laugh capital express view pull vehicle cluster embark service clerk roast glance lumber glove purity project layer lyrics limb junior reduce apple method pear", -# "privateKey": "075754fb099a55e351fe87c68a73951836343865cd52c78ae4c0f6f48e234f3601", -# "btcAddress": "n2ZGZ7Zau2Ca8CLHGh11YRnLw93b4ufsDR", -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "level garlic bean design maximum inhale daring alert case worry gift frequent floor utility crowd twenty burger place time fashion slow produce column prepare", -# "privateKey": "374b6734eaff979818c5f1367331c685459b03b1a2053310906d1408dc928a0001", -# "btcAddress": "mhY4cbHAFoXNYvXdt82yobvVuvR6PHeghf", -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "drop guess similar uphold alarm remove fossil riot leaf badge lobster ability mesh parent lawn today student olympic model assault syrup end scorpion lab", -# "privateKey": "26f235698d02803955b7418842affbee600fc308936a7ca48bf5778d1ceef9df01", -# "btcAddress": "mkEDDqbELrKYGUmUbTAyQnmBAEz4V1MAro", -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 02379c65d96..4377993ed4f 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -1,8 +1,9 @@ [node] -# working_dir = "/dir/to/save/chainstate" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" @@ -10,7 +11,6 @@ mode = "mainnet" peer_host = "bitcoin.hiro.so" username = "hirosystems" password = "hirosystems" -rpc_port = 8332 peer_port = 8333 # Used for sending events to a local stacks-blockchain-api service diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 5b836b01c42..ee5e262d460 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -1,11 +1,14 @@ [node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +rpc_bind = "127.0.0.1:20443" +p2p_bind = "127.0.0.1:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +prometheus_bind = "127.0.0.1:9153" seed = "" local_peer_seed = "" miner = true -bootstrap_node = 
"02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) +mine_microblocks = false [burnchain] chain = "bitcoin" @@ -15,5 +18,11 @@ username = "" password = "" rpc_port = 8332 peer_port = 8333 -satoshis_per_byte = 100 +# Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 +# Amount (in sats) per byte - Used to calculate the transaction fees +satoshis_per_byte = 25 +# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +rbf_fee_increment = 5 +# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +max_rbf = 150 \ No newline at end of file diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index e3c93bfd2bf..2c98499d59e 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -1,10 +1,11 @@ [node] -# working_dir = "/dir/to/save/chainstate" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true mock_mining = true bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mocknet-follower-conf.toml b/testnet/stacks-node/conf/mocknet-follower-conf.toml deleted file mode 100644 index 3cb9beb5d78..00000000000 --- a/testnet/stacks-node/conf/mocknet-follower-conf.toml +++ /dev/null @@ -1,33 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -bootstrap_node = "04ee0b1602eb18fef7986887a7e8769a30c9df981d33c8380d255edef003abdcd243a0eb74afdf6740e6c423e62aec631519a24cf5b1d62bf8a3e06ddc695dcb77@127.0.0.1:20444" -wait_time_for_microblocks = 10000 -use_test_genesis_chainstate = true - -[burnchain] -chain = "bitcoin" -mode = "mocknet" - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] - -[[ustx_balance]] -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/mocknet-miner-conf.toml b/testnet/stacks-node/conf/mocknet-miner-conf.toml deleted file mode 100644 index 71add782b1b..00000000000 --- a/testnet/stacks-node/conf/mocknet-miner-conf.toml +++ /dev/null @@ -1,32 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = 
"0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -seed = "0000000000000000000000000000000000000000000000000000000000000000" -local_peer_seed = "0000000000000000000000000000000000000000000000000000000000000000" -miner = true -wait_time_for_microblocks = 10000 -use_test_genesis_chainstate = true - -[connection_options] -public_ip_address = "127.0.0.1:20444" - -[burnchain] -chain = "bitcoin" -mode = "mocknet" - -[[ustx_balance]] -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/prometheus.yml b/testnet/stacks-node/conf/prometheus.yml deleted file mode 100644 index ad3a063ba78..00000000000 --- a/testnet/stacks-node/conf/prometheus.yml +++ /dev/null @@ -1,13 +0,0 @@ -global: - scrape_interval: 15s - evaluation_interval: 15s -scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['127.0.0.1:9090'] - - job_name: 'stacks-node-leader' - static_configs: - - targets: ['127.0.0.1:4000'] - - job_name: 'stacks-node-follower' - static_configs: - - targets: ['127.0.0.1:5000'] diff --git a/testnet/stacks-node/conf/regtest-follower-conf.toml b/testnet/stacks-node/conf/regtest-follower-conf.toml deleted file mode 100644 index 5677551264a..00000000000 --- a/testnet/stacks-node/conf/regtest-follower-conf.toml +++ /dev/null @@ -1,37 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -bootstrap_node = "048dd4f26101715853533dee005f0915375854fd5be73405f679c1917a5d4d16aaaf3c4c0d7a9c132a36b8c5fe1287f07dad8c910174d789eb24bdfb5ae26f5f27@regtest.stacks.co:20444" -wait_time_for_microblocks = 10000 - -[burnchain] -chain = "bitcoin" -mode = "krypton" -peer_host = "bitcoin.regtest.hiro.so" -username = "hirosystems" -password = "hirosystems" -rpc_port = 18443 -peer_port = 18444 - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] - -[[ustx_balance]] -address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" -amount = 10000000000000000 diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 46c70a01985..d237aafd61f 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -1,24 +1,20 @@ [node] -# working_dir = "/dir/to/save/chainstate" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" -wait_time_for_microblocks = 10000 +prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" -mode = "xenon" +mode = "krypton" peer_host = "bitcoin.regtest.hiro.so" username = "hirosystems" password = "hirosystems" rpc_port = 18443 peer_port = 18444 - -# 
Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] +pox_prepare_length = 100 +pox_reward_length = 900 [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" @@ -35,3 +31,39 @@ amount = 10000000000000000 [[ustx_balance]] address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" amount = 10000000000000000 + +[[burnchain.epochs]] +epoch_name = "1.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.05" +start_height = 1 + +[[burnchain.epochs]] +epoch_name = "2.1" +start_height = 2 + +[[burnchain.epochs]] +epoch_name = "2.2" +start_height = 3 + +[[burnchain.epochs]] +epoch_name = "2.3" +start_height = 4 + +[[burnchain.epochs]] +epoch_name = "2.4" +start_height = 5 + +[[burnchain.epochs]] +epoch_name = "2.5" +start_height = 6 + +[[burnchain.epochs]] +epoch_name = "3.0" +start_height = 2000701 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml deleted file mode 100644 index 7e1ce1bf5ed..00000000000 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ /dev/null @@ -1,34 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -seed = "" -local_peer_seed = "" -miner = true -bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" -wait_time_for_microblocks = 10000 - -[burnchain] -chain = "bitcoin" -mode = "xenon" -peer_host = "127.0.0.1" -username = "" -password = "" -rpc_port = 18443 -peer_port = 18444 - -[[ustx_balance]] -address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" -amount = 10000000000000000 From efed50ef0319feb3e41c66d6302ca3b58c8579de Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:38:26 -0700 Subject: [PATCH 004/910] Add branching doc/minor fixes --- CONTRIBUTING.md | 269 ++++++++---------- README.md | 1 - SECURITY.md | 2 +- docs/branching.md | 35 +++ docs/ci-workflow.md | 14 +- docs/mining.md | 4 +- docs/profiling.md | 2 +- docs/release-process.md | 36 +-- .../stacks-node/conf/mainnet-miner-conf.toml | 3 +- .../conf/testnet-follower-conf.toml | 16 +- 10 files changed, 199 insertions(+), 183 deletions(-) create mode 100644 docs/branching.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 22507d6f338..53fcf8a1683 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,7 +11,7 @@ could not only have catastrophic consequences for users (i.e. they lose all their money), but also be intractable to fix, mitigate, or remove. This is because unlike nearly every other kind of networked software, **the state of the blockchain is what the users' computers -say it is.** If you want to make changes, you _must_ get _user_ +say it is.** If you want to make changes, you _must_ get _user_ buy-in, and this is necessarily time-consuming and not at all guaranteed to succeed. @@ -25,28 +25,7 @@ This project and everyone participating in it is governed by this [Code of Condu ## Development Workflow -- For typical development, branch off of the `develop` branch. 
-- For consensus breaking changes, branch off of the `next` branch. -- For hotfixes, branch off of `master`. - -If you have commit access, use a branch in this repository. If you do -not, then you must use a github fork of the repository. - -### Branch naming - -Branch names should use a prefix that conveys the overall goal of the branch: - -- `feat/some-fancy-new-thing` for new features -- `fix/some-broken-thing` for hot fixes and bug fixes -- `docs/something-needs-a-comment` for documentation -- `ci/build-changes` for continuous-integration changes -- `test/more-coverage` for branches that only add more tests -- `refactor/formatting-fix` for refactors - -The branch suffix must only include ASCII lowercase and uppercase letters, -digits, underscores, periods and dashes. - -The full branch name must be max 128 characters long. +See the branching document in [branching.md](./docs/branching.md). ### Merging PRs from Forks @@ -67,7 +46,6 @@ For an example of this process, see PRs [#3598](https://github.com/stacks-network/stacks-core/pull/3598) and [#3626](https://github.com/stacks-network/stacks-core/pull/3626). - ### Documentation Updates - Any major changes should be added to the [CHANGELOG](CHANGELOG.md). @@ -79,14 +57,17 @@ For an example of this process, see PRs to our [coding guidelines](#Coding-Guidelines). ## Git Commit Messages + Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/). The general format is as follows: + ``` [optional scope]: [optional body] [optional footer(s)] ``` + Common types include build, ci, docs, fix, feat, test, refactor, etc. When a commit is addressing or related to a particular Github issue, it @@ -97,6 +78,7 @@ fix: incorporate unlocks in mempool admitter, #3623 ``` ## Recommended developer setup + ### Recommended githooks It is helpful to set up the pre-commit git hook set up, so that Rust formatting issues are caught before @@ -104,6 +86,7 @@ you push your code. Follow these instruction to set it up: 1. Rename `.git/hooks/pre-commit.sample` to `.git/hooks/pre-commit` 2. Change the content of `.git/hooks/pre-commit` to be the following + ```sh #!/bin/sh git diff --name-only --staged | grep '\.rs$' | xargs -P 8 -I {} rustfmt {} --edition 2021 --check --config group_imports=StdExternalCrate,imports_granularity=Module || ( @@ -111,52 +94,53 @@ git diff --name-only --staged | grep '\.rs$' | xargs -P 8 -I {} rustfmt {} --edi exit 1 ) ``` + 3. Make it executable by running `chmod +x .git/hooks/pre-commit` That's it! Now your pre-commit hook should be configured on your local machine. # Creating and Reviewing PRs -This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. +This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. ## Overview -Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. 
+Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. -Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). +Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). -A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. +A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. -This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. +This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. ## Reviewer Expectations -The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the reviewer will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. +The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the reviewer will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. -Reviewers should **complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. 
If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. +Reviewers should **complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. Reviewers should make use of Github's "pending comments" feature. This ensures that the review is "atomic": when the reviewer submits the review, all the comments are published at once. -Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. +Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. -Code reviews should be timely. Reviewers should start no more than +Code reviews should be timely. Reviewers should start no more than **2 business days** after reviewers are assigned. This applies to each reviewer: i.e., we expect all reviewers to respond within two days. The `develop` and `next` branches in particular often change quickly, so letting a PR languish only creates more merge work for the -submitter. If a review cannot be started within this timeframe, then +submitter. If a review cannot be started within this timeframe, then the reviewers should **tell the submitter when they can begin**. This gives the reviewer the opportunity to keep working on the PR (if needed) or even withdraw and resubmit it. -Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. +Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. **As a reviewer, if you do not understand the PR's code or the potential consequences of the code, it is the submitter's responsibility to simplify the code, provide better documentation, or withdraw the PR.** ## Submitter Expectations -Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. 
+Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. -The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. +The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going so far as to ask in advance if a particular person could be available for review. @@ -172,13 +156,13 @@ Weekly Blockchain Engineering Meeting (information can be found in Discord). A PR submission's text should **answer the following questions** for reviewers: -* What problem is being solved by this PR? -* What does the solution do to address them? -* Why is this the best solution? What alternatives were considered, and why are they worse? -* What do reviewers need to be familiar with in order to provide useful feedback? -* What issue(s) are addressed by this PR? -* What are some hints to understanding some of the more intricate or clever parts of the PR? -* Does this PR change any database schemas? Does a node need to re-sync from genesis when this PR is applied? +- What problem is being solved by this PR? +- What does the solution do to address them? +- Why is this the best solution? What alternatives were considered, and why are they worse? +- What do reviewers need to be familiar with in order to provide useful feedback? +- What issue(s) are addressed by this PR? +- What are some hints to understanding some of the more intricate or clever parts of the PR? +- Does this PR change any database schemas? Does a node need to re-sync from genesis when this PR is applied? In addition, the PR submission should **answer the prompts of the Github template** we use for PRs. @@ -195,7 +179,7 @@ the immediate problem they are meant to solve will be rejected. #### Type simplicity -Simplicity of implementation includes simplicity of types. Type parameters +Simplicity of implementation includes simplicity of types. Type parameters and associated types should only be used if there are at least two possible implementations of those types. @@ -204,17 +188,17 @@ on its own. ### Builds with a stable Rust compiler -We use a recent, stable Rust compiler. Contributions should _not_ +We use a recent, stable Rust compiler. Contributions should _not_ require nightly Rust features to build and run. ### Minimal dependencies -Adding new package dependencies is very much discouraged. Exceptions will be +Adding new package dependencies is very much discouraged. Exceptions will be granted on a case-by-case basis, and only if deemed absolutely necessary. ### Minimal global macros -Adding new global macros is discouraged. 
Exceptions will only be given if +Adding new global macros is discouraged. Exceptions will only be given if absolutely necessary. ### No compiler warnings @@ -230,162 +214,160 @@ Contributions should not contain `unsafe` blocks if at all possible. ## Documentation -* Each file must have a **copyright statement**. -* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). -* Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. +- Each file must have a **copyright statement**. +- Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). +- Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. Within the source files, the following **code documentation** standards are expected: -* Each public function, struct, enum, and trait should have a Rustdoc comment block describing the API contract it offers. This goes for private structs and traits as well. -* Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. -* Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. +- Each public function, struct, enum, and trait should have a Rustdoc comment block describing the API contract it offers. This goes for private structs and traits as well. +- Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. +- Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. ## Factoring -* **Each non-`mod.rs` file implements at most one subsystem**. It may include multiple struct implementations and trait implementations. The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. +- **Each non-`mod.rs` file implements at most one subsystem**. It may include multiple struct implementations and trait implementations. The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. -* Directories represent collections of related but distinct subsystems. +- Directories represent collections of related but distinct subsystems. -* To the greatest extent possible, **business logic and I/O should be - separated**. 
A common pattern used in the codebase is to place the +- To the greatest extent possible, **business logic and I/O should be + separated**. A common pattern used in the codebase is to place the business logic into an "inner" function that does not do I/O, and - handle I/O reads and writes in an "outer" function. The "outer" + handle I/O reads and writes in an "outer" function. The "outer" function only does the needful I/O and passes the data into the - "inner" function. The "inner" function is often private, whereas + "inner" function. The "inner" function is often private, whereas the "outer" function is often public. For example, [`inner_try_mine_microblock` and `try_mine_microblock`](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L1148-L1216). ## Refactoring -* **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. +- **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. -* Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. +- Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. ## Databases -* If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. +- If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. -* Any changes to a database schema must also ship with a **new schema version and new schema migration logic**, as well as _test coverage_ for it. +- Any changes to a database schema must also ship with a **new schema version and new schema migration logic**, as well as _test coverage_ for it. -* The submitter must verify that **any new database columns are indexed**, as relevant to the queries performed on them. Table scans are not permitted if they can be avoided (and they almost always can be). 
You can find table scans manually by setting the environment variable `BLOCKSTACK_DB_TRACE` when running your tests (this will cause every query executed to be preceded by the output of `EXPLAIN QUERY PLAN` on it). +- The submitter must verify that **any new database columns are indexed**, as relevant to the queries performed on them. Table scans are not permitted if they can be avoided (and they almost always can be). You can find table scans manually by setting the environment variable `BLOCKSTACK_DB_TRACE` when running your tests (this will cause every query executed to be preceded by the output of `EXPLAIN QUERY PLAN` on it). -* Database changes **cannot be consensus-critical** unless part of a hard fork (see below). +- Database changes **cannot be consensus-critical** unless part of a hard fork (see below). -* If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. This genesis spin-up will be tested again before the next node release is made. +- If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. This genesis spin-up will be tested again before the next node release is made. ## Data Input -* **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. +- **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. -* **Data previously written to disk by the node is trusted.** If data loaded from the database that was previously stored by the node is invalid or corrupt, it is appropriate to panic. +- **Data previously written to disk by the node is trusted.** If data loaded from the database that was previously stored by the node is invalid or corrupt, it is appropriate to panic. -* **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. Any inputs that exceed this size _must be discarded with as little processing as possible_. +- **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. Any inputs that exceed this size _must be discarded with as little processing as possible_. -* **All input deserialization is resource-bound.** Every piece of code +- **All input deserialization is resource-bound.** Every piece of code that ingests data must impose a maximum amount of RAM and CPU - required to decode it into a structured representation. If the data + required to decode it into a structured representation. If the data does not decode with the allotted resources, then no further processing may be done and the data is discarded. For an example, see how the parsing functions in the http module use `BoundReader` and `MAX_PAYLOAD_LEN` in [http.rs](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/src/net/http.rs#L2260-L2285). -* **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. 
If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is meant by the space-bound requirement. +- **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is meant by the space-bound requirement. -* **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). +- **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). ## Non-consensus Changes to Blocks, Microblocks, Transactions, and Clarity -Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. +Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. -Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. +Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. ## Changes to the Peer Network -Any changes to the peer networking code **must be run on both mainnet and testnet before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. +Any changes to the peer networking code **must be run on both mainnet and testnet before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. Changes to the peer network should be deployed incrementally and tested by multiple parties when possible to verify that they function properly in a production setting. ## Performance Improvements -Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. +Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. For an example, see [PR #3075](https://github.com/stacks-network/stacks-core/pull/3075). ## Error Handling -* **Results must use `Error` types**. 
Fallible functions in the -codebase must use `Error` types in their `Result`s. If a new module's -errors are sufficiently different from existing `Error` types in the -codebaes, the new module must define a new `Error` type. Errors that -are caused by other `Error` types should be wrapped in a variant of -the new `Error` type. You should provide conversions via a `From` -trait implementation. +- **Results must use `Error` types**. Fallible functions in the + codebase must use `Error` types in their `Result`s. If a new module's + errors are sufficiently different from existing `Error` types in the + codebaes, the new module must define a new `Error` type. Errors that + are caused by other `Error` types should be wrapped in a variant of + the new `Error` type. You should provide conversions via a `From` + trait implementation. -* Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. +- Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. -* **Runtime panics should be used sparingly**. Generally speaking, a runtime panic is only appropriate if there is no reasonable way to recover from the error condition. For example, this includes (but is not limited to) disk I/O errors, database corruption, and unreachable code. +- **Runtime panics should be used sparingly**. Generally speaking, a runtime panic is only appropriate if there is no reasonable way to recover from the error condition. For example, this includes (but is not limited to) disk I/O errors, database corruption, and unreachable code. -* If a runtime panic is desired, it **must have an appropriate error message**. +- If a runtime panic is desired, it **must have an appropriate error message**. ## Logging -* Log messages should be informative and context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. +- Log messages should be informative and context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. -* **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). +- **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). -* Use **structured logging** to include dynamic data in your log entry. For example, `info!("Append block"; "block_id" => %block_id)` as opposed to `info!("Append block with block_id = {}", block_id)`. +- Use **structured logging** to include dynamic data in your log entry. For example, `info!("Append block"; "block_id" => %block_id)` as opposed to `info!("Append block with block_id = {}", block_id)`. -* Use `trace!()` and `test_debug!()` liberally. It only runs in tests. +- Use `trace!()` and `test_debug!()` liberally. It only runs in tests. -* Use `debug!()` for information that is relevant for diagnosing problems at runtime. This is off by default, but can be turned on with the `BLOCKSTACK_DEBUG` environment variable. +- Use `debug!()` for information that is relevant for diagnosing problems at runtime. 
This is off by default, but can be turned on with the `BLOCKSTACK_DEBUG` environment variable. -* Use `info!()` sparingly. +- Use `info!()` sparingly. -* Use `warn!()` or `error!()` only when there really is a problem. +- Use `warn!()` or `error!()` only when there really is a problem. ## Consensus-Critical Code -A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. +A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. -* **All changes to consensus-critical code must be opened against `next`**. It is _never acceptable_ to open them against `develop` or `master`. +- **All changes to consensus-critical code must be opened against `next`**. It is _never acceptable_ to open them against `develop` or `master`. -* **All consensus-critical changes must be gated on the Stacks epoch**. They may only take effect once the system enters a specific epoch (and this must be documented). +- **All consensus-critical changes must be gated on the Stacks epoch**. They may only take effect once the system enters a specific epoch (and this must be documented). A non-exhaustive list of examples of consensus-critical changes include: -* Adding or changing block, microblock, or transaction wire formats -* Changing the criteria under which a burnchain operation will be accepted by the node -* Changing the data that gets stored to a MARF key/value pair in the Clarity or Stacks chainstate MARFs -* Changing the order in which data gets stored in the above -* Adding, changing, or removing Clarity functions -* Changing the cost of a Clarity function -* Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. +- Adding or changing block, microblock, or transaction wire formats +- Changing the criteria under which a burnchain operation will be accepted by the node +- Changing the data that gets stored to a MARF key/value pair in the Clarity or Stacks chainstate MARFs +- Changing the order in which data gets stored in the above +- Adding, changing, or removing Clarity functions +- Changing the cost of a Clarity function +- Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. ## Testing -* **Unit tests should focus on the business logic with mocked data**. To the greatest extent possible, each error path should be tested _in addition to_ the success path. A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. +- **Unit tests should focus on the business logic with mocked data**. To the greatest extent possible, each error path should be tested _in addition to_ the success path. 
A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. -* **Unit tests should verify that the I/O code paths work**, but do so in a way that does not "clobber" other tests or prevent other tests from running in parallel (if it can be avoided). This means that unit tests should use their own directories for storing transient state (in `/tmp`), and should bind on ports that are not used anywhere else. +- **Unit tests should verify that the I/O code paths work**, but do so in a way that does not "clobber" other tests or prevent other tests from running in parallel (if it can be avoided). This means that unit tests should use their own directories for storing transient state (in `/tmp`), and should bind on ports that are not used anywhere else. -* If randomness is needed, **tests should use a seeded random number generator if possible**. This ensures that they will reliably pass in CI. +- If randomness is needed, **tests should use a seeded random number generator if possible**. This ensures that they will reliably pass in CI. -* When testing a consensus-critical code path, the test coverage should verify that the new behavior is only possible within the epoch(s) in which the behavior is slated to activate. Above all else, **backwards-compatibility is a hard requirement.** +- When testing a consensus-critical code path, the test coverage should verify that the new behavior is only possible within the epoch(s) in which the behavior is slated to activate. Above all else, **backwards-compatibility is a hard requirement.** -* **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. +- **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. -* Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. +- Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel (which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`. A test should be marked `#[ignore]` if: - 1. It does not _always_ pass `cargo test` in a vanilla environment - (i.e., it does not need to run with `--test-threads 1`). - - 2. Or, it runs for over a minute via a normal `cargo test` execution - (the `cargo test` command will warn if this is not the case). - +1. It does not _always_ pass `cargo test` in a vanilla environment + (i.e., it does not need to run with `--test-threads 1`). +2. Or, it runs for over a minute via a normal `cargo test` execution + (the `cargo test` command will warn if this is not the case). ## Formatting @@ -406,17 +388,18 @@ cargo fmt-stacks ``` ## Comments + Comments are very important for the readability and correctness of the codebase. The purpose of comments is: -* Allow readers to understand the roles of components and functions without having to check how they are used. -* Allow readers to check the correctness of the code against the comments. -* Allow readers to follow tests. 
+- Allow readers to understand the roles of components and functions without having to check how they are used. +- Allow readers to check the correctness of the code against the comments. +- Allow readers to follow tests. In the limit, if there are no comments, the problems that arise are: -* Understanding one part of the code requires understanding *many* parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. -* The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. -* The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. +- Understanding one part of the code requires understanding _many_ parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. +- The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. +- The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. ### Comment Formatting @@ -430,14 +413,13 @@ Comments are to be formatted in typical `rust` style, specifically: - When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: - ```rust - # Errors - * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. - ``` + ```rust + # Errors + * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. + ``` ### Content of Comments - #### Component Comments Comments for a component (`struct`, `trait`, or `enum`) should explain what the overall @@ -485,7 +467,7 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { This comment is considered positive because it explains the contract of the function in pseudo-code. Someone who understands the constructs mentioned could, e.g., write a test for this method from this description. -#### Comments on Implementations of Virtual Methods +#### Comments on Implementations of Virtual Methods Note that, if a function implements a virtual function on an interface, the comments should not repeat what was specified on the interface declaration. The comment should only add information specific to that implementation. @@ -507,7 +489,7 @@ pub struct ReadOnlyChecker<'a, 'b> { defined_functions: HashMap, ``` -This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is *read-only*, whereas this cannot be gotten from the signature alone. +This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is _read-only_, whereas this cannot be gotten from the signature alone. 
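As an additional illustration of the member-comment guidance above, a field comment that states an invariant the type signature cannot convey might look like the following hedged sketch. The `SortitionSnapshot` struct and `burn_block_height` field are hypothetical examples, not code from this repository.

```rust
/// Hypothetical snapshot of a single sortition, used only to illustrate
/// member-level documentation.
pub struct SortitionSnapshot {
    /// Height of the burnchain (Bitcoin) block at which this snapshot was
    /// taken. Invariant: strictly greater than the parent snapshot's height,
    /// since every sortition consumes exactly one burnchain block.
    pub burn_block_height: u64,
}
```

The comment adds only what the name and type cannot express: the unit of the height and the invariant relating it to the parent snapshot.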
#### Test Comments @@ -543,14 +525,14 @@ This comment is considered positive because it explains the purpose of the test Contributors should strike a balance between commenting "too much" and commenting "too little". Commenting "too much" primarily includes commenting things that are clear from the context. Commenting "too little" primarily includes writing no comments at all, or writing comments that leave important questions unresolved. -Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are *not* always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. +Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are _not_ always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. ### Don't Restate Names in Comments The contracts of functions should be specified precisely enough that tests could be written looking only at the declaration and the comments (and without looking at the definition!). However: -* **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** -* **the author should only state information that is new** +- **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** +- **the author should only state information that is new** So, if a function and its variables have very descriptive names, then there may be nothing to add in the comments at all! @@ -561,7 +543,7 @@ So, if a function and its variables have very descriptive names, then there may ``` fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()> ``` -This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, *do* add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. +This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, _do_ add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. **Good Example** @@ -573,39 +555,40 @@ This is considered bad because the function name already says "append transactio fn append_transaction_to_block(transaction:Transaction, block:&mut Block) -> Result<()> ``` -This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration. +This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the comment just adds elements of the contract that are not implicit in the declaration. ### Do's and Don'ts of Comments -*Don't* over-comment by documenting things that are clear from the context.
E.g.: +_Don't_ over-comment by documenting things that are clear from the context. E.g.: - Don't document the types of inputs or outputs, since these are parts of the type signature in `rust`. - Don't necessarily document standard "getters" and "setters", like `get_clarity_version()`, unless there is unexpected information to add with the comment. - Don't explain that a specific test does type-checking, if it is in a file that is dedicated to type-checking. -*Do* document things that are not clear, e.g.: +_Do_ document things that are not clear, e.g.: - For a function called `process_block`, explain what it means to "process" a block. - For a function called `process_block`, make clear whether we mean anchored blocks, microblocks, or both. - For a function called `run`, explain the steps involved in "running". - For a function that takes arguments `peer1` and `peer2`, explain the difference between the two. - For a function that takes an argument `height`, either explain in the comment what this is the _height of_, or expand the variable name to remove the ambiguity. - For a test, document what it is meant to test, and why the expected answers are, in fact, expected. ### Changing Code Instead of Comments Keep in mind that better variable names can reduce the need for comments, e.g.: -* `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -* `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks -* `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment +- `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height +- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to explain that the inputs are microblocks +- `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment # Licensing and contributor license agreement -`stacks-core` is released under the terms of the GPL version 3. Contributions -that are not licensed under compatible terms will be rejected. Moreover, +`stacks-core` is released under the terms of the GPL version 3. Contributions +that are not licensed under compatible terms will be rejected. Moreover, contributions will not be accepted unless _all_ authors accept the project's contributor license agreement. ## Use of AI-code Generation + The Stacks Foundation has a very strict policy of not accepting AI-generated code PRs due to uncertainty about licensing issues.
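As a companion to the commenting guidance above, here is a minimal, self-contained sketch of the style it describes. Every type and name below is invented for illustration (nothing here is from the stacks-core codebase): the descriptive function name carries the "what", and the doc comment adds only the error contract, using the `# Errors` section format described under Comment Formatting.

```rust
// Hypothetical illustration only; these types are invented for the example
// and do not exist in stacks-core.

const MAX_BLOCK_SIZE: usize = 2;

struct Transaction;

#[derive(Default)]
struct Microblock {
    transactions: Vec<Transaction>,
}

#[derive(Debug)]
struct BlockTooBigError;

/// Appends `transaction` to `microblock`'s transaction list.
///
/// # Errors
///
/// * `BlockTooBigError`: returned if `microblock` already holds
///   `MAX_BLOCK_SIZE` transactions; `microblock` is left unchanged.
fn add_transaction_to_microblock(
    transaction: Transaction,
    microblock: &mut Microblock,
) -> Result<(), BlockTooBigError> {
    if microblock.transactions.len() >= MAX_BLOCK_SIZE {
        return Err(BlockTooBigError);
    }
    microblock.transactions.push(transaction);
    Ok(())
}

fn main() {
    let mut microblock = Microblock::default();
    assert!(add_transaction_to_microblock(Transaction, &mut microblock).is_ok());
}
```

Note how the comment never restates "add transaction to microblock"; it only adds the condition that leads to an error, which is exactly the information the signature cannot express.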
diff --git a/README.md b/README.md index 6cdb42857f4..0279b25116f 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,6 @@ Stacks is a layer-2 blockchain that uses Bitcoin as a base layer for security an [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg?style=flat)](https://www.gnu.org/licenses/gpl-3.0) [![Release](https://img.shields.io/github/v/release/stacks-network/stacks-core?style=flat)](https://github.com/stacks-network/stacks-core/releases/latest) -[![Build Status](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml/badge.svg?branch=master&event=workflow_dispatch&style=flat)](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml?query=event%3Aworkflow_dispatch+branch%3Amaster) [![Discord Chat](https://img.shields.io/discord/621759717756370964.svg)](https://stacks.chat) ## Building diff --git a/SECURITY.md b/SECURITY.md index e59229b3a1a..e9001abe0ac 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,7 +2,7 @@ ## Supported Versions -Please see [Releases](https://github.com/stacks-network/stacks-blockchain/releases). It is recommended to use the [most recently released version](https://github.com/stacks-network/stacks-blockchain/releases/latest). +Please see [Releases](https://github.com/stacks-network/stacks-core/releases). It is recommended to use the [most recently released version](https://github.com/stacks-network/stacks-core/releases/latest). ## Reporting a vulnerability diff --git a/docs/branching.md b/docs/branching.md new file mode 100644 index 00000000000..38db57e3e54 --- /dev/null +++ b/docs/branching.md @@ -0,0 +1,35 @@ +# Git Branching + +The following is a modified version of the gitflow branching strategy described in <https://nvie.com/posts/a-successful-git-branching-model/> + +## Main Branches + +- **master** - `origin/master` is the main branch where the source code of HEAD always reflects a production-ready state. +- **develop** - `origin/develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. +- **next** - `origin/next` may contain consensus-breaking changes. +- **release/X.Y.Z.A.n** is the release branch. +When the source code in the develop branch reaches a stable point and is ready to be released, a release branch is created as `release/X.Y.Z.A.n` (see [release-process.md](./release-process.md)). +After release, the following will happen: +- `release/X.Y.Z.A.n` branch is merged back to `origin/master`. +- `origin/master` is then merged into `origin/develop`, and development continues in the `origin/develop` branch. +- `origin/develop` is then merged into `origin/next`. +## Supporting Branches +Branch names should use a prefix that conveys the overall goal of the branch. +All branches should be based off of `origin/develop`, with the exception being a hotfix branch which may be based off of `origin/master`. +- `feat/some-fancy-new-thing`: For new features. +- `fix/some-broken-thing`: For hot fixes and bug fixes. +- `chore/some-update`: Any non-code related change (ex: updating CHANGELOG.md, adding comments to code). +- `docs/something-needs-a-comment`: For documentation. +- `ci/build-changes`: For continuous-integration changes. +- `test/more-coverage`: For branches that only add more tests. +- `refactor/formatting-fix`: For refactors of the codebase. +The full branch name **must**: +- Have a maximum of 128 characters. +- Only include ASCII lowercase and uppercase letters, digits, underscores, periods and dashes.
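The branch-name rules in the new docs/branching.md above are mechanical enough to check automatically. The following is a rough, hypothetical sketch of such a check: the supporting-branch prefix list and the character set come from the document, while the helper itself is not part of the repository (and it deliberately also allows the `/` that separates the prefix from the rest of the name).

```rust
// Hypothetical validator for the supporting-branch naming rules above.
// Not part of stacks-core; release/hotfix branch patterns are out of scope.

const ALLOWED_PREFIXES: [&str; 7] =
    ["feat/", "fix/", "chore/", "docs/", "ci/", "test/", "refactor/"];

fn is_valid_branch_name(name: &str) -> bool {
    // Supporting branches should start with one of the goal-conveying prefixes.
    let has_known_prefix = ALLOWED_PREFIXES.iter().any(|p| name.starts_with(p));
    // The full name must have a maximum of 128 characters.
    let within_length_limit = name.len() <= 128;
    // Only ASCII letters, digits, underscores, periods, and dashes are allowed
    // (plus the '/' separating the prefix from the rest of the name).
    let charset_ok = name
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || matches!(c, '_' | '.' | '-' | '/'));
    has_known_prefix && within_length_limit && charset_ok
}

fn main() {
    assert!(is_valid_branch_name("feat/some-fancy-new-thing"));
    assert!(!is_valid_branch_name("feature/has spaces and a bad prefix"));
}
```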
diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md index df63ee8fa04..16d020985da 100644 --- a/docs/ci-workflow.md +++ b/docs/ci-workflow.md @@ -1,4 +1,4 @@ -# CI Workflows +# CI Workflow All releases are built via a Github Actions workflow named [`CI`](../.github/workflows/ci.yml), which is responsible for: @@ -11,11 +11,11 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor 1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`). 2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. - Tests can be retried quickly since the cache will persist until the cleanup job is run. + Tests can be retried quickly since the cache will persist until the cleanup job is run or the cache is evicted. 3. [Nextest](https://nexte.st/) is used to run the tests from a cached build archive file (using commit sha as the cache key). - - Two [test archives](https://nexte.st/docs/ci-features/archiving/) are created, one for genesis tests and one for generic tests. + - Two [test archives](https://nexte.st/docs/ci-features/archiving/) are created, one for genesis tests and one for non-genesis tests. - Unit-tests are [partitioned](https://nexte.st/docs/ci-features/partitioning/) and parallelized to speed up execution time. -4. Most workflow steps are called from a separate actions repo to reduce duplication. +4. Most workflow steps are called from a separate actions repo to enforce DRY. ## TL;DR @@ -55,7 +55,7 @@ Partitions (shards) are used when there is a large and unknown number of tests t There is also a workflow designed to run tests that is manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via a conditional. -For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). +For example, selecting `Epoch Tests` will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. ### Adding/changing tests @@ -105,7 +105,7 @@ If any of the tests given to the action (JSON string of `needs` field) fails, th If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. -In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. +In the following example, `unit-tests` is a matrix job from [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. If any of the jobs are failing, the `check-tests` job will also fail. ```yaml @@ -145,7 +145,7 @@ check-tests: ### Merging a branch to develop Once a PR is added to the merge queue, the target branch is merged into the source branch. -Then, the same workflows are triggered as in the [previous step](#opening-a-pr-against-develop). +Then, the same workflows are triggered as in the [previous step](#openingupdating-a-pr).
--- diff --git a/docs/mining.md b/docs/mining.md index 2a59f051a9e..8b40eb8cc87 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -1,7 +1,7 @@ # Stacks Mining Stacks tokens (STX) are mined by transferring BTC via PoX. To run as a miner, -you should make sure to add the following config fields to your config file: +you should make sure to add the following config fields to your [config file](../testnet/stacks-node/conf/mainnet-miner-conf.toml): ```toml [node] @@ -80,4 +80,4 @@ Estimates are then randomly "fuzzed" using uniform random fuzz of size up to ## Further Reading - [stacksfoundation/miner-docs](https://github.com/stacksfoundation/miner-docs) -- [Mining Documentation](https://docs.stacks.co/docs/nodes-and-miners/miner-mainnet) +- [Mining Documentation](https://docs.stacks.co/stacks-in-depth/nodes-and-miners/mine-mainnet-stacks-tokens) diff --git a/docs/profiling.md b/docs/profiling.md index 3e43cf9b633..26d1c119aeb 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -9,7 +9,7 @@ This document describes several techniques to profile (i.e. find performance bot - generating flame graphs, and - profiling sqlite queries. -Note that all bash commands in this document are run from the stacks-blockchain repository root directory. +Note that all bash commands in this document are run from the [stacks-core repository](https://github.com/stacks-network/stacks-core) root directory. ## Logging tips diff --git a/docs/release-process.md b/docs/release-process.md index d7dfb1ea527..b9e2be87481 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -14,8 +14,8 @@ ## Release Schedule and Hotfixes Normal releases in this repository that add new features are released on a monthly schedule. -The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-blockchain/tree/develop). -It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags. +The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). +It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags or the [master branch](https://github.com/stacks-network/stacks-core/tree/master). If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. For fixes that impact the correct functioning or liveness of the network, _hotfixes_ may be issued. @@ -40,7 +40,7 @@ A increments on non-consensus-breaking changes that do not require a fresh chain n increments on patches and hot-fixes (akin to semantic PATCH) ``` -Optionally, an extra pre-release field may be appended to the version to specify a release candidate in the format `-rc[0-9]` +Optionally, an extra pre-release field may be appended to the version to specify a release candidate in the format `-rc[0-9]`. ## Non-Consensus Breaking Release Process @@ -50,31 +50,32 @@ A release should happen at least 24 hours before the start of a new cycle, to av 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). - 1. First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". + - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". 
- In other words, the database schema has changed, but an automatic migration was not implemented. - Determine whether this is a feature release, as opposed to a hotfix or a patch. - 2. A new branch in format `release/X.Y.Z.A.n` is created from the base branch `develop`. + - A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`. 2. Enumerate PRs and/or issues that would _block_ the release. - 1. A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. + - A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. -3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick commits into the release branch. +3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick some commits into the release branch. - 1. Create a feature branch from `release/X.Y.Z.A.n`, ex: `feat/X.Y.Z.A.n-pr_number`. - 2. Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch - 3. Merge `feat/X.Y.Z.A.n-pr_number` back into `release/X.Y.Z.A.n`. + - Create a feature branch from `release/X.Y.Z.A.n`, ex: `feat/X.Y.Z.A.n-pr_number`. + - Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch + - Merge `feat/X.Y.Z.A.n-pr_number` into `release/X.Y.Z.A.n`. 4. Open a PR to update the `CHANGELOG.md` file in the `release/X.Y.Z.A.n` branch. - 1. Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - 2. Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. + - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. + - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). - - **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. + + **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. -5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. +5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` workflow](../.github/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. -6. Once the release has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. +6. Once the release candidate has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. 7. The release candidate will be tested to verify that it successfully syncs with the current chain from genesis both in testnet and mainnet. @@ -87,8 +88,8 @@ A release should happen at least 24 hours before the start of a new cycle, to av Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). 10. Finally, the following merges will happen to complete the release process: - 1.
Release branch `release/X.Y.Z.A.n` will be merged into the `master` branch. - 2. Then, `master` will be merged back into `develop`. + - Release branch `release/X.Y.Z.A.n` will be merged into the `master` branch. + - Then, `master` will be merged into `develop`. ## Consensus Breaking Release Process @@ -96,4 +97,3 @@ Consensus breaking releases shall follow the same overall process as a non-conse - The release must be timed so that sufficient time is given to perform a genesis sync. - The release must take into account the activation height at which the new consensus rules will take effect. - Generically, a few weeks lead time is required for consensus breaking changes. diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index ee5e262d460..3fdf293a4f4 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -7,8 +7,7 @@ prometheus_bind = "127.0.0.1:9153" seed = "" local_peer_seed = "" miner = true -# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) -mine_microblocks = false +mine_microblocks = false # Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index d237aafd61f..376f669893d 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -38,32 +38,32 @@ start_height = 0 [[burnchain.epochs]] epoch_name = "2.0" -start_height = 0 +start_height = 230 [[burnchain.epochs]] epoch_name = "2.05" -start_height = 1 +start_height = 240 [[burnchain.epochs]] epoch_name = "2.1" -start_height = 2 +start_height = 240 [[burnchain.epochs]] epoch_name = "2.2" -start_height = 3 +start_height = 241 [[burnchain.epochs]] epoch_name = "2.3" -start_height = 4 +start_height = 242 [[burnchain.epochs]] epoch_name = "2.4" -start_height = 5 +start_height = 243 [[burnchain.epochs]] epoch_name = "2.5" -start_height = 6 +start_height = 244 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 +start_height = 2_000_250 \ No newline at end of file From 890ab288bd435295fee079041ba8034379fa7351 Mon Sep 17 00:00:00 2001 From: Dean Chi <21262275+deantchi@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:43:22 -0700 Subject: [PATCH 005/910] testnet-docs: update conf --- .../stacks-node/conf/testnet-follower-conf.toml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 376f669893d..b327fbb0018 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -38,32 +38,32 @@ start_height = 0 [[burnchain.epochs]] epoch_name = "2.0" -start_height = 230 +start_height = 0 [[burnchain.epochs]] epoch_name = "2.05" -start_height = 240 +start_height = 1 [[burnchain.epochs]] epoch_name = "2.1" -start_height = 240 +start_height = 2 [[burnchain.epochs]] epoch_name = "2.2" -start_height = 241 +start_height = 3 [[burnchain.epochs]] epoch_name = "2.3" -start_height = 242 +start_height = 4 [[burnchain.epochs]] epoch_name = "2.4" -start_height = 243 +start_height = 5 [[burnchain.epochs]] epoch_name = "2.5" -start_height = 244 +start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2_000_250 \ No newline at end of 
file +start_height = 2000701 \ No newline at end of file From 172a0fdd402b00354e64291c83168097b237e5ff Mon Sep 17 00:00:00 2001 From: Dean Chi <21262275+deantchi@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:43:22 -0700 Subject: [PATCH 006/910] testnet-docs: update conf --- .../stacks-node/conf/testnet-follower-conf.toml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 376f669893d..b327fbb0018 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -38,32 +38,32 @@ start_height = 0 [[burnchain.epochs]] epoch_name = "2.0" -start_height = 230 +start_height = 0 [[burnchain.epochs]] epoch_name = "2.05" -start_height = 240 +start_height = 1 [[burnchain.epochs]] epoch_name = "2.1" -start_height = 240 +start_height = 2 [[burnchain.epochs]] epoch_name = "2.2" -start_height = 241 +start_height = 3 [[burnchain.epochs]] epoch_name = "2.3" -start_height = 242 +start_height = 4 [[burnchain.epochs]] epoch_name = "2.4" -start_height = 243 +start_height = 5 [[burnchain.epochs]] epoch_name = "2.5" -start_height = 244 +start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2_000_250 \ No newline at end of file +start_height = 2000701 \ No newline at end of file From 17e39e0d536fc1395cb4a304699a12a906caccce Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:03:36 -0700 Subject: [PATCH 007/910] placeholder for testnet miner config --- .../stacks-node/conf/testnet-miner-conf.toml | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 testnet/stacks-node/conf/testnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml new file mode 100644 index 00000000000..f3a49a33d4c --- /dev/null +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -0,0 +1,77 @@ +[node] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +rpc_bind = "0.0.0.0:20443" +p2p_bind = "0.0.0.0:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" +prometheus_bind = "0.0.0.0:9153" + +[burnchain] +chain = "bitcoin" +mode = "krypton" +peer_host = "127.0.0.1" +username = "" +password = "" +rpc_port = 18443 +peer_port = 18444 +pox_prepare_length = 100 +pox_reward_length = 900 +# Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election +burn_fee_cap = 20000 +# Amount (in sats) per byte - Used to calculate the transaction fees +satoshis_per_byte = 25 +# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +rbf_fee_increment = 5 +# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +max_rbf = 150 + +[[ustx_balance]] +address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" +amount = 10000000000000000 + +[[burnchain.epochs]] +epoch_name = "1.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.05" +start_height = 1 + +[[burnchain.epochs]] 
+epoch_name = "2.1" +start_height = 2 + +[[burnchain.epochs]] +epoch_name = "2.2" +start_height = 3 + +[[burnchain.epochs]] +epoch_name = "2.3" +start_height = 4 + +[[burnchain.epochs]] +epoch_name = "2.4" +start_height = 5 + +[[burnchain.epochs]] +epoch_name = "2.5" +start_height = 6 + +[[burnchain.epochs]] +epoch_name = "3.0" +start_height = 2000701 \ No newline at end of file From b7471578ae34e6276ebf4f809c146f0a3d8f53f3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:47:12 -0700 Subject: [PATCH 008/910] Updating stacks-signer release process --- docs/release-process.md | 2 +- stacks-signer/release-process.md | 88 +++++++++++++++++--------------- 2 files changed, 48 insertions(+), 42 deletions(-) diff --git a/docs/release-process.md b/docs/release-process.md index b9e2be87481..ac7bd60d56a 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -65,7 +65,7 @@ A release should happen at least 24 hours before the start of a new cycle, to av - Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch - Merge `feat/X.Y.Z.A.n-pr_number` into `release/X.Y.Z.A.n`. -4. Open a PR to update the `CHANGELOG.md` file in the `release/X.Y.Z.A.n` branch. +4. Open a PR to update the [CHANGELOG](../CHANGELOG.md) file in the `release/X.Y.Z.A.n` branch. - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 599d8c7af40..bb0f36fadfb 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -11,27 +11,30 @@ | Linux ARMv7 | _builds are provided but not tested_ | | Linux ARM64 | _builds are provided but not tested_ | - ## Release Schedule and Hotfixes -Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. The currently staged changes for such releases -are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. +Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. +The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). +It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. +If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. For fixes that impact the correct functioning or liveness of the signer, _hotfixes_ may be issued. These hotfixes are categorized by priority according to the following rubric: -- **High Priority**. Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. -- **Medium Priority**. ny fix for an issue that could deny service to individual nodes. -- **Low Priority**. Any fix for an issue that is not high or medium priority. +- **High Priority**. 
Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. +- **Medium Priority**. Any fix for an issue that could deny service to individual nodes. +- **Low Priority**. Any fix for an issue that is not high or medium priority. ## Versioning -This project uses a 6 part version number. When there is a stacks-core release, `stacks-signer` will assume the same version as the tagged `stacks-core` release (5 part version). When there are changes in-between stacks-core releases, the signer binary will assume a 6 part version. +This project uses a 6 part version number. +When there is a stacks-core release, `stacks-signer` will assume the same version as the tagged `stacks-core` release ([5 part version](../docs/release-process.md#versioning)). +When there are changes in-between `stacks-core` releases, the `stacks-signer` binary will assume a 6 part version: ``` X.Y.Z.A.n.x -X = 2 and does not change in practice unless there’s another Stacks 2.0 type event +X major version - does not change in practice unless there’s another Stacks 2.0 type event Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) n increments on patches and hot-fixes (akin to semantic PATCH) x increments on the current stacks-core release version ``` -For example, if there is a stacks-core release of 2.6.0.0.0, `stacks-signer` will also be versioned as 2.6.0.0.0. If a change is needed in the signer, it may be released apart from the stacks-core as version 2.6.0.0.0.1 and will increment until the next stacks-core release. +## Non-Consensus Breaking Release Process + +The release must be timed so that it does not interfere with a _prepare phase_. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). +A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. + +1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). + + - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". + - In other words, the database schema has changed, but an automatic migration was not implemented. + - Determine whether this is a feature release, as opposed to a hotfix or a patch. + - A new branch in the format `release/signer-X.Y.Z.A.n.x` is created from the base branch `develop`. + +2. Enumerate PRs and/or issues that would _block_ the release. + + - A label should be applied to each such issue/PR as `signer-X.Y.Z.A.n.x-blocker`. + +3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick some commits into the release branch. -## Release Process + - Create a feature branch from `release/signer-X.Y.Z.A.n.x`, ex: `feat/signer-X.Y.Z.A.n.x-pr_number`. + - Add cherry-picked commits to the `feat/signer-X.Y.Z.A.n.x-pr_number` branch + - Merge `feat/signer-X.Y.Z.A.n.x-pr_number` into `release/signer-X.Y.Z.A.n.x`. +4. Open a PR to update the [CHANGELOG](./CHANGELOG.md) file in the `release/signer-X.Y.Z.A.n.x` branch. -1.
The release must be timed so that it does not interfere with a _prepare - phase_. The timing of the next Stacking cycle can be found - [here](https://stx.eco/dao/tools?tool=2). A release should happen - at least 48 hours before the start of a new cycle, to avoid interfering - with the prepare phase. + - Create a chore branch from `release/signer-X.Y.Z.A.n.x`, ex: `chore/signer-X.Y.Z.A.n.x-changelog`. + - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. -2. Before creating the release, the release manager must determine the _version - number_ for this release, and create a release branch in the format: `release/signer-X.Y.Z.A.n.x`. - The factors that determine the version number are discussed in [Versioning](#versioning). + - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). -3. _Blocking_ PRs or issues are enumerated and a label should be applied to each - issue/PR such as `signer-X.Y.Z.A.n.x-blocker`. The Issue/PR owners for each should be pinged - for updates on whether or not those issues/PRs have any blockers or are waiting on feedback. - __Note__: It may be necessary to cherry-pick these PR's into the target branch `release/signer-X.Y.Z.A.n.x` + **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. -4. The [CHANGELOG.md](./CHANGELOG.md) file shall be updated with summaries of what - was `Added`, `Changed`, and `Fixed` in the base branch. For example, pull requests - merged into `develop` can be found [here](https://github.com/stacks-network/stacks-blockchain/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). - Note, however, that GitHub apparently does not allow sorting by _merge time_, - so, when sorting by some proxy criterion, some care should be used to understand - which PR's were _merged_ after the last release. +5. Once `chore/signer-X.Y.Z.A.n.x-changelog` has merged, a build may be started by manually triggering the [`CI` workflow](../.github/workflows/ci.yml) against the `release/signer-X.Y.Z.A.n.x` branch. -5. Once any blocker PRs have merged, a new tag will be created - by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) - against the `release/signer-X.Y.Z.A.n.x` branch. +6. Once the release candidate has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. -6. Ecosystem participants will be notified of the release candidate in order - to test the release on various staging infrastructure. +7. If bugs or issues emerge from the rollout on staging infrastructure, the release will be delayed until those regressions are resolved. -7. If bugs or issues emerge from the rollout on staging infrastructure, the release - will be delayed until those regressions are resolved. As regressions are resolved, - additional release candidates shall be tagged. + - As regressions are resolved, additional release candidates should be tagged. + - Repeat steps 3-6 as necessary. -8. Once the final release candidate has rolled out successfully without issue on staging - infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-blockchain/releases/) - page. 
Announcements will then be shared in the `#stacks-core-devs` channel in the - Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). +8. Once the final release candidate has rolled out successfully without issue on staging infrastructure, the tagged release shall no longer be marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page. + Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). -9. Finally, the release branch `release/signer-X.Y.Z.A.n.x` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. +9. Finally, the following merges will happen to complete the release process: + - Release branch `release/signer-X.Y.Z.A.n.x` will be merged into the `master` branch. + - Then, `master` will be merged into `develop`. From 269e5c029d2c99bd19966f10d4e96127b42729d6 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:09:20 -0700 Subject: [PATCH 009/910] Update stacks-signer/release-process.md Co-authored-by: Adriano Di Luzio --- stacks-signer/release-process.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 556aa811c0f..8d563320817 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -13,8 +13,7 @@ ## Release Schedule and Hotfixes -Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. -The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). +`stacks-signer` releases that add new or updated features shall be released in an ad-hoc manner. It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. From 696e78f4e79bd53482e5a6ebede67d46fad36d55 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:16:03 -0700 Subject: [PATCH 010/910] Addressing PR comments --- docs/branching.md | 14 +++++++------- docs/release-process.md | 13 ++++++------- stacks-signer/release-process.md | 4 +--- 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/docs/branching.md b/docs/branching.md index 38db57e3e54..5b9a96b12ab 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -4,22 +4,22 @@ The following is a modified version of the gitflow branching strategy described ## Main Branches -- **master** - `origin/master` is the main branch where the source code of HEAD always reflects a production-ready state. -- **develop** - `origin/develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. -- **next** - `origin/next` may contain consensus-breaking changes. +- **master** - `master` is the main branch where the source code of HEAD always reflects a production-ready state. +- **develop** - `develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. +- **next** - `next` may contain consensus-breaking changes.
- **release/X.Y.Z.A.n** is the release branch. When the source code in the develop branch reaches a stable point and is ready to be released, a release branch is created as `release/X.Y.Z.A.n` (see [release-process.md](./release-process.md)). After release, the following will happen: -- `release/X.Y.Z.A.n` branch is merged back to `origin/master`. -- `origin/master` is then merged into `origin/develop`, and development continues in the `origin/develop` branch. -- `origin/develop` is then merged into `origin/next`. +- `release/X.Y.Z.A.n` branch is merged back to `master`. +- `master` is then merged into `develop`, and development continues in the `develop` branch. +- `develop` is then merged into `next`. ## Supporting Branches Branch names should use a prefix that conveys the overall goal of the branch. -All branches should be based off of `origin/develop`, with the exception being a hotfix branch which may be based off of `origin/master`. +All branches should be based off of `develop`, with the exception being a hotfix branch which may be based off of `master`. - `feat/some-fancy-new-thing`: For new features. - `fix/some-broken-thing`: For hot fixes and bug fixes. diff --git a/docs/release-process.md b/docs/release-process.md index ac7bd60d56a..27e5b0ac4a1 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -45,14 +45,13 @@ Optionally, an extra pre-release field may be appended to the version to specify ## Non-Consensus Breaking Release Process The release must be timed so that it does not interfere with a _prepare phase_. -The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). -A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, releases should happen at least 24 hours before the start of a new cycle. 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". - - In other words, the database schema has changed, but an automatic migration was not implemented. - - Determine whether this a feature release, as opposed to a hotfix or a patch. + - In other words, the database schema has changed, but an automatic migration was not implemented. + - Determine whether this is a feature release, as opposed to a hotfix or a patch. - A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`. 2. Enumerate PRs and/or issues that would _block_ the release. @@ -69,9 +68,9 @@ A release should happen at least 24 hours before the start of a new cycle, to av - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections.
+ - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). + + **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. 5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` workflow](../.github/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 8d563320817..9d3f2cb5e13 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -44,8 +44,7 @@ x increments on the current stacks-core release version ## Non-Consensus Breaking Release Process The release must be timed so that it does not interfere with a _prepare phase_. -The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). -A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, releases should happen at least 24 hours before the start of a new cycle. 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). @@ -88,4 +87,3 @@ A release should happen at least 24 hours before the start of a new cycle, to av 9. Finally, the following merges will happen to complete the release process: - Release branch `release/signer-X.Y.Z.A.n.x` will be merged into the `master` branch. - Then, `master` will be merged into `develop`. 
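Since the core and signer release documents now share the same shape, it may help to see the 6-part `stacks-signer` version layout (`X.Y.Z.A.n.x`) from the versioning section above expressed as code. This is an illustrative sketch only — it is not part of the repository, and it ignores the optional `-rc[0-9]` pre-release suffix that the core versioning section mentions:

```rust
// Hypothetical parser for the 6-part signer version layout X.Y.Z.A.n.x.
// Illustrative only; not part of stacks-core or stacks-signer.

fn parse_signer_version(version: &str) -> Option<[u32; 6]> {
    let mut parts = [0u32; 6];
    let mut count = 0;
    for piece in version.split('.') {
        if count == 6 {
            return None; // more than six components
        }
        parts[count] = piece.parse().ok()?;
        count += 1;
    }
    if count == 6 {
        Some(parts)
    } else {
        None // fewer than six components (e.g., a plain 5-part core version)
    }
}

fn main() {
    // A hypothetical signer-only patch on top of a 2.6.0.0.0 stacks-core
    // release: only the trailing `x` component increments between core releases.
    let first_patch = parse_signer_version("2.6.0.0.0.1").unwrap();
    let second_patch = parse_signer_version("2.6.0.0.0.2").unwrap();
    // Lexicographic array comparison matches the intended ordering.
    assert!(first_patch < second_patch);
}
```

Comparing the six components lexicographically gives the ordering the scheme intends: a signer-only hotfix (`x`) sorts after the core release it patches.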
- From f95e131e31fb61b89e9d7b6749df6ab25cff4dbf Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:42:01 -0700 Subject: [PATCH 011/910] Update stacks-signer changelog for 2.5.0.0.5.1 --- stacks-signer/CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index fcc7ab17f5c..6b28b15e8f6 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -7,3 +7,18 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +## [2.5.0.0.5.1] + +### Added + +- Adds signerdb schema versioning (#4965) +- Added voting cli commands `generate-vote` and `verify-vote` (#4934) +- Add sortition tracking cache (#4905) +- Push blocks to signer set and adds `/v3/blocks/upload` (#4902) + +### Changed + +- Fix an issue of poorly timed tenure and bitcoin blocks (#4956) +- Process pending blocks before ending tenure (#4952) +- Update rusqlite/sqlite versions (#4948) +- return last block sortition in `/v3/sortitions` (#4939) From c388af4fcafc3fe3a5f853e711a4158b6edb34d4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 23 Jul 2024 13:02:01 -0400 Subject: [PATCH 012/910] Add block proposal timeout configuration option to signer Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 3 ++- stacks-signer/src/config.rs | 14 ++++++++++++++ stacks-signer/src/runloop.rs | 1 + 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index e3e3dd1dc5a..71720a015be 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -565,7 +565,8 @@ pub(crate) mod tests { tx_fee_ustx: config.tx_fee_ustx, max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), - first_proposal_burn_block_timing: Duration::from_secs(30), + first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, + block_proposal_timeout: config.block_proposal_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 4c7bc565d34..66cf5a5f7d5 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -36,6 +36,7 @@ use wsts::curve::scalar::Scalar; use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; +const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; // Default transaction fee to use in microstacks (if unspecified in the config file) const TX_FEE_USTX: u64 = 10_000; @@ -154,6 +155,8 @@ pub struct SignerConfig { /// How much time must pass between the first block proposal in a tenure and the next bitcoin block /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, + /// How much time to wait for a miner to propose a block following a sortition + pub block_proposal_timeout: Duration, } /// The parsed configuration for the signer @@ -196,6 +199,8 @@ pub struct GlobalConfig { /// How much time between the first block proposal in a tenure and the next bitcoin block /// must pass before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, + /// How much time to wait for a miner to propose a block following a sortition + pub block_proposal_timeout: Duration, } /// Internal struct for loading up the config file @@ -236,6 +241,8 @@ struct RawConfigFile { /// How much time must pass between the first block proposal in a tenure and the next bitcoin block /// before a subsequent
miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option<u64>, + /// How much time to wait for a miner to propose a block following a sortition in milliseconds + pub block_proposal_timeout_ms: Option<u64>, } impl RawConfigFile { @@ -324,6 +331,12 @@ impl TryFrom<RawConfigFile> for GlobalConfig { None => None, }; + let block_proposal_timeout = Duration::from_millis( + raw_data + .block_proposal_timeout_ms + .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -343,6 +356,7 @@ impl TryFrom<RawConfigFile> for GlobalConfig { db_path, metrics_endpoint, first_proposal_burn_block_timing, + block_proposal_timeout, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 6795f0cfee1..2909c9383a2 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -270,6 +270,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo tx_fee_ustx: self.config.tx_fee_ustx, max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), + block_proposal_timeout: self.config.block_proposal_timeout, }) } From 9844895154da1473c66dfcac82d61d98f121b1d5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 24 Jul 2024 11:57:52 -0400 Subject: [PATCH 013/910] Add block proposal timeout check to handle_block_proposal Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 3 ++ stacks-signer/src/tests/chainstate.rs | 1 + stacks-signer/src/v0/signer.rs | 30 ++++++++++++++++++- .../stacks-node/src/nakamoto_node/relayer.rs | 6 ++-- .../src/tests/nakamoto_integrations.rs | 3 ++ testnet/stacks-node/src/tests/signer/v0.rs | 1 + 6 files changed, 40 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 95c60d3a3c9..c4bed351b47 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -84,12 +84,15 @@ pub struct ProposalEvalConfig { /// How much time must pass between the first block proposal in a tenure and the next bitcoin block /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, + /// Time between processing a sortition and proposing a block before the block is considered invalid + pub block_proposal_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { fn from(value: &SignerConfig) -> Self { Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing.clone(), + block_proposal_timeout: value.block_proposal_timeout.clone(), } } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index c2c65f265c1..62ce751f94a 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -86,6 +86,7 @@ fn setup_test_environment( last_sortition, config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(30), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 93927b03fd0..a6554ee27d3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -14,6 +14,7 @@ // along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; @@ -26,7 +27,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; -use crate::chainstate::{ProposalEvalConfig, SortitionsView}; +use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; @@ -311,6 +312,33 @@ impl Signer { // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { + // If this is the first block in the tenure, check if it was proposed after the timeout + if let Ok(None) = self + .signer_db + .get_last_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) + { + if let Ok(Some(received_ts)) = self + .signer_db + .get_burn_block_receive_time(&sortition_state.cur_sortition.burn_block_hash) + { + let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); + let elapsed = std::time::SystemTime::now() + .duration_since(received_time) + .unwrap_or_else(|_| { + panic!("{self}: Failed to calculate time since burn block received") + }); + if elapsed >= self.proposal_config.block_proposal_timeout { + warn!( + "{self}: miner proposed block after timeout."; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + sortition_state.cur_sortition.miner_status = + SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } + } + match sortition_state.check_proposal( stacks_client, &self.signer_db, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a6140434722..36e93f3d7e6 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -110,7 +110,7 @@ pub struct LastCommit { /// the tenure consensus hash for the tip's tenure tenure_consensus_hash: ConsensusHash, /// the start-block hash of the tip's tenure - start_block_hash: BlockHeaderHash, + _start_block_hash: BlockHeaderHash, /// What is the epoch in which this was sent? 
epoch_id: StacksEpochId, /// commit txid (to be filled in on submission) @@ -123,7 +123,7 @@ impl LastCommit { burn_tip: BlockSnapshot, stacks_tip: StacksBlockId, tenure_consensus_hash: ConsensusHash, - start_block_hash: BlockHeaderHash, + _start_block_hash: BlockHeaderHash, epoch_id: StacksEpochId, ) -> Self { Self { @@ -131,7 +131,7 @@ impl LastCommit { burn_tip, stacks_tip, tenure_consensus_hash, - start_block_hash, + _start_block_hash, epoch_id, txid: None, } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 816e130a599..c13fdf5bf2b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4653,6 +4653,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -4766,6 +4767,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view @@ -4832,6 +4834,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9da738d3a..3543fb1ce81 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -268,6 +268,7 @@ fn block_proposal_rejection() { let reward_cycle = signer_test.get_current_reward_cycle(); let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); let mut block = NakamotoBlock { From cf2566560c38c97ebf4f99acc90f50e458f2d3a2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 24 Jul 2024 13:06:22 -0400 Subject: [PATCH 014/910] Move block proposal timeout check to check_proposal and add unit tests Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 36 +++++++++++++-- stacks-signer/src/signerdb.rs | 4 +- stacks-signer/src/tests/chainstate.rs | 44 +++++++++++++++++-- stacks-signer/src/v0/signer.rs | 30 +------------ .../src/tests/nakamoto_integrations.rs | 6 ++- 5 files changed, 79 insertions(+), 41 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index c4bed351b47..4c04d798d8c 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>.
-use std::time::Duration; +use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; @@ -91,8 +91,8 @@ pub struct ProposalEvalConfig { impl From<&SignerConfig> for ProposalEvalConfig { fn from(value: &SignerConfig) -> Self { Self { - first_proposal_burn_block_timing: value.first_proposal_burn_block_timing.clone(), - block_proposal_timeout: value.block_proposal_timeout.clone(), + first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, + block_proposal_timeout: value.block_proposal_timeout, } } } @@ -150,12 +150,40 @@ impl<'a> ProposedBy<'a> { impl SortitionsView { /// Apply checks from the SortitionsView on the block proposal. pub fn check_proposal( - &self, + &mut self, client: &StacksClient, signer_db: &SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { + // If this is the first block in the tenure, check if it was proposed after the timeout + if signer_db + .get_last_signed_block_in_tenure(&block.header.consensus_hash)? + .is_none() + { + if let Some(received_ts) = + signer_db.get_burn_block_receive_time(&self.cur_sortition.burn_block_hash)? + { + let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); + let elapsed = std::time::SystemTime::now() + .duration_since(received_time) + .unwrap_or_else(|_| { + panic!("Failed to calculate time since burn block received") + }); + if elapsed >= self.config.block_proposal_timeout { + warn!( + "Miner proposed first block after block proposal timeout."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + "burn_block_received_time" => ?received_time, + ); + self.cur_sortition.miner_status = + SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } + } let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); if !bitvec_all_1s { warn!( diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5ecef398d45..09c17ed40df 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -189,7 +189,7 @@ impl SignerDb { }) .optional(); match result { - Ok(x) => Ok(x.unwrap_or_else(|| 0)), + Ok(x) => Ok(x.unwrap_or(0)), Err(e) => Err(DBError::from(e)), } } @@ -294,7 +294,7 @@ impl SignerDb { tenure: &ConsensusHash, ) -> Result, DBError> { let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? 
AND signed_over = 1 ORDER BY stacks_height ASC LIMIT 1"; - let result: Option = query_row(&self.db, query, &[tenure])?; + let result: Option = query_row(&self.db, query, [tenure])?; try_deserialize(result) } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 62ce751f94a..e469cbdeb9f 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -86,7 +86,7 @@ fn setup_test_environment( last_sortition, config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), - block_proposal_timeout: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(5), }, }; @@ -111,7 +111,7 @@ fn setup_test_environment( parent_block_id: StacksBlockId([0; 32]), tx_merkle_root: Sha512Trunc256Sum([0; 32]), state_index_root: TrieHash([0; 32]), - timestamp: 11, + timestamp: 3, miner_signature: MessageSignature::empty(), signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), @@ -140,7 +140,7 @@ fn check_proposal_units() { #[test] fn check_proposal_miner_pkh_mismatch() { - let (stacks_client, signer_db, _block_pk, view, mut block) = + let (stacks_client, signer_db, _block_pk, mut view, mut block) = setup_test_environment("miner_pkh_mismatch"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3])); @@ -328,7 +328,7 @@ fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { #[test] fn check_proposal_tenure_extend_invalid_conditions() { - let (stacks_client, signer_db, block_pk, view, mut block) = + let (stacks_client, signer_db, block_pk, mut view, mut block) = setup_test_environment("tenure_extend"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let mut extend_payload = make_tenure_change_payload(); @@ -351,3 +351,39 @@ fn check_proposal_tenure_extend_invalid_conditions() { .check_proposal(&stacks_client, &signer_db, &block, &block_pk) .unwrap()); } + +#[test] +fn check_block_proposal_timeout() { + let (stacks_client, mut signer_db, block_pk, mut view, mut curr_sortition_block) = + setup_test_environment("block_proposal_timeout"); + curr_sortition_block.header.consensus_hash = view.cur_sortition.consensus_hash; + let mut last_sortition_block = curr_sortition_block.clone(); + last_sortition_block.header.consensus_hash = + view.last_sortition.as_ref().unwrap().consensus_hash; + + // Ensure we have a burn height to compare against + let burn_hash = view.cur_sortition.burn_block_hash; + let burn_height = 1; + let received_time = SystemTime::now(); + signer_db + .insert_burn_block(&burn_hash, burn_height, &received_time) + .unwrap(); + + assert!(view + .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .unwrap()); + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .unwrap()); + + // Sleep a bit to time out the block proposal + std::thread::sleep(Duration::from_secs(5)); + assert!(!view + .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .unwrap()); + + assert!(view + .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .unwrap()); +} diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a6554ee27d3..93927b03fd0 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -14,7 +14,6 @@ // along with this program. If not, see . 
use std::fmt::Debug; use std::sync::mpsc::Sender; -use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; @@ -27,7 +26,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; -use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; +use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; @@ -312,33 +311,6 @@ impl Signer { // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { - // If this is the first block in the tenure, check if it was proposed after the timeout - if let Ok(None) = self - .signer_db - .get_last_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) - { - if let Ok(Some(received_ts)) = self - .signer_db - .get_burn_block_receive_time(&sortition_state.cur_sortition.burn_block_hash) - { - let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); - let elapsed = std::time::SystemTime::now() - .duration_since(received_time) - .unwrap_or_else(|_| { - panic!("{self}: Failed to calculate time since burn block received") - }); - if elapsed >= self.proposal_config.block_proposal_timeout { - warn!( - "{self}: miner proposed block after timeout."; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - sortition_state.cur_sortition.miner_status = - SortitionMinerStatus::InvalidatedBeforeFirstBlock; - } - } - } - match sortition_state.check_proposal( stacks_client, &self.signer_db, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c13fdf5bf2b..9173dd86456 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4655,7 +4655,8 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; - let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); + let mut sortitions_view = + SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); // check the prior tenure's proposals again, confirming that the sortitions_view // will reject them. 
@@ -4769,7 +4770,8 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; - let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); + let mut sortitions_view = + SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, From 5b6eae44d7baa9a1f9dd54c4f2ca6d5955bff97d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 25 Jul 2024 12:42:06 -0400 Subject: [PATCH 015/910] CRC: move sortition time out check into its own function Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 66 ++++++++++++++--------- stacks-signer/src/tests/chainstate.rs | 77 +++++++++++++++++++++++++++ 2 files changed, 118 insertions(+), 25 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4c04d798d8c..ee747407f00 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -78,6 +78,39 @@ pub struct SortitionState { pub burn_block_hash: BurnchainHeaderHash, } +impl SortitionState { + /// Check if the sortition is timed out (i.e., the miner did not propose a block in time) + pub fn is_timed_out( + &self, + timeout: Duration, + signer_db: &SignerDb, + ) -> Result { + // if the miner has already been invalidated, we don't need to check if they've timed out. + if self.miner_status != SortitionMinerStatus::Valid { + return Ok(false); + } + // if we've already signed a block in this tenure, the miner can't have timed out. + let has_blocks = signer_db + .get_last_signed_block_in_tenure(&self.consensus_hash)? + .is_some(); + if has_blocks { + return Ok(false); + } + let Some(received_ts) = signer_db.get_burn_block_receive_time(&self.burn_block_hash)? + else { + return Ok(false); + }; + let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); + let Ok(elapsed) = std::time::SystemTime::now().duration_since(received_time) else { + return Ok(false); + }; + if elapsed > timeout { + return Ok(true); + } + Ok(false) + } +} + /// Captures the configuration settings used by the signer when evaluating block proposals. #[derive(Debug, Clone)] pub struct ProposalEvalConfig { @@ -156,32 +189,15 @@ impl SortitionsView { block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { - // If this is the first block in the tenure, check if it was proposed after the timeout - if signer_db - .get_last_signed_block_in_tenure(&block.header.consensus_hash)? - .is_none() + if self + .cur_sortition + .is_timed_out(self.config.block_proposal_timeout, signer_db)? { - if let Some(received_ts) = - signer_db.get_burn_block_receive_time(&self.cur_sortition.burn_block_hash)? 
- { - let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); - let elapsed = std::time::SystemTime::now() - .duration_since(received_time) - .unwrap_or_else(|_| { - panic!("Failed to calculate time since burn block received") - }); - if elapsed >= self.config.block_proposal_timeout { - warn!( - "Miner proposed first block after block proposal timeout."; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, - "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), - "burn_block_received_time" => ?received_time, - ); - self.cur_sortition.miner_status = - SortitionMinerStatus::InvalidatedBeforeFirstBlock; - } + self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + if let Some(last_sortition) = self.last_sortition.as_mut() { + if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { + last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } } let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index e469cbdeb9f..d0c7f1d9f3f 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -387,3 +387,80 @@ fn check_block_proposal_timeout() { .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) .unwrap()); } + +#[test] +fn check_sortition_timeout() { + let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; + let signer_db_path = format!( + "{signer_db_dir}/sortition_timeout.{}.sqlite", + get_epoch_time_secs() + ); + fs::create_dir_all(signer_db_dir).unwrap(); + let mut signer_db = SignerDb::new(signer_db_path).unwrap(); + + let mut sortition = SortitionState { + miner_pkh: Hash160([0; 20]), + miner_pubkey: None, + prior_sortition: ConsensusHash([0; 20]), + parent_tenure_id: ConsensusHash([0; 20]), + consensus_hash: ConsensusHash([1; 20]), + miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 2, + burn_block_hash: BurnchainHeaderHash([1; 32]), + }; + // Ensure we have a burn height to compare against + let burn_hash = sortition.burn_block_hash; + let burn_height = 1; + let received_time = SystemTime::now(); + signer_db + .insert_burn_block(&burn_hash, burn_height, &received_time) + .unwrap(); + + std::thread::sleep(Duration::from_secs(1)); + // We have not yet timed out + assert!(!sortition + .is_timed_out(Duration::from_secs(10), &signer_db) + .unwrap()); + // We are a valid sortition, have an empty tenure, and have now timed out + assert!(sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); + // This will not be marked as timed out as the status is no longer valid + sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; + assert!(!sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); + + // Revert the status to continue other checks + sortition.miner_status = SortitionMinerStatus::Valid; + // Insert a signed over block so its no longer an empty tenure + let block_proposal = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: sortition.consensus_hash, + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: 
TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; + + let mut block_info = BlockInfo::from(block_proposal); + block_info.signed_over = true; + signer_db.insert_block(&block_info).unwrap(); + + // This will no longer be timed out as we have a non-empty tenure + assert!(!sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); +} From 77c6a0e2d99363b1043aec9a74ef04ff722e1cc8 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 26 Jul 2024 14:17:10 +0300 Subject: [PATCH 016/910] move main signer messages from debug to info --- stacks-signer/src/runloop.rs | 2 +- stacks-signer/src/v0/signer.rs | 6 +++--- stacks-signer/src/v1/signer.rs | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 6795f0cfee1..3145cd6f546 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -365,7 +365,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo self.refresh_signer_config(next_reward_cycle); } } else { - debug!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; + info!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; "reward_cycle" => reward_cycle_info.reward_cycle, "reward_cycle_length" => reward_cycle_info.reward_cycle_length, "prepare_phase_block_length" => reward_cycle_info.prepare_phase_block_length, diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 93927b03fd0..37ba216aa2c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -153,7 +153,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - debug!("{self}: Receved a new burn block event for block height {burn_height}"); + info!("{self}: Receved a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - debug!("{self}: Received a block proposal: {block_proposal:?}"); + info!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. 
debug!( @@ -388,7 +388,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { - debug!("{self}: Received a block validate response: {block_validate_response:?}"); + info!("{self}: Received a block validate response: {block_validate_response:?}"); let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index f78f3b9e296..83a0c9a39b7 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -201,7 +201,7 @@ impl SignerTrait for Signer { }; match event { SignerEvent::BlockValidationResponse(block_validate_response) => { - debug!("{self}: Received a block proposal result from the stacks node..."); + info!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, block_validate_response, @@ -244,7 +244,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - debug!("{self}: Receved a new burn block event for block height {burn_height}"); + info!("{self}: Received a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -703,7 +703,7 @@ impl Signer { }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); } - debug!( + info!( "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, From 861bff076b051e0338b37dacbe0a17cd1b90b3b3 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 26 Jul 2024 14:22:00 +0300 Subject: [PATCH 017/910] moved from info to debug stacks block related logs --- stacks-signer/src/v0/signer.rs | 6 +++--- stacks-signer/src/v1/signer.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 37ba216aa2c..72c8cc4dfe9 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -153,7 +153,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - info!("{self}: Receved a new burn block event for block height {burn_height}"); + info!("{self}: Received a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - info!("{self}: Received a block proposal: {block_proposal:?}"); + debug!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. 
debug!( @@ -388,7 +388,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { - info!("{self}: Received a block validate response: {block_validate_response:?}"); + debug!("{self}: Received a block validate response: {block_validate_response:?}"); let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 83a0c9a39b7..bd4b36f4894 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -201,7 +201,7 @@ impl SignerTrait for Signer { }; match event { SignerEvent::BlockValidationResponse(block_validate_response) => { - info!("{self}: Received a block proposal result from the stacks node..."); + debug!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, block_validate_response, @@ -703,7 +703,7 @@ impl Signer { }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); } - info!( + debug!( "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, From 030fa6434b55b49b1e4bfa31470647ce6ab3a095 Mon Sep 17 00:00:00 2001 From: janniks Date: Fri, 26 Jul 2024 17:09:04 +0200 Subject: [PATCH 018/910] fix: ensure minimum non-dust amount as change output on regtest --- .../burnchains/bitcoin_regtest_controller.rs | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 30f088a96f4..0ef63b6dc08 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1715,6 +1715,7 @@ impl BitcoinRegtestController { spent_in_outputs + min_tx_size * fee_rate + estimated_rbf, &mut utxos_cloned, signer, + true, ); let serialized_tx = SerializedTx::new(tx_cloned); cmp::max(min_tx_size, serialized_tx.bytes.len() as u64) @@ -1731,6 +1732,7 @@ impl BitcoinRegtestController { spent_in_outputs + tx_size * fee_rate + rbf_fee, utxos_set, signer, + true, ); signer.dispose(); Some(()) @@ -1744,38 +1746,45 @@ impl BitcoinRegtestController { &mut self, epoch_id: StacksEpochId, tx: &mut Transaction, - total_to_spend: u64, + tx_cost: u64, utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, + force_change_output: bool, ) -> bool { let mut public_key = signer.get_public_key(); - let mut total_consumed = 0; + + let total_target = if force_change_output { + tx_cost + DUST_UTXO_LIMIT + } else { + tx_cost + }; // select UTXOs until we have enough to cover the cost + let mut total_consumed = 0; let mut available_utxos = vec![]; available_utxos.append(&mut utxos_set.utxos); for utxo in available_utxos.into_iter() { total_consumed += utxo.amount; utxos_set.utxos.push(utxo); - if total_consumed >= total_to_spend { + if total_consumed >= total_target { break; } } - if total_consumed < total_to_spend { + if total_consumed < total_target { warn!( "Consumed total {} is less than intended spend: {}", - total_consumed, total_to_spend + total_consumed, total_target ); return false; } // Append the change output - let value = total_consumed - total_to_spend; 
+ let value = total_consumed - tx_cost; debug!( "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_to_spend + value, total_consumed, total_target ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { From afc9c32648e81e3ab3a9073a070aead2ae221d70 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 26 Jul 2024 08:40:34 -0700 Subject: [PATCH 019/910] Removing unnecessary config fields for sample configs --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 5 +---- testnet/stacks-node/conf/mainnet-miner-conf.toml | 12 ++++++------ testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 7 +------ testnet/stacks-node/conf/testnet-follower-conf.toml | 7 ++----- testnet/stacks-node/conf/testnet-miner-conf.toml | 12 ++++++------ 5 files changed, 16 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 4377993ed4f..ba42fb66579 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" @@ -9,9 +9,6 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "mainnet" peer_host = "bitcoin.hiro.so" -username = "hirosystems" -password = "hirosystems" -peer_port = 8333 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 3fdf293a4f4..fc526f08781 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "127.0.0.1:20443" p2p_bind = "127.0.0.1:20444" bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" @@ -13,10 +13,10 @@ mine_microblocks = false # Disable microblocks (ref: https://github.com/stacks-n chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" -username = "" -password = "" -rpc_port = 8332 -peer_port = 8333 +username = "" +password = "" +rpc_port = +peer_port = # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 # Amount (in sats) per byte - Used to calculate the transaction fees @@ -24,4 +24,4 @@ satoshis_per_byte = 25 # Amount of sats to add when 
RBF'ing bitcoin tx (default: 5) rbf_fee_increment = 5 # Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) -max_rbf = 150 \ No newline at end of file +max_rbf = 150 diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 2c98499d59e..75785454dc6 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true @@ -11,8 +11,3 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "mainnet" peer_host = "bitcoin.hiro.so" -username = "hirosystems" -password = "hirosystems" -rpc_port = 8332 -peer_port = 8333 -burn_fee_cap = 1 diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index b327fbb0018..f5fb2c04b00 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" @@ -9,9 +9,6 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "krypton" peer_host = "bitcoin.regtest.hiro.so" -username = "hirosystems" -password = "hirosystems" -rpc_port = 18443 peer_port = 18444 pox_prepare_length = 100 pox_reward_length = 900 @@ -66,4 +63,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 \ No newline at end of file +start_height = 2000701 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index f3a49a33d4c..e565fd0ee20 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" @@ -9,10 +9,10 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "krypton" peer_host = "127.0.0.1" -username = "" -password = "" -rpc_port = 18443 -peer_port = 18444 +username = "" +password = "" +rpc_port = +peer_port = pox_prepare_length = 100 pox_reward_length = 900 # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election @@ -74,4 +74,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 \ No newline at end of file +start_height = 2000701 From ba24d00285dcc8294957a4838271b87946669446 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Jul 2024 12:03:36 -0400 Subject: [PATCH 020/910] Add an integration test to ensure an empty sortition can time out Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++++++++++++++ 2 files changed, 157 insertions(+) diff --git 
a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index c35406fd3b2..d12ae61f381 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -89,6 +89,7 @@ jobs: - tests::signer::v0::end_of_tenure - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid + - tests::signer::v0::empty_sortition - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3543fb1ce81..90e32fb6b81 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::ops::Add; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -1068,3 +1069,158 @@ fn retry_on_timeout() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks the behaviour of signers when a sortition is empty. Specifically: +/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - The empty sortition will trigger the miner to attempt a tenure extend. +/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition +fn empty_sortition() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(5); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + signer_test.boot_to_epoch_3(); + + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + info!("------------------------- Test Mine Regular Tenure A -------------------------"); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Mine a regular tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("------------------------- Test Mine Empty Tenure B -------------------------"); + info!("Pausing stacks block mining to trigger an empty sortition."); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Start new Tenure B + // In the next block, the miner should win the tenure + next_block_and( 
+ &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("Pausing stacks block proposal to force an empty tenure"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + info!("Pausing commit op to prevent tenure C from starting..."); + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + + let blocks_after = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + info!("------------------------- Test Delayed Block is Rejected -------------------------"); + let reward_cycle = signer_test.get_current_reward_cycle(); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + // The miner's proposed block should get rejected by the signers + let start_polling = Instant::now(); + let mut found_rejection = false; + while !found_rejection { + std::thread::sleep(Duration::from_secs(1)); + let messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code, + .. 
+ })) = message + { + assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + found_rejection = true; + } else { + panic!("Unexpected message type"); + } + } + assert!( + start_polling.elapsed() <= short_timeout, + "Timed out after waiting for response from signer" + ); + } + signer_test.shutdown(); +} From 3b38148dff35e931cb25fe36855a1db1fab91777 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Jul 2024 13:24:48 -0400 Subject: [PATCH 021/910] Add mock signature message type to SignerMessages Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 119 +++++++++++++++++- .../src/nakamoto_node/sign_coordinator.rs | 4 + 2 files changed, 121 insertions(+), 2 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index d15f566e163..aeedf76d68c 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -38,12 +38,15 @@ use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey, StacksPublicKey}; +use clarity::types::PrivateKey; use clarity::util::retry::BoundReader; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha512_256}; use stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, @@ -55,6 +58,7 @@ use tiny_http::{ }; use crate::http::{decode_http_body, decode_http_request}; +use crate::stacks_common::types::PublicKey; use crate::{ BlockProposal, EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait, @@ -65,7 +69,9 @@ define_u8_enum!( /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { /// Block Response message from signers - BlockResponse = 1 + BlockResponse = 1, + /// Mock Signature message from Epoch 2.5 signers + MockSignature = 2 }); define_u8_enum!( @@ -100,7 +106,9 @@ SignerMessageTypePrefix { /// Block Response message from signers BlockResponse = 1, /// Block Pushed message from miners - BlockPushed = 2 + BlockPushed = 2, + /// Mock Signature message from Epoch 2.5 signers + MockSignature = 3 }); #[cfg_attr(test, mutants::skip)] @@ -143,6 +151,7 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, + SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, } } } @@ -156,6 +165,8 @@ pub enum SignerMessage { BlockResponse(BlockResponse), /// A block pushed from miners to the signers set BlockPushed(NakamotoBlock), + /// A mock signature from the epoch 2.5 signers + MockSignature(MockSignature), } impl SignerMessage { @@ -167,6 +178,7 @@ impl SignerMessage { match self { Self::BlockProposal(_) | Self::BlockPushed(_) => None, Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), + Self::MockSignature(_) => Some(MessageSlotID::MockSignature), } } } @@ -180,6 +192,7 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::BlockProposal(block_proposal) => block_proposal.consensus_serialize(fd), SignerMessage::BlockResponse(block_response) => 
block_response.consensus_serialize(fd), SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), + SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), }?; Ok(()) } @@ -201,6 +214,10 @@ impl StacksMessageCodec for SignerMessage { let block = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::BlockPushed(block) } + SignerMessageTypePrefix::MockSignature => { + let signature = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockSignature(signature) + } }; Ok(message) } @@ -214,6 +231,59 @@ pub trait StacksMessageCodecExtensions: Sized { fn inner_consensus_deserialize(fd: &mut R) -> Result; } +/// A signer's mock signature across its last seen Stacks Consensus Hash. This is only used +/// by Epoch 2.5 signers to simulate the signing of a block for every sortition. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignature { + /// The signature across the stacks consensus hash + pub signature: MessageSignature, + /// The block hash that the signature is across + pub stacks_consensus_hash: ConsensusHash, +} + +impl MockSignature { + /// The signature hash for the mock signature + pub fn signature_hash(&self) -> Result { + let mut hasher = Sha512_256::new(); + let fd = &mut hasher; + write_next(fd, &self.stacks_consensus_hash)?; + Ok(Sha512Trunc256Sum::from_hasher(hasher)) + } + /// Sign the mock signature and set the internal signature field + pub fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; + self.signature = private_key.sign(&signature_hash.0)?; + Ok(()) + } + /// Verify the mock signature against the provided public key + pub fn verify(&self, public_key: &StacksPublicKey) -> Result { + if self.signature == MessageSignature::empty() { + return Ok(false); + } + let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; + public_key + .verify(&signature_hash.0, &self.signature) + .map_err(|e| e.to_string()) + } +} + +impl StacksMessageCodec for MockSignature { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signature)?; + write_next(fd, &self.stacks_consensus_hash)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signature = read_next::(fd)?; + let stacks_consensus_hash = read_next::(fd)?; + Ok(Self { + signature, + stacks_consensus_hash, + }) + } +} + define_u8_enum!( /// Enum representing the reject code type prefix RejectCodeTypePrefix { @@ -508,6 +578,7 @@ mod test { }; use blockstack_lib::util_lib::strings::StacksString; use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; + use clarity::types::PrivateKey; use clarity::util::hash::MerkleTree; use clarity::util::secp256k1::MessageSignature; use rand::{thread_rng, Rng, RngCore}; @@ -622,4 +693,48 @@ mod test { .expect("Failed to deserialize SignerMessage"); assert_eq!(signer_message, deserialized_signer_message); } + + #[test] + fn verify_sign_mock_signature() { + let private_key = StacksPrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); + + let bad_private_key = StacksPrivateKey::new(); + let bad_public_key = StacksPublicKey::from_private(&bad_private_key); + + let byte: u8 = thread_rng().gen(); + let stacks_consensus_hash = ConsensusHash([byte; 20]); + let mut mock_signature = MockSignature { + signature: MessageSignature::empty(), + stacks_consensus_hash, + }; + assert!(!mock_signature + .verify(&public_key) + 
.expect("Failed to verify MockSignature")); + + mock_signature + .sign(&private_key) + .expect("Failed to sign MockSignature"); + + assert!(mock_signature + .verify(&public_key) + .expect("Failed to verify MockSignature")); + assert!(!mock_signature + .verify(&bad_public_key) + .expect("Failed to verify MockSignature")); + } + + #[test] + fn serde_mock_signature() { + let byte: u8 = thread_rng().gen(); + let stacks_consensus_hash = ConsensusHash([byte; 20]); + let mock_signature = MockSignature { + signature: MessageSignature::empty(), + stacks_consensus_hash, + }; + let serialized_signature = mock_signature.serialize_to_vec(); + let deserialized_signature = read_next::(&mut &serialized_signature[..]) + .expect("Failed to deserialize MockSignature"); + assert_eq!(mock_signature, deserialized_signature); + } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 30f73e75bed..1990a382e9f 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -758,6 +758,10 @@ impl SignCoordinator { debug!("Received block pushed message. Ignoring."); continue; } + SignerMessageV0::MockSignature(_) => { + debug!("Received mock signature message. Ignoring."); + continue; + } }; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { From a407c94373430e3b78cab7bc2945cc8a4220a97b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Jul 2024 13:43:07 -0400 Subject: [PATCH 022/910] Send a mock signature message per sortition Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 20 ++++++++++++--- stacks-signer/src/v0/signer.rs | 27 +++++++++++++++++-- testnet/stacks-node/src/tests/signer/v0.rs | 30 ++++++++++++++++++++++ 3 files changed, 72 insertions(+), 5 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index aeedf76d68c..76dee99ded4 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -236,12 +236,26 @@ pub trait StacksMessageCodecExtensions: Sized { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MockSignature { /// The signature across the stacks consensus hash - pub signature: MessageSignature, + signature: MessageSignature, /// The block hash that the signature is across - pub stacks_consensus_hash: ConsensusHash, + stacks_consensus_hash: ConsensusHash, } impl MockSignature { + /// Create a new mock signature with the provided stacks consensus hash and private key + pub fn new( + stacks_consensus_hash: ConsensusHash, + stacks_private_key: &StacksPrivateKey, + ) -> Self { + let mut sig = Self { + signature: MessageSignature::empty(), + stacks_consensus_hash, + }; + sig.sign(stacks_private_key) + .expect("Failed to sign MockSignature"); + sig + } + /// The signature hash for the mock signature pub fn signature_hash(&self) -> Result { let mut hasher = Sha512_256::new(); @@ -250,7 +264,7 @@ impl MockSignature { Ok(Sha512Trunc256Sum::from_hasher(hasher)) } /// Sign the mock signature and set the internal signature field - pub fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; self.signature = private_key.sign(&signature_hash.0)?; Ok(()) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 
93927b03fd0..0ab444b78f8 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -17,10 +17,12 @@ use std::sync::mpsc::Sender; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; -use clarity::types::PrivateKey; +use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; -use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; +use libsigner::v0::messages::{ + BlockResponse, MessageSlotID, MockSignature, RejectCode, SignerMessage, +}; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; @@ -166,6 +168,10 @@ impl SignerTrait<SignerMessage> for Signer { ); } *sortition_state = None; + if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + // We are in epoch 25, so we should mock mine to prove we are still alive. + self.mock_mine(stacks_client); + }; } } } @@ -462,4 +468,21 @@ impl Signer { .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } + + /// Send a mock signature to stackerdb to prove we are still alive + fn mock_mine(&mut self, stacks_client: &StacksClient) { + let Ok(peer_info) = stacks_client.get_peer_info() else { + warn!("{self}: Failed to get peer info. Cannot mock mine."); + return; + }; + let mock_signature = + MockSignature::new(peer_info.stacks_tip_consensus_hash, &self.private_key); + let message = SignerMessage::MockSignature(mock_signature); + if let Err(e) = self + .stackerdb + .send_message_with_retry::<SignerMessage>(message) + { + warn!("{self}: Failed to send mock signature to stacker-db: {e:?}",); + } + } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9da738d3a..75856593bb0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1067,3 +1067,33 @@ fn retry_on_timeout() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks that Epoch 2.5 signers send a mock signature per sortition.
+fn mock_mine_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + todo!("BOOT TO EPOCH 2.5 AND VERIFY WE RECEIVE A MOCK SIGNATURE PER SORTITION"); +} From 9d69e0ea4af28bb40f256e7c07fdb178ea6981b0 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 26 Jul 2024 11:07:47 -0700 Subject: [PATCH 023/910] Update docs/release-process.md Co-authored-by: Adriano Di Luzio --- docs/release-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/release-process.md b/docs/release-process.md index 27e5b0ac4a1..46b4bae621e 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -76,7 +76,7 @@ The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/to 6. Once the release candidate has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. -7. The release candidate will tested that it successfully syncs with the current chain from genesis both in testnet and mainnet. +7. The release candidate will test that it successfully syncs with the current chain from genesis both in testnet and mainnet. 8. If bugs or issues emerge from the rollout on staging infrastructure, the release will be delayed until those regressions are resolved. 
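Taken together, PATCH 021/022 introduce a self-contained mock-signing path for Epoch 2.5 signers: construct, verify, and round-trip a `MockSignature` through the `SignerMessage` codec. A minimal sketch of that flow, assuming only the types and crate paths shown in the diffs above (the `demo_mock_signature_roundtrip` function is illustrative, not part of any patch):

```rust
use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey, StacksPublicKey};
use libsigner::v0::messages::{MockSignature, SignerMessage};
use stacks_common::codec::{read_next, StacksMessageCodec};

fn demo_mock_signature_roundtrip() {
    let sk = StacksPrivateKey::new();
    let pk = StacksPublicKey::from_private(&sk);

    // MockSignature::new signs the consensus hash internally (PATCH 022),
    // so the result should immediately verify against the matching pubkey.
    let mock_sig = MockSignature::new(ConsensusHash([7; 20]), &sk);
    assert!(mock_sig.verify(&pk).expect("verify should not error"));

    // Wrapped as a SignerMessage it serializes with type prefix 3 and is
    // written to the new MockSignature slot (index 2 in the signers contracts).
    let msg = SignerMessage::MockSignature(mock_sig);
    let bytes = msg.serialize_to_vec();
    let decoded: SignerMessage =
        read_next(&mut &bytes[..]).expect("deserialization should succeed");
    assert_eq!(msg, decoded);
}
```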
From cd2cb5d2113a54bfcd7d5823616f032c4dedffe9 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Sun, 28 Jul 2024 09:26:55 -0700 Subject: [PATCH 024/910] Oneliner to set signer version at compile time --- stacks-signer/src/cli.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 3b63ebdd59b..ecb1c247417 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -42,6 +42,8 @@ extern crate alloc; #[derive(Parser, Debug)] #[command(author, version, about)] +#[command(long_version = option_env!("SIGNER_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")))] + /// The CLI arguments for the stacks signer pub struct Cli { /// Subcommand action to take From e5cc717b5aaf9ec75621b7dec3f165b2ae6daa0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 26 Jul 2024 14:39:04 -0400 Subject: [PATCH 025/910] fix: set miner stackerdb sync frequency to 1 sec; stackerdb sync pushchunks should not retry indefinitely; check for the absence of neighbors to sync to as a stopping condition for pushchunks --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/net/stackerdb/mod.rs | 4 +++ stackslib/src/net/stackerdb/sync.rs | 44 +++++++++++++++++++----- 3 files changed, 40 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a72a357b632..e0dd0c0afdb 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4104,7 +4104,7 @@ impl NakamotoChainState { Ok(StackerDBConfig { chunk_size: MAX_PAYLOAD_LEN.into(), signers, - write_freq: 5, + write_freq: 0, max_writes: u32::MAX, // no limit on number of writes max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 754df3fba1c..3e91c9b5420 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -401,6 +401,10 @@ pub struct StackerDBSync { num_attempted_connections: u64, /// How many connections have been made in the last pass (gets reset) num_connections: u64, + /// Number of state machine passes + rounds: u128, + /// Round when we last pushed + push_round: u128, } impl StackerDBSyncResult { diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 02390211bc7..7309ad40b2e 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -74,6 +74,8 @@ impl StackerDBSync { stale_neighbors: HashSet::new(), num_connections: 0, num_attempted_connections: 0, + rounds: 0, + push_round: 0, }; dbsync.reset(None, config); dbsync @@ -215,6 +217,7 @@ impl StackerDBSync { self.state = StackerDBSyncState::ConnectBegin; self.num_connections = 0; self.num_attempted_connections = 0; + self.rounds += 1; result } @@ -407,6 +410,16 @@ impl StackerDBSync { thread_rng().gen::() % chunk_inv.num_outbound_replicas == 0 }; + debug!( + "{:?}: Can push chunk StackerDBChunk(db={},id={},ver={}) to {}. Replicate? {}", + &network.get_local_peer(), + &self.smart_contract_id, + our_chunk.chunk_data.slot_id, + our_chunk.chunk_data.slot_version, + &naddr, + do_replicate + ); + if !do_replicate { continue; } @@ -1000,9 +1013,11 @@ impl StackerDBSync { /// Returns true if there are no more chunks to push. 
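Before reading the hunks below, it helps to see the new stopping condition in isolation: the push schedule is now built once per round, and pushing finishes only when no candidate neighbors remain anywhere in the schedule, which `pushchunks_begin` computes with a fold. A standalone sketch of that predicate (the free function and its generic parameters are illustrative, not part of the patch):

```rust
/// Pushing is done when every scheduled chunk has run out of candidate
/// neighbors to push to, i.e. zero push targets remain across the schedule.
fn pushes_exhausted<Chunk, Addr>(schedule: &[(Chunk, Vec<Addr>)]) -> bool {
    schedule
        .iter()
        .fold(0usize, |acc, (_chunk, naddrs)| {
            acc.saturating_add(naddrs.len())
        })
        == 0
}
```

This replaces the old `chunk_push_priorities.len() == 0` check, which could stay false forever once a non-empty schedule had been built.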
/// Returns false if there are pub fn pushchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_push_priorities.len() == 0 { + if self.chunk_push_priorities.len() == 0 && self.push_round != self.rounds { + // only do this once per round let priorities = self.make_chunk_push_schedule(&network)?; self.chunk_push_priorities = priorities; + self.push_round = self.rounds; } if self.chunk_push_priorities.len() == 0 { // done @@ -1017,8 +1032,6 @@ impl StackerDBSync { self.chunk_push_priorities.len() ); - let mut pushed = 0; - // fill up our comms with $capacity requests for _i in 0..self.request_capacity { if self.comms.count_inflight() >= self.request_capacity { @@ -1030,7 +1043,8 @@ impl StackerDBSync { .1 .iter() .enumerate() - .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); + // .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); + .find(|(_i, _naddr)| true); let (idx, selected_neighbor) = if let Some(x) = selected_neighbor_opt { x @@ -1072,8 +1086,6 @@ impl StackerDBSync { continue; } - pushed += 1; - // record what we just sent self.chunk_push_receipts .insert(selected_neighbor.clone(), (slot_id, slot_version)); @@ -1088,7 +1100,13 @@ impl StackerDBSync { return Err(net_error::PeerNotConnected); } self.next_chunk_push_priority = cur_priority; - Ok(self.chunk_push_priorities.len() == 0) + Ok(self + .chunk_push_priorities + .iter() + .fold(0usize, |acc, (_chunk, num_naddrs)| { + acc.saturating_add(num_naddrs.len()) + }) + == 0) } /// Collect push-chunk replies from neighbors. @@ -1138,7 +1156,14 @@ impl StackerDBSync { } } - self.comms.count_inflight() == 0 + let inflight = self.comms.count_inflight(); + debug!( + "{:?}: inflight messages for {}: {:?}", + network.get_local_peer(), + &self.smart_contract_id, + inflight + ); + inflight == 0 } /// Recalculate the download schedule based on chunkinvs received on push @@ -1189,8 +1214,9 @@ impl StackerDBSync { loop { debug!( - "{:?}: stacker DB sync state is {:?}", + "{:?}: stacker DB sync state for {} is {:?}", network.get_local_peer(), + &self.smart_contract_id, &self.state ); let mut blocked = true; From 7c7f9b3238b47059ec3fe489cec58fb964967873 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 26 Jul 2024 14:52:41 -0500 Subject: [PATCH 026/910] feat: support multi-miner integration test * test: add nakamoto_integration test with multiple miners (this test uses the blind signer test signing channel) * fix: nakamoto miner communicates over the correct miner slot for their block election (rather than searching by pubkey) * fix: neon miner does not submit block commits if the next burn block is in nakamoto * feat: update `/v2/neighbors` to use qualified contract identifier's ToString and parse() for JSON serialization * perf: nakamoto miner caches the reward set for their tenure --- stackslib/src/chainstate/nakamoto/mod.rs | 93 ++-- .../src/chainstate/nakamoto/tests/mod.rs | 10 +- stackslib/src/net/api/getneighbors.rs | 34 ++ stackslib/src/net/rpc.rs | 12 +- stackslib/src/net/stackerdb/config.rs | 15 +- stackslib/src/net/stackerdb/mod.rs | 54 +- stackslib/src/net/stackerdb/sync.rs | 3 - .../burnchains/bitcoin_regtest_controller.rs | 101 ++-- .../stacks-node/src/nakamoto_node/miner.rs | 127 +++-- .../src/nakamoto_node/sign_coordinator.rs | 16 +- testnet/stacks-node/src/neon_node.rs | 11 + testnet/stacks-node/src/tests/mod.rs | 35 ++ .../src/tests/nakamoto_integrations.rs | 468 ++++++++++++++++-- .../src/tests/neon_integrations.rs | 7 + 14 files changed, 776 insertions(+), 210 deletions(-) diff --git 
a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e0dd0c0afdb..b65e9ff0864 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -455,6 +455,28 @@ impl MaturedMinerPaymentSchedules { } } +pub struct MinersDBInformation { + signer_0_sortition: ConsensusHash, + signer_1_sortition: ConsensusHash, + latest_winner: u16, +} + +impl MinersDBInformation { + pub fn get_signer_index(&self, sortition: &ConsensusHash) -> Option { + if sortition == &self.signer_0_sortition { + Some(0) + } else if sortition == &self.signer_1_sortition { + Some(1) + } else { + None + } + } + + pub fn get_latest_winner_index(&self) -> u16 { + self.latest_winner + } +} + /// Calculated matured miner rewards, from scheduled rewards #[derive(Debug, Clone)] pub struct MaturedMinerRewards { @@ -4039,7 +4061,7 @@ impl NakamotoChainState { pub fn make_miners_stackerdb_config( sortdb: &SortitionDB, tip: &BlockSnapshot, - ) -> Result { + ) -> Result<(StackerDBConfig, MinersDBInformation), ChainstateError> { let ih = sortdb.index_handle(&tip.sortition_id); let last_winner_snapshot = ih.get_last_snapshot_with_sortition(tip.block_height)?; let parent_winner_snapshot = ih.get_last_snapshot_with_sortition( @@ -4051,13 +4073,13 @@ impl NakamotoChainState { // go get their corresponding leader keys, but preserve the miner's relative position in // the stackerdb signer list -- if a miner was in slot 0, then it should stay in slot 0 // after a sortition (and vice versa for 1) - let sns = if last_winner_snapshot.num_sortitions % 2 == 0 { - [last_winner_snapshot, parent_winner_snapshot] + let (latest_winner_idx, sns) = if last_winner_snapshot.num_sortitions % 2 == 0 { + (0, [last_winner_snapshot, parent_winner_snapshot]) } else { - [parent_winner_snapshot, last_winner_snapshot] + (1, [parent_winner_snapshot, last_winner_snapshot]) }; - for sn in sns { + for sn in sns.iter() { // find the commit let Some(block_commit) = ih.get_block_commit_by_txid(&sn.sortition_id, &sn.winning_block_txid)? @@ -4088,6 +4110,12 @@ impl NakamotoChainState { ); } + let miners_db_info = MinersDBInformation { + signer_0_sortition: sns[0].consensus_hash, + signer_1_sortition: sns[1].consensus_hash, + latest_winner: latest_winner_idx, + }; + let signers = miner_key_hash160s .into_iter() .map(|hash160| @@ -4101,14 +4129,17 @@ impl NakamotoChainState { )) .collect(); - Ok(StackerDBConfig { - chunk_size: MAX_PAYLOAD_LEN.into(), - signers, - write_freq: 0, - max_writes: u32::MAX, // no limit on number of writes - max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers - hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? - }) + Ok(( + StackerDBConfig { + chunk_size: MAX_PAYLOAD_LEN.into(), + signers, + write_freq: 0, + max_writes: u32::MAX, // no limit on number of writes + max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers + hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? + }, + miners_db_info, + )) } /// Get the slot range for the given miner's public key. 
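Before the `get_miner_slot` hunk below, the parity rule in `make_miners_stackerdb_config` is worth pinning down: the latest winner always occupies slot `num_sortitions % 2`, so a snapshot keeps its `.miners` slot as it ages from "latest winner" to "parent winner". A standalone sketch with toy stand-ins for the chainstate types:

```rust
/// Toy stand-in for BlockSnapshot; only the fields the parity rule needs.
#[derive(Clone, Debug, PartialEq)]
struct Snap {
    consensus_hash: u8,
    num_sortitions: u64,
}

/// Order the last two sortition winners so the latest winner lands in slot
/// `num_sortitions % 2`; returns (latest_winner_slot, [slot_0, slot_1]).
fn order_miner_slots(last: Snap, parent: Snap) -> (u16, [Snap; 2]) {
    if last.num_sortitions % 2 == 0 {
        (0, [last, parent])
    } else {
        (1, [parent, last])
    }
}

fn main() {
    let parent = Snap { consensus_hash: 1, num_sortitions: 7 };
    let last = Snap { consensus_hash: 2, num_sortitions: 8 };
    let (latest, slots) = order_miner_slots(last.clone(), parent);
    assert_eq!(latest, 0);
    assert_eq!(slots[0], last);
    // After the next sortition (num_sortitions == 9) the new winner lands in
    // slot 1 while the previous winner stays in slot 0: the "if a miner was
    // in slot 0, it should stay in slot 0" invariant from the comment above.
    let newer = Snap { consensus_hash: 3, num_sortitions: 9 };
    let (latest, slots) = order_miner_slots(newer.clone(), last.clone());
    assert_eq!(latest, 1);
    assert_eq!(slots, [last, newer]);
    println!("parity slotting holds");
}
```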
@@ -4119,33 +4150,29 @@ impl NakamotoChainState {
     pub fn get_miner_slot(
         sortdb: &SortitionDB,
         tip: &BlockSnapshot,
-        miner_pubkey: &StacksPublicKey,
+        election_sortition: &ConsensusHash,
     ) -> Result<Option<Range<u32>>, ChainstateError> {
-        let miner_hash160 = Hash160::from_node_public_key(&miner_pubkey);
-        let stackerdb_config = Self::make_miners_stackerdb_config(sortdb, &tip)?;
+        let (stackerdb_config, miners_info) = Self::make_miners_stackerdb_config(sortdb, &tip)?;
 
         // find out which slot we're in
-        let mut slot_index = 0;
-        let mut slot_id_result = None;
-        for (addr, slot_count) in stackerdb_config.signers.iter() {
-            if addr.bytes == miner_hash160 {
-                slot_id_result = Some(Range {
-                    start: slot_index,
-                    end: slot_index + slot_count,
-                });
-                break;
-            }
-            slot_index += slot_count;
-        }
-
-        let Some(slot_id_range) = slot_id_result else {
-            // miner key does not match any slot
+        let Some(signer_ix) = miners_info
+            .get_signer_index(election_sortition)
+            .map(usize::from)
+        else {
             warn!("Miner is not in the miners StackerDB config";
-                  "miner" => %miner_hash160,
                   "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers));
-
             return Ok(None);
         };
+        let mut signer_ranges = stackerdb_config.signer_ranges();
+        if signer_ix >= signer_ranges.len() {
+            // should be unreachable, but always good to be careful
+            warn!("Miner is not in the miners StackerDB config";
+                  "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers));
+
+            return Ok(None);
+        }
+        let slot_id_range = signer_ranges.swap_remove(signer_ix);
+
         Ok(Some(slot_id_range))
     }
diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs
index b96ed86f032..ef38ec76c66 100644
--- a/stackslib/src/chainstate/nakamoto/tests/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs
@@ -2049,8 +2049,9 @@ fn test_make_miners_stackerdb_config() {
         let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap();
 
         // check the stackerdb config as of this chain tip
-        let stackerdb_config =
-            NakamotoChainState::make_miners_stackerdb_config(sort_db, &tip).unwrap();
+        let stackerdb_config = NakamotoChainState::make_miners_stackerdb_config(sort_db, &tip)
+            .unwrap()
+            .0;
         eprintln!(
             "stackerdb_config at i = {} (sortition? {}): {:?}",
             &i, sortition, &stackerdb_config
@@ -2079,8 +2080,9 @@
         let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap();
         let miner_privkey = &miner_keys[i];
         let miner_pubkey = StacksPublicKey::from_private(miner_privkey);
-        let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &miner_pubkey)
-            .expect("Failed to get miner slot");
+        let slot_id =
+            NakamotoChainState::get_miner_slot(&sort_db, &tip, &block.header.consensus_hash)
+                .expect("Failed to get miner slot");
         if sortition {
             let slot_id = slot_id.expect("No miner slot exists for this miner").start;
             let slot_version = stackerdbs
diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs
index 51454352a18..6707ed3ba16 100644
--- a/stackslib/src/net/api/getneighbors.rs
+++ b/stackslib/src/net/api/getneighbors.rs
@@ -51,9 +51,43 @@ pub struct RPCNeighbor {
     pub public_key_hash: Hash160,
     pub authenticated: bool,
     #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(with = "serde_opt_vec_qci")]
     pub stackerdbs: Option<Vec<QualifiedContractIdentifier>>,
 }
 
+/// Serialize and deserialize `Option<Vec<QualifiedContractIdentifier>>`
+/// using the `to_string()` and `parse()` implementations of `QualifiedContractIdentifier`.
+mod serde_opt_vec_qci {
+    use clarity::vm::types::QualifiedContractIdentifier;
+    use serde::{Deserialize, Serialize};
+
+    pub fn serialize<S: serde::Serializer>(
+        opt: &Option<Vec<QualifiedContractIdentifier>>,
+        serializer: S,
+    ) -> Result<S::Ok, S::Error> {
+        let serialize_as: Option<Vec<String>> = opt
+            .as_ref()
+            .map(|vec_qci| vec_qci.iter().map(ToString::to_string).collect());
+        serialize_as.serialize(serializer)
+    }
+
+    pub fn deserialize<'de, D>(de: D) -> Result<Option<Vec<QualifiedContractIdentifier>>, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let from_str: Option<Vec<String>> = Deserialize::deserialize(de)?;
+        let Some(vec_str) = from_str else {
+            return Ok(None);
+        };
+        let parse_opt: Result<Vec<QualifiedContractIdentifier>, _> = vec_str
+            .into_iter()
+            .map(|x| QualifiedContractIdentifier::parse(&x).map_err(serde::de::Error::custom))
+            .collect();
+        let out_vec = parse_opt?;
+        Ok(Some(out_vec))
+    }
+}
+
 impl RPCNeighbor {
     pub fn from_neighbor_key_and_pubkh(
         nk: NeighborKey,
diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs
index efa0484d4b4..78b1ff096b2 100644
--- a/stackslib/src/net/rpc.rs
+++ b/stackslib/src/net/rpc.rs
@@ -554,12 +554,12 @@ impl ConversationHttp {
         )?;
 
         info!("Handled StacksHTTPRequest";
-               "verb" => %verb,
-               "path" => %request_path,
-               "processing_time_ms" => start_time.elapsed().as_millis(),
-               "latency_ms" => latency,
-               "conn_id" => self.conn_id,
-               "peer_addr" => &self.peer_addr);
+            "verb" => %verb,
+            "path" => %request_path,
+            "processing_time_ms" => start_time.elapsed().as_millis(),
+            "latency_ms" => latency,
+            "conn_id" => self.conn_id,
+            "peer_addr" => &self.peer_addr);
 
         if let Some(msg) = msg_opt {
             ret.push(msg);
diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs
index 2d4c39e349e..97f8214913a 100644
--- a/stackslib/src/net/stackerdb/config.rs
+++ b/stackslib/src/net/stackerdb/config.rs
@@ -555,10 +555,17 @@ impl StackerDBConfig {
                 reason,
             ));
         } else if let Some(Err(e)) = res {
-            warn!(
-                "Could not use contract {} for StackerDB: {:?}",
-                contract_id, &e
-            );
+            if contract_id.is_boot() {
+                debug!(
+                    "Could not use contract {} for StackerDB: {:?}",
+                    contract_id, &e
+                );
+            } else {
+                warn!(
+                    "Could not use contract {} for StackerDB: {:?}",
+                    contract_id, &e
+                );
+            }
 
             return Err(e);
         }
diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs
index 3e91c9b5420..847363b2e31 100644
--- a/stackslib/src/net/stackerdb/mod.rs
+++ b/stackslib/src/net/stackerdb/mod.rs
@@ -119,6 +119,7 @@ pub mod db;
 pub mod sync;
 
 use std::collections::{HashMap, HashSet};
+use std::ops::Range;
 
 use clarity::vm::types::QualifiedContractIdentifier;
 use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE};
@@ -205,6 +206,22 @@ impl StackerDBConfig {
     pub fn num_slots(&self) -> u32 {
         self.signers.iter().fold(0, |acc, s| acc + s.1)
     }
+
+    /// What are the slot index ranges for each signer?
+    /// Returns the ranges in the same ordering as `self.signers`
+    pub fn signer_ranges(&self) -> Vec<Range<u32>> {
+        let mut slot_index = 0;
+        let mut result = Vec::with_capacity(self.signers.len());
+        for (_, slot_count) in self.signers.iter() {
+            let end = slot_index + *slot_count;
+            result.push(Range {
+                start: slot_index,
+                end,
+            });
+            slot_index = end;
+        }
+        result
+    }
 }
 
 /// This is the set of replicated chunks in all stacker DBs that this node subscribes to.
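Two of the helpers added here are worth seeing in isolation (the StackerDBs hunks continue below). First, `signer_ranges` is a prefix sum over the per-signer slot counts; a self-contained sketch of the same computation, with plain tuples standing in for the signer entries:

```rust
use std::ops::Range;

/// Prefix-sum the slot counts into contiguous slot-id ranges, one per signer,
/// in the same order as the input: the shape of signer_ranges() above.
fn signer_ranges(signers: &[(&str, u32)]) -> Vec<Range<u32>> {
    let mut slot_index = 0;
    let mut result = Vec::with_capacity(signers.len());
    for (_, slot_count) in signers {
        let end = slot_index + slot_count;
        result.push(slot_index..end);
        slot_index = end;
    }
    result
}

fn main() {
    // Two miner slots followed by a larger signer set.
    let ranges = signer_ranges(&[("miner-0", 1), ("miner-1", 1), ("signers", 13)]);
    assert_eq!(ranges, vec![0..1, 1..2, 2..15]);
    println!("{ranges:?}");
}
```

Second, the `serde_opt_vec_qci` module is the standard shape for `#[serde(with = ...)]` on an `Option<Vec<T>>` field whose `T` round-trips through strings. A runnable miniature with `u32` standing in for `QualifiedContractIdentifier`, so it needs only serde and serde_json; the names here are illustrative, not the stackslib ones:

```rust
use serde::{Deserialize, Serialize};

/// serde(with = ...) shim: write Option<Vec<u32>> as strings, parse them back.
mod serde_opt_vec_ids {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    pub fn serialize<S: Serializer>(opt: &Option<Vec<u32>>, s: S) -> Result<S::Ok, S::Error> {
        let as_strings: Option<Vec<String>> =
            opt.as_ref().map(|v| v.iter().map(ToString::to_string).collect());
        as_strings.serialize(s)
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Vec<u32>>, D::Error> {
        let strings: Option<Vec<String>> = Deserialize::deserialize(d)?;
        strings
            .map(|v| {
                v.into_iter()
                    .map(|x| x.parse().map_err(serde::de::Error::custom))
                    .collect()
            })
            .transpose()
    }
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Neighbor {
    #[serde(with = "serde_opt_vec_ids")]
    stackerdbs: Option<Vec<u32>>,
}

fn main() {
    let n = Neighbor { stackerdbs: Some(vec![7, 11]) };
    let json = serde_json::to_string(&n).unwrap(); // {"stackerdbs":["7","11"]}
    assert_eq!(serde_json::from_str::<Neighbor>(&json).unwrap(), n);
}
```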
@@ -280,14 +297,16 @@ impl StackerDBs { == boot_code_id(MINERS_NAME, chainstate.mainnet) { // .miners contract -- directly generate the config - NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip).unwrap_or_else(|e| { - warn!( - "Failed to generate .miners StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); - StackerDBConfig::noop() - }) + NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip) + .map(|(config, _)| config) + .unwrap_or_else(|e| { + warn!( + "Failed to generate .miners StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + StackerDBConfig::noop() + }) } else { // attempt to load the config from the contract itself StackerDBConfig::from_smart_contract( @@ -297,11 +316,20 @@ impl StackerDBs { num_neighbors, ) .unwrap_or_else(|e| { - warn!( - "Failed to load StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); + if matches!(e, net_error::NoSuchStackerDB(_)) && stackerdb_contract_id.is_boot() + { + debug!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + } else { + warn!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + } StackerDBConfig::noop() }) }; diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 7309ad40b2e..f574efd5fbd 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -1096,9 +1096,6 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); } - if pushed == 0 { - return Err(net_error::PeerNotConnected); - } self.next_chunk_push_priority = cur_priority; Ok(self .chunk_push_priorities diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 30f088a96f4..8b5a6c470c7 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2037,6 +2037,61 @@ impl BitcoinRegtestController { let tx: Transaction = btc_deserialize(&hex_bytes(&txstr).unwrap()).unwrap(); tx } + + /// Produce `num_blocks` regtest bitcoin blocks, sending the bitcoin coinbase rewards + /// to the bitcoin single sig addresses corresponding to `pks` in a round robin fashion. 
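The doc comment above describes the new helper (its body follows below); the round-robin it promises is simply that block `i` pays its coinbase to `pks[i % pks.len()]`, so with `n` keys each key funds roughly `num_blocks / n` coinbases. A toy schedule, hedged to key indices rather than bitcoin addresses:

```rust
/// Which key index receives the coinbase of each generated block: the
/// round-robin used when more than one public key is supplied.
fn payout_schedule(num_blocks: usize, num_keys: usize) -> Vec<usize> {
    assert!(num_keys > 0, "need at least one key");
    (0..num_blocks).map(|i| i % num_keys).collect()
}

fn main() {
    // Two miners' keys across five blocks: both end up with spendable UTXOs.
    assert_eq!(payout_schedule(5, 2), vec![0, 1, 0, 1, 0]);
}
```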
+ #[cfg(test)] + pub fn bootstrap_chain_to_pks(&mut self, num_blocks: usize, pks: &[Secp256k1PublicKey]) { + info!("Creating wallet if it does not exist"); + if let Err(e) = self.create_wallet_if_dne() { + error!("Error when creating wallet: {e:?}"); + } + + for pk in pks { + debug!("Import public key '{}'", &pk.to_hex()); + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + warn!("Error when importing pubkey: {e:?}"); + } + } + + if pks.len() == 1 { + // if we only have one pubkey, just generate all the blocks at once + let address = self.get_miner_address(StacksEpochId::Epoch21, &pks[0]); + debug!( + "Generate to address '{}' for public key '{}'", + &addr2str(&address), + &pks[0].to_hex() + ); + if let Err(e) = BitcoinRPCRequest::generate_to_address( + &self.config, + num_blocks.try_into().unwrap(), + addr2str(&address), + ) { + error!("Bitcoin RPC failure: error generating block {:?}", e); + panic!(); + } + return; + } + + // otherwise, round robin generate blocks + for i in 0..num_blocks { + let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let address = self.get_miner_address(StacksEpochId::Epoch21, pk); + if i < pks.len() { + debug!( + "Generate to address '{}' for public key '{}'", + &addr2str(&address), + &pk.to_hex(), + ); + } + if let Err(e) = + BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) + { + error!("Bitcoin RPC failure: error generating block {:?}", e); + panic!(); + } + } + } } impl BurnchainController for BitcoinRegtestController { @@ -2152,45 +2207,19 @@ impl BurnchainController for BitcoinRegtestController { #[cfg(test)] fn bootstrap_chain(&mut self, num_blocks: u64) { - if let Some(ref local_mining_pubkey) = &self.config.burnchain.local_mining_public_key { - // NOTE: miner address is whatever the miner's segwit setting says it is here - let mut local_mining_pubkey = - Secp256k1PublicKey::from_hex(local_mining_pubkey).unwrap(); - let address = self.get_miner_address(StacksEpochId::Epoch21, &local_mining_pubkey); - - if self.config.miner.segwit { - local_mining_pubkey.set_compressed(true); - } - - info!("Creating wallet if it does not exist"); - match self.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} - } - - test_debug!("Import public key '{}'", &local_mining_pubkey.to_hex()); - - let _result = BitcoinRPCRequest::import_public_key(&self.config, &local_mining_pubkey); + let Some(ref local_mining_pubkey) = &self.config.burnchain.local_mining_public_key else { + warn!("No local mining pubkey while bootstrapping bitcoin regtest, will not generate bitcoin blocks"); + return; + }; - test_debug!( - "Generate to address '{}' for public key '{}'", - &addr2str(&address), - &local_mining_pubkey.to_hex() - ); - let result = BitcoinRPCRequest::generate_to_address( - &self.config, - num_blocks, - addr2str(&address), - ); + // NOTE: miner address is whatever the miner's segwit setting says it is here + let mut local_mining_pubkey = Secp256k1PublicKey::from_hex(local_mining_pubkey).unwrap(); - match result { - Ok(_) => {} - Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); - panic!(); - } - } + if self.config.miner.segwit { + local_mining_pubkey.set_compressed(true); } + + self.bootstrap_chain_to_pks(num_blocks.try_into().unwrap(), &[local_mining_pubkey]) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index aaeb931a1a7..527117fb4dc 100644 --- 
a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -150,6 +150,7 @@ pub struct BlockMinerThread { reason: MinerReason, /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, + signer_set_cache: Option, } impl BlockMinerThread { @@ -175,6 +176,7 @@ impl BlockMinerThread { parent_tenure_id, reason, p2p_handle: rt.get_p2p_handle(), + signer_set_cache: None, } } @@ -324,19 +326,14 @@ impl BlockMinerThread { } } - /// Gather a list of signatures from the signers for the block - fn gather_signatures( - &mut self, - new_block: &mut NakamotoBlock, - burn_block_height: u64, - stackerdbs: &mut StackerDBs, - attempts: &mut u64, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; + /// Load the signer set active for this miner's blocks. This is the + /// active reward set during `self.burn_election_block`. The miner + /// thread caches this information, and this method will consult + /// that cache (or populate it if necessary). + fn load_signer_set(&mut self) -> Result { + if let Some(set) = self.signer_set_cache.as_ref() { + return Ok(set.clone()); + } let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, @@ -348,22 +345,6 @@ impl BlockMinerThread { )) })?; - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to retrieve chain tip: {:?}", - e - )) - }) - .and_then(|result| { - result.ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) - }) - })?; - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { NakamotoNodeError::SigningCoordinatorFailure(format!( @@ -371,14 +352,16 @@ impl BlockMinerThread { )) })?; + let burn_election_height = self.burn_election_block.block_height; + let reward_info = match load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) + .pox_reward_cycle(burn_election_height) .expect("FATAL: no reward cycle for sortition"), - &tip.sortition_id, + &self.burn_election_block.sortition_id, &self.burnchain, &mut chain_state, - &new_block.header.parent_block_id, + &self.parent_tenure_id, &sort_db, &OnChainRewardSetProvider::new(), ) { @@ -401,7 +384,52 @@ impl BlockMinerThread { )); }; + self.signer_set_cache = Some(reward_set.clone()); + Ok(reward_set) + } + + /// Gather a list of signatures from the signers for the block + fn gather_signatures( + &mut self, + new_block: &mut NakamotoBlock, + burn_block_height: u64, + stackerdbs: &mut StackerDBs, + attempts: &mut u64, + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! 
{e:?}" + )) + })?; + + let tip = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &new_block.header.consensus_hash, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; + let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); + let reward_set = self.load_signer_set()?; let mut coordinator = SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( |e| { @@ -421,6 +449,7 @@ impl BlockMinerThread { &sort_db, &stackerdbs, &self.globals.counters, + &self.burn_election_block.consensus_hash, )?; return Ok((reward_set, signature)); @@ -644,6 +673,7 @@ impl BlockMinerThread { MinerSlotID::BlockPushed, chain_state.mainnet, &mut miners_session, + &self.burn_election_block.consensus_hash, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure) } @@ -886,6 +916,7 @@ impl BlockMinerThread { debug!("block miner thread ID is {:?}", thread::current().id()); let burn_db_path = self.config.get_burn_db_file_path(); + let reward_set = self.load_signer_set()?; // NOTE: read-write access is needed in order to be able to query the recipient set. // This is an artifact of the way the MARF is built (see #1449) @@ -932,38 +963,6 @@ impl BlockMinerThread { let signer_transactions = self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) - .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::DBError(e)))?; - - let reward_info = match load_nakamoto_reward_set( - self.burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) - .expect("FATAL: no reward cycle defined for sortition tip"), - &tip.sortition_id, - &self.burnchain, - &mut chain_state, - &parent_block_info.stacks_parent_header.index_block_hash(), - &burn_db, - &OnChainRewardSetProvider::new(), - ) { - Ok(Some((reward_info, _))) => reward_info, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set stored yet. Cannot mine!".into(), - )); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" - ))); - } - }; - - let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Current reward cycle did not select a reward set. 
Cannot mine!".into(), - )); - }; let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); // build the block itself diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 30f73e75bed..b6e42b87ee1 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -23,7 +23,7 @@ use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; @@ -341,6 +341,7 @@ impl SignCoordinator { miner_slot_id: MinerSlotID, is_mainnet: bool, miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, ) -> Result<(), String> { let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); miner_sk.set_compress_public(true); @@ -353,6 +354,7 @@ impl SignCoordinator { miner_slot_id, is_mainnet, miners_session, + election_sortition, ) } @@ -366,9 +368,9 @@ impl SignCoordinator { miner_slot_id: MinerSlotID, is_mainnet: bool, miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, ) -> Result<(), String> { - let miner_pubkey = StacksPublicKey::from_private(&miner_sk); - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey) + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); @@ -417,6 +419,7 @@ impl SignCoordinator { sortdb: &SortitionDB, stackerdbs: &StackerDBs, counters: &Counters, + election_sortiton: &ConsensusHash, ) -> Result { let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; @@ -450,6 +453,7 @@ impl SignCoordinator { MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortiton, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; counters.bump_naka_proposed_blocks(); @@ -604,6 +608,7 @@ impl SignCoordinator { MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortiton, ) { Ok(()) => { debug!("Miner/Coordinator: sent outbound message."); @@ -636,6 +641,7 @@ impl SignCoordinator { sortdb: &SortitionDB, stackerdbs: &StackerDBs, counters: &Counters, + election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; @@ -664,11 +670,15 @@ impl SignCoordinator { MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortition, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; counters.bump_naka_proposed_blocks(); #[cfg(test)] { + info!( + "SignCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); // In test mode, short-circuit waiting for the signers if the TEST_SIGNING // channel has been created. 
This allows integration tests for the stacks-node // independent of the stacks-signer. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index ed31540f204..8c3c4ed1799 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3985,6 +3985,17 @@ impl RelayerThread { } RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { debug!("Relayer: directive Run tenure"); + let Ok(Some(next_block_epoch)) = SortitionDB::get_stacks_epoch( + self.sortdb_ref().conn(), + last_burn_block.block_height.saturating_add(1), + ) else { + warn!("Failed to load Stacks Epoch for next burn block, skipping RunTenure directive"); + return true; + }; + if next_block_epoch.epoch_id.uses_nakamoto_blocks() { + info!("Next burn block is in Nakamoto epoch, skipping RunTenure directive for 2.x node"); + return true; + } self.block_miner_thread_try_start( registered_key, last_burn_block, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 0b8c379f7cf..a7892b9a2db 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -324,6 +324,41 @@ pub fn new_test_conf() -> Config { conf } +/// Randomly change the config's network ports to new ports. +pub fn set_random_binds(config: &mut Config) { + let prior_rpc_port: u16 = config + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = config + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { + break (rpc_port, p2p_port); + } + }; + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); + config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); + config.node.data_url = format!("http://{}:{}", localhost, rpc_port); + config.node.p2p_address = format!("{}:{}", localhost, p2p_port); +} + pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9b15b83afb4..ba80f64c6cc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -100,12 +100,13 @@ use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - call_read_only, get_account, get_account_result, get_chain_info_result, get_pox_info, - next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, + call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, + get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, + wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, - to_addr, + set_random_binds, to_addr, }; use 
crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -293,31 +294,79 @@ pub fn blind_signer( signers: &TestSigners, proposals_count: RunLoopCounter, ) -> JoinHandle<()> { + blind_signer_multinode(signers, &[conf], vec![proposals_count]) +} + +/// Spawn a blind signing thread listening to potentially multiple stacks nodes. +/// `signer` is the private key of the individual signer who broadcasts the response to the StackerDB. +/// The thread will check each node's proposal counter in order to wake up, but will only read from the first +/// node's StackerDB (it will read all of the StackerDBs to provide logging information, though). +pub fn blind_signer_multinode( + signers: &TestSigners, + configs: &[&Config], + proposals_count: Vec, +) -> JoinHandle<()> { + assert_eq!( + configs.len(), + proposals_count.len(), + "Expect the same number of node configs as proposals counters" + ); let sender = TestSigningChannel::instantiate(); let mut signed_blocks = HashSet::new(); - let conf = conf.clone(); + let configs: Vec<_> = configs.iter().map(|x| Clone::clone(*x)).collect(); let signers = signers.clone(); - let mut last_count = proposals_count.load(Ordering::SeqCst); - thread::spawn(move || loop { - thread::sleep(Duration::from_millis(100)); - let cur_count = proposals_count.load(Ordering::SeqCst); - if cur_count <= last_count { - continue; - } - last_count = cur_count; - match read_and_sign_block_proposal(&conf, &signers, &signed_blocks, &sender) { - Ok(signed_block) => { - if signed_blocks.contains(&signed_block) { - continue; - } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); - signed_blocks.insert(signed_block); + let mut last_count: Vec<_> = proposals_count + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + thread::Builder::new() + .name("blind-signer".into()) + .spawn(move || loop { + thread::sleep(Duration::from_millis(100)); + let cur_count: Vec<_> = proposals_count + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + if cur_count + .iter() + .zip(last_count.iter()) + .all(|(cur_count, last_count)| cur_count <= last_count) + { + continue; } - Err(e) => { - warn!("Error reading and signing block proposal: {e}"); + thread::sleep(Duration::from_secs(2)); + info!("Checking for a block proposal to sign..."); + last_count = cur_count; + let configs: Vec<&Config> = configs.iter().map(|x| x).collect(); + match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { + Ok(signed_block) => { + if signed_blocks.contains(&signed_block) { + info!("Already signed block, will sleep and try again"; "signer_sig_hash" => signed_block.to_hex()); + thread::sleep(Duration::from_secs(5)); + match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { + Ok(signed_block) => { + if signed_blocks.contains(&signed_block) { + info!("Already signed block, ignoring"; "signer_sig_hash" => signed_block.to_hex()); + continue; + } + info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + signed_blocks.insert(signed_block); + } + Err(e) => { + warn!("Error reading and signing block proposal: {e}"); + } + }; + continue; + } + info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + signed_blocks.insert(signed_block); + } + Err(e) => { + warn!("Error reading and signing block proposal: {e}"); + } } - } - }) + }) + .unwrap() } pub fn get_latest_block_proposal( @@ -325,26 +374,68 @@ pub fn get_latest_block_proposal( sortdb: &SortitionDB, ) -> 
Result<(NakamotoBlock, StacksPublicKey), String> { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); - let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) - .map_err(|_| "Unable to get miner slot")? - .ok_or("No miner slot exists")?; - - let proposed_block = { - let miner_contract_id = boot_code_id(MINERS_NAME, false); - let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); - let message: SignerMessageV0 = miners_stackerdb - .get_latest(miner_slot_id.start) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found"); - let SignerMessageV0::BlockProposal(block_proposal) = message else { - panic!("Expected a signer message block proposal. Got {message:?}"); - }; - // TODO: use v1 message types behind epoch gate - // get_block_proposal_msg_v1(&mut miners_stackerdb, miner_slot_id.start); - block_proposal.block - }; - Ok((proposed_block, miner_pubkey)) + let (stackerdb_conf, miner_info) = + NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip) + .map_err(|e| e.to_string())?; + let miner_ranges = stackerdb_conf.signer_ranges(); + let latest_miner = usize::from(miner_info.get_latest_winner_index()); + let miner_contract_id = boot_code_id(MINERS_NAME, false); + let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); + + let mut proposed_blocks: Vec<_> = stackerdb_conf + .signers + .iter() + .enumerate() + .zip(miner_ranges) + .filter_map(|((miner_ix, (miner_addr, _)), miner_slot_id)| { + let proposed_block = { + let message: SignerMessageV0 = + miners_stackerdb.get_latest(miner_slot_id.start).ok()??; + let SignerMessageV0::BlockProposal(block_proposal) = message else { + panic!("Expected a signer message block proposal. Got {message:?}"); + }; + block_proposal.block + }; + Some((proposed_block, miner_addr, miner_ix == latest_miner)) + }) + .collect(); + + proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| { + if block_a.header.chain_length > block_b.header.chain_length { + return std::cmp::Ordering::Greater; + } else if block_a.header.chain_length < block_b.header.chain_length { + return std::cmp::Ordering::Less; + } + // the heights are tied, tie break with the latest miner + if *is_latest_a { + return std::cmp::Ordering::Greater; + } + if *is_latest_b { + return std::cmp::Ordering::Less; + } + return std::cmp::Ordering::Equal; + }); + + for (b, _, is_latest) in proposed_blocks.iter() { + info!("Consider block"; "signer_sighash" => %b.header.signer_signature_hash(), "is_latest_sortition" => is_latest, "chain_height" => b.header.chain_length); + } + + let (proposed_block, miner_addr, _) = proposed_blocks.pop().unwrap(); + + let pubkey = StacksPublicKey::recover_to_pubkey( + proposed_block.header.miner_signature_hash().as_bytes(), + &proposed_block.header.miner_signature, + ) + .map_err(|e| e.to_string())?; + let miner_signed_addr = StacksAddress::p2pkh(false, &pubkey); + if miner_signed_addr.bytes != miner_addr.bytes { + return Err(format!( + "Invalid miner signature on proposal. 
Found {}, expected {}", + miner_signed_addr.bytes, miner_addr.bytes + )); + } + + Ok((proposed_block, pubkey)) } #[allow(dead_code)] @@ -369,11 +460,12 @@ fn get_block_proposal_msg_v1( } pub fn read_and_sign_block_proposal( - conf: &Config, + configs: &[&Config], signers: &TestSigners, signed_blocks: &HashSet, channel: &Sender>, ) -> Result { + let conf = configs.first().unwrap(); let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let (mut chainstate, _) = StacksChainState::open( @@ -387,8 +479,30 @@ pub fn read_and_sign_block_proposal( let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; + let other_views_result: Result, _> = configs + .get(1..) + .unwrap() + .iter() + .map(|other_conf| { + get_latest_block_proposal(other_conf, &sortdb).map(|proposal| { + ( + proposal.0.header.signer_signature_hash(), + proposal.0.header.chain_length, + ) + }) + }) + .collect(); let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let signer_sig_hash = proposed_block.header.signer_signature_hash(); + let other_views = other_views_result?; + if !other_views.is_empty() { + info!( + "Fetched block proposals"; + "primary_latest_signer_sighash" => %signer_sig_hash, + "primary_latest_block_height" => proposed_block.header.chain_length, + "other_views" => ?other_views, + ); + } if signed_blocks.contains(&signer_sig_hash) { // already signed off on this block, don't sign again. @@ -632,6 +746,17 @@ pub fn boot_to_epoch_3( // first mined stacks block next_block_and_wait(btc_regtest_controller, &blocks_processed); + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } // stack enough to activate pox-4 let block_height = btc_regtest_controller.get_headers_height(); @@ -1444,6 +1569,261 @@ fn mine_multiple_per_tenure_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. 
+/// This test makes three assertions: +/// * 5 tenures are mined after 3.0 starts +/// * Each tenure has 10 blocks (the coinbase block and 9 interim blocks) +fn multiple_nodes() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.node.local_peer_seed = vec![1, 1, 1, 1]; + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 15; + let inter_blocks_per_tenure = 6; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + let mut conf_node_2 = naka_conf.clone(); + set_random_binds(&mut conf_node_2); + conf_node_2.node.seed = vec![2, 2, 2, 2]; + conf_node_2.burnchain.local_mining_public_key = Some( + Keychain::default(conf_node_2.node.seed.clone()) + .get_pub_key() + .to_hex(), + ); + conf_node_2.node.local_peer_seed = vec![2, 2, 2, 2]; + conf_node_2.node.miner = true; + + let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind), + naka_conf.burnchain.chain_id, + naka_conf.burnchain.peer_version, + ); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain_to_pks( + 201, + &[ + Secp256k1PublicKey::from_hex( + naka_conf + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(), + Secp256k1PublicKey::from_hex( + conf_node_2 + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(), + ], + ); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let run_loop_2_stopper = run_loop.get_termination_switch(); + let Counters { + naka_proposed_blocks: proposals_submitted_2, + .. 
+ } = run_loop_2.counters(); + + let coord_channel = run_loop.coordinator_channels(); + let coord_channel_2 = run_loop_2.coordinator_channels(); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer_multinode( + &signers, + &[&naka_conf, &conf_node_2], + vec![proposals_submitted, proposals_submitted_2], + ); + + info!("Neighbors 1"; "neighbors" => ?get_neighbors(&naka_conf)); + info!("Neighbors 2"; "neighbors" => ?get_neighbors(&conf_node_2)); + + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + 
.unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + info!("Peer 1 information"; "chain_info" => ?get_chain_info(&naka_conf).stacks_tip_height); + info!("Peer 2 information"; "chain_info" => ?get_chain_info(&conf_node_2).stacks_tip_height); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + coord_channel_2 + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_2_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + // run_loop_2_thread.join().unwrap(); +} + #[test] #[ignore] fn correct_burn_outs() { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index e1f72ba2e37..ac6a3ea978c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1318,6 +1318,13 @@ pub fn get_account(http_origin: &str, account: &F) -> Acco get_account_result(http_origin, account).unwrap() } +pub fn get_neighbors(conf: &Config) -> Option { + let client = reqwest::blocking::Client::new(); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let path = format!("{}/v2/neighbors", http_origin); + client.get(&path).send().ok()?.json().ok() +} + pub fn get_pox_info(http_origin: &str) -> Option { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/pox", http_origin); From 75baf497a856c2e1c1831c76e71a719111c45277 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 12 Jul 2024 10:12:02 -0400 Subject: [PATCH 027/910] feat: support mock mining in epoch 3.0 --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++++ testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index aaeb931a1a7..70a6f7b3e3b 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -411,6 +411,10 @@ impl BlockMinerThread { }, )?; + if self.config.get_node_config(false).mock_mining { + return Ok((reward_set, Vec::new())); + } + *attempts += 1; let signature = coordinator.begin_sign_v0( new_block, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 12f7dbc9e94..0d8567a95d7 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -412,7 +412,7 @@ impl RelayerThread { } let directive = if sn.sortition { - if won_sortition { + if won_sortition || self.config.get_node_config(false).mock_mining { MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, From 23876fb3d8d5ab11c4f0eaad1e7f1254edbd2c76 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 29 Jul 2024 18:50:03 +0300 Subject: [PATCH 028/910] move stacks block validate response to info logs from debug --- stacks-signer/src/v0/signer.rs | 6 +++--- stacks-signer/src/v1/signer.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 72c8cc4dfe9..b38467d4549 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - debug!("{self}: Received a block proposal: {block_proposal:?}"); + info!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. debug!( @@ -287,7 +287,7 @@ impl Signer { return; } - debug!( + info!( "{self}: received a block proposal for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), @@ -388,7 +388,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { - debug!("{self}: Received a block validate response: {block_validate_response:?}"); + info!("{self}: Received a block validate response: {block_validate_response:?}"); let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index bd4b36f4894..18e31946c02 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -201,7 +201,7 @@ impl SignerTrait for Signer { }; match event { SignerEvent::BlockValidationResponse(block_validate_response) => { - debug!("{self}: Received a block proposal result from the stacks node..."); + info!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, block_validate_response, @@ -703,7 +703,7 @@ impl Signer { }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); } - debug!( + info!( "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, @@ -1130,7 +1130,7 @@ impl Signer { match operation_result { OperationResult::Sign(signature) => { crate::monitoring::increment_operation_results("sign"); - debug!("{self}: Received signature result"); + info!("{self}: Received signature result"); self.process_signature(signature); } OperationResult::SignTaproot(_) => { From 5ee315224aa3ec1a5b64313eb986be7a99629f69 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 29 Jul 2024 11:29:57 -0500 Subject: [PATCH 029/910] chore: fix unit test, add integration test to github workflow --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 18 ++++++++++++-- .../src/chainstate/nakamoto/tests/mod.rs | 5 ++-- .../src/tests/nakamoto_integrations.rs | 24 +++++++++---------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a5604efd7dd..b1e81a71128 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -97,6 +97,7 @@ jobs: - tests::nakamoto_integrations::check_block_info - tests::nakamoto_integrations::check_block_info_rewards - tests::nakamoto_integrations::continue_tenure_extend + - tests::nakamoto_integrations::multiple_miners # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - 
tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b65e9ff0864..09794c47759 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -455,6 +455,8 @@ impl MaturedMinerPaymentSchedules { } } +/// Struct containing information about the miners assigned in the +/// .miners stackerdb config pub struct MinersDBInformation { signer_0_sortition: ConsensusHash, signer_1_sortition: ConsensusHash, @@ -462,6 +464,8 @@ pub struct MinersDBInformation { } impl MinersDBInformation { + /// What index in the `.miners` stackerdb is the miner who won + /// `sortition`? pub fn get_signer_index(&self, sortition: &ConsensusHash) -> Option { if sortition == &self.signer_0_sortition { Some(0) @@ -472,6 +476,12 @@ impl MinersDBInformation { } } + /// Get all of the sortitions whose winners are included in .miners + pub fn get_sortitions(&self) -> [&ConsensusHash; 2] { + [&self.signer_0_sortition, &self.signer_1_sortition] + } + + /// Get the index of the latest sortition winner in .miners pub fn get_latest_winner_index(&self) -> u16 { self.latest_winner } @@ -4160,14 +4170,18 @@ impl NakamotoChainState { .map(usize::from) else { warn!("Miner is not in the miners StackerDB config"; - "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); + "stackerdb_slots" => ?stackerdb_config.signers, + "queried_sortition" => %election_sortition, + "sortition_hashes" => ?miners_info.get_sortitions()); return Ok(None); }; let mut signer_ranges = stackerdb_config.signer_ranges(); if signer_ix >= signer_ranges.len() { // should be unreachable, but always good to be careful warn!("Miner is not in the miners StackerDB config"; - "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); + "stackerdb_slots" => ?stackerdb_config.signers, + "queried_sortition" => %election_sortition, + "sortition_hashes" => ?miners_info.get_sortitions()); return Ok(None); } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index ef38ec76c66..722cfa541af 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2080,9 +2080,8 @@ fn test_make_miners_stackerdb_config() { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let miner_privkey = &miner_keys[i]; let miner_pubkey = StacksPublicKey::from_private(miner_privkey); - let slot_id = - NakamotoChainState::get_miner_slot(&sort_db, &tip, &block.header.consensus_hash) - .expect("Failed to get miner slot"); + let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &tip.consensus_hash) + .expect("Failed to get miner slot"); if sortition { let slot_id = slot_id.expect("No miner slot exists for this miner").start; let slot_version = stackerdbs diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ba80f64c6cc..9f38e23cfc5 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1571,14 +1571,15 @@ fn mine_multiple_per_tenure_integration() { #[test] #[ignore] -/// This test spins up a nakamoto-neon node. +/// This test spins up two nakamoto nodes, both configured to mine. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches /// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). 
The BootLoop /// struct handles the epoch-2/3 tear-down and spin-up. /// This test makes three assertions: -/// * 5 tenures are mined after 3.0 starts -/// * Each tenure has 10 blocks (the coinbase block and 9 interim blocks) -fn multiple_nodes() { +/// * 15 tenures are mined after 3.0 starts +/// * Each tenure has 6 blocks (the coinbase block and 5 interim blocks) +/// * Both nodes see the same chainstate at the end of the test +fn multiple_miners() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1683,7 +1684,7 @@ fn multiple_nodes() { let coord_channel = run_loop.coordinator_channels(); let coord_channel_2 = run_loop_2.coordinator_channels(); - let run_loop_2_thread = thread::Builder::new() + let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); @@ -1733,11 +1734,7 @@ fn multiple_nodes() { info!("Neighbors 2"; "neighbors" => ?get_neighbors(&conf_node_2)); // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { @@ -1799,8 +1796,10 @@ fn multiple_nodes() { "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), ); - info!("Peer 1 information"; "chain_info" => ?get_chain_info(&naka_conf).stacks_tip_height); - info!("Peer 2 information"; "chain_info" => ?get_chain_info(&conf_node_2).stacks_tip_height); + let peer_1_height = get_chain_info(&naka_conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height); + assert_eq!(peer_1_height, peer_2_height); assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert_eq!( @@ -1821,7 +1820,6 @@ fn multiple_nodes() { run_loop_2_stopper.store(false, Ordering::SeqCst); run_loop_thread.join().unwrap(); - // run_loop_2_thread.join().unwrap(); } #[test] From 53855fecdd7f91c28199a47858571dd32fc18259 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 29 Jul 2024 12:52:51 -0500 Subject: [PATCH 030/910] chore: cleanup commented out code in stackerdb::sync --- stackslib/src/net/stackerdb/sync.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index f574efd5fbd..53a1f67c469 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -1039,16 +1039,13 @@ impl StackerDBSync { } let chunk_push = self.chunk_push_priorities[cur_priority].0.clone(); + // try the first neighbor in the chunk_push_priorities list let selected_neighbor_opt = self.chunk_push_priorities[cur_priority] .1 - .iter() - .enumerate() - // .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); - .find(|(_i, _naddr)| true); + .first() + .map(|neighbor| (0, neighbor)); - let (idx, selected_neighbor) = if let Some(x) = selected_neighbor_opt { - x - } else { + let Some((idx, selected_neighbor)) = selected_neighbor_opt else { debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to", &network.get_local_peer(), &self.smart_contract_id, From ebb23943e6436a47432da2a44564d66a8b1ac2ab Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 
29 Jul 2024 11:08:25 -0700
Subject: [PATCH 031/910] Update to print full version string

---
 stacks-signer/Cargo.toml |  1 +
 stacks-signer/src/cli.rs | 26 +++++++++++++++++++++++++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml
index 1d1af6da783..b4f77abca1e 100644
--- a/stacks-signer/Cargo.toml
+++ b/stacks-signer/Cargo.toml
@@ -25,6 +25,7 @@ clarity = { path = "../clarity" }
 clap = { version = "4.1.1", features = ["derive", "env"] }
 hashbrown = { workspace = true }
 lazy_static = "1.4.0"
+once_cell = "1.8.0"
 libsigner = { path = "../libsigner" }
 libstackerdb = { path = "../libstackerdb" }
 prometheus = { version = "0.9", optional = true }
diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs
index ecb1c247417..921cb8cc32b 100644
--- a/stacks-signer/src/cli.rs
+++ b/stacks-signer/src/cli.rs
@@ -37,12 +37,36 @@ use stacks_common::address::{
 };
 use stacks_common::define_u8_enum;
 use stacks_common::types::chainstate::StacksPrivateKey;
+use once_cell::sync::Lazy;

 extern crate alloc;

+const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH");
+const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT");
+#[cfg(debug_assertions)]
+const BUILD_TYPE: &'static str = "debug";
+#[cfg(not(debug_assertions))]
+const BUILD_TYPE: &'static str = "release";
+
+
+static VERSION_STRING: Lazy<String> = Lazy::new(|| {
+    let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION"));
+    let git_branch = GIT_BRANCH.unwrap_or("");
+    let git_commit = GIT_COMMIT.unwrap_or("");
+    format!(
+        "{} ({}:{}, {} build, {} [{}])",
+        pkg_version,
+        git_branch,
+        git_commit,
+        BUILD_TYPE,
+        std::env::consts::OS,
+        std::env::consts::ARCH
+    )
+});
+
 #[derive(Parser, Debug)]
 #[command(author, version, about)]
-#[command(long_version = option_env!("SIGNER_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")))]
+#[command(long_version = VERSION_STRING.as_str())]
 /// The CLI arguments for the stacks signer
 pub struct Cli {

From 2cc9ff3cb98a04aa4af72c0a0bc8ecfa89eac0e8 Mon Sep 17 00:00:00 2001
From: wileyj <2847772+wileyj@users.noreply.github.com>
Date: Mon, 29 Jul 2024 11:14:04 -0700
Subject: [PATCH 032/910] fix formatting

---
 stacks-signer/src/cli.rs | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs
index 921cb8cc32b..971e5ba8d72 100644
--- a/stacks-signer/src/cli.rs
+++ b/stacks-signer/src/cli.rs
@@ -29,6 +29,7 @@ use clarity::util::hash::Sha256Sum;
 use clarity::util::secp256k1::MessageSignature;
 use clarity::vm::types::{QualifiedContractIdentifier, TupleData};
 use clarity::vm::Value;
+use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};
 use stacks_common::address::{
     b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG,
@@ -37,7 +38,6 @@ use stacks_common::address::{
 };
 use stacks_common::define_u8_enum;
 use stacks_common::types::chainstate::StacksPrivateKey;
-use once_cell::sync::Lazy;

 extern crate alloc;

@@ -48,11 +48,10 @@ const BUILD_TYPE: &'static str = "debug";
 #[cfg(not(debug_assertions))]
 const BUILD_TYPE: &'static str = "release";

-
 static VERSION_STRING: Lazy<String> = Lazy::new(|| {
     let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION"));
     let git_branch = GIT_BRANCH.unwrap_or("");
-    let git_commit = GIT_COMMIT.unwrap_or("");
+    let git_commit = GIT_COMMIT.unwrap_or("");
     format!(
         "{} ({}:{}, {} build, {} [{}])",
         pkg_version,

From 8b3ee439e12ea3acdc1027efb85577e297344e71 Mon Sep 17 00:00:00 2001
From:
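For context on the pattern patch 031 introduces: `option_env!` bakes the value in at compile time (it yields `Some(..)` only if the variable was set in the build environment), and `once_cell::sync::Lazy` defers the `format!` until first use, then caches it. A reduced sketch, assuming only the `once_cell` crate, with the format string shortened from the patch's:

use once_cell::sync::Lazy;

// Compile-time probe: Some(..) only if GIT_COMMIT was set when building.
const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT");

// Built once on first deref, then cached for the life of the process.
static VERSION_STRING: Lazy<String> = Lazy::new(|| {
    format!(
        "{} ({}, {} [{}])",
        env!("CARGO_PKG_VERSION"), // always present under cargo
        GIT_COMMIT.unwrap_or(""),
        std::env::consts::OS,
        std::env::consts::ARCH
    )
});

fn main() {
    // e.g. "0.1.0 (c90476aa8a, linux [x86_64])"
    println!("{}", VERSION_STRING.as_str());
}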
ASuciuX
Date: Tue, 30 Jul 2024 01:06:50 +0300
Subject: [PATCH 033/910] move static from `once_cell` to `lazy_static`

---
 stacks-signer/Cargo.toml |  1 -
 stacks-signer/src/cli.rs | 32 +++++++++++++++++---------------
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml
index b4f77abca1e..1d1af6da783 100644
--- a/stacks-signer/Cargo.toml
+++ b/stacks-signer/Cargo.toml
@@ -25,7 +25,6 @@ clarity = { path = "../clarity" }
 clap = { version = "4.1.1", features = ["derive", "env"] }
 hashbrown = { workspace = true }
 lazy_static = "1.4.0"
-once_cell = "1.8.0"
 libsigner = { path = "../libsigner" }
 libstackerdb = { path = "../libstackerdb" }
 prometheus = { version = "0.9", optional = true }
diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs
index 971e5ba8d72..d3e998e15c5 100644
--- a/stacks-signer/src/cli.rs
+++ b/stacks-signer/src/cli.rs
@@ -29,7 +29,7 @@ use clarity::util::hash::Sha256Sum;
 use clarity::util::secp256k1::MessageSignature;
 use clarity::vm::types::{QualifiedContractIdentifier, TupleData};
 use clarity::vm::Value;
-use once_cell::sync::Lazy;
+use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
 use stacks_common::address::{
     b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG,
@@ -37,7 +38,6 @@ use stacks_common::address::{
 };
 use stacks_common::define_u8_enum;
 use stacks_common::types::chainstate::StacksPrivateKey;
-use once_cell::sync::Lazy;

 extern crate alloc;

@@ -48,20 +48,22 @@ const BUILD_TYPE: &'static str = "debug";
 #[cfg(not(debug_assertions))]
 const BUILD_TYPE: &'static str = "release";

-static VERSION_STRING: Lazy<String> = Lazy::new(|| {
-    let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION"));
-    let git_branch = GIT_BRANCH.unwrap_or("");
-    let git_commit = GIT_COMMIT.unwrap_or("");
-    format!(
-        "{} ({}:{}, {} build, {} [{}])",
-        pkg_version,
-        git_branch,
-        git_commit,
-        BUILD_TYPE,
-        std::env::consts::OS,
-        std::env::consts::ARCH
-    )
-});
+lazy_static!
{ + static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + let git_branch = GIT_BRANCH.unwrap_or(""); + let git_commit = GIT_COMMIT.unwrap_or(""); + format!( + "{} ({}:{}, {} build, {} [{}])", + pkg_version, + git_branch, + git_commit, + BUILD_TYPE, + std::env::consts::OS, + std::env::consts::ARCH + ) + }; +} #[derive(Parser, Debug)] #[command(author, version, about)] From c36af7316d16204265ba157ae0ed201a755f8ac6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Jul 2024 19:22:26 -0400 Subject: [PATCH 034/910] WIP: integration test for mock signing Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 2 +- stacks-signer/src/v0/signer.rs | 19 +- .../src/tests/nakamoto_integrations.rs | 55 ++- testnet/stacks-node/src/tests/signer/mod.rs | 3 +- testnet/stacks-node/src/tests/signer/v0.rs | 370 +++++++++++++++++- 5 files changed, 427 insertions(+), 22 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 76dee99ded4..d4311f8aa0c 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -238,7 +238,7 @@ pub struct MockSignature { /// The signature across the stacks consensus hash signature: MessageSignature, /// The block hash that the signature is across - stacks_consensus_hash: ConsensusHash, + pub stacks_consensus_hash: ConsensusHash, } impl MockSignature { diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 0ab444b78f8..cb3100674c1 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -86,7 +86,7 @@ impl SignerTrait for Signer { sortition_state: &mut Option, event: Option<&SignerEvent>, _res: Sender>, - _current_reward_cycle: u64, + current_reward_cycle: u64, ) { let event_parity = match event { // Block proposal events do have reward cycles, but each proposal has its own cycle, @@ -155,7 +155,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - debug!("{self}: Receved a new burn block event for block height {burn_height}"); + debug!("{self}: Received a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -168,9 +168,12 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { - // We are in epoch 25, so we should mock mine to prove we are still alive. - self.mock_mine(stacks_client); + if self.reward_cycle == current_reward_cycle { + if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + debug!("Mock signing for burn block {burn_height:?}"); + self.mock_sign(stacks_client); + } }; } } @@ -470,13 +473,15 @@ impl Signer { } /// Send a mock signature to stackerdb to prove we are still alive - fn mock_mine(&mut self, stacks_client: &StacksClient) { + fn mock_sign(&mut self, stacks_client: &StacksClient) { let Ok(peer_info) = stacks_client.get_peer_info() else { warn!("{self}: Failed to get peer info. 
Cannot mock mine."); return; }; + let consensus_hash = peer_info.stacks_tip_consensus_hash; + debug!("Mock signing using stacks tip {consensus_hash:?}"); let mock_signature = - MockSignature::new(peer_info.stacks_tip_consensus_hash, &self.private_key); + MockSignature::new(consensus_hash, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 492646a84de..92ee54ab614 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -110,7 +110,7 @@ use crate::tests::{ use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; -static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; +pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -166,13 +166,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 201, - end_height: 231, + end_height: 251, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 231, + start_height: 251, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 @@ -621,9 +621,9 @@ pub fn boot_to_epoch_3( let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - + let current_height = btc_regtest_controller.get_headers_height(); info!( - "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -995,6 +995,47 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}."); } +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_25( + naka_conf: &Config, + blocks_processed: &Arc, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_25_start_height = epoch_25.start_height; + assert!( + epoch_25_start_height > 0, + "Epoch 2.5 start height must be greater than 0" + ); + // stack enough to activate pox-4 + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + debug!("Test Cycle Info"; + "prepare_phase_len" => {prepare_phase_len}, + "reward_cycle_len" => {reward_cycle_len}, + "block_height" => {block_height}, + "reward_cycle" => {reward_cycle}, + "epoch_25_start_height" => 
{epoch_25_start_height}, + ); + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_25_start_height, + &naka_conf, + ); + info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}."); +} + /// /// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order /// for pox-4 to activate @@ -1517,9 +1558,9 @@ fn correct_burn_outs() { let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; - + let current_height = btc_regtest_controller.get_headers_height(); info!( - "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 12584ab89ab..bad0b499ea0 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -604,7 +604,8 @@ fn setup_stx_btc_node( let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); info!("Bootstraping..."); - btc_regtest_controller.bootstrap_chain(201); + // Bootstrap the chain to BEFORE epoch 2.5 to enable mock mining of blocks in Epoch 2.5 tests + btc_regtest_controller.bootstrap_chain(195); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 75856593bb0..cb0c73265f0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -18,20 +18,27 @@ use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::vm::types::PrincipalData; +use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use stacks::address::AddressHashMode; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; +use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; -use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; +use stacks::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; +use stacks::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; @@ -44,14 +51,185 @@ use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; -use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; +use crate::tests::nakamoto_integrations::{ + boot_to_epoch_25, 
boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_STX_AMT, +}; use crate::tests::neon_integrations::{ - get_chain_info, next_block_and_wait, submit_tx, test_observer, + get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, }; use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController}; impl SignerTest { + /// Run the test until the first epoch 2.5 reward cycle. + /// Will activate pox-4 and register signers for the first full Epoch 2.5 reward cycle. + fn boot_to_epoch_25_reward_cycle(&mut self) { + boot_to_epoch_25( + &self.running_nodes.conf, + &self.running_nodes.blocks_processed, + &mut self.running_nodes.btc_regtest_controller, + ); + + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let lock_period = 12; + + let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); + let epoch_25 = + &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let epoch_25_start_height = epoch_25.start_height; + // stack enough to activate pox-4 + let block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let reward_cycle = self + .running_nodes + .btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + for stacker_sk in self.signer_stacks_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + lock_period, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(lock_period), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + + let reward_cycle_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .reward_cycle_length as u64; + let prepare_phase_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .prepare_length as u64; + + let epoch_25_reward_cycle_boundary = + epoch_25_start_height.saturating_sub(epoch_25_start_height % reward_cycle_len); + let epoch_25_reward_set_calculation_boundary = epoch_25_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .wrapping_add(reward_cycle_len) + .wrapping_add(1); + + let next_reward_cycle_boundary = + epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + run_until_burnchain_height( + &mut 
self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + epoch_25_reward_set_calculation_boundary, + &self.running_nodes.conf, + ); + debug!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = self.get_current_reward_cycle().wrapping_add(1); + while !reward_set_calculated { + let reward_set = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + debug!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + debug!("Signer set calculated"); + // Manually consume one more block to ensure signers refresh their state + debug!("Waiting for signers to initialize."); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + let now = std::time::Instant::now(); + loop { + self.send_status_request(); + let states = self.wait_for_states(short_timeout); + if states + .iter() + .all(|state_info| state_info.runloop_state == State::RegisteredSigners) + { + break; + } + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for signers to be registered" + ); + std::thread::sleep(Duration::from_secs(1)); + } + debug!("Signers initialized"); + + info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + next_reward_cycle_boundary, + &self.running_nodes.conf, + ); + + info!("Ready to mine the first Epoch 2.5 reward cycle!"); + } + /// Run the test until the epoch 3 boundary fn boot_to_epoch_3(&mut self) { boot_to_epoch_3_reward_set( @@ -1087,13 +1265,193 @@ fn mock_mine_epoch_25() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(5)), ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - todo!("BOOT TO EPOCH 2.5 AND VERIFY WE RECEIVE A MOCK SIGNATURE PER SORTITION"); + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + + // Mine until epoch 3.0 and ensure that no more mock signatures are received + + let mut reward_cycle = signer_test.get_current_reward_cycle(); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. 
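The saturating/wrapping arithmetic in `boot_to_epoch_25_reward_cycle` above is easier to follow with concrete numbers. A sketch with illustrative values (the real lengths come from the pox constants in the burnchain config):

fn main() {
    // Illustrative values only; the tests read these from the config.
    let epoch_25_start_height: u64 = 201;
    let reward_cycle_len: u64 = 20;
    let prepare_phase_len: u64 = 5;

    // Snap the epoch start down to its enclosing reward-cycle boundary.
    let cycle_boundary = epoch_25_start_height - (epoch_25_start_height % reward_cycle_len);

    // The prepare phase of the *next* cycle is where the signer set for
    // that cycle is calculated; the +1 lands just past its first block.
    let reward_set_calc_boundary = cycle_boundary - prepare_phase_len + reward_cycle_len + 1;

    // First boundary of the next full reward cycle.
    let next_cycle_boundary = cycle_boundary + reward_cycle_len;

    assert_eq!(
        (cycle_boundary, reward_set_calc_boundary, next_cycle_boundary),
        (200, 216, 220)
    );
}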
+ ); + let mut signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + // Give the peer info some time to update + let poll_time = Instant::now(); + while signer_test + .stacks_client + .get_peer_info() + .unwrap() + .burn_block_height + + 1 + < signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + { + std::thread::sleep(Duration::from_secs(1)); + assert!( + poll_time.elapsed() <= Duration::from_secs(15), + "Timed out waiting for peer info to update" + ); + } + // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition + let main_poll_time = Instant::now(); + while signer_test + .stacks_client + .get_peer_info() + .unwrap() + .burn_block_height + + 1 + < epoch_3_start_height + { + let old_consensus_hash = signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_consensus_hash; + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + let peer_poll_time = Instant::now(); + while signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_consensus_hash + == old_consensus_hash + { + std::thread::sleep(Duration::from_millis(100)); + assert!( + peer_poll_time.elapsed() < Duration::from_secs(5), + "Timed out waiting for peer info to update" + ); + } + let expected_consensus_hash = signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_consensus_hash; + let mut mock_signatures = vec![]; + let mock_poll_time = Instant::now(); + while mock_signatures.len() != num_signers { + std::thread::sleep(Duration::from_millis(100)); + let messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::MockSignature) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::MockSignature(mock_signature) = message { + debug!("MOCK SIGNATURE: {:?}", mock_signature); + if mock_signature.stacks_consensus_hash == expected_consensus_hash { + mock_signatures.push(mock_signature); + } + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock signatures within timeout" + ); + } + let current_reward_cycle = signer_test.get_current_reward_cycle(); + if current_reward_cycle != reward_cycle { + debug!("Rolling over reward cycle to {:?}", current_reward_cycle); + reward_cycle = current_reward_cycle; + stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. 
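The mock-signature gathering above is an instance of a collect-until-enough-or-timeout loop that the signer tests repeat in several places. A generic sketch of that shape (the `fetch` closure is a stand-in for a StackerDB `get_messages` round-trip):

use std::time::{Duration, Instant};

// Poll `fetch` until `want` items are gathered or `timeout` passes.
fn collect_until<T, F>(mut fetch: F, want: usize, timeout: Duration) -> Vec<T>
where
    F: FnMut() -> Vec<T>,
{
    let start = Instant::now();
    let mut collected = Vec::new();
    while collected.len() < want {
        assert!(
            start.elapsed() < timeout,
            "timed out with {}/{} items",
            collected.len(),
            want
        );
        collected.extend(fetch());
        std::thread::sleep(Duration::from_millis(100));
    }
    collected
}

fn main() {
    let mut calls = 0;
    let sigs = collect_until(
        || {
            calls += 1;
            if calls < 3 { vec![] } else { vec!["mock-sig"] }
        },
        1,
        Duration::from_secs(5),
    );
    assert_eq!(sigs, vec!["mock-sig"]);
    assert_eq!(calls, 3);
}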
+ ); + signer_slot_ids = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + } + assert!( + main_poll_time.elapsed() <= Duration::from_secs(45), + "Timed out waiting to advance epoch 3.0" + ); + } + + info!("------------------------- Test Processing Epoch 3.0 Tenure -------------------------"); + let old_messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::MockSignature) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + let old_signatures = old_messages + .iter() + .filter_map(|message| { + if let SignerMessage::MockSignature(mock_signature) = message { + Some(mock_signature) + } else { + None + } + }) + .collect::>(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + // Wait a bit to ensure no new mock signatures show up + std::thread::sleep(Duration::from_secs(5)); + let new_messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::MockSignature) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + let new_signatures = new_messages + .iter() + .filter_map(|message| { + if let SignerMessage::MockSignature(mock_signature) = message { + Some(mock_signature) + } else { + None + } + }) + .collect::>(); + assert_eq!(old_signatures, new_signatures); } From 397318c6a832a33e2631c2c3d132509a103d3a25 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Jul 2024 21:34:14 -0400 Subject: [PATCH 035/910] WIP: fix if check order in mock sign scenario Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index cb3100674c1..71caf8bf51f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -168,8 +168,8 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - if self.reward_cycle == current_reward_cycle { - if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + if self.reward_cycle == current_reward_cycle { // We are in epoch 2.5, so we should mock mine to prove we are still alive. 
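Patch 035 above swaps the nesting of the two guards around mock signing; the checks combine as a simple conjunction, so the patch only changes which condition is evaluated (and whose side effects, such as the node-epoch RPC, run) first. A reduced sketch of the combined gate, with stand-in types rather than the signer's real API:

#[derive(PartialEq)]
enum Epoch {
    Epoch25,
    Epoch30,
}

struct MockSigner {
    reward_cycle: u64,
}

impl MockSigner {
    /// Mock-sign only if the node is still in epoch 2.5 AND this signer
    /// instance belongs to the current reward cycle.
    fn should_mock_sign(&self, node_epoch: Epoch, current_reward_cycle: u64) -> bool {
        if node_epoch == Epoch::Epoch25 {
            if self.reward_cycle == current_reward_cycle {
                return true;
            }
        }
        false
    }
}

fn main() {
    let signer = MockSigner { reward_cycle: 7 };
    assert!(signer.should_mock_sign(Epoch::Epoch25, 7));
    assert!(!signer.should_mock_sign(Epoch::Epoch30, 7)); // epoch 3.0: never mock-sign
    assert!(!signer.should_mock_sign(Epoch::Epoch25, 8)); // stale signer instance
}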
debug!("Mock signing for burn block {burn_height:?}"); self.mock_sign(stacks_client); @@ -480,8 +480,7 @@ impl Signer { }; let consensus_hash = peer_info.stacks_tip_consensus_hash; debug!("Mock signing using stacks tip {consensus_hash:?}"); - let mock_signature = - MockSignature::new(consensus_hash, &self.private_key); + let mock_signature = MockSignature::new(consensus_hash, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb From df7e9e165718e103766d0c0d3515375356bc04cd Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 30 Jul 2024 17:35:11 +0200 Subject: [PATCH 036/910] fix: update max_tx_sizes for regtest ops --- .../burnchains/bitcoin_regtest_controller.rs | 20 ++++++++++++------- testnet/stacks-node/src/config.rs | 14 +++++++++---- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d4aa528fbe4..8e1fafcdd39 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -57,7 +57,11 @@ use stacks_common::util::sleep_ms; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use crate::config::BurnchainConfig; +use crate::config::{ + BurnchainConfig, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE, +}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -950,7 +954,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_TRANSFER_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( Transaction { @@ -1032,7 +1036,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_DELEGATE_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1110,7 +1114,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_VOTE_AGG_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1204,9 +1208,11 @@ impl BitcoinRegtestController { signer: &mut BurnchainOpSigner, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 280; + let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; + + let max_tx_size_any_op = 380; + let output_amt = DUST_UTXO_LIMIT + max_tx_size_any_op * get_satoshis_per_byte(&self.config); - let output_amt = DUST_UTXO_LIMIT + max_tx_size * get_satoshis_per_byte(&self.config); let (mut tx, mut utxos) = self.prepare_tx(epoch_id, &public_key, output_amt, None, None, 0)?; @@ -1271,7 +1277,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 250; + let max_tx_size = OP_TX_STACK_STX_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f52c5d6ba90..2bfef69e906 100644 --- a/testnet/stacks-node/src/config.rs +++ 
b/testnet/stacks-node/src/config.rs @@ -49,10 +49,16 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use crate::chain_data::MinerStats; pub const DEFAULT_SATS_PER_VB: u64 = 50; +pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; +pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 350; +pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_DELEGATE_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; +pub const OP_TX_PRE_STACKS_ESTIM_SIZE: u64 = 280; +pub const OP_TX_STACK_STX_ESTIM_SIZE: u64 = 250; + const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; -const LEADER_KEY_TX_ESTIM_SIZE: u64 = 290; -const BLOCK_COMMIT_TX_ESTIM_SIZE: u64 = 350; const INV_REWARD_CYCLES_TESTNET: u64 = 6; #[derive(Clone, Deserialize, Default, Debug)] @@ -1427,8 +1433,8 @@ impl BurnchainConfig { poll_time_secs: 10, // TODO: this is a testnet specific value. satoshis_per_byte: DEFAULT_SATS_PER_VB, max_rbf: DEFAULT_MAX_RBF_RATE, - leader_key_tx_estimated_size: LEADER_KEY_TX_ESTIM_SIZE, - block_commit_tx_estimated_size: BLOCK_COMMIT_TX_ESTIM_SIZE, + leader_key_tx_estimated_size: OP_TX_LEADER_KEY_ESTIM_SIZE, + block_commit_tx_estimated_size: OP_TX_BLOCK_COMMIT_ESTIM_SIZE, rbf_fee_increment: DEFAULT_RBF_FEE_RATE_INCREMENT, first_burn_block_height: None, first_burn_block_timestamp: None, From eb80166aa8e62af6f0f24f8395f24e0e8730723c Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 30 Jul 2024 17:47:24 +0200 Subject: [PATCH 037/910] refactor: update estimated tx size consts --- .../burnchains/bitcoin_regtest_controller.rs | 8 ++++---- testnet/stacks-node/src/config.rs | 18 ++++++++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 8e1fafcdd39..7de1d09dceb 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -58,9 +58,9 @@ use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; use crate::config::{ - BurnchainConfig, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, - OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, - OP_TX_VOTE_AGG_ESTIM_SIZE, + BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, + OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, + OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; /// The number of bitcoin blocks that can have @@ -1210,7 +1210,7 @@ impl BitcoinRegtestController { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; - let max_tx_size_any_op = 380; + let max_tx_size_any_op = OP_TX_ANY_ESTIM_SIZE; let output_amt = DUST_UTXO_LIMIT + max_tx_size_any_op * get_satoshis_per_byte(&self.config); let (mut tx, mut utxos) = diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 2bfef69e906..4eef0bbdd07 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -49,13 +49,23 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use crate::chain_data::MinerStats; pub const DEFAULT_SATS_PER_VB: u64 = 50; -pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; -pub const 
OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 350; -pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380; pub const OP_TX_DELEGATE_STACKS_ESTIM_SIZE: u64 = 230; -pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; +pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; pub const OP_TX_PRE_STACKS_ESTIM_SIZE: u64 = 280; pub const OP_TX_STACK_STX_ESTIM_SIZE: u64 = 250; +pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; + +pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( + OP_TX_BLOCK_COMMIT_ESTIM_SIZE, + OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_LEADER_KEY_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, + OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE +); const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; From 0468efd7d63ac7e72af95e4ff88196bf6c4b2692 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 30 Jul 2024 17:51:43 +0200 Subject: [PATCH 038/910] chore: remove unused import --- .../src/burnchains/bitcoin_regtest_controller.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 7de1d09dceb..b09a71e5cfa 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -58,9 +58,9 @@ use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; use crate::config::{ - BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, - OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, - OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, + BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE, }; /// The number of bitcoin blocks that can have From fda2e863af93ec3bbb65ecae7b067d41875bda1d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 29 Jul 2024 15:56:20 -0500 Subject: [PATCH 039/910] test: multi-miner & multi-signer scenario * assert that both stacks-nodes have same chain height, and that they produced blocks in each bitcoin block of nakamoto * signers are distributed as event observers across 2 stacks-nodes --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 116 +++++++++++++---- testnet/stacks-node/src/tests/signer/v0.rs | 137 +++++++++++++++++++- 3 files changed, 224 insertions(+), 30 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b1e81a71128..2df168ee165 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -90,6 +90,7 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::bitcoind_forking_test + - tests::signer::v0::multiple_miners - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 12584ab89ab..a4816138647 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ 
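`OP_TX_ANY_ESTIM_SIZE` above folds the per-op size estimates into a compile-time maximum via the repo's `fmax!` macro. A self-contained sketch of one way such a const-max can be written (an illustrative equivalent, not the repo's actual macro definition):

// `std::cmp::max` is not a `const fn`, so spell out a const-evaluable max.
const fn max_u64(a: u64, b: u64) -> u64 {
    if a > b { a } else { b }
}

// Variadic max over any number of constants, folding pairwise.
macro_rules! const_max {
    ($x:expr) => { $x };
    ($x:expr, $($rest:expr),+ $(,)?) => {
        max_u64($x, const_max!($($rest),+))
    };
}

const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380;
const OP_TX_PRE_STACKS_ESTIM_SIZE: u64 = 280;
const OP_TX_STACK_STX_ESTIM_SIZE: u64 = 250;

const OP_TX_ANY_ESTIM_SIZE: u64 = const_max!(
    OP_TX_BLOCK_COMMIT_ESTIM_SIZE,
    OP_TX_PRE_STACKS_ESTIM_SIZE,
    OP_TX_STACK_STX_ESTIM_SIZE,
);

fn main() {
    assert_eq!(OP_TX_ANY_ESTIM_SIZE, 380);
}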
b/testnet/stacks-node/src/tests/signer/mod.rs @@ -44,7 +44,7 @@ use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::types::chainstate::StacksAddress; -use stacks::util::secp256k1::MessageSignature; +use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; @@ -105,14 +105,26 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, ) -> Self { - Self::new_with_config_modifications(num_signers, initial_balances, wait_on_signers, |_| {}) + Self::new_with_config_modifications( + num_signers, + initial_balances, + wait_on_signers, + |_| {}, + |_| {}, + &[], + ) } - fn new_with_config_modifications ()>( + fn new_with_config_modifications< + F: FnMut(&mut SignerConfig) -> (), + G: FnMut(&mut NeonConfig) -> (), + >( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, wait_on_signers: Option, - modifier: F, + mut signer_config_modifier: F, + node_config_modifier: G, + btc_miner_pubkeys: &[Secp256k1PublicKey], ) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) @@ -136,11 +148,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = build_signer_config_tomls( &signer_stacks_private_keys, &naka_conf.node.rpc_bind, Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. @@ -151,23 +162,45 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = (0..num_signers) - .into_iter() - .map(|i| { - info!("spawning signer"); - let mut signer_config = - SignerConfig::load_from_str(&signer_configs[i as usize]).unwrap(); - modifier(&mut signer_config); - SpawnedSigner::new(signer_config) - }) + ) + .into_iter() + .map(|toml| { + let mut signer_config = SignerConfig::load_from_str(&toml).unwrap(); + signer_config_modifier(&mut signer_config); + signer_config + }) + .collect(); + assert_eq!(signer_configs.len(), num_signers); + + let spawned_signers = signer_configs + .iter() + .cloned() + .map(SpawnedSigner::new) .collect(); // Setup the nodes and deploy the contract to it - let node = setup_stx_btc_node(naka_conf, &signer_stacks_private_keys, &signer_configs); - let config = SignerConfig::load_from_str(&signer_configs[0]).unwrap(); - let stacks_client = StacksClient::from(&config); + let btc_miner_pubkeys = if btc_miner_pubkeys.is_empty() { + let pk = Secp256k1PublicKey::from_hex( + naka_conf + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(); + &[pk] + } else { + btc_miner_pubkeys + }; + let node = setup_stx_btc_node( + naka_conf, + &signer_stacks_private_keys, + &signer_configs, + btc_miner_pubkeys, + node_config_modifier, + ); + let config = signer_configs.first().unwrap(); + let stacks_client = StacksClient::from(config); Self { running_nodes: node, @@ -294,6 +327,33 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()>( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], - signer_config_tomls: &[String], + signer_configs: &[SignerConfig], + btc_miner_pubkeys: &[Secp256k1PublicKey], + mut node_config_modifier: G, ) -> RunningNodes { // Spawn the endpoints for observing signers - for toml in signer_config_tomls { - let signer_config = 
SignerConfig::load_from_str(toml).unwrap(); - + for signer_config in signer_configs { naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("{}", signer_config.endpoint), + endpoint: signer_config.endpoint.to_string(), events_keys: vec![ EventKeyType::StackerDBChunks, EventKeyType::BlockProposal, @@ -593,6 +653,8 @@ fn setup_stx_btc_node( } } } + node_config_modifier(&mut naka_conf); + info!("Make new BitcoinCoreController"); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -604,7 +666,7 @@ fn setup_stx_btc_node( let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); info!("Bootstraping..."); - btc_regtest_controller.bootstrap_chain(201); + btc_regtest_controller.bootstrap_chain_to_pks(201, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ef66eec523..9457a3d6b63 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::str::FromStr; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -28,7 +29,7 @@ use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; -use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; +use stacks::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; @@ -44,6 +45,7 @@ use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; +use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, submit_tx, test_observer, @@ -611,6 +613,8 @@ fn forked_tenure_testing( // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; }, + |_| {}, + &[], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -799,11 +803,10 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(15)), - |_config| {}, ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -936,6 +939,134 @@ fn bitcoind_forking_test() { signer_test.shutdown(); } +#[test] +#[ignore] +fn multiple_miners() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let 
btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let nakamoto_tenures = 20; + for 
_i in 0..nakamoto_tenures { + let _mined_block = signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + } + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + nakamoto_tenures); + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behavior at the end of a tenure. Specifically: From 568ada6ea870b31ea45f7f5913a763a1b18cfa80 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 30 Jul 2024 14:15:48 -0400 Subject: [PATCH 040/910] Use SIP-18 structured data for mock signature, add peer info to mock signature struct, and update integration test to use burn block height Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + libsigner/src/v0/messages.rs | 194 ++++++++++++++++++--- stacks-signer/src/v0/signer.rs | 28 ++- testnet/stacks-node/src/tests/signer/v0.rs | 120 +++++-------- 4 files changed, 230 insertions(+), 113 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b1e81a71128..d301e227ce0 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -90,6 +90,7 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::bitcoind_forking_test + - tests::signer::v0::mock_sign_epoch_25 - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index d4311f8aa0c..f16dd6d4ed7 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -34,16 +34,24 @@ use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::chainstate::stacks::StacksTransaction; +use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; use blockstack_lib::util_lib::boot::boot_code_id; -use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey, StacksPublicKey}; +use blockstack_lib::util_lib::signed_structured_data::{ + make_structured_data_domain, structured_data_message_hash, +}; +use clarity::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksPrivateKey, StacksPublicKey, +}; use clarity::types::PrivateKey; +use clarity::util::hash::Sha256Sum; use clarity::util::retry::BoundReader; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::Value; use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha512_256}; @@ -231,25 +239,103 @@ pub trait StacksMessageCodecExtensions: Sized { fn inner_consensus_deserialize(fd: &mut R) -> Result; } -/// A 
signer's mock signature across its last seen Stacks Consensus Hash. This is only used
-/// by Epoch 2.5 signers to simulate the signing of a block for every sortition.
+/// A snapshot of the signer view of the stacks node to be used for mock signing.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct MockSignData {
+    /// The stacks tip consensus hash at the time of the mock signature
+    pub stacks_tip_consensus_hash: ConsensusHash,
+    /// The stacks tip header hash at the time of the mock signature
+    pub stacks_tip: BlockHeaderHash,
+    /// The server version
+    pub server_version: String,
+    /// The burn block height that triggered the mock signature
+    pub burn_block_height: u64,
+    /// The burn block height of the peer view at the time of the mock signature. Note
+    /// that this may be different from the burn_block_height if the peer view is stale.
+    pub peer_burn_block_height: u64,
+    /// The POX consensus hash at the time of the mock signature
+    pub pox_consensus: ConsensusHash,
+    /// The chain id for the mock signature
+    pub chain_id: u32,
+}
+
+impl MockSignData {
+    fn new(peer_view: RPCPeerInfoData, burn_block_height: u64, chain_id: u32) -> Self {
+        Self {
+            stacks_tip_consensus_hash: peer_view.stacks_tip_consensus_hash,
+            stacks_tip: peer_view.stacks_tip,
+            server_version: peer_view.server_version,
+            burn_block_height,
+            peer_burn_block_height: peer_view.burn_block_height,
+            pox_consensus: peer_view.pox_consensus,
+            chain_id,
+        }
+    }
+}
+
+impl StacksMessageCodec for MockSignData {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, self.stacks_tip_consensus_hash.as_bytes())?;
+        write_next(fd, &self.stacks_tip)?;
+        write_next(fd, &(self.server_version.as_bytes().len() as u8))?;
+        fd.write_all(self.server_version.as_bytes())
+            .map_err(CodecError::WriteError)?;
+        write_next(fd, &self.burn_block_height)?;
+        write_next(fd, &self.peer_burn_block_height)?;
+        write_next(fd, &self.pox_consensus)?;
+        write_next(fd, &self.chain_id)?;
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let stacks_tip_consensus_hash = read_next::<ConsensusHash, _>(fd)?;
+        let stacks_tip = read_next::<BlockHeaderHash, _>(fd)?;
+        let len_byte: u8 = read_next(fd)?;
+        let mut bytes = vec![0u8; len_byte as usize];
+        fd.read_exact(&mut bytes).map_err(CodecError::ReadError)?;
+        // must encode a valid string
+        let server_version = String::from_utf8(bytes).map_err(|_e| {
+            CodecError::DeserializeError(
+                "Failed to parse server version name: could not construct from utf8".to_string(),
+            )
+        })?;
+        let burn_block_height = read_next::<u64, _>(fd)?;
+        let peer_burn_block_height = read_next::<u64, _>(fd)?;
+        let pox_consensus = read_next::<ConsensusHash, _>(fd)?;
+        let chain_id = read_next::<u32, _>(fd)?;
+        Ok(Self {
+            stacks_tip_consensus_hash,
+            stacks_tip,
+            server_version,
+            burn_block_height,
+            peer_burn_block_height,
+            pox_consensus,
+            chain_id,
+        })
+    }
+}
+
+/// A mock signature for the stacks node to be used for mock signing.
+/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MockSignature { - /// The signature across the stacks consensus hash + /// The signature of the mock signature signature: MessageSignature, - /// The block hash that the signature is across - pub stacks_consensus_hash: ConsensusHash, + /// The data that was signed across + pub sign_data: MockSignData, } impl MockSignature { - /// Create a new mock signature with the provided stacks consensus hash and private key + /// Create a new mock sign data struct from the provided peer info, burn block height, chain id, and private key. pub fn new( - stacks_consensus_hash: ConsensusHash, + peer_view: RPCPeerInfoData, + burn_block_height: u64, + chain_id: u32, stacks_private_key: &StacksPrivateKey, ) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - stacks_consensus_hash, + sign_data: MockSignData::new(peer_view, burn_block_height, chain_id), }; sig.sign(stacks_private_key) .expect("Failed to sign MockSignature"); @@ -257,16 +343,43 @@ impl MockSignature { } /// The signature hash for the mock signature - pub fn signature_hash(&self) -> Result { - let mut hasher = Sha512_256::new(); - let fd = &mut hasher; - write_next(fd, &self.stacks_consensus_hash)?; - Ok(Sha512Trunc256Sum::from_hasher(hasher)) + pub fn signature_hash(&self) -> Sha256Sum { + let domain_tuple = + make_structured_data_domain("mock-signer", "1.0.0", self.sign_data.chain_id); + let data_tuple = Value::Tuple( + TupleData::from_data(vec![ + ( + "stacks-tip-consensus-hash".into(), + Value::buff_from(self.sign_data.stacks_tip_consensus_hash.as_bytes().into()) + .unwrap(), + ), + ( + "stacks-tip".into(), + Value::buff_from(self.sign_data.stacks_tip.as_bytes().into()).unwrap(), + ), + ( + "server-version".into(), + Value::string_ascii_from_bytes(self.sign_data.server_version.clone().into()) + .unwrap(), + ), + ( + "burn-block-height".into(), + Value::UInt(self.sign_data.burn_block_height.into()), + ), + ( + "pox-consensus".into(), + Value::buff_from(self.sign_data.pox_consensus.as_bytes().into()).unwrap(), + ), + ]) + .expect("Error creating signature hash"), + ); + structured_data_message_hash(data_tuple, domain_tuple) } + /// Sign the mock signature and set the internal signature field fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { - let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; - self.signature = private_key.sign(&signature_hash.0)?; + let signature_hash = self.signature_hash(); + self.signature = private_key.sign(signature_hash.as_bytes())?; Ok(()) } /// Verify the mock signature against the provided public key @@ -274,7 +387,7 @@ impl MockSignature { if self.signature == MessageSignature::empty() { return Ok(false); } - let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; + let signature_hash = self.signature_hash(); public_key .verify(&signature_hash.0, &self.signature) .map_err(|e| e.to_string()) @@ -284,16 +397,16 @@ impl MockSignature { impl StacksMessageCodec for MockSignature { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signature)?; - write_next(fd, &self.stacks_consensus_hash)?; + self.sign_data.consensus_serialize(fd)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let signature = read_next::(fd)?; - let stacks_consensus_hash = read_next::(fd)?; + let sign_data = read_next::(fd)?; Ok(Self { signature, - stacks_consensus_hash, + sign_data, }) } } @@ -591,6 +704,7 @@ mod test { TransactionPostConditionMode, 
TransactionSmartContract, TransactionVersion,
     };
     use blockstack_lib::util_lib::strings::StacksString;
+    use clarity::consts::CHAIN_ID_MAINNET;
     use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash};
     use clarity::types::PrivateKey;
     use clarity::util::hash::MerkleTree;
@@ -708,6 +822,27 @@ mod test {
         assert_eq!(signer_message, deserialized_signer_message);
     }
 
+    fn random_mock_sign_data() -> MockSignData {
+        let stacks_tip_consensus_byte: u8 = thread_rng().gen();
+        let stacks_tip_byte: u8 = thread_rng().gen();
+        let pox_consensus_byte: u8 = thread_rng().gen();
+        let chain_byte: u8 = thread_rng().gen_range(0..=1);
+        let chain_id = if chain_byte == 1 {
+            CHAIN_ID_TESTNET
+        } else {
+            CHAIN_ID_MAINNET
+        };
+        MockSignData {
+            stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]),
+            stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]),
+            server_version: "0.0.0".to_string(),
+            burn_block_height: thread_rng().next_u64(),
+            peer_burn_block_height: thread_rng().next_u64(),
+            pox_consensus: ConsensusHash([pox_consensus_byte; 20]),
+            chain_id,
+        }
+    }
+
     #[test]
     fn verify_sign_mock_signature() {
         let private_key = StacksPrivateKey::new();
@@ -716,11 +851,9 @@ mod test {
         let bad_private_key = StacksPrivateKey::new();
         let bad_public_key = StacksPublicKey::from_private(&bad_private_key);
 
-        let byte: u8 = thread_rng().gen();
-        let stacks_consensus_hash = ConsensusHash([byte; 20]);
         let mut mock_signature = MockSignature {
             signature: MessageSignature::empty(),
-            stacks_consensus_hash,
+            sign_data: random_mock_sign_data(),
         };
         assert!(!mock_signature
             .verify(&public_key)
@@ -740,15 +873,22 @@ mod test {
 
     #[test]
     fn serde_mock_signature() {
-        let byte: u8 = thread_rng().gen();
-        let stacks_consensus_hash = ConsensusHash([byte; 20]);
         let mock_signature = MockSignature {
             signature: MessageSignature::empty(),
-            stacks_consensus_hash,
+            sign_data: random_mock_sign_data(),
         };
         let serialized_signature = mock_signature.serialize_to_vec();
         let deserialized_signature = read_next::<MockSignature, _>(&mut &serialized_signature[..])
             .expect("Failed to deserialize MockSignature");
         assert_eq!(mock_signature, deserialized_signature);
     }
+
+    #[test]
+    fn serde_sign_data() {
+        let sign_data = random_mock_sign_data();
+        let serialized_data = sign_data.serialize_to_vec();
+        let deserialized_data = read_next::<MockSignData, _>(&mut &serialized_data[..])
+            .expect("Failed to deserialize MockSignData");
+        assert_eq!(sign_data, deserialized_data);
+    }
 }
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 71caf8bf51f..8a0d4772e21 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -16,6 +16,7 @@ use std::fmt::Debug;
 use std::sync::mpsc::Sender;
 
 use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse;
+use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET};
 use clarity::types::chainstate::StacksPrivateKey;
 use clarity::types::{PrivateKey, StacksEpochId};
 use clarity::util::hash::MerkleHashFunc;
@@ -171,8 +172,7 @@ impl SignerTrait<SignerMessage> for Signer {
         if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() {
             if self.reward_cycle == current_reward_cycle {
                 // We are in epoch 2.5, so we should mock mine to prove we are still alive.
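// For context: a minimal sketch of the round trip a mock signature makes,
// assuming the MockSignature API introduced above (`new`/`verify`), a
// StacksClient that exposes `get_peer_info()`, and the chain-id constants
// imported in this file. The function name and the `mainnet` flag are
// illustrative assumptions, not code from this changeset.
fn mock_sign_roundtrip_sketch(
    stacks_client: &StacksClient,
    private_key: &StacksPrivateKey,
    burn_block_height: u64,
    mainnet: bool,
) -> Result<bool, String> {
    // Snapshot the node's current view; this becomes the MockSignData payload.
    let peer_view = stacks_client
        .get_peer_info()
        .map_err(|e| format!("Failed to get peer info: {e:?}"))?;
    let chain_id = if mainnet {
        CHAIN_ID_MAINNET
    } else {
        CHAIN_ID_TESTNET
    };
    // `new` signs the structured-data hash of the snapshot internally.
    let mock_signature =
        MockSignature::new(peer_view, burn_block_height, chain_id, private_key);
    // Any holder of the signer's public key can re-derive the hash and verify.
    mock_signature.verify(&StacksPublicKey::from_private(private_key))
}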
- debug!("Mock signing for burn block {burn_height:?}"); - self.mock_sign(stacks_client); + self.mock_sign(*burn_height, stacks_client); } }; } @@ -473,14 +473,26 @@ impl Signer { } /// Send a mock signature to stackerdb to prove we are still alive - fn mock_sign(&mut self, stacks_client: &StacksClient) { - let Ok(peer_info) = stacks_client.get_peer_info() else { - warn!("{self}: Failed to get peer info. Cannot mock mine."); + fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { + let Ok(peer_view) = stacks_client.get_peer_info() else { + warn!("{self}: Failed to get peer info. Cannot mock sign."); return; }; - let consensus_hash = peer_info.stacks_tip_consensus_hash; - debug!("Mock signing using stacks tip {consensus_hash:?}"); - let mock_signature = MockSignature::new(consensus_hash, &self.private_key); + let chain_id = if self.mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + debug!("Mock signing for burn block {burn_block_height:?}"; + "stacks_tip_consensus_hash" => ?peer_view.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_view.stacks_tip.clone(), + "peer_burn_block_height" => peer_view.burn_block_height, + "pox_consensus" => ?peer_view.pox_consensus.clone(), + "server_version" => peer_view.server_version.clone(), + "chain_id" => chain_id + ); + let mock_signature = + MockSignature::new(peer_view, burn_block_height, chain_id, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a8491c46382..410942f033c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -165,8 +165,9 @@ impl SignerTest { .wrapping_add(reward_cycle_len) .wrapping_add(1); - let next_reward_cycle_boundary = - epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + let next_reward_cycle_boundary = epoch_25_reward_cycle_boundary + .wrapping_add(reward_cycle_len) + .saturating_sub(1); run_until_burnchain_height( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, @@ -228,7 +229,11 @@ impl SignerTest { &self.running_nodes.conf, ); - info!("Ready to mine the first Epoch 2.5 reward cycle!"); + let current_burn_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + info!("At burn block height {current_burn_block_height}. Ready to mine the first Epoch 2.5 reward cycle!"); } /// Run the test until the epoch 3 boundary @@ -1398,8 +1403,8 @@ fn retry_on_timeout() { #[test] #[ignore] -/// This test checks that the miner will retry when signature collection times out. -fn mock_mine_epoch_25() { +/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. 
+fn mock_sign_epoch_25() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -1452,68 +1457,44 @@
         .map(|id| id.0)
         .collect();
     assert_eq!(signer_slot_ids.len(), num_signers);
-
-    // Give the peer info some time to update
-    let poll_time = Instant::now();
-    while signer_test
-        .stacks_client
-        .get_peer_info()
-        .unwrap()
-        .burn_block_height
-        + 1
-        < signer_test
-            .running_nodes
-            .btc_regtest_controller
-            .get_headers_height()
-    {
-        std::thread::sleep(Duration::from_secs(1));
-        assert!(
-            poll_time.elapsed() <= Duration::from_secs(15),
-            "Timed out waiting for peer info to update"
-        );
-    }
 
     // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition
     let main_poll_time = Instant::now();
-    while signer_test
-        .stacks_client
-        .get_peer_info()
-        .unwrap()
-        .burn_block_height
-        + 1
-        < epoch_3_start_height
-    {
-        let old_consensus_hash = signer_test
-            .stacks_client
-            .get_peer_info()
-            .unwrap()
-            .stacks_tip_consensus_hash;
+    let mut current_burn_block_height = signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .get_headers_height();
+    while current_burn_block_height + 1 < epoch_3_start_height {
+        current_burn_block_height = signer_test
+            .running_nodes
+            .btc_regtest_controller
+            .get_headers_height();
+        let current_reward_cycle = signer_test.get_current_reward_cycle();
+        if current_reward_cycle != reward_cycle {
+            debug!("Rolling over reward cycle to {:?}", current_reward_cycle);
+            reward_cycle = current_reward_cycle;
+            stackerdb = StackerDB::new(
+                &signer_test.running_nodes.conf.node.rpc_bind,
+                StacksPrivateKey::new(), // We are just reading so don't care what the key is
+                false,
+                reward_cycle,
+                SignerSlotID(0), // We are just reading so again, don't care about index.
+            );
+            signer_slot_ids = signer_test
+                .get_signer_indices(reward_cycle)
+                .iter()
+                .map(|id| id.0)
+                .collect();
+            assert_eq!(signer_slot_ids.len(), num_signers);
+        }
         next_block_and(
             &mut signer_test.running_nodes.btc_regtest_controller,
             60,
             || Ok(true),
         )
         .unwrap();
-        let peer_poll_time = Instant::now();
-        while signer_test
-            .stacks_client
-            .get_peer_info()
-            .unwrap()
-            .stacks_tip_consensus_hash
-            == old_consensus_hash
-        {
-            std::thread::sleep(Duration::from_millis(100));
-            assert!(
-                peer_poll_time.elapsed() < Duration::from_secs(5),
-                "Timed out waiting for peer info to update"
-            );
-        }
-        let expected_consensus_hash = signer_test
-            .stacks_client
-            .get_peer_info()
-            .unwrap()
-            .stacks_tip_consensus_hash;
         let mut mock_signatures = vec![];
         let mock_poll_time = Instant::now();
+        debug!("Waiting for mock signatures for burn block height {current_burn_block_height}");
         while mock_signatures.len() != num_signers {
             std::thread::sleep(Duration::from_millis(100));
             let messages: Vec<SignerMessage> = StackerDB::get_messages(
@@ -1525,9 +1506,10 @@ fn mock_mine_epoch_25() {
             .expect("Failed to get message from stackerdb");
             for message in messages {
                 if let SignerMessage::MockSignature(mock_signature) = message {
-                    debug!("MOCK SIGNATURE: {:?}", mock_signature);
-                    if mock_signature.stacks_consensus_hash == expected_consensus_hash {
-                        mock_signatures.push(mock_signature);
+                    if mock_signature.sign_data.burn_block_height == current_burn_block_height {
+                        if !mock_signatures.contains(&mock_signature) {
+                            mock_signatures.push(mock_signature);
+                        }
                     }
                 }
             }
@@ -1536,24 +1518,6 @@ fn mock_mine_epoch_25() {
                 "Failed to find mock signatures within timeout"
             );
         }
-        let current_reward_cycle = signer_test.get_current_reward_cycle();
-        if current_reward_cycle != reward_cycle {
-
debug!("Rolling over reward cycle to {:?}", current_reward_cycle); - reward_cycle = current_reward_cycle; - stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); - signer_slot_ids = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); - } assert!( main_poll_time.elapsed() <= Duration::from_secs(45), "Timed out waiting to advance epoch 3.0" From 6acd51086a1f27585fdfa775d9ba832eb490da3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:19:37 -0400 Subject: [PATCH 041/910] fix: test_debug --> debug --- stackslib/src/net/chat.rs | 151 ++++++++++++++++++-------------------- 1 file changed, 73 insertions(+), 78 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 1e1fa79f5c9..8d8dc7ca5c9 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -735,10 +735,9 @@ impl ConversationP2P { } }; if bhh != their_burn_header_hash { - test_debug!( + debug!( "Burn header hash mismatch in preamble: {} != {}", - bhh, - their_burn_header_hash + bhh, their_burn_header_hash ); return true; } @@ -764,18 +763,16 @@ impl ConversationP2P { if my_epoch <= remote_epoch { // remote node supports same epochs we do - test_debug!( + debug!( "Remote peer has epoch {}, which is newer than our epoch {}", - remote_epoch, - my_epoch + remote_epoch, my_epoch ); return true; } - test_debug!( + debug!( "Remote peer has old network version {} (epoch {})", - remote_peer_version, - remote_epoch + remote_peer_version, remote_epoch ); // what epoch are we in? @@ -786,10 +783,9 @@ impl ConversationP2P { if cur_epoch <= remote_epoch { // epoch shift hasn't happened yet, and this peer supports the current epoch - test_debug!( + debug!( "Remote peer has epoch {} and current epoch is {}, so still valid", - remote_epoch, - cur_epoch + remote_epoch, cur_epoch ); return true; } @@ -828,11 +824,9 @@ impl ConversationP2P { } if (msg.preamble.peer_version & 0xff000000) != (self.version & 0xff000000) { // major version mismatch - test_debug!( + debug!( "{:?}: Preamble invalid: wrong peer version: {:x} != {:x}", - &self, - msg.preamble.peer_version, - self.version + &self, msg.preamble.peer_version, self.version ); return Err(net_error::InvalidMessage); } @@ -1366,11 +1360,6 @@ impl ConversationP2P { }; if let Some(stackerdb_accept) = stackerdb_accept { - test_debug!( - "{} =?= {}", - &stackerdb_accept.rc_consensus_hash, - &burnchain_view.rc_consensus_hash - ); if stackerdb_accept.rc_consensus_hash == burnchain_view.rc_consensus_hash { // remote peer is in the same reward cycle as us. 
self.update_from_stacker_db_handshake_data(stackerdb_accept); @@ -1457,7 +1446,7 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_chat_neighbors { // never report neighbors if this is disabled by a test - test_debug!( + debug!( "{:?}: Neighbor crawl is disabled; reporting 0 neighbors", &local_peer ); @@ -1694,7 +1683,7 @@ impl ConversationP2P { if self.connection.options.disable_inv_chat { // never reply that we have blocks - test_debug!( + debug!( "{:?}: Disable inv chat -- pretend like we have nothing", network.get_local_peer() ); @@ -1759,11 +1748,9 @@ impl ConversationP2P { e })?; - test_debug!( + debug!( "Reply NakamotoInv for {} (rc {}): {:?}", - &get_nakamoto_inv.consensus_hash, - reward_cycle, - &nakamoto_inv + &get_nakamoto_inv.consensus_hash, reward_cycle, &nakamoto_inv ); Ok(StacksMessageType::NakamotoInv(nakamoto_inv)) @@ -1798,7 +1785,7 @@ impl ConversationP2P { if self.connection.options.disable_inv_chat { // never reply that we have blocks - test_debug!( + debug!( "{:?}: Disable inv chat -- pretend like we have nothing", network.get_local_peer() ); @@ -1837,10 +1824,9 @@ impl ConversationP2P { Ok(Some(sn)) => { if !sn.pox_valid { // invalid consensus hash - test_debug!( + debug!( "{:?}: Snapshot {:?} is not on a valid PoX fork", - local_peer, - sn.burn_header_hash + local_peer, sn.burn_header_hash ); return Ok(StacksMessageType::Nack(NackData::new( NackErrorCodes::InvalidPoxFork, @@ -1852,7 +1838,7 @@ impl ConversationP2P { % (burnchain.pox_constants.reward_cycle_length as u64) != 1 { - test_debug!( + debug!( "{:?}: block height ({} - {}) % {} != 1", local_peer, sn.block_height, @@ -1896,10 +1882,9 @@ impl ConversationP2P { } } Ok(None) | Err(db_error::NotFoundError) => { - test_debug!( + debug!( "{:?}: snapshot for consensus hash {} not found", - local_peer, - getpoxinv.consensus_hash + local_peer, getpoxinv.consensus_hash ); Ok(StacksMessageType::Nack(NackData::new( NackErrorCodes::InvalidPoxFork, @@ -1999,9 +1984,29 @@ impl ConversationP2P { ) { Ok(Some(chunk)) => chunk, Ok(None) => { - // request for a stale chunk + // TODO: this is racey + if let Ok(Some(actual_version)) = + stacker_dbs.get_slot_version(&getchunk.contract_id, getchunk.slot_id) + { + // request for a stale chunk + debug!("{:?}: NACK StackerDBGetChunk; version mismatch for requested slot {}.{} for {}. 
Expected {}", local_peer, getchunk.slot_id, getchunk.slot_version, &getchunk.contract_id, actual_version); + if actual_version > getchunk.slot_version { + return Ok(StacksMessageType::Nack(NackData::new( + NackErrorCodes::StaleVersion, + ))); + } else { + return Ok(StacksMessageType::Nack(NackData::new( + NackErrorCodes::FutureVersion, + ))); + } + } + // if we hit a DB error, just treat it as if the DB doesn't exist + debug!( + "{:?}: NACK StackerDBGetChunk; unloadable slot {}.{} for {}", + local_peer, getchunk.slot_id, getchunk.slot_version, &getchunk.contract_id + ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleVersion, + NackErrorCodes::NoSuchDB, ))); } Err(e) => { @@ -2435,7 +2440,7 @@ impl ConversationP2P { } } } - test_debug!("{:?}: received {} bytes", self, total_recved); + debug!("{:?}: received {} bytes", self, total_recved); Ok(total_recved) } @@ -2463,7 +2468,7 @@ impl ConversationP2P { } } } - test_debug!("{:?}: sent {} bytes", self, total_sent); + debug!("{:?}: sent {} bytes", self, total_sent); Ok(total_sent) } @@ -2554,12 +2559,12 @@ impl ConversationP2P { Ok(handshake_opt) } StacksMessageType::HandshakeAccept(ref data) => { - test_debug!("{:?}: Got HandshakeAccept", &self); + debug!("{:?}: Got HandshakeAccept", &self); self.handle_handshake_accept(network.get_chain_view(), &msg.preamble, data, None) .and_then(|_| Ok(None)) } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { - test_debug!("{:?}: Got StackerDBHandshakeAccept", &self); + debug!("{:?}: Got StackerDBHandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2569,21 +2574,21 @@ impl ConversationP2P { .and_then(|_| Ok(None)) } StacksMessageType::Ping(_) => { - test_debug!("{:?}: Got Ping", &self); + debug!("{:?}: Got Ping", &self); // consume here if unsolicited consume = true; self.handle_ping(network.get_chain_view(), msg) } StacksMessageType::Pong(_) => { - test_debug!("{:?}: Got Pong", &self); + debug!("{:?}: Got Pong", &self); Ok(None) } StacksMessageType::NatPunchRequest(ref nonce) => { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!("{:?}: Got NatPunchRequest({})", &self, nonce); + debug!("{:?}: Got NatPunchRequest({})", &self, nonce); consume = true; let msg = self.handle_natpunch_request(network.get_chain_view(), *nonce); @@ -2593,11 +2598,11 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce); + debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce); Ok(None) } _ => { - test_debug!( + debug!( "{:?}: Got a data-plane message (type {})", &self, msg.payload.get_message_name() @@ -2626,14 +2631,14 @@ impl ConversationP2P { let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { monitoring::increment_msg_counter("p2p_unauthenticated_handshake".to_string()); - test_debug!("{:?}: Got unauthenticated Handshake", &self); + debug!("{:?}: Got unauthenticated Handshake", &self); let (reply_opt, handled) = self.handle_handshake(network, msg, false, ibd)?; consume = handled; Ok(reply_opt) } StacksMessageType::HandshakeAccept(ref data) => { if solicited { - test_debug!("{:?}: Got unauthenticated HandshakeAccept", &self); + debug!("{:?}: Got unauthenticated HandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2642,7 +2647,7 @@ impl ConversationP2P { ) .and_then(|_| 
Ok(None)) } else { - test_debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); + debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); // don't update stats or state, and don't pass back consume = true; @@ -2651,7 +2656,7 @@ impl ConversationP2P { } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { if solicited { - test_debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); + debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2660,7 +2665,7 @@ impl ConversationP2P { ) .and_then(|_| Ok(None)) } else { - test_debug!( + debug!( "{:?}: Unsolicited unauthenticated StackerDBHandshakeAccept", &self ); @@ -2671,14 +2676,14 @@ impl ConversationP2P { } } StacksMessageType::HandshakeReject => { - test_debug!("{:?}: Got unauthenticated HandshakeReject", &self); + debug!("{:?}: Got unauthenticated HandshakeReject", &self); // don't NACK this back just because we were rejected. // But, it's okay to forward this back (i.e. don't consume). Ok(None) } StacksMessageType::Nack(_) => { - test_debug!("{:?}: Got unauthenticated Nack", &self); + debug!("{:?}: Got unauthenticated Nack", &self); // don't NACK back. // But, it's okay to forward this back (i.e. don't consume). @@ -2688,10 +2693,9 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!( + debug!( "{:?}: Got unauthenticated NatPunchRequest({})", - &self, - *nonce + &self, *nonce ); consume = true; let msg = self.handle_natpunch_request(network.get_chain_view(), *nonce); @@ -2701,10 +2705,9 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!( + debug!( "{:?}: Got unauthenticated NatPunchReply({})", - &self, - _m.nonce + &self, _m.nonce ); // it's okay to forward this back (i.e. don't consume) @@ -2939,7 +2942,7 @@ impl ConversationP2P { ibd: bool, ) -> Result, net_error> { let num_inbound = self.connection.inbox_len(); - test_debug!("{:?}: {} messages pending", &self, num_inbound); + debug!("{:?}: {} messages pending", &self, num_inbound); let mut unsolicited = vec![]; for _ in 0..num_inbound { @@ -2972,7 +2975,7 @@ impl ConversationP2P { if let Some(mut reply) = reply_opt.take() { // handler generated a reply. // send back this message to the remote peer. - test_debug!( + debug!( "{:?}: Send control-plane reply type {}", &self, reply.payload.get_message_name() @@ -2988,11 +2991,9 @@ impl ConversationP2P { let _relayers = format!("{:?}", &msg.relayers); let _seq = msg.request_id(); - test_debug!( + debug!( "{:?}: Received message {}, relayed by {}", - &self, - &_msgtype, - &_relayers + &self, &_msgtype, &_relayers ); // Is there someone else waiting for this message? If so, pass it along. @@ -3004,33 +3005,27 @@ impl ConversationP2P { &self, _msgtype, _seq ); } else { - test_debug!( + debug!( "{:?}: Try handling message (type {} seq {})", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); if let Some(msg) = self.handle_data_message(network, sortdb, chainstate, msg)? 
{
                         // this message was unsolicited
-                        test_debug!(
+                        debug!(
                             "{:?}: Did not handle message (type {} seq {}); passing upstream",
-                            &self,
-                            _msgtype,
-                            _seq
+                            &self, _msgtype, _seq
                         );
                         unsolicited.push(msg);
                     } else {
                         // expected and handled the message
-                        test_debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq);
+                        debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq);
                     }
                 }
             } else {
                 // no one was waiting for this reply, so just drop it
-                test_debug!(
+                debug!(
                     "{:?}: Fulfilled pending message request (type {} seq {})",
-                    &self,
-                    _msgtype,
-                    _seq
+                    &self, _msgtype, _seq
                 );
             }
         }

From 5df7845da13436664df757168868f8efe7d4ce10 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:19:51 -0400
Subject: [PATCH 042/910] fix: dead/broken peers were mixed up

---
 stackslib/src/net/inv/epoch2x.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs
index 62a5d024706..b3092d8f121 100644
--- a/stackslib/src/net/inv/epoch2x.rs
+++ b/stackslib/src/net/inv/epoch2x.rs
@@ -2629,7 +2629,7 @@ impl PeerNetwork {
         }
 
         // synchronize peer block inventories
-        let (done, throttled, dead_neighbors, broken_neighbors) =
+        let (done, throttled, broken_neighbors, dead_neighbors) =
             self.sync_inventories_epoch2x(sortdb, ibd);
 
         // disconnect and ban broken peers

From afcfa00bfc9a5d8d7bc291add035d5f50bd000b8 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:20:11 -0400
Subject: [PATCH 043/910] chore: new nack code for requesting a chunk from the
 future

---
 stackslib/src/net/mod.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs
index e836bdfec21..da323be3e75 100644
--- a/stackslib/src/net/mod.rs
+++ b/stackslib/src/net/mod.rs
@@ -1044,6 +1044,7 @@ pub mod NackErrorCodes {
     pub const NoSuchDB: u32 = 6;
     pub const StaleVersion: u32 = 7;
     pub const StaleView: u32 = 8;
+    pub const FutureVersion: u32 = 9;
 }
 
 #[derive(Debug, Clone, PartialEq)]

From d74772ad0387c9c59c557ac899db4c591cd8b54b Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:20:32 -0400
Subject: [PATCH 044/910] chore: log stackerdb contract and inventory

---
 stackslib/src/net/stackerdb/mod.rs | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs
index 847363b2e31..d310998a194 100644
--- a/stackslib/src/net/stackerdb/mod.rs
+++ b/stackslib/src/net/stackerdb/mod.rs
@@ -423,6 +423,8 @@ pub struct StackerDBSync<NC: NeighborComms> {
     /// whether or not we should immediately re-fetch chunks because we learned about new chunks
     /// from our peers when they replied to our chunk-pushes with new inventory state
     need_resync: bool,
+    /// whether or not the fetched inventory was determined to be stale
+    stale_inv: bool,
     /// Track stale neighbors
     pub(crate) stale_neighbors: HashSet<NeighborAddress>,
     /// How many attempted connections have been made in the last pass (gets reset)
@@ -505,7 +507,9 @@ impl PeerNetwork {
             Err(e) => {
                 debug!(
                     "{:?}: failed to get chunk versions for {}: {:?}",
-                    self.local_peer, contract_id, &e
+                    self.get_local_peer(),
+                    contract_id,
+                    &e
                 );
 
                 // most likely indicates that this DB doesn't exist
@@ -514,6 +518,14 @@
         };
 
         let num_outbound_replicas = self.count_outbound_stackerdb_replicas(contract_id) as u32;
+
+        debug!(
+            "{:?}: inventory for {} has {} outbound replicas; versions are {:?}",
+            self.get_local_peer(),
+            contract_id,
+            num_outbound_replicas,
+            &slot_versions
+        );
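// For context: a sketch of how a peer typically consumes this inventory
// message (an illustration under assumed names, not code from this
// changeset). `slot_versions` is the per-slot version vector constructed
// just below; a strictly higher remote version for a slot means the local
// copy of that chunk is stale and should be fetched or re-synced.
fn stale_local_slots_sketch(
    local_slot_versions: &[u32],
    remote_slot_versions: &[u32],
) -> Vec<usize> {
    local_slot_versions
        .iter()
        .zip(remote_slot_versions.iter())
        .enumerate()
        // Keep only the slots where the remote peer advertises a newer version.
        .filter(|(_slot_id, (local, remote))| remote > local)
        .map(|(slot_id, _)| slot_id)
        .collect()
}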
StacksMessageType::StackerDBChunkInv(StackerDBChunkInvData { slot_versions, num_outbound_replicas, From bc8311611f0c17a35d77fb3c0a3329675e19ae55 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:20:51 -0400 Subject: [PATCH 045/910] fix: don't disconnect a replica if it nacked us for asking for a stale version; instead, immediately re-sync --- stackslib/src/net/stackerdb/sync.rs | 45 ++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 53a1f67c469..8444ed5e551 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -71,6 +71,7 @@ impl StackerDBSync { total_pushed: 0, last_run_ts: 0, need_resync: false, + stale_inv: false, stale_neighbors: HashSet::new(), num_connections: 0, num_attempted_connections: 0, @@ -212,6 +213,7 @@ impl StackerDBSync { self.write_freq = config.write_freq; self.need_resync = false; + self.stale_inv = false; self.last_run_ts = get_epoch_time_secs(); self.state = StackerDBSyncState::ConnectBegin; @@ -256,7 +258,7 @@ impl StackerDBSync { .get_slot_write_timestamps(&self.smart_contract_id)?; if local_slot_versions.len() != local_write_timestamps.len() { - let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}); abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len()); + let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}) for {}; abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len(), &self.smart_contract_id); warn!("{}", &msg); return Err(net_error::Transient(msg)); } @@ -270,12 +272,13 @@ impl StackerDBSync { let write_ts = local_write_timestamps[i]; if write_ts + self.write_freq > now { debug!( - "{:?}: Chunk {} was written too frequently ({} + {} >= {}), so will not fetch chunk", + "{:?}: Chunk {} was written too frequently ({} + {} >= {}) in {}, so will not fetch chunk", network.get_local_peer(), i, write_ts, self.write_freq, - now + now, + &self.smart_contract_id, ); continue; } @@ -343,10 +346,11 @@ impl StackerDBSync { schedule.reverse(); debug!( - "{:?}: Will request up to {} chunks for {}", + "{:?}: Will request up to {} chunks for {}. Schedule: {:?}", network.get_local_peer(), &schedule.len(), &self.smart_contract_id, + &schedule ); Ok(schedule) } @@ -520,12 +524,13 @@ impl StackerDBSync { if *old_version < new_inv.slot_versions[old_slot_id] { // remote peer indicated that it has a newer version of this chunk. 
debug!(
-                            "{:?}: peer {:?} has a newer version of slot {} ({} < {})",
+                            "{:?}: peer {:?} has a newer version of slot {} ({} < {}) in {}",
                             _network.get_local_peer(),
                             &naddr,
                             old_slot_id,
                             old_version,
-                            new_inv.slot_versions[old_slot_id]
+                            new_inv.slot_versions[old_slot_id],
+                            &self.smart_contract_id,
                         );
                         resync = true;
                         break;
@@ -833,9 +838,10 @@
                 }
                 StacksMessageType::Nack(data) => {
                     debug!(
-                        "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us with code {}",
+                        "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv (on {}) with code {}",
                         &network.get_local_peer(),
                         &naddr,
+                        &self.smart_contract_id,
                         data.error_code
                     );
                     self.connected_replicas.remove(&naddr);
@@ -851,9 +857,10 @@
                 }
             };
             debug!(
-                "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}",
+                "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}",
                 network.get_local_peer(),
-                &naddr
+                &naddr,
+                &chunk_inv_opt
             );
 
             if let Some(chunk_inv) = chunk_inv_opt {
@@ -969,14 +976,17 @@
                 StacksMessageType::StackerDBChunk(data) => data,
                 StacksMessageType::Nack(data) => {
                     debug!(
-                        "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk with code {}",
+                        "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk (on {}) with code {}",
                         network.get_local_peer(),
                         &naddr,
+                        &self.smart_contract_id,
                         data.error_code
                     );
-                    self.connected_replicas.remove(&naddr);
                     if data.error_code == NackErrorCodes::StaleView {
                         self.stale_neighbors.insert(naddr);
+                    } else if data.error_code == NackErrorCodes::StaleVersion {
+                        // try again immediately, without throttling
+                        self.stale_inv = true;
                     }
                     continue;
                 }
@@ -1079,7 +1089,6 @@
                     &selected_neighbor,
                     &e
                 );
-                self.connected_replicas.remove(&selected_neighbor);
                 continue;
             }
 
@@ -1119,7 +1128,6 @@
                         &naddr,
                         data.error_code
                     );
-                    self.connected_replicas.remove(&naddr);
                     if data.error_code == NackErrorCodes::StaleView {
                         self.stale_neighbors.insert(naddr);
                     }
@@ -1279,8 +1287,19 @@
                 }
             }
             StackerDBSyncState::Finished => {
+                let stale_inv = self.stale_inv;
+
                 let result = self.reset(Some(network), config);
                 self.state = StackerDBSyncState::ConnectBegin;
+
+                if stale_inv {
+                    debug!(
+                        "{:?}: immediately retry StackerDB sync on {} due to stale inventory",
+                        network.get_local_peer(),
+                        &self.smart_contract_id
+                    );
+                    self.wakeup();
+                }
                 return Ok(Some(result));
             }
         };

From fb16102413695cc124be2a0102da81cd9fcb5888 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:21:22 -0400
Subject: [PATCH 046/910] fix: remove very noisy debug message

---
 testnet/stacks-node/src/config.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index f52c5d6ba90..bdf3bd4c3dc 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -2125,7 +2125,6 @@ impl NodeConfig {
                 let contract_name = NakamotoSigners::make_signers_db_name(signer_set, message_id);
                 let contract_id = boot_code_id(contract_name.as_str(), is_mainnet);
                 if !self.stacker_dbs.contains(&contract_id) {
-                    debug!("A miner/stacker must subscribe to the {contract_id} stacker db contract. 
Forcibly subscribing...");
                     self.stacker_dbs.push(contract_id);
                 }
             }
@@ -2135,7 +2134,6 @@ impl NodeConfig {
     pub fn add_miner_stackerdb(&mut self, is_mainnet: bool) {
         let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet);
         if !self.stacker_dbs.contains(&miners_contract_id) {
-            debug!("A miner/stacker must subscribe to the {miners_contract_id} stacker db contract. Forcibly subscribing...");
             self.stacker_dbs.push(miners_contract_id);
         }
     }

From 3835a184772a76477ed64fa61a561171a35267ca Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:21:35 -0400
Subject: [PATCH 047/910] chore: log stackerdb events

---
 testnet/stacks-node/src/event_dispatcher.rs | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs
index 5a72e4ca0a9..06bde17d406 100644
--- a/testnet/stacks-node/src/event_dispatcher.rs
+++ b/testnet/stacks-node/src/event_dispatcher.rs
@@ -1277,6 +1277,11 @@ impl EventDispatcher {
         contract_id: QualifiedContractIdentifier,
         modified_slots: Vec<StackerDBChunkData>,
     ) {
+        debug!(
+            "event_dispatcher: New StackerDB chunk events for {}: {:?}",
+            contract_id, modified_slots
+        );
+
         let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false);
 
         let interested_receiver = STACKER_DB_CHANNEL.is_active(&contract_id);
@@ -1294,7 +1299,7 @@
             if let Some(channel) = interested_receiver {
                 if let Err(send_err) = channel.send(event) {
                     warn!(
-                        "Failed to send StackerDB event to WSTS coordinator channel. Miner thread may have exited.";
+                        "Failed to send StackerDB event to signer coordinator channel. Miner thread may have exited.";
                         "err" => ?send_err
                     );
                 }

From 99d55364ae0281dfc509f9abb4548c743dd24878 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:21:44 -0400
Subject: [PATCH 048/910] feat: allow other threads to raise the relayer's
 initiative to commit

---
 testnet/stacks-node/src/globals.rs | 33 ++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs
index b0f338032a5..675a7474809 100644
--- a/testnet/stacks-node/src/globals.rs
+++ b/testnet/stacks-node/src/globals.rs
@@ -69,6 +69,9 @@ pub struct Globals<T> {
     /// previously-selected best tips
     /// maps stacks height to tip candidate
     previous_best_tips: Arc<Mutex<BTreeMap<u64, TipCandidate>>>,
+    /// Initiative flag.
+    /// Raised when the main loop should wake up and do something.
+    initiative: Arc<Mutex<bool>>,
 }
 
 // Need to manually implement Clone, because [derive(Clone)] requires
@@ -90,6 +93,7 @@ impl<T> Clone for Globals<T> {
             start_mining_height: self.start_mining_height.clone(),
             estimated_winning_probs: self.estimated_winning_probs.clone(),
             previous_best_tips: self.previous_best_tips.clone(),
+            initiative: self.initiative.clone(),
         }
     }
 }
@@ -119,6 +123,7 @@ impl<T> Globals<T> {
             start_mining_height: Arc::new(Mutex::new(start_mining_height)),
             estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())),
             previous_best_tips: Arc::new(Mutex::new(BTreeMap::new())),
+            initiative: Arc::new(Mutex::new(false)),
         }
     }
 
@@ -428,4 +433,32 @@
             }
         }
     }
+
+    /// Raise the initiative flag
+    pub fn raise_initiative(&self) {
+        match self.initiative.lock() {
+            Ok(mut initiative) => {
+                *initiative = true;
+            }
+            Err(_e) => {
+                error!("FATAL: failed to lock initiative");
+                panic!();
+            }
+        }
+    }
+
+    /// Clear the initiative flag and return its value
+    pub fn take_initiative(&self) -> bool {
+        match self.initiative.lock() {
+            Ok(mut initiative) => {
+                let ret = *initiative;
+                *initiative = false;
+                ret
+            }
+            Err(_e) => {
+                error!("FATAL: failed to lock initiative");
+                panic!();
+            }
+        }
+    }
 }

From ef38358f50395ffcf22b6f2bec04a98c1ddf8f19 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:22:20 -0400
Subject: [PATCH 049/910] fix: new burnchain block means new initiative to
 commit

---
 testnet/stacks-node/src/nakamoto_node.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs
index c57d630a583..d9f44cc67bd 100644
--- a/testnet/stacks-node/src/nakamoto_node.rs
+++ b/testnet/stacks-node/src/nakamoto_node.rs
@@ -269,7 +269,10 @@ impl StacksNode {
                 snapshot.parent_burn_header_hash,
                 snapshot.winning_stacks_block_hash,
             ))
-            .map_err(|_| Error::ChannelClosed)
+            .map_err(|_| Error::ChannelClosed)?;
+
+        self.globals.raise_initiative();
+        Ok(())
     }
 
     /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp

From d1374d9a698d2d41e92be1d1e95e5692461b1221 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 30 Jul 2024 14:22:35 -0400
Subject: [PATCH 050/910] fix: poll for 1s (since that's the stackerdb minimum
 sync time, and thus its maximum time between steps), and raise relayer
 initiative when there's a new network result

---
 testnet/stacks-node/src/nakamoto_node/peer.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs
index dc060e06b6d..1fd53256237 100644
--- a/testnet/stacks-node/src/nakamoto_node/peer.rs
+++ b/testnet/stacks-node/src/nakamoto_node/peer.rs
@@ -190,7 +190,7 @@ impl PeerThread {
             info!("`PeerNetwork::bind()` skipped, already bound");
         }
 
-        let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2);
+        let poll_timeout = cmp::min(1000, config.miner.first_attempt_time_ms / 2);
 
         PeerThread {
             config,
@@ -347,7 +347,11 @@ impl PeerThread {
                 }
             }
         } else {
-            debug!("P2P: Dispatched result to Relayer!");
+            debug!(
+                "P2P: Dispatched result to Relayer! 
{} results remaining", + self.results_with_data.len() + ); + self.globals.raise_initiative(); } } From d37f2bc28bd7696fb8c69ba9c63db65b896ed605 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 30 Jul 2024 21:22:40 +0300 Subject: [PATCH 051/910] move to debug block proposal log --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b38467d4549..a4716e4c800 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - info!("{self}: Received a block proposal: {block_proposal:?}"); + debug!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. debug!( From a071614565324a3c1e928658b0d0584f785ab236 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:23:13 -0400 Subject: [PATCH 052/910] chore: process a new initiative based on either a timeout, or on another thread's prompting --- .../stacks-node/src/nakamoto_node/relayer.rs | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 12f7dbc9e94..f9edd3db1ae 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -48,9 +48,9 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFPublicKey; +use stacks_common::util::{get_epoch_time_ms, sleep_ms}; use super::miner::MinerReason; use super::{ @@ -1065,8 +1065,11 @@ impl RelayerThread { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); + while self.globals.keep_running() { - let directive = if Instant::now() >= self.next_initiative { + let raised_initiative = self.globals.take_initiative(); + let timed_out = Instant::now() >= self.next_initiative; + let directive = if raised_initiative || timed_out { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1074,19 +1077,19 @@ impl RelayerThread { None }; - let Some(timeout) = self.next_initiative.checked_duration_since(Instant::now()) else { - // next_initiative timeout occurred, so go to next loop iteration. 
- continue; - }; - let directive = if let Some(directive) = directive { directive } else { - match relay_rcv.recv_timeout(timeout) { + match relay_rcv.recv_timeout(Duration::from_millis( + self.config.node.next_initiative_delay, + )) { Ok(directive) => directive, - // timed out, so go to next loop iteration - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, + Err(RecvTimeoutError::Timeout) => { + continue; + } + Err(RecvTimeoutError::Disconnected) => { + break; + } } }; From 1a02c613195584d2893539d41b70b3d7179d14ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:23:34 -0400 Subject: [PATCH 053/910] chore: new burnchain block means new initiative --- testnet/stacks-node/src/run_loop/nakamoto.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 511b6c84b2d..3ecd4f1e7de 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -711,6 +711,7 @@ impl RunLoop { sortition_db_height ); last_tenure_sortition_height = sortition_db_height; + globals.raise_initiative(); } } } From 9e9aa77752703acd330479b83a2daf662778541d Mon Sep 17 00:00:00 2001 From: janniks Date: Wed, 31 Jul 2024 00:13:29 +0200 Subject: [PATCH 054/910] fix: only force change output for block commits --- .../src/burnchains/bitcoin_regtest_controller.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index b09a71e5cfa..39ef40490b4 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -872,6 +872,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1009,6 +1010,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1092,6 +1094,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1166,6 +1169,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1244,6 +1248,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1331,6 +1336,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1421,6 +1427,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, + true, // only block commit op requires change output to exist )?; let serialized_tx = SerializedTx::new(tx.clone()); @@ -1691,6 +1698,7 @@ impl BitcoinRegtestController { fee_rate: u64, utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, + force_change_output: bool, ) -> Option<()> { // spend UTXOs in order by confirmations. Spend the least-confirmed UTXO first, and in the // event of a tie, spend the smallest-value UTXO first. 
@@ -1721,7 +1729,7 @@ impl BitcoinRegtestController { spent_in_outputs + min_tx_size * fee_rate + estimated_rbf, &mut utxos_cloned, signer, - true, + force_change_output, ); let serialized_tx = SerializedTx::new(tx_cloned); cmp::max(min_tx_size, serialized_tx.bytes.len() as u64) @@ -1738,7 +1746,7 @@ impl BitcoinRegtestController { spent_in_outputs + tx_size * fee_rate + rbf_fee, utxos_set, signer, - true, + force_change_output, ); signer.dispose(); Some(()) From 0a0c5a462e0eb2b448743e2d5570ffb925a488f4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 31 Jul 2024 00:43:34 -0400 Subject: [PATCH 055/910] chore: improve signer logging --- stacks-signer/src/v0/signer.rs | 5 ++++- stackslib/src/net/api/postblock_proposal.rs | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a4716e4c800..77f49bf9f9f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -445,7 +445,10 @@ impl Signer { } }; // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {response:?}"); + info!( + "{self}: Broadcasting a block response to stacks node: {response:?}"; + "signer_sighash" => %block_info.signer_signature_hash(), + ); match self .stackerdb .send_message_with_retry::(response.clone().into()) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 901720ee814..6c1d5526b5d 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -512,6 +512,15 @@ impl RPCRequestHandler for RPCBlockProposalRequestHandler { .take() .ok_or(NetError::SendError("`block_proposal` not set".into()))?; + info!( + "Received block proposal request"; + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "block_header_hash" => %block_proposal.block.header.block_hash(), + "height" => block_proposal.block.header.chain_length, + "tx_count" => block_proposal.block.txs.len(), + "parent_stacks_block_id" => %block_proposal.block.header.parent_block_id, + ); + let res = node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { if network.is_proposal_thread_running() { return Err(( From f505399d1c3df52471bf65bd627f7ada501fad8b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 31 Jul 2024 10:57:27 -0400 Subject: [PATCH 056/910] WIP: add initial test outline with a bunch of TODOs Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 115 +++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5ebb8937175..fe6c4ff619e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -36,6 +36,7 @@ use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; +use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -1373,3 +1374,117 @@ fn empty_sortition() { } signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test asserts that signer set rollover works as expected. 
+/// Specifically, if a new set of signers are registered for an upcoming reward cycle,
+/// old signers shut down operation and the new signers take over with the commencement of
+/// the next reward cycle.
+fn signer_set_rollover() {
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let new_num_signers = 5;
+
+    let new_signer_private_keys: Vec<_> = (0..new_num_signers)
+        .into_iter()
+        .map(|_| StacksPrivateKey::new())
+        .collect();
+    let new_signer_addresses: Vec<_> = new_signer_private_keys
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect();
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Boot with some initial signer set
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr.clone(), send_amt + send_fee)],
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let short_timeout = Duration::from_secs(20);
+
+    let run_stamp = rand::random();
+
+    // Setup the new signers that will take over
+    let new_signer_configs = build_signer_config_tomls(
+        &new_signer_private_keys,
+        &signer_test.running_nodes.conf.node.rpc_bind,
+        Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds.
+        &Network::Testnet,
+        "12345",
+        run_stamp,
+        3000 + num_signers,
+        Some(100_000),
+        None,
+        Some(9000),
+    );
+
+    let new_spawned_signers: Vec<_> = (0..new_num_signers)
+        .into_iter()
+        .map(|i| {
+            info!("spawning signer");
+            let signer_config =
+                SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap();
+            SpawnedSigner::new(signer_config)
+        })
+        .collect();
+
+    // TODO: may need to modify signer_test to not auto stack and delegate the way it does right now. I think it delegates for 12 reward cycles, and we should delegate for only one before transferring to the new signer set.
+
+    // TODO: Advance to the first reward cycle, stacking and delegating to the old signers beforehand
+    signer_test.boot_to_epoch_3();
+
+    // TODO: verify that the first reward cycle has the old signers in the reward set
+    let reward_cycle = signer_test.get_current_reward_cycle();
+    let old_signer_slot_ids: Vec<_> = signer_test
+        .get_signer_indices(reward_cycle)
+        .iter()
+        .map(|id| id.0)
+        .collect();
+    // TODO: manually trigger a stacks transaction and verify that only OLD signer signatures are found in the signed block
+
+    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let start_time = Instant::now();
+    // submit a tx so that the miner will mine an extra block
+    let sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    let mined_block = signer_test.mine_nakamoto_block(short_timeout);
+    // TODO: verify the mined_block signatures against the OLD signer set (might need to update event to take vector of message signatures?)
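// One possible shape for that verification TODO (a sketch, not part of this
// changeset): recover each signature over the block's signer sighash back to
// a public key and require membership in the expected signer set. It assumes
// the mined-block event surfaces a `Vec<MessageSignature>` and that
// `expected_signers` holds the OLD signer set's public keys.
fn assert_signed_only_by_sketch(
    signer_sighash: &Sha512Trunc256Sum,
    signer_signatures: &[MessageSignature],
    expected_signers: &[Secp256k1PublicKey],
) {
    for signature in signer_signatures {
        // Recover the compressed public key that produced this signature.
        let pubkey = Secp256k1PublicKey::recover_to_pubkey(&signer_sighash.0, signature)
            .expect("Failed to recover public key from signature");
        assert!(
            expected_signers.iter().any(|pk| pk == &pubkey),
            "Block was signed by a key outside the expected signer set"
        );
    }
}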
+ + //TODO: advance to the next reward cycle, stacking and delegating to the new signers beforehand + let reward_cycle = signer_test.get_current_reward_cycle(); + let new_signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + + // submit a tx so that the miner will mine an extra block + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 1; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + let mined_block = signer_test.mine_nakamoto_block(short_timeout); + // TODO: verify the mined_block signatures against the NEW signer set + + signer_test.shutdown(); + // TODO: shutdown the new signers as well +} From 71cb039f850b832cde54851e0a879eb9902a965b Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 29 Jul 2024 10:58:59 -0400 Subject: [PATCH 057/910] test: Add integration test for mock mining --- .github/workflows/bitcoin-tests.yml | 1 + .../stacks-node/src/nakamoto_node/miner.rs | 34 ++- .../stacks-node/src/nakamoto_node/relayer.rs | 22 +- .../src/tests/nakamoto_integrations.rs | 280 ++++++++++++++++++ 4 files changed, 321 insertions(+), 16 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a5604efd7dd..b90a06c209e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -97,6 +97,7 @@ jobs: - tests::nakamoto_integrations::check_block_info - tests::nakamoto_integrations::check_block_info_rewards - tests::nakamoto_integrations::continue_tenure_extend + - tests::nakamoto_integrations::mock_mining # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 70a6f7b3e3b..06a6e37006d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -222,6 +222,32 @@ impl BlockMinerThread { // now, actually run this tenure loop { let new_block = loop { + if self.config.get_node_config(false).mock_mining { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = SortitionDB::open( + &burn_db_path, + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let burn_tip_changed = self.check_burn_tip_changed(&burn_db); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + match burn_tip_changed + .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) + { + Ok(..) 
=> {} + Err(NakamotoNodeError::ParentNotFound) => { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + continue; + } + Err(e) => { + warn!("Mock miner failed to load parent info: {e:?}"); + return Err(e); + } + } + } match self.mine_block(&stackerdbs) { Ok(x) => break Some(x), Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { @@ -401,6 +427,10 @@ impl BlockMinerThread { )); }; + if self.config.get_node_config(false).mock_mining { + return Ok((reward_set, Vec::new())); + } + let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( @@ -411,10 +441,6 @@ impl BlockMinerThread { }, )?; - if self.config.get_node_config(false).mock_mining { - return Ok((reward_set, Vec::new())); - } - *attempts += 1; let signature = coordinator.begin_sign_v0( new_block, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 0d8567a95d7..be1ff546985 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -794,7 +794,7 @@ impl RelayerThread { fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to stop tenure: {e:?}"); return Ok(()); } debug!("Relayer: successfully stopped tenure."); @@ -867,7 +867,7 @@ impl RelayerThread { debug!("Relayer: successfully started new tenure."); } Err(e) => { - error!("Relayer: Failed to start new tenure: {:?}", e); + error!("Relayer: Failed to start new tenure: {e:?}"); } } Ok(()) @@ -879,13 +879,11 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> bool { - let miner_instruction = - match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) { - Ok(mi) => mi, - Err(_) => { - return false; - } - }; + let Ok(miner_instruction) = + self.process_sortition(consensus_hash, burn_hash, committed_index_hash) + else { + return false; + }; match miner_instruction { MinerDirective::BeginTenure { @@ -901,7 +899,7 @@ impl RelayerThread { debug!("Relayer: successfully started new tenure."); } Err(e) => { - error!("Relayer: Failed to start new tenure: {:?}", e); + error!("Relayer: Failed to start new tenure: {e:?}"); } }, MinerDirective::ContinueTenure { new_burn_view } => { @@ -910,7 +908,7 @@ impl RelayerThread { debug!("Relayer: successfully handled continue tenure."); } Err(e) => { - error!("Relayer: Failed to continue tenure: {:?}", e); + error!("Relayer: Failed to continue tenure: {e:?}"); return false; } } @@ -920,7 +918,7 @@ impl RelayerThread { debug!("Relayer: successfully stopped tenure."); } Err(e) => { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to stop tenure: {e:?}"); } }, } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9b15b83afb4..49000abc2c0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6589,3 +6589,283 @@ fn check_block_info_rewards() { run_loop_thread.join().unwrap(); } + +/// Test Nakamoto mock miner by booting a follower node +#[test] +#[ignore] +fn mock_mining() { + if 
env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let mut follower_conf = naka_conf.clone(); + follower_conf.node.mock_mining = true; + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + let Counters { + naka_mined_blocks: follower_naka_mined_blocks, + .. 
+ } = follower_run_loop.counters(); + + let mock_mining_blocks_start = follower_naka_mined_blocks.load(Ordering::SeqCst); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + let follower_naka_mined_blocks_before = follower_naka_mined_blocks.load(Ordering::SeqCst); + + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let mock_miner_timeout = Instant::now(); + while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before + { + if mock_miner_timeout.elapsed() >= Duration::from_secs(30) { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ); + } + thread::sleep(Duration::from_millis(100)); + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + let expected_blocks_mined = (inter_blocks_per_tenure + 1) * tenure_count; + let expected_tip_height = block_height_pre_3_0 + expected_blocks_mined; + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, expected_tip_height, + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + // Check follower's mock miner + let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); + let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; + assert_eq!( + blocks_mock_mined, tenure_count, + "Should have mock 
mined `tenure_count` nakamoto blocks" + ); + + // wait for follower to reach the chain tip + loop { + sleep_ms(1000); + let follower_node_info = get_chain_info(&follower_conf); + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash() + { + break; + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} From c50dff82555df388279fc1accf87d4eef9269b02 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 31 Jul 2024 11:51:44 -0500 Subject: [PATCH 058/910] chore: use 231 for default epoch25/epoch30 transition in int tests, customize for mock_signing --- .../src/tests/nakamoto_integrations.rs | 4 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 19 +++++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3ab7248db6e..c2fd6245f2a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -167,13 +167,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 201, - end_height: 251, + end_height: 231, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 251, + start_height: 231, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 17c3fdf814d..98e2d64b551 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,8 +13,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
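As a quick cross-check on the `mock_mining` test above, its final tip-height assertion follows directly from the test's own constants; a minimal worked example, assuming nothing beyond those constants:

```rust
// Worked example of mock_mining's expected chain advance (constants from the test above).
let tenure_count: u64 = 5;
let inter_blocks_per_tenure: u64 = 9;
// Each tenure contributes one tenure-start block plus the interim blocks.
let expected_blocks_mined = (inter_blocks_per_tenure + 1) * tenure_count;
assert_eq!(expected_blocks_mined, 50);
// The tip must sit exactly this far past the pre-epoch-3.0 height, and the mock
// miner is expected to have produced one block per tenure (tenure_count in total).
```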
-use std::str::FromStr; use std::ops::Add; +use std::str::FromStr; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -1566,6 +1566,8 @@ fn empty_sortition() { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; }, + |_| {}, + &[], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(20); @@ -1709,10 +1711,23 @@ fn mock_sign_epoch_25() { let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(5)), + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + }, + &[], ); let epochs = signer_test From 7f6e541cb9dce67ba0111c03dcdb950781ef9188 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 31 Jul 2024 14:24:39 -0400 Subject: [PATCH 059/910] Add some logs to mock sign checks Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ad2a459b960..b645b46a73d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -169,12 +169,19 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { - if self.reward_cycle == current_reward_cycle { - // We are in epoch 2.5, so we should mock mine to prove we are still alive. - self.mock_sign(*burn_height, stacks_client); - } + let Ok(epoch) = stacks_client.get_node_epoch() else { + warn!("{self}: Failed to determine node epoch. Cannot mock sign."); + return; }; + debug!("{self}: Epoch 2.5 signer received a new burn block event."; + "burn_height" => burn_height, + "current_reward_cycle" => current_reward_cycle, + "epoch" => ?epoch + ); + if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + self.mock_sign(*burn_height, stacks_client); + } } } } From 5085bdad6d8b5611783093f426e8f41f159f7c09 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 31 Jul 2024 15:19:59 -0400 Subject: [PATCH 060/910] Add error log when failing to determine node epoch in mock sign Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b645b46a73d..574c4d8df9c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -169,9 +169,12 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - let Ok(epoch) = stacks_client.get_node_epoch() else { - warn!("{self}: Failed to determine node epoch. Cannot mock sign."); - return; + let epoch = match stacks_client.get_node_epoch() { + Ok(epoch) => epoch, + Err(e) => { + warn!("{self}: Failed to determine node epoch. 
Cannot mock sign: {e}"); + return; + } }; debug!("{self}: Epoch 2.5 signer received a new burn block event."; "burn_height" => burn_height, From bf30fa62e8257d8c702788761cd7c4ceefef02a9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 1 Aug 2024 08:24:59 -0400 Subject: [PATCH 061/910] Deserialize only the necessary info from peer info to be more resilient to peer info updates in the signer Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 161 ++++++++++++++------- stacks-signer/src/cli.rs | 4 +- stacks-signer/src/client/stacks_client.rs | 19 ++- stacks-signer/src/v0/signer.rs | 14 +- testnet/stacks-node/src/tests/signer/v0.rs | 3 +- 5 files changed, 131 insertions(+), 70 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f16dd6d4ed7..7d411f89b5b 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -239,57 +239,41 @@ pub trait StacksMessageCodecExtensions: Sized { fn inner_consensus_deserialize(fd: &mut R) -> Result; } -/// A snapshot of the signer view of the stacks node to be used for mock signing. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockSignData { - /// The stacks tip consensus hash at the time of the mock signature +/// The signer relevant peer information from the stacks node +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PeerInfo { + /// The burn block height + pub burn_block_height: u64, + /// The consensus hash of the stacks tip pub stacks_tip_consensus_hash: ConsensusHash, - /// The stacks tip header hash at the time of the mock signature + /// The stacks tip pub stacks_tip: BlockHeaderHash, + /// The stacks tip height + pub stacks_tip_height: u64, + /// The pox consensus + pub pox_consensus: ConsensusHash, /// The server version pub server_version: String, - /// The burn block height that triggered the mock signature - pub burn_block_height: u64, - /// The burn block height of the peer view at the time of the mock signature. Note - /// that this may be different from the burn_block_height if the peer view is stale. 
- pub peer_burn_block_height: u64, - /// The POX consensus hash at the time of the mock signature - pub pox_consensus: ConsensusHash, - /// The chain id for the mock signature - pub chain_id: u32, -} - -impl MockSignData { - fn new(peer_view: RPCPeerInfoData, burn_block_height: u64, chain_id: u32) -> Self { - Self { - stacks_tip_consensus_hash: peer_view.stacks_tip_consensus_hash, - stacks_tip: peer_view.stacks_tip, - server_version: peer_view.server_version, - burn_block_height, - peer_burn_block_height: peer_view.burn_block_height, - pox_consensus: peer_view.pox_consensus, - chain_id, - } - } } -impl StacksMessageCodec for MockSignData { +impl StacksMessageCodec for PeerInfo { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.burn_block_height)?; write_next(fd, self.stacks_tip_consensus_hash.as_bytes())?; write_next(fd, &self.stacks_tip)?; + write_next(fd, &self.stacks_tip_height)?; write_next(fd, &(self.server_version.as_bytes().len() as u8))?; fd.write_all(self.server_version.as_bytes()) .map_err(CodecError::WriteError)?; - write_next(fd, &self.burn_block_height)?; - write_next(fd, &self.peer_burn_block_height)?; write_next(fd, &self.pox_consensus)?; - write_next(fd, &self.chain_id)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { + let burn_block_height = read_next::(fd)?; let stacks_tip_consensus_hash = read_next::(fd)?; let stacks_tip = read_next::(fd)?; + let stacks_tip_height = read_next::(fd)?; let len_byte: u8 = read_next(fd)?; let mut bytes = vec![0u8; len_byte as usize]; fd.read_exact(&mut bytes).map_err(CodecError::ReadError)?; @@ -299,17 +283,44 @@ impl StacksMessageCodec for MockSignData { "Failed to parse server version name: could not construct from utf8".to_string(), ) })?; - let burn_block_height = read_next::(fd)?; - let peer_burn_block_height = read_next::(fd)?; let pox_consensus = read_next::(fd)?; - let chain_id = read_next::(fd)?; Ok(Self { + burn_block_height, stacks_tip_consensus_hash, stacks_tip, + stacks_tip_height, server_version, - burn_block_height, - peer_burn_block_height, pox_consensus, + }) + } +} + +/// A snapshot of the signer view of the stacks node to be used for mock signing. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignData { + /// The view of the stacks node peer information at the time of the mock signature + pub peer_info: PeerInfo, + /// The burn block height of the event that triggered the mock signature + pub event_burn_block_height: u64, + /// The chain id for the mock signature + pub chain_id: u32, +} + +impl StacksMessageCodec for MockSignData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + self.peer_info.consensus_serialize(fd)?; + write_next(fd, &self.event_burn_block_height)?; + write_next(fd, &self.chain_id)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let peer_info = PeerInfo::consensus_deserialize(fd)?; + let event_burn_block_height = read_next::(fd)?; + let chain_id = read_next::(fd)?; + Ok(Self { + peer_info, + event_burn_block_height, chain_id, }) } @@ -326,16 +337,21 @@ pub struct MockSignature { } impl MockSignature { - /// Create a new mock sign data struct from the provided peer info, burn block height, chain id, and private key. + /// Create a new mock sign data struct from the provided event burn block height, peer info, chain id, and private key. + /// Note that peer burn block height and event burn block height may not be the same if the peer view is stale.
pub fn new( - peer_view: RPCPeerInfoData, - burn_block_height: u64, + event_burn_block_height: u64, + peer_info: PeerInfo, chain_id: u32, stacks_private_key: &StacksPrivateKey, ) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - sign_data: MockSignData::new(peer_view, burn_block_height, chain_id), + sign_data: MockSignData { + peer_info, + event_burn_block_height, + chain_id, + }, }; sig.sign(stacks_private_key) .expect("Failed to sign MockSignature"); @@ -350,25 +366,39 @@ impl MockSignature { TupleData::from_data(vec![ ( "stacks-tip-consensus-hash".into(), - Value::buff_from(self.sign_data.stacks_tip_consensus_hash.as_bytes().into()) - .unwrap(), + Value::buff_from( + self.sign_data + .peer_info + .stacks_tip_consensus_hash + .as_bytes() + .into(), + ) + .unwrap(), ), ( "stacks-tip".into(), - Value::buff_from(self.sign_data.stacks_tip.as_bytes().into()).unwrap(), + Value::buff_from(self.sign_data.peer_info.stacks_tip.as_bytes().into()) + .unwrap(), + ), + ( + "stacks-tip-height".into(), + Value::UInt(self.sign_data.peer_info.stacks_tip_height.into()), ), ( "server-version".into(), - Value::string_ascii_from_bytes(self.sign_data.server_version.clone().into()) - .unwrap(), + Value::string_ascii_from_bytes( + self.sign_data.peer_info.server_version.clone().into(), + ) + .unwrap(), ), ( - "burn-block-height".into(), - Value::UInt(self.sign_data.burn_block_height.into()), + "event-burn-block-height".into(), + Value::UInt(self.sign_data.event_burn_block_height.into()), ), ( "pox-consensus".into(), - Value::buff_from(self.sign_data.pox_consensus.as_bytes().into()).unwrap(), + Value::buff_from(self.sign_data.peer_info.pox_consensus.as_bytes().into()) + .unwrap(), ), ]) .expect("Error creating signature hash"), @@ -822,23 +852,33 @@ mod test { assert_eq!(signer_message, deserialized_signer_message); } - fn random_mock_sign_data() -> MockSignData { + fn random_peer_data() -> PeerInfo { + let burn_block_height = thread_rng().next_u64(); let stacks_tip_consensus_byte: u8 = thread_rng().gen(); let stacks_tip_byte: u8 = thread_rng().gen(); + let stacks_tip_height = thread_rng().next_u64(); + let server_version = "0.0.0".to_string(); let pox_consensus_byte: u8 = thread_rng().gen(); + PeerInfo { + burn_block_height, + stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), + stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]), + stacks_tip_height, + server_version, + pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + } + } + fn random_mock_sign_data() -> MockSignData { let chain_byte: u8 = thread_rng().gen_range(0..=1); let chain_id = if chain_byte == 1 { CHAIN_ID_TESTNET } else { CHAIN_ID_MAINNET }; + let peer_info = random_peer_data(); MockSignData { - stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), - stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]), - server_version: "0.0.0".to_string(), - burn_block_height: thread_rng().next_u64(), - peer_burn_block_height: thread_rng().next_u64(), - pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + peer_info, + event_burn_block_height: thread_rng().next_u64(), chain_id, } } @@ -871,6 +911,15 @@ mod test { .expect("Failed to verify MockSignature")); } + #[test] + fn serde_peer_data() { + let peer_data = random_peer_data(); + let serialized_data = peer_data.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize PeerInfo"); + assert_eq!(peer_data, deserialized_data); + } + #[test] fn serde_mock_signature() { let 
mock_signature = MockSignature { diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index d3e998e15c5..74e2cd2344c 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -44,9 +44,9 @@ extern crate alloc; const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); #[cfg(debug_assertions)] -const BUILD_TYPE: &'static str = "debug"; +const BUILD_TYPE: &str = "debug"; #[cfg(not(debug_assertions))] -const BUILD_TYPE: &'static str = "release"; +const BUILD_TYPE: &str = "release"; lazy_static! { static ref VERSION_STRING: String = { diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 8c63e181f78..b6337364dbd 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -31,7 +31,6 @@ use blockstack_lib::net::api::get_tenures_fork_info::{ TenureForkingInfo, RPC_TENURE_FORKING_INFO_PATH, }; use blockstack_lib::net::api::getaccount::AccountEntryResponse; -use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; use blockstack_lib::net::api::getstackers::GetStackersResponse; @@ -43,6 +42,7 @@ use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; use clarity::util::hash::to_hex; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; +use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; use serde_json::json; use slog::{slog_debug, slog_warn}; @@ -463,7 +463,7 @@ impl StacksClient { } /// Get the current peer info data from the stacks node - pub fn get_peer_info(&self) -> Result { + pub fn get_peer_info(&self) -> Result { debug!("Getting stacks node info..."); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); @@ -478,7 +478,7 @@ impl StacksClient { if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } - let peer_info_data = response.json::()?; + let peer_info_data = response.json::()?; Ok(peer_info_data) } @@ -1387,7 +1387,18 @@ mod tests { let (response, peer_info) = build_get_peer_info_response(None, None); let h = spawn(move || mock.client.get_peer_info()); write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), peer_info); + let reduced_peer_info = h.join().unwrap().unwrap(); + assert_eq!( + reduced_peer_info.burn_block_height, + peer_info.burn_block_height + ); + assert_eq!(reduced_peer_info.pox_consensus, peer_info.pox_consensus); + assert_eq!( + reduced_peer_info.stacks_tip_consensus_hash, + peer_info.stacks_tip_consensus_hash + ); + assert_eq!(reduced_peer_info.stacks_tip, peer_info.stacks_tip); + assert_eq!(reduced_peer_info.server_version, peer_info.server_version); } #[test] diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 574c4d8df9c..94e8fa04990 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -484,7 +484,7 @@ impl Signer { /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { - let Ok(peer_view) = stacks_client.get_peer_info() else { + let Ok(peer_info) = stacks_client.get_peer_info() else { warn!("{self}: Failed to get peer info. 
Cannot mock sign."); return; }; @@ -494,15 +494,15 @@ impl Signer { CHAIN_ID_TESTNET }; info!("Mock signing for burn block {burn_block_height:?}"; - "stacks_tip_consensus_hash" => ?peer_view.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?peer_view.stacks_tip.clone(), - "peer_burn_block_height" => peer_view.burn_block_height, - "pox_consensus" => ?peer_view.pox_consensus.clone(), - "server_version" => peer_view.server_version.clone(), + "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_info.stacks_tip.clone(), + "peer_burn_block_height" => peer_info.burn_block_height, + "pox_consensus" => ?peer_info.pox_consensus.clone(), + "server_version" => peer_info.server_version.clone(), "chain_id" => chain_id ); let mock_signature = - MockSignature::new(peer_view, burn_block_height, chain_id, &self.private_key); + MockSignature::new(burn_block_height, peer_info, chain_id, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e1f57097e9a..edd9300ff1a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1663,7 +1663,8 @@ fn mock_sign_epoch_25() { .expect("Failed to get message from stackerdb"); for message in messages { if let SignerMessage::MockSignature(mock_signature) = message { - if mock_signature.sign_data.burn_block_height == current_burn_block_height { + if mock_signature.sign_data.event_burn_block_height == current_burn_block_height + { if !mock_signatures.contains(&mock_signature) { mock_signatures.push(mock_signature); } From d65182512cc28584d83a8a0fcd60dcaca1e80ba0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 31 Jul 2024 14:13:18 -0500 Subject: [PATCH 062/910] test: move BTC commit stall to counters() --- .../stacks-node/src/nakamoto_node/relayer.rs | 15 ++++++----- testnet/stacks-node/src/run_loop/neon.rs | 14 +++++++++++ .../src/tests/nakamoto_integrations.rs | 11 ++++---- testnet/stacks-node/src/tests/signer/mod.rs | 5 +++- testnet/stacks-node/src/tests/signer/v0.rs | 25 ++++++++++++++++--- 5 files changed, 54 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 12f7dbc9e94..1234ad20cc9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -66,11 +66,6 @@ use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; -#[cfg(test)] -lazy_static::lazy_static! 
{ - pub static ref TEST_SKIP_COMMIT_OP: std::sync::Mutex> = std::sync::Mutex::new(None); -} - /// Command types for the Nakamoto relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and @@ -937,7 +932,15 @@ impl RelayerThread { let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; #[cfg(test)] { - if TEST_SKIP_COMMIT_OP.lock().unwrap().unwrap_or(false) { + if self + .globals + .counters + .naka_skip_commit_op + .0 + .lock() + .unwrap() + .unwrap_or(false) + { warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); return Ok(()); } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index d4aea34f0e6..663c14e27ba 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -82,6 +82,17 @@ impl std::ops::Deref for RunLoopCounter { } } +#[cfg(test)] +#[derive(Clone)] +pub struct TestFlag(pub Arc>>); + +#[cfg(test)] +impl Default for TestFlag { + fn default() -> Self { + Self(Arc::new(std::sync::Mutex::new(None))) + } +} + #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -95,6 +106,9 @@ pub struct Counters { pub naka_mined_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub naka_mined_tenures: RunLoopCounter, + + #[cfg(test)] + pub naka_skip_commit_op: TestFlag, } impl Counters { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c2fd6245f2a..f6056f1b6fd 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -95,7 +95,6 @@ use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; -use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; @@ -3723,6 +3722,7 @@ fn forked_tenure_is_ignored() { naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, naka_mined_blocks: mined_blocks, + naka_skip_commit_op: test_skip_commit_op, .. } = run_loop.counters(); @@ -3791,7 +3791,7 @@ fn forked_tenure_is_ignored() { info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + test_skip_commit_op.0.lock().unwrap().replace(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -3816,7 +3816,7 @@ fn forked_tenure_is_ignored() { let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); next_block_and(&mut btc_regtest_controller, 60, || { - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + test_skip_commit_op.0.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); Ok(commits_count > commits_before && blocks_count > blocks_before) @@ -5478,6 +5478,7 @@ fn continue_tenure_extend() { blocks_processed, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, + naka_skip_commit_op: test_skip_commit_op, .. 
} = run_loop.counters(); @@ -5549,7 +5550,7 @@ fn continue_tenure_extend() { ); info!("Pausing commit ops to trigger a tenure extend."); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + test_skip_commit_op.0.lock().unwrap().replace(true); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -5604,7 +5605,7 @@ fn continue_tenure_extend() { ); info!("Resuming commit ops to mine regular tenures."); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + test_skip_commit_op.0.lock().unwrap().replace(false); // Mine 15 more regular nakamoto tenures for _i in 0..15 { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 08c4004ed09..78ed2e7c7ab 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -57,7 +57,7 @@ use wsts::state_machine::PublicKeys; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::neon::Counters; +use crate::neon::{Counters, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -81,6 +81,7 @@ pub struct RunningNodes { pub blocks_processed: Arc, pub nakamoto_blocks_proposed: Arc, pub nakamoto_blocks_mined: Arc, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -679,6 +680,7 @@ fn setup_stx_btc_node ()>( naka_submitted_commits: commits_submitted, naka_proposed_blocks: naka_blocks_proposed, naka_mined_blocks: naka_blocks_mined, + naka_skip_commit_op: nakamoto_test_skip_commit_op, .. } = run_loop.counters(); @@ -711,6 +713,7 @@ fn setup_stx_btc_node ()>( blocks_processed: blocks_processed.0, nakamoto_blocks_proposed: naka_blocks_proposed.0, nakamoto_blocks_mined: naka_blocks_mined.0, + nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 98e2d64b551..13bcd575f52 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -52,7 +52,6 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; -use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_STX_AMT, @@ -859,7 +858,13 @@ fn forked_tenure_testing( info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. 
- TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -892,7 +897,13 @@ fn forked_tenure_testing( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = if expect_tenure_c { mined_blocks.load(Ordering::SeqCst) @@ -1624,7 +1635,13 @@ fn empty_sortition() { TEST_BROADCAST_STALL.lock().unwrap().replace(true); info!("Pausing commit op to prevent tenure C from starting..."); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); let blocks_after = signer_test .running_nodes From ac99ec9d58c8bfec594a729ed6723af70adf4192 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 31 Jul 2024 19:51:38 -0500 Subject: [PATCH 063/910] test: add a test with 2 miners, one of whom tries to fork the other and is prevented by the signer set --- .../src/tests/nakamoto_integrations.rs | 15 + testnet/stacks-node/src/tests/signer/mod.rs | 3 + testnet/stacks-node/src/tests/signer/v0.rs | 326 +++++++++++++++++- 3 files changed, 340 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f6056f1b6fd..31dfa3e4144 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -625,6 +625,21 @@ where Ok(()) } +pub fn wait_for(timeout_secs: u64, mut check: F) -> Result<(), String> +where + F: FnMut() -> Result, +{ + let start = Instant::now(); + while !check()? { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for check to process"); + return Err("Timed out".into()); + } + thread::sleep(Duration::from_millis(100)); + } + Ok(()) +} + /// Mine a bitcoin block, and wait until: /// (1) a new block has been processed by the coordinator pub fn next_block_and_process_new_stacks_block( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 78ed2e7c7ab..7fe508407b4 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -92,6 +92,8 @@ pub struct SignerTest { pub running_nodes: RunningNodes, // The spawned signers and their threads pub spawned_signers: Vec, + // The spawned signers and their threads + pub signer_configs: Vec, // the private keys of the signers pub signer_stacks_private_keys: Vec, // link to the stacks node @@ -209,6 +211,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest. 
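The `wait_for` helper added above standardizes the hand-rolled `Instant`-based polling loops these tests previously used; a minimal sketch of a call site, assuming an atomic commit counter of the kind the tests already pass around:

```rust
// Poll until a new block-commit is observed, failing the test after 20 seconds.
wait_for(20, || {
    Ok(commits_submitted.load(Ordering::SeqCst) > commits_before)
})
.expect("Timed out waiting for block-commit");
```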
+use std::collections::HashMap; use std::ops::Add; use std::str::FromStr; use std::sync::atomic::Ordering; @@ -26,7 +27,10 @@ use libsigner::v0::messages::{ }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::address::AddressHashMode; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, NakamotoChainState, +}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; @@ -34,9 +38,11 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; -use stacks::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks::util::get_epoch_time_secs; +use stacks::util::hash::Sha512Trunc256Sum; +use stacks::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -45,6 +51,7 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; +use stacks_signer::signerdb::{BlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -52,9 +59,11 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, wait_for, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -1264,6 +1273,315 @@ fn multiple_miners() { signer_test.shutdown(); } +#[test] +#[ignore] +fn miner_forking() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + 
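The fork attempts in this test are driven by the `naka_skip_commit_op` test flag moved into `Counters` by the previous patch; the test stalls RL2's block-commits and then releases them around each sortition. A sketch of the toggle, with the calls as they appear later in this test:

```rust
// Stall RL2's block-commits so its next commit will point at a stale tip...
naka_skip_commit_op.0.lock().unwrap().replace(true);
// ...then release them when RL2 should submit its (possibly forking) commit.
naka_skip_commit_op.0.lock().unwrap().replace(false);
```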
let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + // we're deliberately stalling proposals: don't punish this in this test! + signer_config.block_proposal_timeout = Duration::from_secs(240); + // make sure that we don't allow forking due to burn block timing + signer_config.first_proposal_burn_block_timing = Duration::from_secs(1); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let Counters { + naka_skip_commit_op, + naka_submitted_commits: second_miner_commits_submitted, + .. 
+ } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + naka_skip_commit_op.0.lock().unwrap().replace(false); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let mut sortitions_seen = Vec::new(); + let run_sortition = || { + info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + naka_skip_commit_op.0.lock().unwrap().replace(false); + + // wait until a commit is submitted by run_loop_2 + wait_for(60, || { + let commits_count = second_miner_commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > rl2_commits_before) + }) + .unwrap(); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // block commits from RL2 -- this will block until the start of the next iteration + // in this loop. + naka_skip_commit_op.0.lock().unwrap().replace(true); + // ensure RL1 performs an RBF after block broadcast is unblocked + let rl1_commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // wait for a block to be processed (or timeout!) + if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { + info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); + return (sort_tip, false); + } + + info!("Nakamoto block processed, waiting for commit from RL1"); + + // wait for a commit from RL1 + wait_for(60, || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > rl1_commits_before) + }) + .unwrap(); + + // sleep for 1 second to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(1)); + (sort_tip, true) + }; + + let mut won_by_miner_2_but_no_tenure = false; + let mut won_by_miner_1_after_tenureless_miner_2 = false; + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + // miner 2 is expected to be valid iff: + // (a) it's the first nakamoto tenure + // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) + let mut expects_miner_2_to_be_valid = true; + + // due to the random nature of mining sortitions, the way this test is structured + // is that it keeps track of two scenarios that we want to cover, and once enough sortitions + // have been produced to cover those scenarios, it stops and checks the results at the end.
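In the loop that follows, each tenure winner is attributed by checking the block header's miner signature against miner 1's public key; the core check, as it appears below, is:

```rust
// Sketch: determine whether miner 1 signed a given Nakamoto header.
let mined_by_miner_1 = miner_1_pk
    .verify(
        header.miner_signature_hash().as_bytes(),
        &header.miner_signature,
    )
    .unwrap();
```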
+ while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { + if sortitions_seen.len() >= 20 { + panic!("Produced 20 sortitions, but didn't cover the test scenarios, aborting"); + } + let (sortition_data, had_tenure) = run_sortition(); + sortitions_seen.push((sortition_data.clone(), had_tenure)); + + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + if block_json + .as_object() + .unwrap() + .get("miner_signature") + .is_none() + { + return None; + } + let block_id = StacksBlockId::from_hex( + &block_json + .as_object() + .unwrap() + .get("index_block_hash") + .unwrap() + .as_str() + .unwrap()[2..], + ) + .unwrap(); + Some(block_id) + }) + .collect(); + + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let nakamoto_headers: HashMap<_, _> = nakamoto_block_ids + .into_iter() + .map(|block_id| { + let header_info = NakamotoChainState::get_block_header(chainstate.db(), &block_id) + .unwrap() + .unwrap(); + (header_info.consensus_hash.clone(), header_info) + }) + .collect(); + + if had_tenure { + let header_info = nakamoto_headers + .get(&sortition_data.consensus_hash) + .unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + let mined_by_miner_1 = miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap(); + + info!("Block check"; + "height" => header.chain_length, + "consensus_hash" => %header.consensus_hash, + "block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id(), + "mined_by_miner_1?" => mined_by_miner_1, + "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); + if !mined_by_miner_1 { + assert!(expects_miner_2_to_be_valid, "If a block was produced by miner 2, we should have expected miner 2 to be valid"); + } else if won_by_miner_2_but_no_tenure { + // the tenure was won by miner 1, they produced a block, and this follows a tenure that miner 2 won but couldn't + // mine during because they tried to fork. + won_by_miner_1_after_tenureless_miner_2 = true; + } + + // even if it was mined by miner 2, their next block commit should be invalid! + expects_miner_2_to_be_valid = false; + } else { + info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" 
=> expects_miner_2_to_be_valid); + assert!(nakamoto_headers + .get(&sortition_data.consensus_hash) + .is_none()); + assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); + won_by_miner_2_but_no_tenure = true; + expects_miner_2_to_be_valid = true; + } + } + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + block_json + .as_object() + .unwrap() + .get("miner_signature") + .map(|x| x.as_str().unwrap().to_string()) + }) + .collect(); + + assert_eq!( + peer_1_height - pre_nakamoto_peer_1_height, + u64::try_from(nakamoto_block_ids.len()).unwrap(), + "There should be no forks in this test" + ); + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behavior at the end of a tenure. Specifically: From a567db23c84adce33205b92e3e3c65dd87f7fa05 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 1 Aug 2024 10:26:21 -0500 Subject: [PATCH 064/910] tests: improve forking tests --- .../src/tests/nakamoto_integrations.rs | 33 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 178 +++++++++++------- 2 files changed, 133 insertions(+), 78 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 31dfa3e4144..542ff7511c4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1641,6 +1641,10 @@ fn multiple_miners() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.node.local_peer_seed = vec![1, 1, 1, 1]; + naka_conf.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + let node_2_rpc = 51026; + let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -1665,7 +1669,11 @@ fn multiple_miners() { let stacker_sk = setup_stacker(&mut naka_conf); let mut conf_node_2 = naka_conf.clone(); - set_random_binds(&mut conf_node_2); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); conf_node_2.node.seed = vec![2, 2, 2, 2]; conf_node_2.burnchain.local_mining_public_key = Some( Keychain::default(conf_node_2.node.seed.clone()) @@ -1674,6 +1682,8 @@ fn multiple_miners() { ); conf_node_2.node.local_peer_seed = vec![2, 2, 2, 2]; conf_node_2.node.miner = true; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.events_observers.clear(); let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); @@ -1813,16 +1823,14 @@ fn multiple_miners() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - loop { + wait_for(20, || { let blocks_processed = coord_channel 
.lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); let info = get_chain_info_result(&naka_conf).unwrap(); assert_ne!(info.stacks_tip, last_tip); @@ -1832,13 +1840,10 @@ fn multiple_miners() { last_tip_height = info.stacks_tip_height; } - let start_time = Instant::now(); - while commits_submitted.load(Ordering::SeqCst) <= commits_before { - if start_time.elapsed() >= Duration::from_secs(20) { - panic!("Timed out waiting for block-commit"); - } - thread::sleep(Duration::from_millis(100)); - } + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27b1df7450d..c9b5cf4854b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::str::FromStr; use std::sync::atomic::Ordering; @@ -28,9 +28,7 @@ use libsigner::v0::messages::{ use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::address::AddressHashMode; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, NakamotoChainState, -}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; @@ -40,9 +38,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::get_epoch_time_secs; -use stacks::util::hash::Sha512Trunc256Sum; -use stacks::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -51,7 +47,6 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; -use stacks_signer::signerdb::{BlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -70,7 +65,7 @@ use crate::tests::neon_integrations::{ test_observer, }; use crate::tests::{self, make_stacks_transfer}; -use crate::{nakamoto_node, BurnchainController, Keychain}; +use crate::{nakamoto_node, BurnchainController, Config, Keychain}; impl SignerTest { /// Run the test until the first epoch 2.5 reward cycle. 
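With a distinct mining key per node, the reworked `multiple_miners` test in the next hunk can attribute every tenure to one of the two miners; in sketch form, the invariants it ends by asserting are:

```rust
// Invariants asserted at the end of the reworked multiple_miners test (sketch).
assert_eq!(peer_1_height, peer_2_height); // both nodes converge on the same tip
assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); // one block per tenure
assert_eq!(
    btc_blocks_mined,
    u64::try_from(miner_1_tenures + miner_2_tenures).unwrap()
); // every tenure was won by one of the two miners
```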
@@ -1197,6 +1192,7 @@ fn multiple_miners() { config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -1225,6 +1221,7 @@ fn multiple_miners() { conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); conf_node_2.node.miner = true; conf_node_2.events_observers.clear(); conf_node_2.events_observers.extend(node_2_listeners); @@ -1252,9 +1249,59 @@ fn multiple_miners() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - let nakamoto_tenures = 20; - for _i in 0..nakamoto_tenures { - let _mined_block = signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + let max_nakamoto_tenures = 20; + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + btc_blocks_mined += 1; + let blocks = get_nakamoto_headers(&conf); + // for this test, there should be one block per tenure + let consensus_hash_set: HashSet<_> = blocks + .iter() + .map(|header| header.consensus_hash.clone()) + .collect(); + assert_eq!( + consensus_hash_set.len(), + blocks.len(), + "In this test, there should only be one block per tenure" + ); + miner_1_tenures = blocks + .iter() + .filter(|header| { + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + }) + .count(); + miner_2_tenures = blocks + .iter() + .filter(|header| { + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + }) + .count(); } info!( @@ -1268,11 +1315,61 @@ fn multiple_miners() { let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_1_height, peer_2_height); - assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + nakamoto_tenures); + assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); signer_test.shutdown(); } +/// Read processed nakamoto block IDs from the test observer, and use `config` to 
open +/// a chainstate DB and returns their corresponding StacksHeaderInfos +fn get_nakamoto_headers(config: &Config) -> Vec { + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + if block_json + .as_object() + .unwrap() + .get("miner_signature") + .is_none() + { + return None; + } + let block_id = StacksBlockId::from_hex( + &block_json + .as_object() + .unwrap() + .get("index_block_hash") + .unwrap() + .as_str() + .unwrap()[2..], + ) + .unwrap(); + Some(block_id) + }) + .collect(); + + let (chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + None, + ) + .unwrap(); + + nakamoto_block_ids + .into_iter() + .map(|block_id| { + NakamotoChainState::get_block_header(chainstate.db(), &block_id) + .unwrap() + .unwrap() + }) + .collect() +} + #[test] #[ignore] fn miner_forking() { @@ -1470,47 +1567,9 @@ fn miner_forking() { let (sortition_data, had_tenure) = run_sortition(); sortitions_seen.push((sortition_data.clone(), had_tenure)); - let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() - .into_iter() - .filter_map(|block_json| { - if block_json - .as_object() - .unwrap() - .get("miner_signature") - .is_none() - { - return None; - } - let block_id = StacksBlockId::from_hex( - &block_json - .as_object() - .unwrap() - .get("index_block_hash") - .unwrap() - .as_str() - .unwrap()[2..], - ) - .unwrap(); - Some(block_id) - }) - .collect(); - - let (chainstate, _) = StacksChainState::open( - conf.is_mainnet(), - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - - let nakamoto_headers: HashMap<_, _> = nakamoto_block_ids + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) .into_iter() - .map(|block_id| { - let header_info = NakamotoChainState::get_block_header(chainstate.db(), &block_id) - .unwrap() - .unwrap(); - (header_info.consensus_hash.clone(), header_info) - }) + .map(|header| (header.consensus_hash.clone(), header)) .collect(); if had_tenure { @@ -1562,20 +1621,11 @@ fn miner_forking() { info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_1_height, peer_2_height); - let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() - .into_iter() - .filter_map(|block_json| { - block_json - .as_object() - .unwrap() - .get("miner_signature") - .map(|x| x.as_str().unwrap().to_string()) - }) - .collect(); + let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_block_ids.len()).unwrap(), + u64::try_from(nakamoto_blocks_count).unwrap(), "There should be no forks in this test" ); From 13e73fb93d3970f7c3635c2092925aa2b857f150 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 1 Aug 2024 11:02:14 -0500 Subject: [PATCH 065/910] ci: add new test to CI, comment for test --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 5c9de283617..15c2e125b06 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -93,6 +93,7 @@ jobs: - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::miner_forking - 
tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c9b5cf4854b..fe53ca20cfe 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1372,6 +1372,16 @@ fn get_nakamoto_headers(config: &Config) -> Vec<StacksHeaderInfo> { #[test] #[ignore] +// Test two nakamoto miners, with the signer set split between them. +// One of the miners (run-loop-2) is prevented from submitting "good" block commits +// using the "commit stall" test flag in combination with "block broadcast stalls" +// (because RL2 cannot RBF its initial commits after the tip is broadcast). +// This test works by tracking two different scenarios: +// 1. RL2 must win a sortition in which this block-commit behavior would cause a fork. +// 2. After such a sortition, RL1 must win another block. +// The test asserts that every nakamoto sortition either has a successful tenure or, if +// RL2 wins and would be expected to fork, produces no blocks. It also asserts +// that every block produced increments the chain length. fn miner_forking() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From 8e4031cbbdba4d17c5e3f36a3c49489bc57553a5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 1 Aug 2024 11:36:47 -0500 Subject: [PATCH 066/910] chore: add comment to mock miner check for tenure parent --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 06a6e37006d..e2511ff388a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -222,6 +222,9 @@ impl BlockMinerThread { // now, actually run this tenure loop { let new_block = loop { + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. if self.config.get_node_config(false).mock_mining { let burn_db_path = self.config.get_burn_db_file_path(); let mut burn_db = SortitionDB::open( From 8a7768df3ac7da055f0552ac2c436e06481def3e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 1 Aug 2024 15:32:32 -0400 Subject: [PATCH 067/910] fix: release the chainstate db before locking the sortition db This ordering needs to be maintained to avoid deadlock. --- stackslib/src/chainstate/nakamoto/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 09794c47759..64c367bd01c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1992,6 +1992,15 @@ impl NakamotoChainState { next_ready_block.header.consensus_hash ); + // this will panic if the Clarity commit fails. + clarity_commit.commit(); + chainstate_tx.commit() + .unwrap_or_else(|e| { + error!("Failed to commit chainstate transaction after committing Clarity block. 
The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + // set stacks block accepted let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; sort_tx.set_stacks_block_accepted( @@ -2000,15 +2009,6 @@ impl NakamotoChainState { next_ready_block.header.chain_length, )?; - // this will panic if the Clarity commit fails. - clarity_commit.commit(); - chainstate_tx.commit() - .unwrap_or_else(|e| { - error!("Failed to commit chainstate transaction after committing Clarity block. The chainstate database is now corrupted."; - "error" => ?e); - panic!() - }); - // as a separate transaction, mark this block as processed. // This is done separately so that the staging blocks DB, which receives writes // from the network to store blocks, will be available for writes while a block is From e65ad15af62a5616716455bb68f8365191993882 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 1 Aug 2024 17:22:03 -0400 Subject: [PATCH 068/910] chore: move `set_stacks_block_accepted` later Also, `commit` before announcing the block via the dispatcher. --- stackslib/src/chainstate/nakamoto/mod.rs | 32 ++++++++++++------------ 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 64c367bd01c..1fab3ba9b1f 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2001,14 +2001,6 @@ impl NakamotoChainState { panic!() }); - // set stacks block accepted - let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; - sort_tx.set_stacks_block_accepted( - &next_ready_block.header.consensus_hash, - &next_ready_block.header.block_hash(), - next_ready_block.header.chain_length, - )?; - // as a separate transaction, mark this block as processed. // This is done separately so that the staging blocks DB, which receives writes // from the network to store blocks, will be available for writes while a block is @@ -2019,6 +2011,22 @@ impl NakamotoChainState { let signer_bitvec = (&next_ready_block).header.pox_treatment.clone(); + // set stacks block accepted + let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; + sort_tx.set_stacks_block_accepted( + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash(), + next_ready_block.header.chain_length, + )?; + + sort_tx + .commit() + .unwrap_or_else(|e| { + error!("Failed to commit sortition db transaction after committing chainstate and clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2045,14 +2053,6 @@ impl NakamotoChainState { ); } - sort_tx - .commit() - .unwrap_or_else(|e| { - error!("Failed to commit sortition db transaction after committing chainstate and clarity block. 
The chainstate database is now corrupted."; - "error" => ?e); - panic!() - }); - Ok(Some(receipt)) } From fd7158708dfe58e8aac8019f50994baea75acd26 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 1 Aug 2024 16:05:39 -0700 Subject: [PATCH 069/910] wip: integration test on signer set handoff --- libsigner/src/runloop.rs | 2 +- stacks-signer/src/chainstate.rs | 3 +- stacks-signer/src/lib.rs | 5 +- testnet/stacks-node/src/event_dispatcher.rs | 2 + .../stacks-node/src/nakamoto_node/miner.rs | 49 ++-- .../src/nakamoto_node/sign_coordinator.rs | 14 +- .../src/tests/nakamoto_integrations.rs | 19 +- testnet/stacks-node/src/tests/signer/mod.rs | 34 ++- testnet/stacks-node/src/tests/signer/v0.rs | 273 +++++++++++++++--- testnet/stacks-node/src/tests/signer/v1.rs | 3 + 10 files changed, 328 insertions(+), 76 deletions(-) diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index b0f026f35fd..e548db89e3a 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -262,7 +262,7 @@ impl< // start receiving events and doing stuff with them let runloop_thread = thread::Builder::new() - .name("signer_runloop".to_string()) + .name(format!("signer_runloop:{}", bind_addr.port())) .stack_size(THREAD_STACK_SIZE) .spawn(move || { signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4fc2de4cb8f..c35ceb67e03 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -303,11 +303,12 @@ impl SortitionsView { let last_in_tenure = signer_db .get_last_signed_block_in_tenure(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if last_in_tenure.is_some() { + if let Some(last_in_tenure) = last_in_tenure { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. 
Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), ); return Ok(false); } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 15c0a25c3d5..abc2db331be 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -100,6 +100,8 @@ pub struct SpawnedSigner + Send, T: SignerEventTrait> { pub cmd_send: Sender, /// The result receiver for interacting with the running signer pub res_recv: Receiver>, + /// The spawned signer's config + pub config: GlobalConfig, /// Phantom data for the signer type _phantom: std::marker::PhantomData, } @@ -136,7 +138,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner { crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); } - let runloop = RunLoop::new(config); + let runloop = RunLoop::new(config.clone()); let mut signer: RunLoopSigner = libsigner::Signer::new(runloop, ev, cmd_recv, res_send); let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); @@ -145,6 +147,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner cmd_send, res_recv, _phantom: std::marker::PhantomData, + config: config.clone(), } } } diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 5a72e4ca0a9..be0107e1045 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -138,6 +138,7 @@ pub struct MinedNakamotoBlockEvent { pub signer_signature_hash: Sha512Trunc256Sum, pub tx_events: Vec, pub signer_bitvec: String, + pub signer_signature: Vec, } impl InnerStackerDBChannel { @@ -1261,6 +1262,7 @@ impl EventDispatcher { tx_events, miner_signature: block.header.miner_signature.clone(), signer_signature_hash: block.header.signer_signature_hash(), + signer_signature: block.header.signer_signature.clone(), signer_bitvec, }) .unwrap(); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 527117fb4dc..29b2195af98 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -270,18 +270,16 @@ impl BlockMinerThread { } } - let (reward_set, signer_signature) = match self.gather_signatures( - &mut new_block, - self.burn_block.block_height, - &mut stackerdbs, - &mut attempts, - ) { - Ok(x) => x, - Err(e) => { - error!("Error while gathering signatures: {e:?}. Will try mining again."); - continue; - } - }; + let (reward_set, signer_signature) = + match self.gather_signatures(&mut new_block, &mut stackerdbs, &mut attempts) { + Ok(x) => x, + Err(e) => { + error!( + "Error while gathering signatures: {e:?}. Will try mining again." 
+ ); + continue; + } + }; new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { @@ -354,10 +352,21 @@ impl BlockMinerThread { let burn_election_height = self.burn_election_block.block_height; + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(burn_election_height) + .expect("FATAL: no reward cycle for sortition"); + + #[cfg(test)] + { + info!( + "---- Fetching reward info at height {} for cycle {} ----", + burn_election_height, reward_cycle + ); + } + let reward_info = match load_nakamoto_reward_set( - self.burnchain - .pox_reward_cycle(burn_election_height) - .expect("FATAL: no reward cycle for sortition"), + reward_cycle, &self.burn_election_block.sortition_id, &self.burnchain, &mut chain_state, @@ -384,6 +393,14 @@ impl BlockMinerThread { )); }; + #[cfg(test)] + { + info!( + "---- New reward set has {} signers ----", + reward_set.clone().signers.unwrap_or(vec![]).len(), + ); + } + self.signer_set_cache = Some(reward_set.clone()); Ok(reward_set) } @@ -392,7 +409,6 @@ impl BlockMinerThread { fn gather_signatures( &mut self, new_block: &mut NakamotoBlock, - burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { @@ -442,7 +458,6 @@ impl BlockMinerThread { *attempts += 1; let signature = coordinator.begin_sign_v0( new_block, - burn_block_height, *attempts, &tip, &self.burnchain, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b6e42b87ee1..3afe36fa297 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -634,7 +634,6 @@ impl SignCoordinator { pub fn begin_sign_v0( &mut self, block: &NakamotoBlock, - burn_block_height: u64, block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, @@ -643,6 +642,13 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { + #[cfg(test)] + { + info!( + "---- Sign coordinator starting. Burn tip height: {} ----", + burn_tip.block_height + ); + } let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; let reward_cycle_id = burnchain @@ -653,7 +659,7 @@ impl SignCoordinator { let block_proposal = BlockProposal { block: block.clone(), - burn_height: burn_block_height, + burn_height: burn_tip.block_height, reward_cycle: reward_cycle_id, }; @@ -736,7 +742,7 @@ impl SignCoordinator { continue; }; if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. Ignoring."); + info!("Received signer event for other reward cycle. 
Ignoring."); continue; }; let slot_ids = modified_slots @@ -776,6 +782,8 @@ impl SignCoordinator { "signature" => %signature, "block_signer_signature_hash" => %block_sighash, "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id, + "response_hash" => %response_hash ); continue; } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2441380b2be..bf1b10c41b9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -111,7 +111,7 @@ use crate::tests::{ use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; -static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; +pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -1028,6 +1028,7 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option, ) { assert_eq!(stacker_sks.len(), signer_sks.len()); @@ -1058,7 +1059,7 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( .get_burnchain() .block_height_to_reward_cycle(block_height) .unwrap(); - let lock_period = 12; + let lock_period: u128 = num_stacking_cycles.unwrap_or(12_u64).into(); debug!("Test Cycle Info"; "prepare_phase_len" => {prepare_phase_len}, "reward_cycle_len" => {reward_cycle_len}, @@ -1130,6 +1131,7 @@ pub fn boot_to_epoch_3_reward_set( stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option, ) { boot_to_epoch_3_reward_set_calculation_boundary( naka_conf, @@ -1137,6 +1139,7 @@ pub fn boot_to_epoch_3_reward_set( stacker_sks, signer_sks, btc_regtest_controller, + num_stacking_cycles, ); let epoch_3_reward_set_calculation = btc_regtest_controller.get_headers_height().wrapping_add(1); @@ -4925,6 +4928,18 @@ fn signer_chainstate() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; let prom_http_origin = format!("http://{}", prom_bind); let client = reqwest::blocking::Client::new(); let res = client diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 12584ab89ab..eb5b337ada9 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -49,7 +49,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; -use stacks_signer::client::{SignerSlotID, StacksClient}; +use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; @@ -97,6 +97,8 @@ pub struct SignerTest { pub stacks_client: 
StacksClient, // Unique number used to isolate files created during the test pub run_stamp: u16, + /// The number of cycles to stack for + pub num_stacking_cycles: u64, } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { @@ -105,14 +107,24 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, ) -> Self { - Self::new_with_config_modifications(num_signers, initial_balances, wait_on_signers, |_| {}) + Self::new_with_config_modifications( + num_signers, + initial_balances, + wait_on_signers, + |_| {}, + |_| {}, + ) } - fn new_with_config_modifications ()>( + fn new_with_config_modifications< + SignerModifier: Fn(&mut SignerConfig) -> (), + NakaModifier: Fn(&mut NeonConfig) -> (), + >( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, wait_on_signers: Option, - modifier: F, + modifier: SignerModifier, + naka_modifier: NakaModifier, ) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) @@ -121,6 +133,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { + fn get_signer_slots( + &self, + reward_cycle: u64, + ) -> Result, ClientError> { let valid_signer_set = u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); self.stacks_client .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) + } + + fn get_signer_indices(&self, reward_cycle: u64) -> Vec { + self.get_signer_slots(reward_cycle) .expect("FATAL: failed to get signer slots from stackerdb") .iter() .enumerate() .map(|(pos, _)| { SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) }) - .collect() + .collect::>() } /// Get the wsts public keys for the given reward cycle diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fe6c4ff619e..c8b98510c99 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,16 +23,24 @@ use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use rand::RngCore; +use stacks::address::AddressHashMode; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; +use stacks::core::CHAIN_ID_TESTNET; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; +use stacks::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; @@ -43,10 +51,14 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; +use 
crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; -use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; +use crate::tests::nakamoto_integrations::{ + boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, +}; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, submit_tx, test_observer, }; @@ -62,6 +74,7 @@ impl SignerTest { &self.signer_stacks_private_keys, &self.signer_stacks_private_keys, &mut self.running_nodes.btc_regtest_controller, + Some(self.num_stacking_cycles), ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; @@ -128,6 +141,10 @@ impl SignerTest { // Only call after already past the epoch 3.0 boundary fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); + // Get the current signers _before_ the new block + let reward_cycle = self.get_current_reward_cycle(); + let signers = self.get_reward_set_signers(reward_cycle); + self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response @@ -144,9 +161,6 @@ impl SignerTest { // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); - let reward_cycle = self.get_current_reward_cycle(); - let signers = self.get_reward_set_signers(reward_cycle); - // Verify that the signers signed the proposed block let mut signer_index = 0; let mut signature_index = 0; @@ -614,6 +628,7 @@ fn forked_tenure_testing( // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; }, + |_| {}, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -802,11 +817,10 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(15)), - |_config| {}, ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -1252,6 +1266,7 @@ fn empty_sortition() { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; }, + |_| {}, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(20); @@ -1389,12 +1404,16 @@ fn signer_set_rollover() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let new_num_signers = 5; + let new_num_signers = 4; let new_signer_private_keys: Vec<_> = (0..new_num_signers) .into_iter() .map(|_| StacksPrivateKey::new()) .collect(); + let new_signer_public_keys: Vec<_> = new_signer_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); let new_signer_addresses: Vec<_> = new_signer_private_keys .iter() .map(|sk| tests::to_addr(sk)) @@ -1405,21 +1424,25 @@ fn signer_set_rollover() { let send_fee = 180; let recipient = 
PrincipalData::from(StacksAddress::burn_address(false)); - // Boot with some initial signer set - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - None, - ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(20); + let mut initial_balances = new_signer_addresses + .iter() + .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE)) + .collect::>(); + + initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); let run_stamp = rand::random(); + let mut rng = rand::thread_rng(); + + let mut buf = [0u8; 2]; + rng.fill_bytes(&mut buf); + let rpc_port = u16::from_be_bytes(buf.try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let rpc_bind = format!("127.0.0.1:{}", rpc_port); // Setup the new signers that will take over let new_signer_configs = build_signer_config_tomls( &new_signer_private_keys, - &signer_test.running_nodes.conf.node.rpc_bind, + &rpc_bind, Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. &Network::Testnet, "12345", @@ -1427,64 +1450,224 @@ fn signer_set_rollover() { 3000 + num_signers, Some(100_000), None, - Some(9000), + Some(9000 + num_signers), ); - let new_spawned_signers: Vec<_> = (0..num_signers) + let new_spawned_signers: Vec<_> = (0..new_num_signers) .into_iter() .map(|i| { info!("spawning signer"); - let mut signer_config = + let signer_config = SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); SpawnedSigner::new(signer_config) }) .collect(); - // TODO: may need to modify signer_test to not auto stack and delegate the way it does right now. I think it delegates for 12 reward cycles. 
and we should delegate only for one before transferring to the new signer set + // Boot with some initial signer set + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + None, + |_| {}, + |naka_conf| { + for toml in new_signer_configs.clone() { + let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + info!( + "---- Adding signer endpoint to naka conf ({}) ----", + signer_config.endpoint + ); + + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("{}", signer_config.endpoint), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::BurnchainBlocks, + ], + }); + } + naka_conf.node.rpc_bind = rpc_bind.clone(); + }, + ); + assert_eq!( + new_spawned_signers[0].config.node_host, + signer_test.running_nodes.conf.node.rpc_bind + ); + // Only stack for one cycle so that the signer set changes + signer_test.num_stacking_cycles = 1_u64; - // TODO: Advance to the first reward cycle, stacking and delegating to the old signers beforehand + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + // Verify that naka_conf has our new signer's event observers + for toml in new_signer_configs.clone() { + let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + let endpoint = format!("{}", signer_config.endpoint); + assert!(signer_test + .running_nodes + .conf + .events_observers + .iter() + .any(|observer| observer.endpoint == endpoint)); + } + + // Advance to the first reward cycle, stacking to the old signers beforehand + + info!("---- Booting to epoch 3 -----"); signer_test.boot_to_epoch_3(); - // TODO: verify that the first reward cycle has the old signers in the reward set + // verify that the first reward cycle has the old signers in the reward set let reward_cycle = signer_test.get_current_reward_cycle(); - let old_signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) + let signer_test_public_keys: Vec<_> = signer_test + .signer_stacks_private_keys .iter() - .map(|id| id.0) + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); - // TODO: manually trigger a stacks transaction and verify that only OLD signer signatures are found in the signed block - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - // submit a tx so that the miner will mine an extra block - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); + info!("---- Verifying that the current signers are the old signers ----"); + let current_signers = signer_test.get_reward_set_signers(reward_cycle); + assert_eq!(current_signers.len(), num_signers as usize); + // Verify that the current signers are the same as the old signers + for signer in current_signers.iter() { + assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining a block to trigger the signer set -----"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); + submit_tx(&http_origin, &transfer_tx); let mined_block = signer_test.mine_nakamoto_block(short_timeout); - // TODO: verify the mined_block signatures against the OLD signer set (might need to 
update event to take vector of message signatures?) + let block_sighash = mined_block.signer_signature_hash; + let signer_signatures = mined_block.signer_signature; + + // verify the mined_block signatures against the OLD signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } - //TODO: advance to the next reward cycle, stacking and delegating to the new signers beforehand + // advance to the next reward cycle, stacking to the new signers beforehand let reward_cycle = signer_test.get_current_reward_cycle(); - let new_signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - // submit a tx so that the miner will mine an extra block - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); - // submit a tx so that the miner will mine an extra block + info!("---- Stacking new signers -----"); + + let burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + for stacker_sk in new_signer_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 1_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(burn_block_height as u128), + clarity::vm::Value::UInt(1), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + signer_test.mine_nakamoto_block(short_timeout); + + let next_reward_cycle = reward_cycle.saturating_add(1); + + let next_cycle_height = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_add(1); + + info!("---- Mining to next reward set calculation -----"); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(3), + new_num_signers, + ); + + // Verify that the new reward set is the new signers + let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); + for signer in reward_set.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!( + "---- Mining to the next reward cycle (block {}) -----", + next_cycle_height + ); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height, + new_num_signers, + ); + let new_reward_cycle = signer_test.get_current_reward_cycle(); + 
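        // A minimal sketch of the burnchain-height <-> reward-cycle arithmetic that the
        // assertion below relies on. The constants here are purely illustrative; the real
        // values come from this test's PoxConstants and burnchain configuration.
        {
            let (first_block_height, reward_cycle_length) = (0u64, 10u64);
            // the first burn block of reward cycle `rc` sits one block past the boundary
            let cycle_start = |rc: u64| first_block_height + rc * reward_cycle_length + 1;
            // a burn height maps back to its reward cycle by integer division
            let cycle_of = |height: u64| (height - first_block_height) / reward_cycle_length;
            assert_eq!(cycle_start(5), 51);
            assert_eq!(cycle_of(51), 5);
        }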
assert_eq!(new_reward_cycle, reward_cycle.saturating_add(1)); + + info!("---- Verifying that the current signers are the new signers ----"); + let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); + assert_eq!(current_signers.len(), new_num_signers as usize); + for signer in current_signers.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining a block to verify new signer set -----"); + let sender_nonce = 1; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + let mined_block = signer_test.mine_nakamoto_block(short_timeout); + + info!("---- Verifying that the new signers signed the block -----"); + let signer_signatures = mined_block.signer_signature; + + // verify the mined_block signatures against the NEW signer set, recovering each + // pubkey from the sighash of the block just mined (not the earlier pre-rollover block) + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(mined_block.signer_signature_hash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(!signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } signer_test.shutdown(); - // TODO: shutdown the new signers as well + for signer in new_spawned_signers { + assert!(signer.stop().is_none()); + } } diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 9a3af13081a..44bbc572282 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -71,6 +71,7 @@ impl SignerTest { &self.signer_stacks_private_keys, &self.signer_stacks_private_keys, &mut self.running_nodes.btc_regtest_controller, + Some(self.num_stacking_cycles), ); let dkg_vote = self.wait_for_dkg(timeout); @@ -493,6 +494,7 @@ fn dkg() { &signer_test.signer_stacks_private_keys, &signer_test.signer_stacks_private_keys, &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), ); info!("Pox 4 activated and at epoch 3.0 reward set calculation (2nd block of its prepare phase)! 
Ready for signers to perform DKG and Sign!"); @@ -696,6 +698,7 @@ fn delayed_dkg() { &signer_test.signer_stacks_private_keys, &signer_test.signer_stacks_private_keys, &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), ); let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); let public_keys = signer_test.get_signer_public_keys(reward_cycle); From d64405422efa92e7f0f33627de1c0d533148fa34 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 1 Aug 2024 16:21:43 -0700 Subject: [PATCH 070/910] fix: revert behavior when verifying naka block in test code --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c8b98510c99..c7a08c25035 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -141,9 +141,6 @@ impl SignerTest { // Only call after already past the epoch 3.0 boundary fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); - // Get the current signers _before_ the new block - let reward_cycle = self.get_current_reward_cycle(); - let signers = self.get_reward_set_signers(reward_cycle); self.mine_nakamoto_block(timeout); @@ -161,6 +158,9 @@ impl SignerTest { // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); + let reward_cycle = self.get_current_reward_cycle(); + let signers = self.get_reward_set_signers(reward_cycle); + // Verify that the signers signed the proposed block let mut signer_index = 0; let mut signature_index = 0; From 5853580dd50381a7fd0dbec3bdf7f02e69f4efc4 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 1 Aug 2024 16:33:16 -0700 Subject: [PATCH 071/910] fix: issues after merge --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 10 +++++----- testnet/stacks-node/src/tests/signer/v0.rs | 8 +------- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 5c9de283617..8307053fbf4 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -93,6 +93,7 @@ jobs: - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::signer_set_rollover - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 10b0a629a5b..35db96c845b 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -125,7 +125,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, mut signer_config_modifier: F, - node_config_modifier: G, + mut node_config_modifier: G, btc_miner_pubkeys: &[Secp256k1PublicKey], ) -> Self { // Generate Signer Data @@ -135,7 +135,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Fri, 2 Aug 2024 11:26:46 -0400 Subject: [PATCH 072/910] test(wip): add test to verify deadlock in `process_next_nakamoto_block` --- 
.../chainstate/nakamoto/coordinator/tests.rs | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index bf1970f7f6e..91a8d9f9659 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -2453,3 +2453,75 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe fn test_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { simple_nakamoto_coordinator_10_extended_tenures_10_sortitions(); } + +#[test] +fn process_next_nakamoto_block_deadlock() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_addr: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + max_amount: None, + }) + .collect::>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.pox_constants = pox_constants; + + info!("Creating peer"); + + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + + // Lock the sortdb + info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); + let mut sortition_db = peer.sortdb().reopen().unwrap(); + let sort_tx = sortition_db.tx_begin().unwrap(); + + // Start another thread that opens the sortdb, waits 10s, then tries to + // lock the chainstate db. This should cause a deadlock if the block + // processing is not acquiring the locks in the correct order. + info!(" ------------------------------- SPAWNING BLOCKER THREAD"); + let blocker_thread = std::thread::spawn(move || { + // Wait a bit, to ensure the tenure will have grabbed any locks it needs + std::thread::sleep(std::time::Duration::from_secs(10)); + + // Lock the chainstate db + info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE"); + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let (chainstate_tx, _) = chainstate.chainstate_tx_begin().unwrap(); + + info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED"); + info!(" ------------------------------- BLOCKER THREAD FINISHED"); + }); + + info!(" ------------------------------- MINING TENURE"); + let (block, burn_height, ..) 
= peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + info!(" ------------------------------- TENURE MINED"); + + // Wait for the blocker thread to finish + blocker_thread.join().unwrap(); +} From 5bb6af63db403afb01a2f3749fb6ce1e4dc586fc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:26:30 -0400 Subject: [PATCH 073/910] fix: report _all_ tenure-start blocks for a tenure --- .../src/chainstate/nakamoto/staging_blocks.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 88e1744bb6c..1d7b2a84142 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -325,6 +325,24 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { Ok(res) } + /// Get all Nakamoto blocks in a tenure that report being tenure-start blocks + /// (depending on signer behavior, there can be more than one; none are guaranteed to be + /// canonical). + /// + /// Used by the block downloader + pub fn get_nakamoto_tenure_start_blocks( + &self, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; + let args = params![consensus_hash]; + let block_data: Vec> = query_rows(self, qry, args)?; + Ok(block_data + .into_iter() + .filter_map(|block_vec| NakamotoBlock::consensus_deserialize(&mut &block_vec[..]).ok()) + .collect()) + } + /// Find the next ready-to-process Nakamoto block, given a connection to the staging blocks DB. /// NOTE: the relevant field queried from `nakamoto_staging_blocks` are updated by a separate /// tx from block-processing, so it's imperative that the thread that calls this function is From a5f12b769e4978c0f611d862cda2750aa6ef3d8e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:27:01 -0400 Subject: [PATCH 074/910] chore: more debugging --- stackslib/src/net/chat.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 8d8dc7ca5c9..95d6fbac82a 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -764,7 +764,7 @@ impl ConversationP2P { if my_epoch <= remote_epoch { // remote node supports same epochs we do debug!( - "Remote peer has epoch {}, which is newer than our epoch {}", + "Remote peer has epoch {}, which is at least as new as our epoch {}", remote_epoch, my_epoch ); return true; @@ -2421,14 +2421,16 @@ impl ConversationP2P { Ok(num_recved) => { total_recved += num_recved; if num_recved > 0 { + debug!("{:?}: received {} bytes", self, num_recved); self.stats.last_recv_time = get_epoch_time_secs(); self.stats.bytes_rx += num_recved as u64; } else { + debug!("{:?}: received {} bytes, stopping", self, num_recved); break; } } Err(net_error::PermanentlyDrained) => { - trace!( + debug!( "{:?}: failed to recv on P2P conversation: PermanentlyDrained", self ); @@ -3022,7 +3024,7 @@ impl ConversationP2P { } } } else { - // no one was waiting for this reply, so just drop it + // message was passed to the relevant message handle debug!( "{:?}: Fulfilled pending message request (type {} seq {})", &self, _msgtype, _seq From 53ef9528ff0ef9527506f2f9d7bc6e794f1339e1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:27:12 -0400 Subject: [PATCH 075/910] chore: more debugging --- stackslib/src/net/connection.rs | 14 +++++++------- 1 file changed, 7 
insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 8dc5ad77946..d3a77ebc8d9 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -167,8 +167,9 @@ impl NetworkReplyHandle
<P: ProtocolFamily>
{ /// is destroyed in the process). pub fn try_recv(mut self) -> Result, net_error>> { if self.deadline > 0 && self.deadline < get_epoch_time_secs() { - test_debug!( - "Reply deadline {} exceeded (now = {})", + debug!( + "Reply deadline for event {} at {} exceeded (now = {})", + self.socket_event_id, self.deadline, get_epoch_time_secs() ); @@ -234,10 +235,9 @@ impl NetworkReplyHandle
<P: ProtocolFamily>
{ None } else { // still have data to send, or we will send more. - test_debug!( + debug!( "Still have data to send, drop_on_success = {}, ret = {}", - drop_on_success, - ret + drop_on_success, ret ); Some(fd) } @@ -990,7 +990,7 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ || e.kind() == io::ErrorKind::ConnectionReset { // write endpoint is dead - test_debug!("reader was reset: {:?}", &e); + debug!("reader was reset: {:?}", &e); socket_closed = true; blocked = true; Ok(0) @@ -1004,7 +1004,7 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ total_read += num_read; if num_read > 0 || total_read > 0 { - trace!("read {} bytes; {} total", num_read, total_read); + debug!("read {} bytes; {} total", num_read, total_read); } if num_read > 0 { From 6d82225fce33ba1dd8d6af2bbc84a94d157467b8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:27:27 -0400 Subject: [PATCH 076/910] fix: more debugging, and also, don't add more entries to wanted_tenures if the sortition reward cycle is not equal to our tracked reward cycle --- .../nakamoto/download_state_machine.rs | 217 +++++++++--------- 1 file changed, 108 insertions(+), 109 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 3865e8ee398..d1510af9c19 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -164,11 +164,9 @@ impl NakamotoDownloadStateMachine { .get_block_snapshot_by_height(last_block_height.saturating_sub(1))? .ok_or(DBError::NotFoundError)?; while cursor.block_height >= first_block_height { - test_debug!( + debug!( "Load sortition {}/{} burn height {}", - &cursor.consensus_hash, - &cursor.winning_stacks_block_hash, - cursor.block_height + &cursor.consensus_hash, &cursor.winning_stacks_block_hash, cursor.block_height ); wanted_tenures.push(WantedTenure::new( cursor.consensus_hash, @@ -211,20 +209,16 @@ impl NakamotoDownloadStateMachine { .min(tip.block_height.saturating_add(1)); if highest_tenure_height > last_block_height { - test_debug!( + debug!( "Will NOT update wanted tenures for reward cycle {}: {} > {}", - cur_rc, - highest_tenure_height, - last_block_height + cur_rc, highest_tenure_height, last_block_height ); return Ok(()); } - test_debug!( + debug!( "Update reward cycle sortitions between {} and {} (rc is {})", - first_block_height, - last_block_height, - cur_rc + first_block_height, last_block_height, cur_rc ); // find all sortitions in this reward cycle @@ -276,7 +270,7 @@ impl NakamotoDownloadStateMachine { .saturating_sub(1) .min(tip.block_height.saturating_add(1)); - test_debug!( + debug!( "Load tip sortitions between {} and {} (loaded_so_far = {})", first_block_height, last_block_height, @@ -289,7 +283,7 @@ impl NakamotoDownloadStateMachine { let ih = sortdb.index_handle(&tip.sortition_id); let wanted_tenures = Self::load_wanted_tenures(&ih, first_block_height, last_block_height)?; - test_debug!( + debug!( "Loaded tip sortitions between {} and {} (loaded_so_far = {}): {:?}", first_block_height, last_block_height, @@ -315,7 +309,10 @@ impl NakamotoDownloadStateMachine { stacks_tip: &StacksBlockId, ) -> Result<(), NetError> { for wt in wanted_tenures.iter_mut() { - test_debug!("update_processed_wanted_tenures: consider {:?}", &wt); + debug!( + "update_processed_wanted_tenures: consider {:?} off of {}", + &wt, stacks_tip + ); if wt.processed { continue; } @@ -329,7 +326,7 @@ impl NakamotoDownloadStateMachine { stacks_tip, &wt.tenure_id_consensus_hash, )? 
{ - test_debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash); + debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash); wt.processed = true; continue; } @@ -347,7 +344,7 @@ impl NakamotoDownloadStateMachine { chainstate: &StacksChainState, ) -> Result<(), NetError> { if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_mut() { - test_debug!("update_processed_wanted_tenures: update prev_tenures"); + debug!("update_processed_wanted_tenures: update prev_tenures"); Self::inner_update_processed_wanted_tenures( self.nakamoto_start_height, prev_wanted_tenures, @@ -355,7 +352,7 @@ impl NakamotoDownloadStateMachine { &self.nakamoto_tip, )?; } - test_debug!("update_processed_wanted_tenures: update wanted_tenures"); + debug!("update_processed_wanted_tenures: update wanted_tenures"); Self::inner_update_processed_wanted_tenures( self.nakamoto_start_height, &mut self.wanted_tenures, @@ -377,33 +374,19 @@ impl NakamotoDownloadStateMachine { pub(crate) fn load_tenure_start_blocks( wanted_tenures: &[WantedTenure], chainstate: &mut StacksChainState, - tip_block_id: &StacksBlockId, tenure_start_blocks: &mut HashMap, ) -> Result<(), NetError> { for wt in wanted_tenures { - let Some(tenure_start_block_header) = - NakamotoChainState::get_nakamoto_tenure_start_block_header( - &mut chainstate.index_conn(), - tip_block_id, - &wt.tenure_id_consensus_hash, - )? - else { - test_debug!("No tenure-start block for {}", &wt.tenure_id_consensus_hash); - continue; - }; - let Some((tenure_start_block, _)) = chainstate + let candidate_tenure_start_blocks = chainstate .nakamoto_blocks_db() - .get_nakamoto_block(&tenure_start_block_header.index_block_hash())? - else { - let msg = format!( - "Have header but no block for tenure-start of {} ({})", - &wt.tenure_id_consensus_hash, - &tenure_start_block_header.index_block_hash() + .get_nakamoto_tenure_start_blocks(&wt.tenure_id_consensus_hash)?; + + for candidate_tenure_start_block in candidate_tenure_start_blocks.into_iter() { + tenure_start_blocks.insert( + candidate_tenure_start_block.block_id(), + candidate_tenure_start_block, ); - error!("{}", &msg); - return Err(NetError::ChainstateError(msg)); - }; - tenure_start_blocks.insert(tenure_start_block.block_id(), tenure_start_block); + } } Ok(()) } @@ -416,7 +399,6 @@ impl NakamotoDownloadStateMachine { Self::load_tenure_start_blocks( &self.wanted_tenures, chainstate, - &self.nakamoto_tip, &mut self.tenure_start_blocks, ) } @@ -455,7 +437,7 @@ impl NakamotoDownloadStateMachine { let sort_tip = &network.burnchain_tip; let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return Err(NetError::PeerNotConnected); }; @@ -491,7 +473,7 @@ impl NakamotoDownloadStateMachine { invs.inventories.values(), ) } else { - test_debug!("No prev_wanted_tenures yet"); + debug!("No prev_wanted_tenures yet"); true }; @@ -506,7 +488,7 @@ impl NakamotoDownloadStateMachine { .chain(prev_wts.into_iter()) .chain(cur_wts.into_iter()) { - test_debug!("Consider wanted tenure: {:?}", &wt); + debug!("Consider wanted tenure: {:?}", &wt); let wt_rc = sortdb .pox_constants .block_height_to_reward_cycle(sortdb.first_block_height, wt.burn_height) @@ -516,15 +498,16 @@ impl NakamotoDownloadStateMachine { } else if wt_rc == sort_rc { cur_wanted_tenures.push(wt); } else { - test_debug!("Drop wanted tenure: {:?}", &wt); + debug!("Drop wanted tenure: {:?}", &wt); } } prev_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); 
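        // Illustrative sketch of the reward-cycle bucketing above, using made-up numbers
        // (the real values come from `sortdb.pox_constants` and `sortdb.first_block_height`):
        //
        //     let (first_burn_height, rc_len) = (100u64, 10u64);
        //     let rc_of = |burn_height: u64| (burn_height - first_burn_height) / rc_len;
        //     assert_eq!(rc_of(115), 1); // rc 1 <  sort_rc 2: pushed to prev_wanted_tenures
        //     assert_eq!(rc_of(125), 2); // rc 2 == sort_rc 2: pushed to cur_wanted_tenures
        //     assert_eq!(rc_of(135), 3); // rc 3 >  sort_rc 2: dropped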
cur_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - test_debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); - test_debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); + debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); + debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); + debug!("set self.reward_cycle = {}", sort_rc); self.prev_wanted_tenures = if prev_wanted_tenures.is_empty() { None @@ -534,13 +517,13 @@ impl NakamotoDownloadStateMachine { self.wanted_tenures = cur_wanted_tenures; self.reward_cycle = sort_rc; } else { - test_debug!( + debug!( "Append {} wanted tenures: {:?}", new_wanted_tenures.len(), &new_wanted_tenures ); self.wanted_tenures.append(&mut new_wanted_tenures); - test_debug!("wanted_tenures is now {:?}", &self.wanted_tenures); + debug!("wanted_tenures is now {:?}", &self.wanted_tenures); } Ok(()) @@ -559,7 +542,7 @@ impl NakamotoDownloadStateMachine { let reorg = PeerNetwork::is_reorg(self.last_sort_tip.as_ref(), sort_tip, sortdb); if reorg { // force a reload - test_debug!("Detected reorg! Refreshing wanted tenures"); + debug!("Detected reorg! Refreshing wanted tenures"); self.prev_wanted_tenures = None; self.wanted_tenures.clear(); } @@ -587,10 +570,9 @@ impl NakamotoDownloadStateMachine { &mut prev_wanted_tenures, )?; - test_debug!( + debug!( "initial prev_wanted_tenures (rc {}): {:?}", - sort_rc, - &prev_wanted_tenures + sort_rc, &prev_wanted_tenures ); self.prev_wanted_tenures = Some(prev_wanted_tenures); } @@ -609,10 +591,9 @@ impl NakamotoDownloadStateMachine { &mut wanted_tenures, )?; - test_debug!( + debug!( "initial wanted_tenures (rc {}): {:?}", - sort_rc, - &wanted_tenures + sort_rc, &wanted_tenures ); self.wanted_tenures = wanted_tenures; self.reward_cycle = sort_rc; @@ -633,6 +614,7 @@ impl NakamotoDownloadStateMachine { inventory_iter: impl Iterator, ) -> bool { if prev_wanted_tenures.is_empty() { + debug!("prev_wanted_tenures is empty, so we have unprocessed tenures"); return true; } @@ -641,19 +623,29 @@ impl NakamotoDownloadStateMachine { // inventory messages for the reward cycle after `prev_wanted_rc`, then the former will be // true let prev_wanted_rc = prev_wanted_tenures - .first() + .last() .map(|wt| { pox_constants - .block_height_to_reward_cycle(first_burn_height, wt.burn_height) + .block_height_to_reward_cycle( + first_burn_height, + wt.burn_height.saturating_sub(1), + ) .expect("FATAL: wanted tenure before system start") }) .unwrap_or(u64::MAX); let cur_wanted_rc = prev_wanted_rc.saturating_add(1); + debug!( + "have_unprocessed_tenures: prev_wanted_rc = {}, cur_wanted_rc = {}", + prev_wanted_rc, cur_wanted_rc + ); + let mut has_prev_inv = false; let mut has_cur_inv = false; + let mut num_invs = 0; for inv in inventory_iter { + num_invs += 1; if prev_wanted_rc < first_nakamoto_rc { // assume the epoch 2.x inventory has this has_prev_inv = true; @@ -670,7 +662,7 @@ impl NakamotoDownloadStateMachine { } if !has_prev_inv || !has_cur_inv { - debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv); + debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures. 
Total inventories: {}", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv, num_invs); return true; } @@ -682,16 +674,26 @@ impl NakamotoDownloadStateMachine { let mut available_considered = 0; for (_naddr, available) in tenure_block_ids.iter() { available_considered += available.len(); + debug!("Consider available tenures from {}", _naddr); for (_ch, tenure_info) in available.iter() { + debug!("Consider tenure info for {}: {:?}", _ch, tenure_info); if tenure_info.start_reward_cycle == prev_wanted_rc || tenure_info.end_reward_cycle == prev_wanted_rc { has_prev_rc_block = true; + debug!( + "Consider tenure info for {}: have a tenure in prev reward cycle {}", + _ch, prev_wanted_rc + ); } if tenure_info.start_reward_cycle == cur_wanted_rc || tenure_info.end_reward_cycle == cur_wanted_rc { has_cur_rc_block = true; + debug!( + "Consider tenure info for {}: have a tenure in cur reward cycle {}", + _ch, cur_wanted_rc + ); } } } @@ -720,14 +722,13 @@ impl NakamotoDownloadStateMachine { // this check is necessary because the check for .processed requires that a // child tenure block has been processed, which isn't guaranteed at a reward // cycle boundary - test_debug!("Tenure {:?} has been fully downloaded", &tenure_info); + debug!("Tenure {:?} has been fully downloaded", &tenure_info); continue; } if !tenure_info.processed { - test_debug!( + debug!( "Tenure {:?} is available from {} but not processed", - &tenure_info, - &_naddr + &tenure_info, &_naddr ); ret = true; } @@ -764,7 +765,7 @@ impl NakamotoDownloadStateMachine { let sort_tip = &network.burnchain_tip; let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return Err(NetError::PeerNotConnected); }; @@ -791,7 +792,7 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: burnchain tip is before system start") }; - test_debug!( + debug!( "last_sort_height = {}, sort_rc = {}, next_sort_rc = {}, self.reward_cycle = {}, sort_tip.block_height = {}", last_sort_height, sort_rc, @@ -800,9 +801,9 @@ impl NakamotoDownloadStateMachine { sort_tip.block_height, ); - if sort_rc == next_sort_rc { - // not at a reward cycle boundary, os just extend self.wanted_tenures - test_debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); + if self.reward_cycle == sort_rc { + // not at a reward cycle boundary, so just extend self.wanted_tenures + debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); self.extend_wanted_tenures(network, sortdb)?; self.update_tenure_start_blocks(chainstate)?; return Ok(()); @@ -826,7 +827,7 @@ impl NakamotoDownloadStateMachine { invs.inventories.values(), ) } else { - test_debug!("No prev_wanted_tenures yet"); + debug!("No prev_wanted_tenures yet"); true }; if !can_advance_wanted_tenures { @@ -850,8 +851,8 @@ impl NakamotoDownloadStateMachine { &mut new_prev_wanted_tenures, )?; - test_debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); - test_debug!( + debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); + debug!( "new_prev_wanted_tenures is now {:?}", &new_prev_wanted_tenures ); @@ -889,7 +890,7 @@ impl NakamotoDownloadStateMachine { "Peer {} has no inventory for reward cycle {}", naddr, reward_cycle ); - test_debug!("Peer {} has the following inventory data: {:?}", naddr, inv); + debug!("Peer {} has the following inventory data: {:?}", naddr, inv); continue; }; for (i, wt) in wanted_tenures.iter().enumerate() { @@ -905,12 +906,9 @@ impl 
NakamotoDownloadStateMachine { let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !rc_inv.get(bit).unwrap_or(false) { // this neighbor does not have this tenure - test_debug!( + debug!( "Peer {} does not have sortition #{} in reward cycle {} (wt {:?})", - naddr, - bit, - reward_cycle, - &wt + naddr, bit, reward_cycle, &wt ); continue; } @@ -1045,7 +1043,7 @@ impl NakamotoDownloadStateMachine { } if Self::count_available_tenure_neighbors(&self.available_tenures) > 0 { // still have requests to try, so don't bother computing a new set of available tenures - test_debug!("Still have requests to try"); + debug!("Still have requests to try"); return; } if self.wanted_tenures.is_empty() { @@ -1054,7 +1052,7 @@ impl NakamotoDownloadStateMachine { } if inventories.is_empty() { // nothing to do - test_debug!("No inventories available"); + debug!("No inventories available"); return; } @@ -1064,7 +1062,7 @@ impl NakamotoDownloadStateMachine { .prev_wanted_tenures .as_ref() .map(|prev_wanted_tenures| { - test_debug!( + debug!( "Load availability for prev_wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.reward_cycle.saturating_sub(1) @@ -1089,7 +1087,7 @@ impl NakamotoDownloadStateMachine { .as_ref() .map(|prev_wanted_tenures| { // have both self.prev_wanted_tenures and self.wanted_tenures - test_debug!("Load tenure block IDs for prev_wanted_tenures ({}) and wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.wanted_tenures.len(), self.reward_cycle.saturating_sub(1)); + debug!("Load tenure block IDs for prev_wanted_tenures ({}) and wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.wanted_tenures.len(), self.reward_cycle.saturating_sub(1)); Self::find_tenure_block_ids( self.reward_cycle.saturating_sub(1), prev_wanted_tenures, @@ -1102,7 +1100,7 @@ impl NakamotoDownloadStateMachine { .unwrap_or(HashMap::new()); let mut tenure_block_ids = { - test_debug!( + debug!( "Load tenure block IDs for wanted_tenures ({}) at rc {}", self.wanted_tenures.len(), self.reward_cycle @@ -1171,9 +1169,9 @@ impl NakamotoDownloadStateMachine { prev_schedule }; - test_debug!("new schedule: {:?}", schedule); - test_debug!("new available: {:?}", &available); - test_debug!("new tenure_block_ids: {:?}", &tenure_block_ids); + debug!("new schedule: {:?}", schedule); + debug!("new available: {:?}", &available); + debug!("new tenure_block_ids: {:?}", &tenure_block_ids); self.tenure_download_schedule = schedule; self.tenure_block_ids = tenure_block_ids; @@ -1217,21 +1215,20 @@ impl NakamotoDownloadStateMachine { inventory_iter: impl Iterator, ) -> bool { if sort_tip.block_height < burnchain_height { - test_debug!( + debug!( "sort_tip {} < burn tip {}", - sort_tip.block_height, - burnchain_height + sort_tip.block_height, burnchain_height ); return false; } if wanted_tenures.is_empty() { - test_debug!("No wanted tenures"); + debug!("No wanted tenures"); return false; } if prev_wanted_tenures.is_empty() { - test_debug!("No prev wanted tenures"); + debug!("No prev wanted tenures"); return false; } @@ -1247,7 +1244,7 @@ impl NakamotoDownloadStateMachine { first_burn_height, inventory_iter, ) { - test_debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures"); + debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures"); return false; } @@ -1263,7 +1260,7 @@ impl NakamotoDownloadStateMachine { if is_available && !wt.processed { // a tenure is available but not yet processed, so we can't yet transition to // fetching unconfirmed tenures 
(we'd have no way to validate them). - test_debug!( + debug!( "Tenure {} is available but not yet processed", &wt.tenure_id_consensus_hash ); @@ -1331,7 +1328,7 @@ impl NakamotoDownloadStateMachine { highest_processed_block_id.clone(), ); - test_debug!("Request unconfirmed tenure state from neighbor {}", &naddr); + debug!("Request unconfirmed tenure state from neighbor {}", &naddr); downloaders.insert(naddr.clone(), unconfirmed_tenure_download); added += 1; false @@ -1390,7 +1387,7 @@ impl NakamotoDownloadStateMachine { HashMap>, HashMap, ) { - test_debug!("Run unconfirmed tenure downloaders"); + debug!("Run unconfirmed tenure downloaders"); let addrs: Vec<_> = downloaders.keys().map(|addr| addr.clone()).collect(); let mut finished = vec![]; @@ -1419,14 +1416,20 @@ impl NakamotoDownloadStateMachine { // send requests for (naddr, downloader) in downloaders.iter_mut() { if downloader.is_done() { + debug!( + "Downloader for {:?} is done (finished {})", + &downloader.unconfirmed_tenure_id(), + naddr + ); finished.push(naddr.clone()); continue; } if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", naddr); continue; } - test_debug!( + debug!( "Send request to {} for tenure {:?} (state {})", &naddr, &downloader.unconfirmed_tenure_id(), @@ -1455,11 +1458,11 @@ impl NakamotoDownloadStateMachine { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(downloader) = downloaders.get_mut(&naddr) else { - test_debug!("Got rogue response from {}", &naddr); + debug!("Got rogue response from {}", &naddr); continue; }; - test_debug!("Got response from {}", &naddr); + debug!("Got response from {}", &naddr); let blocks_opt = match downloader.handle_next_download_response( response, sortdb, @@ -1501,7 +1504,7 @@ impl NakamotoDownloadStateMachine { // don't start this unless the downloader is actually done (this should always be // the case, but don't tempt fate with an assert!) if downloader.is_done() { - test_debug!( + debug!( "Will fetch the highest complete tenure from {:?}", &downloader.unconfirmed_tenure_id() ); @@ -1510,9 +1513,7 @@ impl NakamotoDownloadStateMachine { } } } else { - test_debug!( - "Will not make highest-complete tenure downloader (not a Nakamoto tenure)" - ); + debug!("Will not make highest-complete tenure downloader (not a Nakamoto tenure)"); } unconfirmed_blocks.insert(naddr.clone(), blocks); @@ -1676,14 +1677,12 @@ impl NakamotoDownloadStateMachine { debug!("NakamotoDownloadStateMachine in state {}", &self.state); let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return HashMap::new(); }; - test_debug!( + debug!( "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", - burnchain_height, - network.burnchain_tip.block_height, - &self.state + burnchain_height, network.burnchain_tip.block_height, &self.state ); self.update_available_tenures( &invs.inventories, @@ -1704,7 +1703,7 @@ impl NakamotoDownloadStateMachine { // no longer mutably borrowed. let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return HashMap::new(); }; @@ -1759,7 +1758,7 @@ impl NakamotoDownloadStateMachine { // no longer mutably borrowed. 
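// A minimal sketch of the re-borrow pattern used at this step (hypothetical
// types, not the real PeerNetwork API):
//
//     struct Net { inv: Option<Vec<u64>> }
//     fn mutate(net: &mut Net) { net.inv.get_or_insert_with(Vec::new).push(1); }
//     fn read(net: &Net) -> Option<&Vec<u64>> { net.inv.as_ref() }
//
// mutate(&mut net) must return before read(&net) can take a shared borrow,
// which is why the inventories are re-fetched with as_ref() after each
// mutating phase instead of being held across it.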
let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return HashMap::new(); }; @@ -1824,7 +1823,7 @@ impl NakamotoDownloadStateMachine { ibd: bool, ) -> Result>, NetError> { self.nakamoto_tip = network.stacks_tip.block_id(); - test_debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); + debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); self.update_wanted_tenures(&network, sortdb, chainstate)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); From b2cb072658544d93dd71516f6c3390a27e20bac0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:28:25 -0400 Subject: [PATCH 077/910] chore: more debugging --- stackslib/src/net/download/nakamoto/tenure.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 5e2e06c41a9..21d06d1b2c8 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -172,11 +172,12 @@ impl TenureStartEnd { let mut tenure_block_ids = AvailableTenures::new(); let mut last_tenure = 0; let mut last_tenure_ch = None; + debug!("Find available tenures in inventory {:?} rc {}", invs, rc); for (i, wt) in wanted_tenures.iter().enumerate() { // advance to next tenure-start sortition let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { - test_debug!("i={} bit not set", i); + debug!("i={} bit not set", i); continue; } @@ -187,12 +188,12 @@ impl TenureStartEnd { let bit = u16::try_from(*j).expect("FATAL: more sortitions than u16::MAX"); invbits.get(bit).unwrap_or(false) }) else { - test_debug!("i={} out of wanted_tenures", i); + debug!("i={} out of wanted_tenures", i); break; }; let Some(wt_start) = wanted_tenures.get(wt_start_idx) else { - test_debug!("i={} no start wanted tenure", i); + debug!("i={} no start wanted tenure", i); break; }; @@ -200,12 +201,12 @@ impl TenureStartEnd { let bit = u16::try_from(*j).expect("FATAL: more sortitions than u16::MAX"); invbits.get(bit).unwrap_or(false) }) else { - test_debug!("i={} out of wanted_tenures", i); + debug!("i={} out of wanted_tenures", i); break; }; let Some(wt_end) = wanted_tenures.get(wt_end_index) else { - test_debug!("i={} no end wanted tenure", i); + debug!("i={} no end wanted tenure", i); break; }; @@ -217,7 +218,7 @@ impl TenureStartEnd { rc, wt.processed, ); - test_debug!( + debug!( "i={}, len={}; {:?}", i, wanted_tenures.len(), @@ -229,7 +230,7 @@ impl TenureStartEnd { let Some(next_wanted_tenures) = next_wanted_tenures else { // nothing more to do - test_debug!("No next_wanted_tenures"); + debug!("No next_wanted_tenures"); return Some(tenure_block_ids); }; @@ -237,10 +238,9 @@ impl TenureStartEnd { // the last tenure derived from it if let Some(last_tenure_ch) = last_tenure_ch.take() { if let Some(last_tenure) = tenure_block_ids.get_mut(&last_tenure_ch) { - test_debug!( + debug!( "Will directly fetch end-block {} for tenure {}", - &last_tenure.end_block_id, - &last_tenure.tenure_id_consensus_hash + &last_tenure.end_block_id, &last_tenure.tenure_id_consensus_hash ); last_tenure.fetch_end_block = true; } @@ -248,7 +248,7 @@ impl TenureStartEnd { let Some(next_invbits) = invs.tenures_inv.get(&rc.saturating_add(1)) else { // nothing more to do - test_debug!("no 
inventory for cycle {}", rc.saturating_add(1)); + debug!("no inventory for cycle {}", rc.saturating_add(1)); return Some(tenure_block_ids); }; @@ -256,7 +256,7 @@ impl TenureStartEnd { let iter_start = last_tenure; let iterator = wanted_tenures.get(iter_start..).unwrap_or(&[]); for (i, wt) in iterator.iter().enumerate() { - test_debug!( + debug!( "consider next wanted tenure which starts with i={} {:?}", iter_start + i, &wt @@ -265,7 +265,7 @@ impl TenureStartEnd { // advance to next tenure-start sortition let bit = u16::try_from(i + iter_start).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { - test_debug!("i={} bit not set", i); + debug!("i={} bit not set", i); continue; } @@ -295,7 +295,7 @@ impl TenureStartEnd { }) }) else { - test_debug!( + debug!( "i={} out of wanted_tenures and next_wanted_tenures", iter_start + i ); @@ -314,7 +314,7 @@ impl TenureStartEnd { None } }) else { - test_debug!("i={} out of next_wanted_tenures", iter_start + i); + debug!("i={} out of next_wanted_tenures", iter_start + i); break; }; @@ -330,7 +330,7 @@ impl TenureStartEnd { ); tenure_start_end.fetch_end_block = true; - test_debug!( + debug!( "i={},len={},next_len={}; {:?}", iter_start + i, wanted_tenures.len(), From 3b2b9f90351fed7e341a451dd537dcc018140761 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:28:42 -0400 Subject: [PATCH 078/910] chore: more debugging, and also, don't set the downloader to idle unless it processes the message completely --- .../download/nakamoto/tenure_downloader.rs | 59 +++++++++---------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index c6e5ee07038..7197adf0b26 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -181,12 +181,9 @@ impl NakamotoTenureDownloader { start_signer_keys: RewardSet, end_signer_keys: RewardSet, ) -> Self { - test_debug!( + debug!( "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, - &tenure_id_consensus_hash, - &tenure_start_block_id, - &tenure_end_block_id, + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, ); Self { tenure_id_consensus_hash, @@ -270,7 +267,7 @@ impl NakamotoTenureDownloader { self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); } else if let Some(tenure_end_block) = self.tenure_end_block.take() { // we already have the tenure-end block, so immediately proceed to accept it. 
- test_debug!( + debug!( "Preemptively process tenure-end block {} for tenure {}", tenure_end_block.block_id(), &self.tenure_id_consensus_hash @@ -312,10 +309,9 @@ impl NakamotoTenureDownloader { else { return Err(NetError::InvalidState); }; - test_debug!( + debug!( "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, - &end_block_id + &self.naddr, &end_block_id ); self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); Ok(()) @@ -327,10 +323,9 @@ impl NakamotoTenureDownloader { self.state { if wait_deadline < Instant::now() { - test_debug!( + debug!( "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, - &end_block_id + &self.naddr, &end_block_id ); self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); } @@ -530,11 +525,9 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidState); }; - test_debug!( + debug!( "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, - &block_cursor, - count + &self.tenure_id_consensus_hash, &block_cursor, count ); if earliest_block.block_id() != tenure_start_block.block_id() { // still have more blocks to download @@ -572,24 +565,23 @@ impl NakamotoTenureDownloader { ) -> Result, ()> { let request = match self.state { NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - test_debug!("Request tenure-start block {}", &start_block_id); + debug!("Request tenure-start block {}", &start_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) } NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { // we're waiting for some other downloader's block-fetch to complete - test_debug!( + debug!( "Waiting for tenure-end block {} until {:?}", - &_block_id, - _deadline + &_block_id, _deadline ); return Ok(None); } NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - test_debug!("Request tenure-end block {}", &end_block_id); + debug!("Request tenure-end block {}", &end_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) } NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - test_debug!("Downloading tenure ending at {}", &end_block_id); + debug!("Downloading tenure ending at {}", &end_block_id); StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) } NakamotoTenureDownloadState::Done => { @@ -613,7 +605,7 @@ impl NakamotoTenureDownloader { neighbor_rpc: &mut NeighborRPC, ) -> Result { if neighbor_rpc.has_inflight(&self.naddr) { - test_debug!("Peer {} has an inflight request", &self.naddr); + debug!("Peer {} has an inflight request", &self.naddr); return Ok(true); } if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { @@ -651,10 +643,9 @@ impl NakamotoTenureDownloader { &mut self, response: StacksHttpResponse, ) -> Result>, NetError> { - self.idle = true; match self.state { NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - test_debug!( + debug!( "Got download response for tenure-start block {}", &_block_id ); @@ -663,23 +654,26 @@ impl NakamotoTenureDownloader { e })?; self.try_accept_tenure_start_block(block)?; + self.idle = true; Ok(None) } NakamotoTenureDownloadState::WaitForTenureEndBlock(..) 
=> { - test_debug!("Invalid state -- Got download response for WaitForTenureBlock"); + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + self.idle = true; Err(NetError::InvalidState) } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - test_debug!("Got download response to tenure-end block {}", &_block_id); + debug!("Got download response to tenure-end block {}", &_block_id); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); e })?; self.try_accept_tenure_end_block(&block)?; + self.idle = true; Ok(None) } NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - test_debug!( + debug!( "Got download response for tenure blocks ending at {}", &_end_block_id ); @@ -687,9 +681,14 @@ impl NakamotoTenureDownloader { warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); e })?; - self.try_accept_tenure_blocks(blocks) + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + self.idle = true; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => { + self.idle = true; + Err(NetError::InvalidState) } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), } } From 1c9a7050587c1453576a400b9e18ec48e4d3d1cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:29:09 -0400 Subject: [PATCH 079/910] chore: more debugging --- .../nakamoto/tenure_downloader_set.rs | 95 +++++++++---------- 1 file changed, 44 insertions(+), 51 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 8a154637cf4..337c8d1cd6f 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -98,10 +98,9 @@ impl NakamotoTenureDownloaderSet { /// Assign the given peer to the given downloader state machine. Allocate a slot for it if /// needed. 
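/// A rough usage sketch (hedged; assumes a plain constructor exists):
///
///     let mut set = NakamotoTenureDownloaderSet::new();
///     set.add_downloader(naddr, downloader);
///
/// If `naddr` already maps to a slot, its downloader is overwritten in place;
/// otherwise a new slot is allocated and the peer is bound to it.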
fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - test_debug!( + debug!( "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, - &naddr + &downloader.tenure_id_consensus_hash, &naddr ); if let Some(idx) = self.peers.get(&naddr) { self.downloaders[*idx] = Some(downloader); @@ -155,7 +154,7 @@ impl NakamotoTenureDownloaderSet { ) { for (naddr, downloader) in iter { if self.has_downloader(&naddr) { - test_debug!("Already have downloader for {}", &naddr); + debug!("Already have downloader for {}", &naddr); continue; } self.add_downloader(naddr, downloader); @@ -202,7 +201,7 @@ impl NakamotoTenureDownloaderSet { if downloader.is_done() { continue; } - test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); return false; } true @@ -220,10 +219,9 @@ impl NakamotoTenureDownloaderSet { return false; }; - test_debug!( + debug!( "Peer {} already bound to downloader for {}", - &naddr, - &_downloader.tenure_id_consensus_hash + &naddr, &_downloader.tenure_id_consensus_hash ); return true; } @@ -240,11 +238,9 @@ impl NakamotoTenureDownloaderSet { if downloader.naddr != naddr { continue; } - test_debug!( + debug!( "Assign peer {} to work on downloader for {} in state {}", - &naddr, - &downloader.tenure_id_consensus_hash, - &downloader.state + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state ); self.peers.insert(naddr, i); return true; @@ -263,15 +259,14 @@ impl NakamotoTenureDownloaderSet { continue; }; let Some(downloader) = downloader_opt else { - test_debug!("Remove peer {} for null download {}", &naddr, i); + debug!("Remove peer {} for null download {}", &naddr, i); idled.push(naddr.clone()); continue; }; if downloader.idle || downloader.is_waiting() { - test_debug!( + debug!( "Remove idled peer {} for tenure download {}", - &naddr, - &downloader.tenure_id_consensus_hash + &naddr, &downloader.tenure_id_consensus_hash ); idled.push(naddr.clone()); } @@ -317,7 +312,7 @@ impl NakamotoTenureDownloaderSet { &mut self, tenure_start_blocks: &HashMap, ) -> Vec { - test_debug!( + debug!( "handle tenure-end blocks: {:?}", &tenure_start_blocks.keys().collect::>() ); @@ -354,7 +349,7 @@ impl NakamotoTenureDownloaderSet { continue; }; if &downloader.tenure_id_consensus_hash == tenure_id { - test_debug!( + debug!( "Have downloader for tenure {} already (idle={}, waiting={}, state={})", tenure_id, downloader.idle, @@ -407,7 +402,7 @@ impl NakamotoTenureDownloaderSet { if !last_available_tenures.contains(&downloader.tenure_end_block_id) { continue; } - test_debug!( + debug!( "Transition downloader for {} from waiting to fetching", &downloader.tenure_id_consensus_hash ); @@ -431,11 +426,11 @@ impl NakamotoTenureDownloaderSet { count: usize, current_reward_cycles: &BTreeMap, ) { - test_debug!("schedule: {:?}", schedule); - test_debug!("available: {:?}", &available); - test_debug!("tenure_block_ids: {:?}", &tenure_block_ids); - test_debug!("inflight: {}", self.inflight()); - test_debug!( + debug!("schedule: {:?}", schedule); + debug!("available: {:?}", &available); + debug!("tenure_block_ids: {:?}", &tenure_block_ids); + debug!("inflight: {}", self.inflight()); + debug!( "count: {}, running: {}, scheduled: {}", count, 
self.num_downloaders(), @@ -450,24 +445,24 @@ impl NakamotoTenureDownloaderSet { break; }; if self.completed_tenures.contains(&ch) { - test_debug!("Already successfully downloaded tenure {}", &ch); + debug!("Already successfully downloaded tenure {}", &ch); schedule.pop_front(); continue; } let Some(neighbors) = available.get_mut(ch) else { // not found on any neighbors, so stop trying this tenure - test_debug!("No neighbors have tenure {}", ch); + debug!("No neighbors have tenure {}", ch); schedule.pop_front(); continue; }; if neighbors.is_empty() { // no more neighbors to try - test_debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {}", ch); schedule.pop_front(); continue; } let Some(naddr) = neighbors.pop() else { - test_debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {}", ch); schedule.pop_front(); continue; }; @@ -481,20 +476,20 @@ impl NakamotoTenureDownloaderSet { let Some(available_tenures) = tenure_block_ids.get(&naddr) else { // this peer doesn't have any known tenures, so try the others - test_debug!("No tenures available from {}", &naddr); + debug!("No tenures available from {}", &naddr); continue; }; let Some(tenure_info) = available_tenures.get(ch) else { // this peer does not have a tenure start/end block for this tenure, so try the // others. - test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + debug!("Neighbor {} does not serve tenure {}", &naddr, ch); continue; }; let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) else { - test_debug!( + debug!( "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", tenure_info.start_reward_cycle, &tenure_info @@ -506,7 +501,7 @@ impl NakamotoTenureDownloaderSet { .get(&tenure_info.end_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) else { - test_debug!( + debug!( "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", tenure_info.end_reward_cycle, &tenure_info @@ -515,7 +510,7 @@ impl NakamotoTenureDownloaderSet { continue; }; - test_debug!( + debug!( "Download tenure {} (start={}, end={}) (rc {},{})", &ch, &tenure_info.start_block_id, @@ -532,7 +527,7 @@ impl NakamotoTenureDownloaderSet { end_reward_set.clone(), ); - test_debug!("Request tenure {} from neighbor {}", ch, &naddr); + debug!("Request tenure {} from neighbor {}", ch, &naddr); self.add_downloader(naddr, tenure_download); schedule.pop_front(); } @@ -561,27 +556,25 @@ impl NakamotoTenureDownloaderSet { // send requests for (naddr, index) in self.peers.iter() { if neighbor_rpc.has_inflight(&naddr) { - test_debug!("Peer {} has an inflight request", &naddr); + debug!("Peer {} has an inflight request", &naddr); continue; } let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - test_debug!("No downloader for {}", &naddr); + debug!("No downloader for {}", &naddr); continue; }; if downloader.is_done() { - test_debug!("Downloader for {} is done", &naddr); + debug!("Downloader for {} is done", &naddr); finished.push(naddr.clone()); finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); continue; } - test_debug!( + debug!( "Send request to {} for tenure {} (state {})", - &naddr, - &downloader.tenure_id_consensus_hash, - &downloader.state + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state ); let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - 
test_debug!("Downloader for {} failed; this peer is dead", &naddr); + debug!("Downloader for {} failed; this peer is dead", &naddr); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -595,12 +588,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - test_debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {}", &naddr); self.clear_downloader(&naddr); } } for done_naddr in finished.drain(..) { - test_debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {}", &done_naddr); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) { @@ -610,14 +603,14 @@ impl NakamotoTenureDownloaderSet { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(index) = self.peers.get(&naddr) else { - test_debug!("No downloader for {}", &naddr); + debug!("No downloader for {}", &naddr); continue; }; let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - test_debug!("No downloader for {}", &naddr); + debug!("No downloader for {}", &naddr); continue; }; - test_debug!("Got response from {}", &naddr); + debug!("Got response from {}", &naddr); let Ok(blocks_opt) = downloader .handle_next_download_response(response) @@ -626,7 +619,7 @@ impl NakamotoTenureDownloaderSet { e }) else { - test_debug!("Failed to handle download response from {}", &naddr); + debug!("Failed to handle download response from {}", &naddr); neighbor_rpc.add_dead(network, &naddr); continue; }; @@ -635,7 +628,7 @@ impl NakamotoTenureDownloaderSet { continue; }; - test_debug!( + debug!( "Got {} blocks for tenure {}", blocks.len(), &downloader.tenure_id_consensus_hash @@ -651,12 +644,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - test_debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {}", &naddr); self.clear_downloader(naddr); } } for done_naddr in finished.drain(..) { - test_debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {}", &done_naddr); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) 
{ From bd918aab999445633587b7bdc4e0eaf33ef73b74 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:29:21 -0400 Subject: [PATCH 080/910] fix: a peer isn't broken if it sends us data when we expected a handshake first --- stackslib/src/net/inv/epoch2x.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index b3092d8f121..fc5f073b2e9 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -609,7 +609,9 @@ impl NeighborBlockStats { let mut broken = false; let mut stale = false; - if nack_data.error_code == NackErrorCodes::Throttled { + if nack_data.error_code == NackErrorCodes::Throttled + || nack_data.error_code == NackErrorCodes::HandshakeRequired + { // TODO: do something smarter here, like just back off return NodeStatus::Dead; } else if nack_data.error_code == NackErrorCodes::NoSuchBurnchainBlock { @@ -2125,6 +2127,7 @@ impl PeerNetwork { break; } + debug!("Inv sync state is {:?}", &stats.state); let again = match stats.state { InvWorkState::GetPoxInvBegin => self .inv_getpoxinv_begin(sortdb, nk, stats, request_timeout) From 6f7db31987ea43cdc0718aa527bd4df046bbc521 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:29:55 -0400 Subject: [PATCH 081/910] fix: try to fetch the reward cycle _after_ our highest one, so the downloader has it on a reward cycle boundary. Don't treat it as an error if the node doesn't have it yet (which it won't, most of the time, except on reward cycle boundaries) --- stackslib/src/net/inv/nakamoto.rs | 63 ++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 491d0bcaca6..5b09ace3968 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -30,8 +30,8 @@ use crate::net::db::PeerDB; use crate::net::neighbors::comms::PeerNetworkComms; use crate::net::p2p::PeerNetwork; use crate::net::{ - Error as NetError, GetNakamotoInvData, NakamotoInvData, NeighborAddress, NeighborComms, - NeighborKey, StacksMessage, StacksMessageType, + Error as NetError, GetNakamotoInvData, NackErrorCodes, NakamotoInvData, NeighborAddress, + NeighborComms, NeighborKey, StacksMessage, StacksMessageType, }; use crate::util_lib::db::Error as DBError; @@ -86,14 +86,14 @@ impl InvTenureInfo { tenure_id_consensus_hash, )? .map(|tenure| { - test_debug!("BlockFound tenure for {}", &tenure_id_consensus_hash); + debug!("BlockFound tenure for {}", &tenure_id_consensus_hash); Self { tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, } }) .or_else(|| { - test_debug!("No BlockFound tenure for {}", &tenure_id_consensus_hash); + debug!("No BlockFound tenure for {}", &tenure_id_consensus_hash); None })) } @@ -224,12 +224,16 @@ impl InvGenerator { }; let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash.clone(); - test_debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, cur_sortition_info = {:?}", cur_height, &cur_consensus_hash, &cur_tenure_opt, cur_sortition_info); + debug!("Get sortition and tenure info for height {}. 
cur_consensus_hash = {}, cur_tenure_info = {:?}, cur_sortition_info = {:?}", cur_height, &cur_consensus_hash, &cur_tenure_opt, cur_sortition_info); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... if cur_tenure_info.tenure_id_consensus_hash == cur_consensus_hash { // ...and this tenure started in this sortition + debug!( + "Tenure was started for {} (height {})", + cur_consensus_hash, cur_height + ); tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -238,11 +242,19 @@ impl InvGenerator { )?; } else { // ...but this tenure did not start in this sortition + debug!( + "Tenure was NOT started for {} (bit {})", + cur_consensus_hash, cur_height + ); tenure_status.push(false); } } else { // no active tenure during this sortition. Check the parent sortition to see if a // tenure begain there. + debug!( + "No winning sortition for {} (bit {})", + cur_consensus_hash, cur_height + ); tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -260,6 +272,10 @@ impl InvGenerator { } tenure_status.reverse(); + debug!( + "Tenure bits off of {} and {}: {:?}", + nakamoto_tip, &tip.consensus_hash, &tenure_status + ); Ok(tenure_status) } } @@ -370,7 +386,7 @@ impl NakamotoTenureInv { /// Adjust the next reward cycle to query. /// Returns the reward cycle to query. pub fn next_reward_cycle(&mut self) -> u64 { - test_debug!("Next reward cycle: {}", self.cur_reward_cycle + 1); + debug!("Next reward cycle: {}", self.cur_reward_cycle + 1); let query_rc = self.cur_reward_cycle; self.cur_reward_cycle = self.cur_reward_cycle.saturating_add(1); query_rc @@ -383,7 +399,7 @@ impl NakamotoTenureInv { if self.start_sync_time + inv_sync_interval <= now && (self.cur_reward_cycle >= cur_rc || !self.online) { - test_debug!("Reset inv comms for {}", &self.neighbor_address); + debug!("Reset inv comms for {}", &self.neighbor_address); self.online = true; self.start_sync_time = now; self.cur_reward_cycle = start_rc; @@ -473,7 +489,11 @@ impl NakamotoTenureInv { StacksMessageType::Nack(nack_data) => { info!("{:?}: remote peer NACKed our GetNakamotoInv", network.get_local_peer(); "error_code" => nack_data.error_code); - self.set_online(false); + + if nack_data.error_code != NackErrorCodes::NoSuchBurnchainBlock { + // any other error besides this one is a problem + self.set_online(false); + } return Ok(false); } _ => { @@ -557,7 +577,7 @@ impl NakamotoInvStateMachine { let reorg = PeerNetwork::is_reorg(self.last_sort_tip.as_ref(), tip, sortdb); if reorg { // drop the last two reward cycles - test_debug!("Detected reorg! Refreshing inventory consensus hashes"); + debug!("Detected reorg! 
Refreshing inventory consensus hashes"); let highest_rc = self .reward_cycle_consensus_hashes .last_key_value() @@ -585,10 +605,9 @@ impl NakamotoInvStateMachine { ) .expect("FATAL: snapshot occurred before system start"); - test_debug!( + debug!( "Load all reward cycle consensus hashes from {} to {}", - highest_rc, - tip_rc + highest_rc, tip_rc ); for rc in highest_rc..=tip_rc { if self.reward_cycle_consensus_hashes.contains_key(&rc) { @@ -599,7 +618,7 @@ impl NakamotoInvStateMachine { warn!("Failed to load consensus hash for reward cycle {}", rc); return Err(DBError::NotFoundError.into()); }; - test_debug!("Inv reward cycle consensus hash for {} is {}", rc, &ch); + debug!("Inv reward cycle consensus hash for {} is {}", rc, &ch); self.reward_cycle_consensus_hashes.insert(rc, ch); } Ok(tip_rc) @@ -628,6 +647,7 @@ impl NakamotoInvStateMachine { // make sure we know all consensus hashes for all reward cycles. let current_reward_cycle = self.update_reward_cycle_consensus_hashes(&network.burnchain_tip, sortdb)?; + let nakamoto_start_height = network .get_epoch_by_epoch_id(StacksEpochId::Epoch30) .start_height; @@ -639,6 +659,12 @@ impl NakamotoInvStateMachine { // we're updating inventories, so preserve the state we have let mut new_inventories = HashMap::new(); let event_ids: Vec = network.iter_peer_event_ids().map(|e_id| *e_id).collect(); + + debug!( + "Send GetNakamotoInv to up to {} peers (ibd={})", + event_ids.len(), + ibd + ); for event_id in event_ids.into_iter() { let Some(convo) = network.get_p2p_convo(event_id) else { continue; @@ -677,12 +703,15 @@ impl NakamotoInvStateMachine { ) }); - let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle); + // try to get all of the reward cycles we know about, plus the next one. We try to get + // the next one as well in case we're at a reward cycle boundary, but we're not at the + // chain tip -- the block downloader still needs that next inventory to proceed. 
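// A small worked example of the boundary case (illustrative numbers only):
// with current_reward_cycle = 100, the target below is 101, so a peer's
// inventory scan can already cover the cycle the block downloader will need
// right after the reward cycle boundary:
//
//     let current_reward_cycle: u64 = 100;
//     let target = current_reward_cycle.saturating_add(1);
//     assert_eq!(target, 101);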
+ let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle.saturating_add(1)); let inv_rc = inv.reward_cycle(); new_inventories.insert(naddr.clone(), inv); if self.comms.has_inflight(&naddr) { - test_debug!( + debug!( "{:?}: still waiting for reply from {}", network.get_local_peer(), &naddr @@ -732,7 +761,7 @@ impl NakamotoInvStateMachine { let num_msgs = replies.len(); for (naddr, reply) in replies.into_iter() { - test_debug!( + debug!( "{:?}: got reply from {}: {:?}", network.get_local_peer(), &naddr, @@ -833,7 +862,7 @@ impl PeerNetwork { /// Return whether or not we learned something pub fn do_network_inv_sync_nakamoto(&mut self, sortdb: &SortitionDB, ibd: bool) -> bool { if cfg!(test) && self.connection_opts.disable_inv_sync { - test_debug!("{:?}: inv sync is disabled", &self.local_peer); + debug!("{:?}: inv sync is disabled", &self.local_peer); return false; } From 4df8d6d869c5a2a70bc9394a75ad01ea3a85d807 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:47:17 -0400 Subject: [PATCH 082/910] chore: more debug output --- .../nakamoto/tenure_downloader_unconfirmed.rs | 32 +++++++++---------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index d51e99d5a10..c96f718d2b9 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -194,8 +194,8 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); } - test_debug!("Got tenure info {:?}", remote_tenure_tip); - test_debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); // authenticate consensus hashes against canonical chain history let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( @@ -381,10 +381,9 @@ impl NakamotoUnconfirmedTenureDownloader { ); } - test_debug!( + debug!( "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, - tenure_rc + parent_tenure_rc, tenure_rc ); self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); @@ -547,7 +546,7 @@ impl NakamotoUnconfirmedTenureDownloader { break; } - test_debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); // NOTE: this field can get updated by the downloader while this state-machine is in // this state. 
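// A condensed sketch of the completion step in the next hunk (simplified
// stand-in types, not the verbatim implementation): blocks are fetched
// newest-first by walking parent links, so when the tenure is fully received
// the accumulated run is filtered against the locally processed height and
// put back into chain order:
//
//     fn finish(mut newest_first: Vec<u64>, highest_processed: u64) -> Vec<u64> {
//         // u64 stands in for a NakamotoBlock keyed by its chain length
//         newest_first.retain(|height| *height > highest_processed);
//         newest_first.reverse(); // newest-first becomes oldest-first
//         newest_first
//     }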
@@ -597,7 +596,7 @@ impl NakamotoUnconfirmedTenureDownloader { let highest_processed_block_height = *self.highest_processed_block_height.as_ref().unwrap_or(&0); - test_debug!("Finished receiving unconfirmed tenure"); + debug!("Finished receiving unconfirmed tenure"); return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { blocks .into_iter() @@ -621,7 +620,7 @@ impl NakamotoUnconfirmedTenureDownloader { }; let next_block_id = earliest_block.header.parent_block_id.clone(); - test_debug!( + debug!( "Will resume fetching unconfirmed tenure blocks starting at {}", &next_block_id ); @@ -729,10 +728,9 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; - test_debug!( + debug!( "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, - &self.naddr, + &tenure_tip.parent_consensus_hash, &self.naddr, ); let ntd = NakamotoTenureDownloader::new( tenure_tip.parent_consensus_hash.clone(), @@ -790,7 +788,7 @@ impl NakamotoUnconfirmedTenureDownloader { neighbor_rpc: &mut NeighborRPC, ) -> Result<(), NetError> { if neighbor_rpc.has_inflight(&self.naddr) { - test_debug!("Peer {} has an inflight request", &self.naddr); + debug!("Peer {} has an inflight request", &self.naddr); return Ok(()); } if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { @@ -831,9 +829,9 @@ impl NakamotoUnconfirmedTenureDownloader { ) -> Result>, NetError> { match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { - test_debug!("Got tenure-info response"); + debug!("Got tenure-info response"); let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - test_debug!("Got tenure-info response: {:?}", &remote_tenure_info); + debug!("Got tenure-info response: {:?}", &remote_tenure_info); self.try_accept_tenure_info( sortdb, local_sort_tip, @@ -844,16 +842,16 @@ impl NakamotoUnconfirmedTenureDownloader { Ok(None) } NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - test_debug!("Got tenure start-block response"); + debug!("Got tenure start-block response"); let block = response.decode_nakamoto_block()?; self.try_accept_unconfirmed_tenure_start_block(block)?; Ok(None) } NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) 
=> { - test_debug!("Got unconfirmed tenure blocks response"); + debug!("Got unconfirmed tenure blocks response"); let blocks = response.decode_nakamoto_tenure()?; let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - test_debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); Ok(accepted_opt) } NakamotoUnconfirmedDownloadState::Done => { From 1bbdacf73a09436ddb633c0d68e9edcca6310f71 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:47:45 -0400 Subject: [PATCH 083/910] chore: more debug output --- stackslib/src/net/neighbors/comms.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 31c62a1f8f0..8fdf38d87b3 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -466,12 +466,19 @@ impl PeerNetworkComms { Ok(None) => { if let Some(rh) = req_opt { // keep trying + debug!("{:?}: keep polling {}", network.get_local_peer(), naddr); inflight.insert(naddr, rh); } continue; } Err(_e) => { // peer was already marked as dead in the given network set + debug!( + "{:?}: peer {} is dead: {:?}", + network.get_local_peer(), + naddr, + &_e + ); continue; } }; From d0d90360e6082fd53e2103a85dcef15ed8b7071c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:47:57 -0400 Subject: [PATCH 084/910] chore: more debug output --- stackslib/src/net/neighbors/rpc.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stackslib/src/net/neighbors/rpc.rs b/stackslib/src/net/neighbors/rpc.rs index c75074222d8..9b0d2a1bdd8 100644 --- a/stackslib/src/net/neighbors/rpc.rs +++ b/stackslib/src/net/neighbors/rpc.rs @@ -109,16 +109,22 @@ impl NeighborRPC { Ok(Some(response)) => response, Ok(None) => { // keep trying + debug!("Still waiting for next reply from {}", &naddr); inflight.insert(naddr, (event_id, request_opt)); continue; } Err(NetError::WaitingForDNS) => { // keep trying + debug!( + "Could not yet poll next reply from {}: waiting for DNS", + &naddr + ); inflight.insert(naddr, (event_id, request_opt)); continue; } Err(_e) => { // declare this neighbor as dead by default + debug!("Failed to poll next reply from {}: {:?}", &naddr, &_e); dead.push(naddr); continue; } @@ -201,6 +207,10 @@ impl NeighborRPC { }) })?; + debug!( + "Send request to {} on event {}: {:?}", + &naddr, event_id, &request + ); self.state.insert(naddr, (event_id, Some(request))); Ok(()) } From 2ec2bc0d3fb4d1c928cde698a696a65542cf0f67 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:48:16 -0400 Subject: [PATCH 085/910] chore: more debug output --- stackslib/src/net/p2p.rs | 127 ++++++++++++++++++++------------------- 1 file changed, 65 insertions(+), 62 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 861a6e6cfab..4a52945521f 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -659,11 +659,9 @@ impl PeerNetwork { let (p2p_handle, bound_p2p_addr) = net.bind(my_addr)?; let (http_handle, bound_http_addr) = net.bind(http_addr)?; - test_debug!( + debug!( "{:?}: bound on p2p {:?}, http {:?}", - &self.local_peer, - bound_p2p_addr, - bound_http_addr + &self.local_peer, bound_p2p_addr, bound_http_addr ); self.network = Some(net); @@ -913,6 +911,12 @@ impl PeerNetwork { return Err(e); } Ok(sz) => { + if sz > 0 { + debug!( + "Sent {} bytes on p2p socket {:?} for conversation {:?}", + sz, client_sock, 
convo + ); + } total_sent += sz; if sz == 0 { break; @@ -1202,7 +1206,7 @@ impl PeerNetwork { let next_event_id = match self.network { None => { - test_debug!("{:?}: network not connected", &self.local_peer); + debug!("{:?}: network not connected", &self.local_peer); return Err(net_error::NotConnected); } Some(ref mut network) => { @@ -1510,7 +1514,7 @@ impl PeerNetwork { (convo.to_neighbor_key(), Some(neighbor)) } None => { - test_debug!( + debug!( "No such neighbor in peer DB, but will ban nevertheless: {:?}", convo.to_neighbor_key() ); @@ -1674,11 +1678,9 @@ impl PeerNetwork { // already connected? if let Some(event_id) = self.get_event_id(&neighbor_key) { - test_debug!( + debug!( "{:?}: already connected to {:?} on event {}", - &self.local_peer, - &neighbor_key, - event_id + &self.local_peer, &neighbor_key, event_id ); return Err(net_error::AlreadyConnected(event_id, neighbor_key.clone())); } @@ -1956,7 +1958,7 @@ impl PeerNetwork { match self.events.get(&peer_key) { None => { // not connected - test_debug!("Could not sign for peer {:?}: not connected", peer_key); + debug!("Could not sign for peer {:?}: not connected", peer_key); Err(net_error::PeerNotConnected) } Some(event_id) => self.sign_for_p2p(*event_id, message_payload), @@ -1976,7 +1978,7 @@ impl PeerNetwork { message_payload, ); } - test_debug!("Could not sign for peer {}: not connected", event_id); + debug!("Could not sign for peer {}: not connected", event_id); Err(net_error::PeerNotConnected) } @@ -1997,7 +1999,7 @@ impl PeerNetwork { message_payload, ); } - test_debug!("Could not sign for peer {}: not connected", event_id); + debug!("Could not sign for peer {}: not connected", event_id); Err(net_error::PeerNotConnected) } @@ -2071,7 +2073,7 @@ impl PeerNetwork { match (self.peers.remove(&event_id), self.sockets.remove(&event_id)) { (Some(convo), Some(sock)) => (convo, sock), (Some(convo), None) => { - test_debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); + debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); self.peers.insert(event_id, convo); return Err(net_error::PeerNotConnected); } @@ -2084,7 +2086,7 @@ impl PeerNetwork { return Err(net_error::PeerNotConnected); } (None, None) => { - test_debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); + debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); return Err(net_error::PeerNotConnected); } }; @@ -2213,7 +2215,7 @@ impl PeerNetwork { ) { Ok((convo_unhandled, alive)) => (convo_unhandled, alive), Err(_e) => { - test_debug!( + debug!( "{:?}: Connection to {:?} failed: {:?}", &self.local_peer, self.get_p2p_convo(*event_id), @@ -2225,7 +2227,7 @@ impl PeerNetwork { }; if !alive { - test_debug!( + debug!( "{:?}: Connection to {:?} is no longer alive", &self.local_peer, self.get_p2p_convo(*event_id), @@ -2412,11 +2414,9 @@ impl PeerNetwork { } }; if neighbor.allowed < 0 || (neighbor.allowed as u64) > now { - test_debug!( + debug!( "{:?}: event {} is allowed: {:?}", - &self.local_peer, - event_id, - &nk + &self.local_peer, event_id, &nk ); safe.insert(*event_id); } @@ -2503,17 +2503,19 @@ impl PeerNetwork { let mut relay_handles = std::mem::replace(&mut self.relay_handles, HashMap::new()); for (event_id, handle_list) in relay_handles.iter_mut() { if handle_list.len() == 0 { + debug!("No handles for event {}", event_id); drained.push(*event_id); continue; } - test_debug!( + debug!( "Flush {} relay handles to event {}", handle_list.len(), event_id ); while handle_list.len() > 0 { + debug!("Flush {} relay handles", 
handle_list.len()); let res = self.with_p2p_convo(*event_id, |_network, convo, client_sock| { if let Some(handle) = handle_list.front_mut() { let (num_sent, flushed) = @@ -2525,12 +2527,9 @@ impl PeerNetwork { } }; - test_debug!( + debug!( "Flushed relay handle to {:?} ({:?}): sent={}, flushed={}", - client_sock, - convo, - num_sent, - flushed + client_sock, convo, num_sent, flushed ); return Ok((num_sent, flushed)); } @@ -2541,6 +2540,7 @@ impl PeerNetwork { Ok(Ok(x)) => x, Ok(Err(_)) | Err(_) => { // connection broken; next list + debug!("Relay handle broken to event {}", event_id); broken.push(*event_id); break; } @@ -2548,7 +2548,7 @@ impl PeerNetwork { if !flushed && num_sent == 0 { // blocked on this peer's socket - test_debug!("Relay handle to event {} is blocked", event_id); + debug!("Relay handle to event {} is blocked", event_id); break; } @@ -2582,7 +2582,7 @@ impl PeerNetwork { /// Return true if we finish, and true if we're throttled fn do_network_neighbor_walk(&mut self, ibd: bool) -> bool { if cfg!(test) && self.connection_opts.disable_neighbor_walk { - test_debug!("neighbor walk is disabled"); + debug!("neighbor walk is disabled"); return true; } @@ -2780,7 +2780,7 @@ impl PeerNetwork { fn need_public_ip(&mut self) -> bool { if !self.public_ip_learned { // IP was given, not learned. nothing to do - test_debug!("{:?}: IP address was given to us", &self.local_peer); + debug!("{:?}: IP address was given to us", &self.local_peer); return false; } if self.local_peer.public_ip_address.is_some() @@ -2788,7 +2788,7 @@ impl PeerNetwork { >= get_epoch_time_secs() { // still fresh - test_debug!("{:?}: learned IP address is still fresh", &self.local_peer); + debug!("{:?}: learned IP address is still fresh", &self.local_peer); return false; } let throttle_timeout = if self.local_peer.public_ip_address.is_none() { @@ -2851,7 +2851,7 @@ impl PeerNetwork { match self.do_learn_public_ip() { Ok(b) => { if !b { - test_debug!("{:?}: try do_learn_public_ip again", &self.local_peer); + debug!("{:?}: try do_learn_public_ip again", &self.local_peer); return false; } } @@ -2938,7 +2938,7 @@ impl PeerNetwork { for (_, block, _) in network_result.blocks.iter() { if block_set.contains(&block.block_hash()) { - test_debug!("Duplicate block {}", block.block_hash()); + debug!("Duplicate block {}", block.block_hash()); } block_set.insert(block.block_hash()); } @@ -2946,7 +2946,7 @@ impl PeerNetwork { for (_, mblocks, _) in network_result.confirmed_microblocks.iter() { for mblock in mblocks.iter() { if microblock_set.contains(&mblock.block_hash()) { - test_debug!("Duplicate microblock {}", mblock.block_hash()); + debug!("Duplicate microblock {}", mblock.block_hash()); } microblock_set.insert(mblock.block_hash()); } @@ -3760,7 +3760,7 @@ impl PeerNetwork { } None => { // skip this step -- no DNS client available - test_debug!( + debug!( "{:?}: no DNS client provided; skipping block download", &self.local_peer ); @@ -3866,7 +3866,7 @@ impl PeerNetwork { } None => { // skip this step -- no DNS client available - test_debug!( + debug!( "{:?}: no DNS client provided; skipping block download", &self.local_peer ); @@ -3915,7 +3915,11 @@ impl PeerNetwork { convo.to_neighbor_key(), ), None => { - test_debug!("No such neighbor event={}", event_id); + debug!( + "{:?}: No such neighbor event={}", + self.get_local_peer(), + event_id + ); return None; } }; @@ -3924,10 +3928,9 @@ impl PeerNetwork { let reciprocal_event_id = match self.find_reciprocal_event(event_id) { Some(re) => re, None => { - test_debug!( + debug!( 
"{:?}: no reciprocal conversation for {:?}", - &self.local_peer, - &neighbor_key + &self.local_peer, &neighbor_key ); return None; } @@ -3941,32 +3944,26 @@ impl PeerNetwork { convo.to_neighbor_key(), ), None => { - test_debug!( + debug!( "{:?}: No reciprocal conversation for {} (event={})", - &self.local_peer, - &neighbor_key, - event_id + &self.local_peer, &neighbor_key, event_id ); return None; } }; if !is_authenticated && !reciprocal_is_authenticated { - test_debug!( + debug!( "{:?}: {:?} and {:?} are not authenticated", - &self.local_peer, - &neighbor_key, - &reciprocal_neighbor_key + &self.local_peer, &neighbor_key, &reciprocal_neighbor_key ); return None; } if !is_outbound && !reciprocal_is_outbound { - test_debug!( + debug!( "{:?}: {:?} and {:?} are not outbound", - &self.local_peer, - &neighbor_key, - &reciprocal_neighbor_key + &self.local_peer, &neighbor_key, &reciprocal_neighbor_key ); return None; } @@ -3994,7 +3991,7 @@ impl PeerNetwork { /// for. Add them to our network pingbacks fn schedule_network_pingbacks(&mut self, event_ids: Vec) { if cfg!(test) && self.connection_opts.disable_pingbacks { - test_debug!("{:?}: pingbacks are disabled for testing", &self.local_peer); + debug!("{:?}: pingbacks are disabled for testing", &self.local_peer); return; } @@ -4076,7 +4073,7 @@ impl PeerNetwork { } } - test_debug!( + debug!( "{:?}: have {} pingbacks scheduled", &self.local_peer, self.walk_pingbacks.len() @@ -4247,7 +4244,7 @@ impl PeerNetwork { .as_stacks_nakamoto() .is_some(), }; - test_debug!( + debug!( "{:?}: Parent Stacks tip off of {} is {:?}", self.get_local_peer(), &stacks_tip_block_id, @@ -4261,7 +4258,7 @@ impl PeerNetwork { if self.current_reward_sets.len() > 3 { self.current_reward_sets.retain(|old_rc, _| { if (*old_rc).saturating_add(2) < rc { - test_debug!("Drop reward cycle info for cycle {}", old_rc); + debug!("Drop reward cycle info for cycle {}", old_rc); return false; } true @@ -4343,10 +4340,9 @@ impl PeerNetwork { anchor_block_hash: anchor_block_header.anchored_header.block_hash(), }; - test_debug!( + debug!( "Store cached reward set for reward cycle {} anchor block {}", - rc, - &rc_info.anchor_block_hash + rc, &rc_info.anchor_block_hash ); self.current_reward_sets.insert(rc, rc_info); } @@ -4469,6 +4465,13 @@ impl PeerNetwork { }; // update cached burnchain view for /v2/info + debug!( + "{:?}: chain view for burn block {} has stacks tip consensus {}", + &self.local_peer, + new_chain_view.burn_block_height, + &new_chain_view.rc_consensus_hash + ); + self.chain_view = new_chain_view; self.chain_view_stable_consensus_hash = new_chain_view_stable_consensus_hash; } @@ -4538,7 +4541,7 @@ impl PeerNetwork { .get_last_selected_anchor_block_txid()? 
.unwrap_or(Txid([0x00; 32])); - test_debug!( + debug!( "{:?}: chain view is {:?}", &self.get_local_peer(), &self.chain_view @@ -4588,12 +4591,12 @@ impl PeerNetwork { }; self.parent_stacks_tip = parent_stacks_tip; - test_debug!( + debug!( "{:?}: canonical Stacks tip is now {:?}", self.get_local_peer(), &self.stacks_tip ); - test_debug!( + debug!( "{:?}: parent canonical Stacks tip is now {:?}", self.get_local_peer(), &self.parent_stacks_tip From 12a2f48ed2c1dfb28ece0efad0d87a5d6112d203 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:48:28 -0400 Subject: [PATCH 086/910] chore: more debug output --- stackslib/src/net/stackerdb/db.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index d95d3ebbdb6..1dab3f40523 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -324,6 +324,8 @@ impl<'a> StackerDBTx<'a> { } } + debug!("Reset slot {} of {}", slot_id, smart_contract); + // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; let mut stmt = self.sql_tx.prepare(&qry)?; From 0ec2cb53bccca34f295660e67d93b467d2617de1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:48:47 -0400 Subject: [PATCH 087/910] fix: if there are no peers, then immediately reset stackerdb sync --- stackslib/src/net/stackerdb/sync.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 8444ed5e551..85e76ea5242 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -639,9 +639,10 @@ impl StackerDBSync { self.replicas = replicas; } debug!( - "{:?}: connect_begin: establish StackerDB sessions to {} neighbors", + "{:?}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", network.get_local_peer(), - self.replicas.len() + self.replicas.len(), + network.get_num_p2p_convos() ); if self.replicas.len() == 0 { // nothing to do @@ -1227,8 +1228,11 @@ impl StackerDBSync { let done = self.connect_begin(network)?; if done { self.state = StackerDBSyncState::ConnectFinish; - blocked = false; + } else { + // no replicas; try again + self.state = StackerDBSyncState::Finished; } + blocked = false; } StackerDBSyncState::ConnectFinish => { let done = self.connect_try_finish(network)?; @@ -1276,6 +1280,11 @@ impl StackerDBSync { { // someone pushed newer chunk data to us, and getting chunks is // enabled, so immediately go request them + debug!( + "{:?}: immediately retry StackerDB GetChunks on {} due to PushChunk NACK", + network.get_local_peer(), + &self.smart_contract_id + ); self.recalculate_chunk_request_schedule(network)?; self.state = StackerDBSyncState::GetChunks; } else { From 58d880e58fa01da4ab4488783ee975b9ede3810e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:49:07 -0400 Subject: [PATCH 088/910] chore: API sync --- stackslib/src/net/tests/download/nakamoto.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 9de9fb087bf..5937f433840 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1480,7 +1480,6 @@ fn test_make_tenure_downloaders() { NakamotoDownloadStateMachine::load_tenure_start_blocks( 
             &wanted_tenures,
             chainstate,
-            &nakamoto_tip,
             &mut tenure_start_blocks,
         )
         .unwrap();

From 491877a221d1900f5aa1fec082a9a57085a97188 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 13:49:22 -0400
Subject: [PATCH 089/910] fix: poll every second

---
 testnet/stacks-node/src/config.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index bdf3bd4c3dc..d03bd422b29 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -1334,7 +1334,7 @@ impl Config {
     /// the poll time is dependent on the first attempt time.
     pub fn get_poll_time(&self) -> u64 {
         let poll_timeout = if self.node.miner {
-            cmp::min(5000, self.miner.first_attempt_time_ms / 2)
+            cmp::min(1000, self.miner.first_attempt_time_ms / 2)
         } else {
             5000
         };

From 98b25fd5965a9800615d87f5602912acfe9bc7af Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 13:51:02 -0400
Subject: [PATCH 090/910] feat: store block signatures as we get them from other signers, and provide a means of loading them back

---
 stacks-signer/src/signerdb.rs | 240 ++++++++++++++++++++++++++++++++--
 1 file changed, 228 insertions(+), 12 deletions(-)

diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index 74cefbc44b5..7266e6eb75d 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -17,9 +17,10 @@
 use std::path::Path;
 use std::time::SystemTime;
 
-use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote};
+use blockstack_lib::chainstate::nakamoto::NakamotoBlock;
 use blockstack_lib::util_lib::db::{
-    query_row, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError,
+    query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql,
+    Error as DBError,
 };
 use clarity::types::chainstate::BurnchainHeaderHash;
 use clarity::util::get_epoch_time_secs;
@@ -29,11 +30,42 @@ use rusqlite::{
 };
 use serde::{Deserialize, Serialize};
 use slog::{slog_debug, slog_error};
+use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec};
 use stacks_common::types::chainstate::ConsensusHash;
 use stacks_common::util::hash::Sha512Trunc256Sum;
+use stacks_common::util::secp256k1::MessageSignature;
 use stacks_common::{debug, error};
 use wsts::net::NonceRequest;
 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+/// A vote across the signer set for a block
+pub struct NakamotoBlockVote {
+    /// Signer signature hash (i.e. block hash) of the Nakamoto block
+    pub signer_signature_hash: Sha512Trunc256Sum,
+    /// Whether or not the block was rejected
+    pub rejected: bool,
+}
+
+impl StacksMessageCodec for NakamotoBlockVote {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        write_next(fd, &self.signer_signature_hash)?;
+        if self.rejected {
+            write_next(fd, &1u8)?;
+        }
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let signer_signature_hash = read_next(fd)?;
+        let rejected_byte: Option<u8> = read_next(fd).ok();
+        let rejected = rejected_byte.is_some();
+        Ok(Self {
+            signer_signature_hash,
+            rejected,
+        })
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug, PartialEq, Default)]
 /// Information specific to Signer V1
 pub struct BlockInfoV1 {
@@ -153,7 +185,7 @@ pub struct SignerDb {
     db: Connection,
 }
 
-static CREATE_BLOCKS_TABLE: &str = "
+static CREATE_BLOCKS_TABLE_1: &str = "
 CREATE TABLE IF NOT EXISTS blocks (
     reward_cycle INTEGER NOT NULL,
     signer_signature_hash TEXT NOT NULL,
@@ -165,7 +197,7 @@ CREATE TABLE IF NOT EXISTS blocks (
     PRIMARY KEY (reward_cycle, signer_signature_hash)
 ) STRICT";
 
-static CREATE_INDEXES: &str = "
+static CREATE_INDEXES_1: &str = "
 CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over);
 CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash);
 CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$.valid')));
@@ -197,19 +229,66 @@ static DROP_SCHEMA_0: &str = "
     DROP TABLE IF EXISTS blocks;
     DROP TABLE IF EXISTS db_config;";
 
+static DROP_SCHEMA_1: &str = "
+    DROP TABLE IF EXISTS burn_blocks;
+    DROP TABLE IF EXISTS signer_states;
+    DROP TABLE IF EXISTS blocks;
+    DROP TABLE IF EXISTS db_config;";
+
+static CREATE_BLOCKS_TABLE_2: &str = "
+CREATE TABLE IF NOT EXISTS blocks (
+    reward_cycle INTEGER NOT NULL,
+    signer_signature_hash TEXT NOT NULL,
+    block_info TEXT NOT NULL,
+    consensus_hash TEXT NOT NULL,
+    signed_over INTEGER NOT NULL,
+    broadcasted INTEGER NOT NULL,
+    stacks_height INTEGER NOT NULL,
+    burn_block_height INTEGER NOT NULL,
+    PRIMARY KEY (reward_cycle, signer_signature_hash)
+) STRICT";
+
+static CREATE_BLOCK_SIGNATURES_TABLE: &str = r#"
+CREATE TABLE IF NOT EXISTS block_signatures (
+    -- The block sighash commits to all of the stacks and burnchain state as of its parent,
+    -- as well as the tenure itself so there's no need to include the reward cycle. Just
+    -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX,
+    -- and stacks forks.
+    signer_signature_hash TEXT NOT NULL,
+    -- signature itself
+    signature TEXT NOT NULL,
+    PRIMARY KEY (signature)
+) STRICT;"#;
+
+static CREATE_INDEXES_2: &str = r#"
+CREATE INDEX IF NOT EXISTS block_reward_cycle_and_signature ON block_signatures(signer_signature_hash);
+"#;
+
 static SCHEMA_1: &[&str] = &[
     DROP_SCHEMA_0,
     CREATE_DB_CONFIG,
     CREATE_BURN_STATE_TABLE,
-    CREATE_BLOCKS_TABLE,
+    CREATE_BLOCKS_TABLE_1,
     CREATE_SIGNER_STATE_TABLE,
-    CREATE_INDEXES,
+    CREATE_INDEXES_1,
     "INSERT INTO db_config (version) VALUES (1);",
 ];
 
+static SCHEMA_2: &[&str] = &[
+    DROP_SCHEMA_1,
+    CREATE_DB_CONFIG,
+    CREATE_BURN_STATE_TABLE,
+    CREATE_BLOCKS_TABLE_2,
+    CREATE_SIGNER_STATE_TABLE,
+    CREATE_BLOCK_SIGNATURES_TABLE,
+    CREATE_INDEXES_1,
+    CREATE_INDEXES_2,
+    "INSERT INTO db_config (version) VALUES (2);",
+];
+
 impl SignerDb {
     /// The current schema version used in this build of the signer binary.
-    pub const SCHEMA_VERSION: u32 = 1;
+    pub const SCHEMA_VERSION: u32 = 2;
 
     /// Create a new `SignerState` instance.
/// This will create a new SQLite database at the given path @@ -253,6 +332,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 1 to schema 2 + fn schema_2_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 2 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_2.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). @@ -262,7 +355,8 @@ impl SignerDb { let version = Self::get_schema_version(&sql_tx)?; match version { 0 => Self::schema_1_migration(&sql_tx)?, - 1 => break, + 1 => Self::schema_2_migration(&sql_tx)?, + 2 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -392,6 +486,7 @@ impl SignerDb { let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); let signed_over = &block_info.signed_over; + let broadcasted = false; let vote = block_info .vote .as_ref() @@ -403,14 +498,16 @@ impl SignerDb { "sighash" => %hash, "block_id" => %block_id, "signed" => %signed_over, + "broadcasted" => %broadcasted, "vote" => vote ); self.db .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", params![ u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, signed_over, + &broadcasted, u64_to_sql(block_info.block.header.chain_length)?, block_info.block.header.consensus_hash.to_hex(), ], @@ -427,6 +524,70 @@ impl SignerDb { Ok(result.is_some()) } + + /// Record an observed block signature + pub fn add_block_signature( + &self, + block_sighash: &Sha512Trunc256Sum, + signature: &MessageSignature, + ) -> Result<(), DBError> { + let qry = "INSERT OR REPLACE INTO block_signatures (signer_signature_hash, signature) VALUES (?1, ?2);"; + let args = params![ + block_sighash, + serde_json::to_string(signature).map_err(|e| DBError::SerializationError(e))? 
+        ];
+
+        debug!("Inserting block signature.";
+            "sighash" => %block_sighash,
+            "signature" => %signature);
+
+        self.db.execute(qry, args)?;
+        Ok(())
+    }
+
+    /// Get all signatures for a block
+    pub fn get_block_signatures(
+        &self,
+        block_sighash: &Sha512Trunc256Sum,
+    ) -> Result<Vec<MessageSignature>, DBError> {
+        let qry = "SELECT signature FROM block_signatures WHERE signer_signature_hash = ?1";
+        let args = params![block_sighash];
+        let sigs_txt: Vec<String> = query_rows(&self.db, qry, args)?;
+        let mut sigs = vec![];
+        for sig_txt in sigs_txt.into_iter() {
+            let sig = serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError)?;
+            sigs.push(sig);
+        }
+        Ok(sigs)
+    }
+
+    /// Mark a block as having been broadcasted
+    pub fn set_block_broadcasted(
+        &self,
+        reward_cycle: u64,
+        block_sighash: &Sha512Trunc256Sum,
+    ) -> Result<(), DBError> {
+        let qry = "UPDATE blocks SET broadcasted = 1 WHERE reward_cycle = ?1 AND signer_signature_hash = ?2";
+        let args = params![u64_to_sql(reward_cycle)?, block_sighash];
+
+        debug!("Marking block {} as broadcasted", block_sighash);
+        self.db.execute(qry, args)?;
+        Ok(())
+    }
+
+    /// Is a block broadcasted already
+    pub fn is_block_broadcasted(
+        &self,
+        reward_cycle: u64,
+        block_sighash: &Sha512Trunc256Sum,
+    ) -> Result<bool, DBError> {
+        let qry =
+            "SELECT broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2";
+        let args = params![u64_to_sql(reward_cycle)?, block_sighash];
+
+        let broadcasted: i64 = query_row(&self.db, qry, args)?.unwrap_or(0);
+        Ok(broadcasted != 0)
+    }
 }
 
 fn try_deserialize<T>(s: Option<String>) -> Result<Option<T>, DBError>
 where
@@ -454,13 +615,12 @@ mod tests {
     use std::fs;
     use std::path::PathBuf;
 
-    use blockstack_lib::chainstate::nakamoto::{
-        NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote,
-    };
+    use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader};
     use clarity::util::secp256k1::MessageSignature;
     use libsigner::BlockProposal;
 
     use super::*;
+    use crate::signerdb::NakamotoBlockVote;
 
     fn _wipe_db(db_path: &PathBuf) {
         if fs::metadata(db_path).is_ok() {
@@ -703,4 +863,60 @@ mod tests {
             Some("3.45.0".to_string())
         );
     }
+
+    #[test]
+    fn add_and_get_block_signatures() {
+        let db_path = tmp_db_path();
+        let db = SignerDb::new(db_path).expect("Failed to create signer db");
+
+        let block_id = Sha512Trunc256Sum::from_data("foo".as_bytes());
+        let sig1 = MessageSignature([0x11; 65]);
+        let sig2 = MessageSignature([0x22; 65]);
+
+        assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![]);
+
+        db.add_block_signature(&block_id, &sig1).unwrap();
+        assert_eq!(
+            db.get_block_signatures(&block_id).unwrap(),
+            vec![sig1.clone()]
+        );
+
+        db.add_block_signature(&block_id, &sig2).unwrap();
+        assert_eq!(
+            db.get_block_signatures(&block_id).unwrap(),
+            vec![sig1.clone(), sig2.clone()]
+        );
+    }
+
+    #[test]
+    fn test_and_set_block_broadcasted() {
+        let db_path = tmp_db_path();
+        let mut db = SignerDb::new(db_path).expect("Failed to create signer db");
+
+        let (block_info_1, _block_proposal) = create_block_override(|b| {
+            b.block.header.miner_signature = MessageSignature([0x01; 65]);
+            b.burn_height = 1;
+        });
+
+        db.insert_block(&block_info_1)
+            .expect("Unable to insert block into db");
+
+        assert!(!db
+            .is_block_broadcasted(
+                block_info_1.reward_cycle,
+                &block_info_1.signer_signature_hash()
+            )
+            .unwrap());
+        db.set_block_broadcasted(
+            block_info_1.reward_cycle,
+            &block_info_1.signer_signature_hash(),
+        )
+        .unwrap();
+        assert!(db
+            .is_block_broadcasted(
+                block_info_1.reward_cycle,
+                &block_info_1.signer_signature_hash()
+            )
+            .unwrap());
+    }
 }
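The `NakamotoBlockVote` codec added above encodes rejection by the mere presence of a trailing byte: an accept serializes as just the 32-byte sighash, and any trailing byte on the wire deserializes as a rejection. A round-trip sketch of that property, assuming the type and codec exactly as in the patch (hypothetical test code, not part of the patch):

```rust
use stacks_common::codec::StacksMessageCodec;
use stacks_common::util::hash::Sha512Trunc256Sum;

fn vote_round_trip(rejected: bool) {
    let vote = NakamotoBlockVote {
        signer_signature_hash: Sha512Trunc256Sum::from_data(b"block"),
        rejected,
    };
    let mut bytes = vec![];
    vote.consensus_serialize(&mut bytes).unwrap();
    // an accept is exactly the 32-byte sighash; a reject carries one extra byte
    assert_eq!(bytes.len(), 32 + usize::from(rejected));
    let parsed = NakamotoBlockVote::consensus_deserialize(&mut &bytes[..]).unwrap();
    assert_eq!(parsed, vote);
}
```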
From 3da95c6ed6d46c760c89a1271ffc017e3ce0bde2 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 13:51:31 -0400
Subject: [PATCH 091/910] feat: watch for signer-post events from other signers, and store their signatures. If we get enough, then put them into the target block's header and upload the signed block to the node

---
 stacks-signer/src/v0/signer.rs | 229 +++++++++++++++++++++++++++++++--
 1 file changed, 220 insertions(+), 9 deletions(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 93927b03fd0..ba2559d78b1 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -12,9 +12,11 @@
 //
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
+use std::collections::{BTreeMap, HashMap};
 use std::fmt::Debug;
 use std::sync::mpsc::Sender;
 
+use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader;
 use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse;
 use clarity::types::chainstate::StacksPrivateKey;
 use clarity::types::PrivateKey;
@@ -24,6 +26,8 @@ use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage};
 use libsigner::{BlockProposal, SignerEvent};
 use slog::{slog_debug, slog_error, slog_info, slog_warn};
 use stacks_common::types::chainstate::StacksAddress;
+use stacks_common::util::hash::Sha512Trunc256Sum;
+use stacks_common::util::secp256k1::MessageSignature;
 use stacks_common::{debug, error, info, warn};
 
 use crate::chainstate::{ProposalEvalConfig, SortitionsView};
@@ -50,6 +54,8 @@ pub struct Signer {
     pub signer_addresses: Vec<StacksAddress>,
     /// The reward cycle this signer belongs to
     pub reward_cycle: u64,
+    /// Reward set signer addresses and their weights
+    pub signer_weights: HashMap<StacksAddress, usize>,
     /// SignerDB for state management
     pub signer_db: SignerDb,
     /// Configuration for proposal evaluation
@@ -109,13 +115,23 @@ impl SignerTrait<SignerMessage> for Signer {
         match event {
             SignerEvent::BlockValidationResponse(block_validate_response) => {
                 debug!("{self}: Received a block proposal result from the stacks node...");
-                self.handle_block_validate_response(block_validate_response)
+                self.handle_block_validate_response(stacks_client, block_validate_response)
             }
             SignerEvent::SignerMessages(_signer_set, messages) => {
                 debug!(
-                    "{self}: Received {} messages from the other signers.
Ignoring...", + "{self}: Received {} messages from the other signers", messages.len() ); + // try and gather signatures + for message in messages { + let SignerMessage::BlockResponse(block_response) = message else { + continue; + }; + let BlockResponse::Accepted((block_hash, signature)) = block_response else { + continue; + }; + self.handle_block_signature(stacks_client, block_hash, signature); + } } SignerEvent::MinerMessages(messages, miner_pubkey) => { debug!( @@ -202,16 +218,41 @@ impl From for Signer { let signer_db = SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); + + // compute signer addresses *in reward cycle order* + let signer_ids_and_addrs: BTreeMap<_, _> = signer_config + .signer_entries + .signer_ids + .iter() + .map(|(addr, id)| (*id, addr.clone())) + .collect(); + + let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); + + let signer_weights = signer_addresses + .iter() + .map(|addr| { + let Some(signer_id) = signer_config.signer_entries.signer_ids.get(addr) else { + panic!("Malformed config: no signer ID for {}", addr); + }; + let Some(key_ids) = signer_config.signer_entries.signer_key_ids.get(signer_id) + else { + panic!( + "Malformed config: no key IDs for signer ID {} ({})", + signer_id, addr + ); + }; + (addr.clone(), key_ids.len()) + }) + .collect(); + Self { private_key: signer_config.stacks_private_key, stackerdb, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, - signer_addresses: signer_config - .signer_entries - .signer_ids - .into_keys() - .collect(), + signer_addresses, + signer_weights, signer_slot_ids: signer_config.signer_slot_ids.clone(), reward_cycle: signer_config.reward_cycle, signer_db, @@ -260,7 +301,7 @@ impl Signer { ); return; } - // TODO: should add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. 
// the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); if let Some(block_info) = self @@ -387,8 +428,13 @@ impl Signer { } /// Handle the block validate response returned from our prior calls to submit a block for validation - fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { + fn handle_block_validate_response( + &mut self, + stacks_client: &StacksClient, + block_validate_response: &BlockValidateResponse, + ) { debug!("{self}: Received a block validate response: {block_validate_response:?}"); + let mut signature_opt = None; let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); @@ -414,6 +460,8 @@ impl Signer { .private_key .sign(&signer_signature_hash.0) .expect("Failed to sign block"); + + signature_opt = Some(signature.clone()); ( BlockResponse::accepted(signer_signature_hash, signature), block_info, @@ -461,5 +509,168 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + + if let Some(signature) = signature_opt { + // have to save the signature _after_ the block info + self.handle_block_signature( + stacks_client, + &block_info.signer_signature_hash(), + &signature, + ); + } + } + + /// Compute the signing weight and total weight, given a list of signatures + fn compute_signature_weight( + &self, + block_hash: &Sha512Trunc256Sum, + sigs: &[MessageSignature], + ) -> (u32, u32) { + let signing_weight = sigs.iter().fold(0usize, |signing_weight, sig| { + let weight = if let Ok(public_key) = + Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), sig) + { + let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); + let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + *stacker_weight + } else { + 0 + }; + signing_weight.saturating_add(weight) + }); + + let total_weight = self + .signer_weights + .values() + .fold(0usize, |acc, val| acc.saturating_add(*val)); + ( + u32::try_from(signing_weight) + .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")), + u32::try_from(total_weight) + .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")), + ) + } + + /// Handle an observed signature from another signer + fn handle_block_signature( + &mut self, + stacks_client: &StacksClient, + block_hash: &Sha512Trunc256Sum, + signature: &MessageSignature, + ) { + debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); + + // authenticate the signature -- it must be signed by one of the stacking set + let is_valid_sig = self + .signer_addresses + .iter() + .find(|addr| { + let Ok(public_key) = + Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) + else { + return false; + }; + let stacker_address = StacksAddress::p2pkh(true, &public_key); + + // it only matters that the address hash bytes match + stacker_address.bytes == addr.bytes + }) + .is_some(); + + if !is_valid_sig { + debug!("{self}: Receive invalid signature {signature}. Will not store."); + return; + } + + self.signer_db + .add_block_signature(block_hash, signature) + .unwrap_or_else(|_| panic!("{self}: Failed to save block signature")); + + // do we have enough signatures to broadcast? 
+        let signatures = self
+            .signer_db
+            .get_block_signatures(block_hash)
+            .unwrap_or_else(|_| panic!("{self}: Failed to load block signatures"));
+
+        let (signature_weight, total_weight) =
+            self.compute_signature_weight(block_hash, &signatures);
+        let min_weight = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)
+            .unwrap_or_else(|_| {
+                panic!("{self}: Failed to compute threshold weight for {total_weight}")
+            });
+
+        if min_weight > signature_weight {
+            debug!(
+                "{self}: Not enough signatures on block {} (have {}, need at least {}/{})",
+                block_hash, signature_weight, min_weight, total_weight
+            );
+            return;
+        }
+
+        // have enough signatures to broadcast!
+        // have we broadcasted before?
+        if self
+            .signer_db
+            .is_block_broadcasted(self.reward_cycle, block_hash)
+            .unwrap_or_else(|_| {
+                panic!("{self}: failed to determine if block {block_hash} was broadcasted")
+            })
+        {
+            debug!("{self}: will not re-broadcast block {}", block_hash);
+            return;
+        }
+
+        let Ok(Some(block_info)) = self
+            .signer_db
+            .block_lookup(self.reward_cycle, block_hash)
+            .map_err(|e| {
+                warn!("{self}: Failed to load block {block_hash}: {e:?})");
+                e
+            })
+        else {
+            warn!("{self}: No such block {block_hash}");
+            return;
+        };
+
+        // put signatures in order by signer address (i.e. reward cycle order)
+        let addrs_to_sigs: HashMap<_, _> = signatures
+            .into_iter()
+            .filter_map(|sig| {
+                let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), &sig)
+                else {
+                    return None;
+                };
+                let addr = StacksAddress::p2pkh(self.mainnet, &public_key);
+                Some((addr, sig))
+            })
+            .collect();
+
+        let signatures: Vec<_> = self
+            .signer_addresses
+            .iter()
+            .filter_map(|addr| addrs_to_sigs.get(addr).cloned())
+            .collect();
+
+        let mut block = block_info.block;
+        block.header.signer_signature = signatures;
+
+        let broadcasted = stacks_client
+            .post_block(&block)
+            .map_err(|e| {
+                warn!(
+                    "{self}: Failed to post block {block_hash} (id {}): {e:?}",
+                    &block.block_id()
+                );
+                e
+            })
+            .is_ok();
+
+        if broadcasted {
+            self.signer_db
+                .set_block_broadcasted(self.reward_cycle, block_hash)
+                .unwrap_or_else(|_| {
+                    panic!("{self}: failed to determine if block {block_hash} was broadcasted")
+                });
+        }
+    }
 }
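The gate above broadcasts only once the gathered weight clears `NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)`. A sketch of the arithmetic this assumes (a 70%-of-total-weight threshold, rounded up; the helper below is a hypothetical stand-in, not the library function):

```rust
// Hypothetical stand-in for the voting threshold, assuming ceil(total * 7 / 10).
fn voting_weight_threshold(total_weight: u32) -> u32 {
    let scaled = u64::from(total_weight) * 7;
    let threshold = scaled / 10 + u64::from(scaled % 10 != 0);
    u32::try_from(threshold).expect("threshold fits in u32 if total does")
}

fn main() {
    // ten signers with weight 1 each: seven accepts clear the bar
    assert_eq!(voting_weight_threshold(10), 7);
    // total weight 15: ceil(10.5) = 11
    assert_eq!(voting_weight_threshold(15), 11);
    // with the `min_weight > signature_weight` check above, a block whose
    // signature_weight equals the threshold is broadcast; anything below is held
}
```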
From 7b6fce043dad0104f2884771d68a2153bb0d25ce Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 13:52:08 -0400
Subject: [PATCH 092/910] chore: clean up imports

---
 stacks-signer/src/v1/signer.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs
index f78f3b9e296..c23dbdda907 100644
--- a/stacks-signer/src/v1/signer.rs
+++ b/stacks-signer/src/v1/signer.rs
@@ -21,7 +21,7 @@ use std::time::Instant;
 
 use blockstack_lib::chainstate::burn::ConsensusHashExtensions;
 use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners;
-use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote};
+use blockstack_lib::chainstate::nakamoto::NakamotoBlock;
 use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME;
 use blockstack_lib::chainstate::stacks::StacksTransaction;
 use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse;
@@ -57,7 +57,7 @@ use crate::chainstate::SortitionsView;
 use crate::client::{ClientError, SignerSlotID, StacksClient};
 use crate::config::SignerConfig;
 use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult};
-use crate::signerdb::{BlockInfo, SignerDb};
+use crate::signerdb::{BlockInfo, NakamotoBlockVote, SignerDb};
 use crate::v1::coordinator::CoordinatorSelector;
 use crate::Signer as SignerTrait;

From d35992a60bf4f34622c4988a71452440d0902e28 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 13:52:17 -0400
Subject: [PATCH 093/910] chore: NakamotoBlockVote belongs in the signer

---
 stackslib/src/chainstate/nakamoto/mod.rs | 35 ++++++------------------
 1 file changed, 8 insertions(+), 27 deletions(-)

diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs
index 09794c47759..cc999a21587 100644
--- a/stackslib/src/chainstate/nakamoto/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/mod.rs
@@ -377,6 +377,10 @@ pub trait StacksDBIndexed {
             .is_none()
         {
             // tenure not started
+            debug!(
+                "No tenure-start block for {} off of {}",
+                tenure_id_consensus_hash, tip
+            );
             return Ok(None);
         }
         if self
@@ -387,6 +391,10 @@ pub trait StacksDBIndexed {
             .is_none()
         {
             // tenure has started, but is not done yet
+            debug!(
+                "Tenure {} not finished off of {}",
+                tenure_id_consensus_hash, tip
+            );
             return Ok(Some(false));
         }
 
@@ -609,33 +617,6 @@ impl FromRow<NakamotoBlockHeader> for NakamotoBlockHeader {
     }
 }
 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-/// A vote across the signer set for a block
-pub struct NakamotoBlockVote {
-    pub signer_signature_hash: Sha512Trunc256Sum,
-    pub rejected: bool,
-}
-
-impl StacksMessageCodec for NakamotoBlockVote {
-    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
-        write_next(fd, &self.signer_signature_hash)?;
-        if self.rejected {
-            write_next(fd, &1u8)?;
-        }
-        Ok(())
-    }
-
-    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
-        let signer_signature_hash = read_next(fd)?;
-        let rejected_byte: Option<u8> = read_next(fd).ok();
-        let rejected = rejected_byte.is_some();
-        Ok(Self {
-            signer_signature_hash,
-            rejected,
-        })
-    }
-}
-
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub struct NakamotoBlock {
     pub header: NakamotoBlockHeader,

From 322e04cddaf6bf56d800aef5b9b67d95cb085580 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 13:52:33 -0400
Subject: [PATCH 094/910] chore: pass chainstate to v0 signer loop so we can poll for signatures from a processed block posted by the signers

---
 testnet/stacks-node/src/nakamoto_node/miner.rs | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 527117fb4dc..a1c1fedd957 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -439,14 +439,22 @@ impl BlockMinerThread {
             },
         )?;
 
+        let mut chain_state =
+            neon_node::open_chainstate_with_faults(&self.config).map_err(|e| {
+                NakamotoNodeError::SigningCoordinatorFailure(format!(
+                    "Failed to open chainstate DB. Cannot mine!
{e:?}" + )) + })?; + *attempts += 1; - let signature = coordinator.begin_sign_v0( + let signature = coordinator.run_sign_v0( new_block, burn_block_height, *attempts, &tip, &self.burnchain, &sort_db, + &mut chain_state, &stackerdbs, &self.globals.counters, &self.burn_election_block.consensus_hash, From 812deb736fdcd732bd99f47936e2ed9b8a53c530 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:53:12 -0400 Subject: [PATCH 095/910] feat: look in the chainstate for a staging block posted by signers with the signatures we're waiting for, so we can unblock the miner if the block arrives from any signer before their signatures --- .../src/nakamoto_node/sign_coordinator.rs | 66 +++++++++++++++++-- 1 file changed, 60 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b6e42b87ee1..a5e0fc0e8a6 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -26,6 +26,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; +use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; @@ -53,7 +54,7 @@ use crate::Config; /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? -static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(50); +static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); /// The `SignCoordinator` struct represents a WSTS FIRE coordinator whose /// sole function is to serve as the coordinator for Nakamoto block signing. @@ -202,7 +203,6 @@ impl SignCoordinator { reward_set: &RewardSet, message_key: Scalar, config: &Config, - // v1: bool, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -630,8 +630,13 @@ impl SignCoordinator { /// Start gathering signatures for a Nakamoto block. /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond - /// with their signatures. - pub fn begin_sign_v0( + /// with their signatures. It does so in two ways, concurrently: + /// * It waits for signer StackerDB messages with signatures. If enough signatures can be + /// found, then the block can be broadcast. + /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. 
+ pub fn run_sign_v0( &mut self, block: &NakamotoBlock, burn_block_height: u64, @@ -639,6 +644,7 @@ impl SignCoordinator { burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, + chain_state: &mut StacksChainState, stackerdbs: &StackerDBs, counters: &Counters, election_sortition: &ConsensusHash, @@ -699,12 +705,16 @@ impl SignCoordinator { let mut total_weight_signed: u32 = 0; let mut gathered_signatures = BTreeMap::new(); - info!("SignCoordinator: beginning to watch for block signatures."; + info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; "threshold" => self.weight_threshold, ); let start_ts = Instant::now(); while start_ts.elapsed() <= self.signing_round_timeout { + // one of two things can happen: + // * we get enough signatures from stackerdb from the signers, OR + // * we see our block get processed in our chainstate (meaning, the signers broadcasted + // the block and our node got it and processed it) let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { @@ -716,9 +726,52 @@ impl SignCoordinator { )) } }; + // look in the nakamoto staging db -- a block can only get stored there if it has + // enough signing weight to clear the threshold + if let Ok(Some((block, _sz))) = chain_state + .nakamoto_blocks_db() + .get_nakamoto_block(&block.block_id()) + .map_err(|e| { + warn!( + "Failed to query chainstate for block {}: {:?}", + &block.block_id(), + &e + ); + e + }) + { + debug!("SignCoordinator: Found signatures in relayed block"); + return Ok(block.header.signer_signature); + } + // we don't have the block we ostensibly mined, but perhaps the tenure has advanced + // anyway? If so, then give up. + let canonical_stacks_header = + NakamotoChainState::get_canonical_block_header(chain_state.db(), sortdb) + .map_err(|e| { + let msg = format!("Failed to query canonical stacks tip: {:?}", &e); + warn!("{}", &msg); + NakamotoNodeError::SignerSignatureError(msg) + })? + .ok_or_else(|| { + let msg = "No canonical stacks tip".to_string(); + warn!("{}", &msg); + NakamotoNodeError::SignerSignatureError(msg) + })?; + + if canonical_stacks_header.anchored_header.height() > block.header.chain_length { + info!( + "SignCoordinator: our block {} is superseded by block {}", + block.header.block_id(), + canonical_stacks_header.index_block_hash() + ); + break; + } + + // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + if !is_signer_event { debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); continue; @@ -775,6 +828,7 @@ impl SignCoordinator { "Processed signature but didn't validate over the expected block. 
Returning error.";
                        "signature" => %signature,
                        "block_signer_signature_hash" => %block_sighash,
+                       "response_hash" => %response_hash,
                        "slot_id" => slot_id,
                    );
                    continue;
@@ -809,7 +863,7 @@ impl SignCoordinator {
                     .checked_add(signer_entry.weight)
                     .expect("FATAL: total weight signed exceeds u32::MAX");
             }
-            debug!("Signature Added to block";
+            debug!("SignCoordinator: Signature Added to block";
                 "block_signer_sighash" => %block_sighash,
                 "signer_pubkey" => signer_pubkey.to_hex(),
                 "signer_slot_id" => slot_id,

From 522dcf27f35bd0357e2865eac69d5b1e8d3cf70c Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Fri, 2 Aug 2024 14:06:33 -0400
Subject: [PATCH 096/910] feat: timeout connections to observers

Without a timeout for these connections, the node can get stuck here and
it is not obvious what is happening. With this timeout, we will see log
messages that will point to the problem.

---
 testnet/stacks-node/src/event_dispatcher.rs | 30 +++++++++++++--------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs
index 5a72e4ca0a9..ccff517d7b4 100644
--- a/testnet/stacks-node/src/event_dispatcher.rs
+++ b/testnet/stacks-node/src/event_dispatcher.rs
@@ -318,6 +318,7 @@ impl EventObserver {
         };
 
         let backoff = Duration::from_millis((1.0 * 1_000.0) as u64);
+        let connection_timeout = Duration::from_secs(5);
 
         loop {
             let body = body.clone();
@@ -326,19 +327,26 @@ impl EventObserver {
             req.set_body(body);
 
             let response = async_std::task::block_on(async {
-                let stream = match TcpStream::connect(self.endpoint.clone()).await {
-                    Ok(stream) => stream,
-                    Err(err) => {
+                match async_std::future::timeout(
+                    connection_timeout,
+                    TcpStream::connect(self.endpoint.clone()),
+                )
+                .await
+                {
+                    Ok(Ok(stream)) => match client::connect(stream, req).await {
+                        Ok(response) => Some(response),
+                        Err(err) => {
+                            warn!("Event dispatcher: rpc invocation failed - {:?}", err);
+                            None
+                        }
+                    },
+                    Ok(Err(err)) => {
                         warn!("Event dispatcher: connection failed - {:?}", err);
-                        return None;
+                        None
                     }
-                };
-
-                match client::connect(stream, req).await {
-                    Ok(response) => Some(response),
-                    Err(err) => {
-                        warn!("Event dispatcher: rpc invocation failed - {:?}", err);
-                        return None;
+                    Err(_) => {
+                        error!("Event dispatcher: connection attempt timed out");
+                        None
                     }
                 }
             });
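The shape of this fix, wrapping only the future that can hang in `async_std::future::timeout` while keeping the two failure modes distinct, is reusable for any observer connection. A stripped-down sketch under the same assumptions (hypothetical endpoint handling, not the dispatcher code itself):

```rust
use std::time::Duration;

use async_std::future::timeout;
use async_std::net::TcpStream;
use async_std::task;

fn connect_with_timeout(endpoint: &str) -> Option<TcpStream> {
    task::block_on(async {
        // the outer Result is the timeout; the inner Result is the connect itself
        match timeout(Duration::from_secs(5), TcpStream::connect(endpoint)).await {
            Ok(Ok(stream)) => Some(stream),
            Ok(Err(err)) => {
                eprintln!("connection failed - {err:?}");
                None
            }
            Err(_elapsed) => {
                eprintln!("connection attempt timed out");
                None
            }
        }
    })
}
```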
From 69872cf8640f42d780edb1134f2875284db2080f Mon Sep 17 00:00:00 2001
From: Hank Stoever
Date: Fri, 2 Aug 2024 11:07:52 -0700
Subject: [PATCH 097/910] feat: fix block-to-reward-cycle lookup logic off-by-one error

---
 .../src/chainstate/nakamoto/coordinator/mod.rs |  2 +-
 testnet/stacks-node/src/tests/signer/mod.rs    | 13 +++++++------
 testnet/stacks-node/src/tests/signer/v0.rs     |  4 ++--
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs
index 17cfed5cd6d..15cc7f08526 100644
--- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs
@@ -546,7 +546,7 @@ pub fn load_nakamoto_reward_set(
         "burnchain_height" => %anchor_block_sn.block_height);
 
     let reward_set = provider.get_reward_set_nakamoto(
-        prepare_end_height.saturating_sub(1),
+        prepare_end_height,
         chain_state,
         burnchain,
        sort_db,

diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
index 35db96c845b..175022d68c0 100644
--- a/testnet/stacks-node/src/tests/signer/mod.rs
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -64,7 +64,8 @@ use crate::tests::nakamoto_integrations::{
     naka_neon_integration_conf, next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE,
 };
 use crate::tests::neon_integrations::{
-    next_block_and_wait, run_until_burnchain_height, test_observer, wait_for_runloop,
+    get_chain_info, next_block_and_wait, run_until_burnchain_height, test_observer,
+    wait_for_runloop,
 };
 use crate::tests::to_addr;
 use crate::{BitcoinRegtestController, BurnchainController};
@@ -473,15 +474,15 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>>
     fn get_current_reward_cycle(&self) -> u64 {
-        let block_height = self
+        let block_height = get_chain_info(&self.running_nodes.conf).burn_block_height;
+        let rc = self
             .running_nodes
-            .btc_regtest_controller
-            .get_headers_height();
-        self.running_nodes
             .btc_regtest_controller
             .get_burnchain()
             .block_height_to_reward_cycle(block_height)
-            .unwrap()
+            .unwrap();
+        info!("Get current reward cycle: block_height = {block_height}, rc = {rc}");
+        rc
     }
 
     fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID {

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 9a6d362e59f..f08f0f87ed8 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -2136,8 +2136,8 @@ fn signer_set_rollover() {
     let current_signers = signer_test.get_reward_set_signers(new_reward_cycle);
     assert_eq!(current_signers.len(), new_num_signers as usize);
     for signer in current_signers.iter() {
-        assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec()));
-        assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec()));
+        assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec()));
+        assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec()));
     }
 
     info!("---- Mining a block to verify new signer set -----");

From caa0c831cd60fc8d6c9380d91d21f9c6de50a7c5 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Fri, 2 Aug 2024 15:32:31 -0400
Subject: [PATCH 098/910] chore: update deprecated VSCode extension

---
 .vscode/extensions.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index be7e11c2a88..00035443cbf 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -2,7 +2,7 @@
   "recommendations": [
     "rust-lang.rust-analyzer",
     "vadimcn.vscode-lldb",
-    "serayuzgur.crates",
+    "fill-labs.dependi",
     "editorconfig.editorconfig",
   ]
 }

From 19076c1e5aa3ee7e032c72320ee2c96aaecf11cd Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Fri, 2 Aug 2024 15:40:29 -0400
Subject: [PATCH 099/910] chore: only timeout on the `TcpStream::connect`

Don't timeout on the `client::connect` since this has the potential for
an observer to receive duplicate events.

---
 testnet/stacks-node/src/event_dispatcher.rs | 38 ++++++++++-----------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs
index
--- testnet/stacks-node/src/event_dispatcher.rs | 38 ++++++++++----------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ccff517d7b4..58f567b4b76 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -6,7 +6,9 @@ use std::thread::sleep; use std::time::Duration; use async_h1::client; +use async_std::future::timeout; use async_std::net::TcpStream; +use async_std::task; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; @@ -326,26 +328,24 @@ impl EventObserver { req.append_header("Content-Type", "application/json"); req.set_body(body); - let response = async_std::task::block_on(async { - match async_std::future::timeout( - connection_timeout, - TcpStream::connect(self.endpoint.clone()), - ) - .await - { - Ok(Ok(stream)) => match client::connect(stream, req).await { - Ok(response) => Some(response), - Err(err) => { - warn!("Event dispatcher: rpc invocation failed - {:?}", err); - None + let response = task::block_on(async { + let stream = + match timeout(connection_timeout, TcpStream::connect(&self.endpoint)).await { + Ok(Ok(stream)) => stream, + Ok(Err(err)) => { + warn!("Event dispatcher: connection failed - {:?}", err); + return None; } - }, - Ok(Err(err)) => { - warn!("Event dispatcher: connection failed - {:?}", err); - None - } - Err(_) => { - error!("Event dispatcher: connection attempt timed out"); + Err(_) => { + error!("Event dispatcher: connection attempt timed out"); + return None; + } + }; + + match client::connect(stream, req).await { + Ok(response) => Some(response), + Err(err) => { + warn!("Event dispatcher: rpc invocation failed - {:?}", err); None } } From 74efa2ca2012f0f652ac88c662db5b45849ff4b5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 2 Aug 2024 16:15:28 -0400 Subject: [PATCH 100/910] Fix off by one error in mock_sign_epoch_25 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 42 ++++++++++++---------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a6cc420784..c2a4d8ce67c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -175,9 +175,8 @@ impl SignerTest { .wrapping_add(reward_cycle_len) .wrapping_add(1); - let next_reward_cycle_boundary = epoch_25_reward_cycle_boundary - .wrapping_add(reward_cycle_len) - .saturating_sub(1); + let next_reward_cycle_boundary = + epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); run_until_burnchain_height( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, @@ -2146,7 +2145,6 @@ fn mock_sign_epoch_25() { info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); // Mine until epoch 3.0 and ensure that no more mock signatures are received - let mut reward_cycle = signer_test.get_current_reward_cycle(); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, @@ -2163,19 +2161,33 @@ fn mock_sign_epoch_25() { assert_eq!(signer_slot_ids.len(), num_signers); // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition let main_poll_time = Instant::now(); - let mut current_burn_block_height = signer_test 
+ while signer_test .running_nodes .btc_regtest_controller - .get_headers_height(); - while current_burn_block_height + 1 < epoch_3_start_height { - current_burn_block_height = signer_test + .get_headers_height() + < epoch_3_start_height + { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + let current_burn_block_height = signer_test .running_nodes .btc_regtest_controller .get_headers_height(); - let current_reward_cycle = signer_test.get_current_reward_cycle(); - if current_reward_cycle != reward_cycle { - debug!("Rolling over reward cycle to {:?}", current_reward_cycle); - reward_cycle = current_reward_cycle; + if current_burn_block_height + % signer_test + .running_nodes + .conf + .get_burnchain() + .pox_constants + .reward_cycle_length as u64 + == 0 + { + reward_cycle += 1; + debug!("Rolling over reward cycle to {:?}", reward_cycle); stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::new(), // We are just reading so don't care what the key is @@ -2190,12 +2202,6 @@ fn mock_sign_epoch_25() { .collect(); assert_eq!(signer_slot_ids.len(), num_signers); } - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); let mut mock_signatures = vec![]; let mock_poll_time = Instant::now(); debug!("Waiting for mock signatures for burn block height {current_burn_block_height}"); From 7d07a5f80254724b9e06b87de9804e93979dd117 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 16:37:20 -0400 Subject: [PATCH 101/910] feat: decode nakamoto block --- stackslib/src/main.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 8660e0e9a74..fa54b34b86f 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -55,7 +55,7 @@ use blockstack_lib::chainstate::burn::db::sortdb::{ use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; -use blockstack_lib::chainstate::nakamoto::NakamotoChainState; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, @@ -243,6 +243,25 @@ fn main() { process::exit(0); } + if argv[1] == "decode-nakamoto-block" { + if argv.len() < 3 { + eprintln!("Usage: {} decode-nakamoto-block BLOCK_HEX", argv[0]); + process::exit(1); + } + + let block_hex = &argv[2]; + let block_data = hex_bytes(block_hex).unwrap_or_else(|_| panic!("Failed to decode hex")); + let block = NakamotoBlock::consensus_deserialize(&mut io::Cursor::new(&block_data)) + .map_err(|_e| { + eprintln!("Failed to decode block"); + process::exit(1); + }) + .unwrap(); + + println!("{:#?}", &block); + process::exit(0); + } + if argv[1] == "decode-net-message" { let data: String = argv[2].clone(); let buf = if data == "-" { From af274c17e93462f599a8bfa8b73b55b287caf975 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 16:37:29 -0400 Subject: [PATCH 102/910] fix: fix regressed unit test --- stackslib/src/net/tests/download/nakamoto.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs 
b/stackslib/src/net/tests/download/nakamoto.rs
index 5937f433840..57bd557186e 100644
--- a/stackslib/src/net/tests/download/nakamoto.rs
+++ b/stackslib/src/net/tests/download/nakamoto.rs
@@ -1483,6 +1483,10 @@ fn test_make_tenure_downloaders() {
         &mut tenure_start_blocks,
     )
     .unwrap();
+
+    // remove malleablized blocks
+    tenure_start_blocks.retain(|_, block| block.header.version == 0);
+
     assert_eq!(tenure_start_blocks.len(), wanted_tenures.len());
 
     for wt in wanted_tenures_with_blocks {

From 11feb3dfcb83501ac20d4230f68914a4f629d8f0 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 16:39:06 -0400
Subject: [PATCH 104/910] chore: info, not debug

---
 stacks-signer/src/v0/signer.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 4a626e8ab1a..919864f09d7 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -452,7 +452,7 @@ impl Signer {
         stacks_client: &StacksClient,
         block_validate_response: &BlockValidateResponse,
     ) {
-        debug!("{self}: Received a block validate response: {block_validate_response:?}");
+        info!("{self}: Received a block validate response: {block_validate_response:?}");
         let mut signature_opt = None;
         let (response, block_info) = match block_validate_response {
             BlockValidateResponse::Ok(block_validate_ok) => {

From e36bb1ebc86a10c22f7472caf01f86be0668c4c6 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 23:32:16 -0400
Subject: [PATCH 105/910] chore: more debug output

---
 stackslib/src/net/download/nakamoto/tenure.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs
index 21d06d1b2c8..a2a3b3eddd3 100644
--- a/stackslib/src/net/download/nakamoto/tenure.rs
+++ b/stackslib/src/net/download/nakamoto/tenure.rs
@@ -174,6 +174,8 @@ impl TenureStartEnd {
         let mut last_tenure_ch = None;
         debug!("Find available tenures in inventory {:?} rc {}", invs, rc);
         for (i, wt) in wanted_tenures.iter().enumerate() {
+            debug!("consider wanted tenure which starts with i={} {:?}", i, &wt);
+
             // advance to next tenure-start sortition
             let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX");
             if !invbits.get(bit).unwrap_or(false) {
From d86ff5c628649211cd906aa9f03ad8a778395293 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 23:32:34 -0400
Subject: [PATCH 106/910] fix: don't rely on processed tenure cache for inv generation -- it's not coherent with the canonical tip

---
 stackslib/src/net/inv/nakamoto.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs
index 5b09ace3968..9fa43e448da 100644
--- a/stackslib/src/net/inv/nakamoto.rs
+++ b/stackslib/src/net/inv/nakamoto.rs
@@ -125,9 +125,12 @@ impl InvGenerator {
         tip_block_id: &StacksBlockId,
         tenure_id_consensus_hash: &ConsensusHash,
     ) -> Result<Option<InvTenureInfo>, NetError> {
+        /*
+        TODO: marf'ed cache?
         if let Some(info_opt) = self.processed_tenures.get(&tenure_id_consensus_hash) {
             return Ok((*info_opt).clone());
         };
+        */
         // not cached so go load it
         let loaded_info_opt =
             InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?;

From 72c5e4863a0cde0aa68358565af6f5f38d623326 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 2 Aug 2024 23:32:55 -0400
Subject: [PATCH 107/910] chore: fault injection for block push

---
 testnet/stacks-node/src/config.rs | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index dc0820a39a2..0f13943b1c4 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -1829,6 +1829,8 @@ pub struct NodeConfig {
     pub use_test_genesis_chainstate: Option<bool>,
     pub always_use_affirmation_maps: bool,
     pub require_affirmed_anchor_blocks: bool,
+    /// Fault injection for failing to push blocks
+    pub fault_injection_block_push_fail_probability: Option<u8>,
     // fault injection for hiding blocks.
     // not part of the config file.
     pub fault_injection_hide_blocks: bool,
@@ -2115,6 +2117,7 @@ impl Default for NodeConfig {
             use_test_genesis_chainstate: None,
             always_use_affirmation_maps: false,
             require_affirmed_anchor_blocks: true,
+            fault_injection_block_push_fail_probability: None,
             fault_injection_hide_blocks: false,
             chain_liveness_poll_time_secs: 300,
             stacker_dbs: vec![],
@@ -2572,6 +2575,8 @@ pub struct NodeConfigFile {
     pub chain_liveness_poll_time_secs: Option<u64>,
     /// Stacker DBs we replicate
     pub stacker_dbs: Option<Vec<String>>,
+    /// fault injection: fail to push blocks with this probability (0-100)
+    pub fault_injection_block_push_fail_probability: Option<u8>,
@@ -2650,6 +2655,14 @@ impl NodeConfigFile {
                 .iter()
                 .filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok())
                 .collect(),
+            fault_injection_block_push_fail_probability: if self
+                .fault_injection_block_push_fail_probability
+                .is_some()
+            {
+                self.fault_injection_block_push_fail_probability
+            } else {
+                default_node_config.fault_injection_block_push_fail_probability
+            },
         };
         Ok(node_config)
     }
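The new `fault_injection_block_push_fail_probability` value is interpreted as a percentage in the 0-100 range; the next patch samples against it on every block push. A minimal sketch of that sampling, assuming `rand`'s `thread_rng`:

```rust
use rand::{thread_rng, Rng};

// Sketch: drop a block with the configured probability (0-100), where a
// missing setting means "never drop". Mirrors the miner logic in the patch below.
fn should_drop_block(fail_probability: Option<u8>) -> bool {
    // clamp the configured percentage to at most 100
    let drop_prob = fail_probability.unwrap_or(0).min(100);
    if drop_prob == 0 {
        return false;
    }
    let throw: u8 = thread_rng().gen_range(0..100);
    throw < drop_prob
}
```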
injection let block_id = block.block_id(); + let drop_prob = self + .config + .node + .fault_injection_block_push_fail_probability + .unwrap_or(0) + .max(100); + let will_drop = if drop_prob > 0 { + let throw: u8 = thread_rng().gen_range(0..100); + throw < drop_prob + } else { + false + }; + + if will_drop { + info!("Fault injection: drop block {}", &block_id); + return Ok(()); + } + + debug!("Broadcasting block {}", &block_id); if let Err(e) = self.p2p_handle.broadcast_message( vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { From ae946dd344ff90d8d4f84cd2de8474367f60eb09 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 4 Aug 2024 11:26:20 -0400 Subject: [PATCH 109/910] test: finish deadlock test --- .../chainstate/nakamoto/coordinator/tests.rs | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 91a8d9f9659..000ccc59ce7 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -2495,33 +2495,40 @@ fn process_next_nakamoto_block_deadlock() { info!("Creating peer"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let (chainstate, _) = &mut peer + .stacks_node + .as_mut() + .unwrap() + .chainstate + .reopen() + .unwrap(); // Lock the sortdb info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); let mut sortition_db = peer.sortdb().reopen().unwrap(); let sort_tx = sortition_db.tx_begin().unwrap(); + info!(" ------------------------------- SORTDB LOCKED"); - // Start another thread that opens the sortdb, waits 10s, then tries to - // lock the chainstate db. This should cause a deadlock if the block - // processing is not acquiring the locks in the correct order. - info!(" ------------------------------- SPAWNING BLOCKER THREAD"); - let blocker_thread = std::thread::spawn(move || { - // Wait a bit, to ensure the tenure will have grabbed any locks it needs - std::thread::sleep(std::time::Duration::from_secs(10)); + let miner_thread = std::thread::spawn(move || { + info!(" ------------------------------- MINING TENURE"); + let (block, burn_height, ..) = + peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + peer.try_process_block(&block).unwrap(); + info!(" ------------------------------- TENURE MINED"); + }); - // Lock the chainstate db - info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE"); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let (chainstate_tx, _) = chainstate.chainstate_tx_begin().unwrap(); + // Wait a bit, to ensure the tenure will have grabbed any locks it needs + std::thread::sleep(std::time::Duration::from_secs(10)); - info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED"); - info!(" ------------------------------- BLOCKER THREAD FINISHED"); - }); + // Lock the chainstate db + info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE"); + let chainstate_tx = chainstate.chainstate_tx_begin().unwrap(); - info!(" ------------------------------- MINING TENURE"); - let (block, burn_height, ..) 
= peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - info!(" ------------------------------- TENURE MINED"); + info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED"); + drop(chainstate_tx); + drop(sort_tx); + info!(" ------------------------------- MAIN THREAD FINISHED"); - // Wait for the blocker thread to finish - blocker_thread.join().unwrap(); + // Wait for the blocker and miner threads to finish + miner_thread.join().unwrap(); } From b557827cec222552a4a044305642160189f36737 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 4 Aug 2024 12:09:11 -0400 Subject: [PATCH 110/910] test: insert stall to ensure proper timing of test With this change in place, `process_next_nakamoto_block_deadlock` will timeout without the fix, and run successfully with the fix in place. --- .../chainstate/nakamoto/coordinator/tests.rs | 20 +++++++++++++------ stackslib/src/chainstate/nakamoto/mod.rs | 15 ++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 000ccc59ce7..aecf8c62a2c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -48,6 +48,7 @@ use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, + TEST_PROCESS_BLOCK_STALL, }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; @@ -2495,6 +2496,7 @@ fn process_next_nakamoto_block_deadlock() { info!("Creating peer"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let mut sortition_db = peer.sortdb().reopen().unwrap(); let (chainstate, _) = &mut peer .stacks_node .as_mut() @@ -2503,20 +2505,26 @@ fn process_next_nakamoto_block_deadlock() { .reopen() .unwrap(); - // Lock the sortdb - info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); - let mut sortition_db = peer.sortdb().reopen().unwrap(); - let sort_tx = sortition_db.tx_begin().unwrap(); - info!(" ------------------------------- SORTDB LOCKED"); + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); let miner_thread = std::thread::spawn(move || { info!(" ------------------------------- MINING TENURE"); let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - peer.try_process_block(&block).unwrap(); info!(" ------------------------------- TENURE MINED"); }); + // Wait a bit, to ensure the miner has reached the stall + std::thread::sleep(std::time::Duration::from_secs(10)); + + // Lock the sortdb + info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); + let sort_tx = sortition_db.tx_begin().unwrap(); + info!(" ------------------------------- SORTDB LOCKED"); + + // Un-stall the block processing + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + // Wait a bit, to ensure the tenure will have grabbed any locks it needs std::thread::sleep(std::time::Duration::from_secs(10)); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 1fab3ba9b1f..0674c49b527 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -270,6 +270,10 @@ lazy_static! 
{ ]; } +// Cause an artificial stall in block-processing, for testing. +#[cfg(any(test, feature = "testing"))] +pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); + /// Trait for common MARF getters between StacksDBConn and StacksDBTx pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result<Option<String>, DBError>; @@ -1722,6 +1726,17 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result<Option<StacksEpochReceipt>, ChainstateError> { + #[cfg(any(test, feature = "testing"))] + { + if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); + } + } let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db())? From 7f9c9bf7a201ce36aa1fd7f3ba947ce55b8a3797 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 5 Aug 2024 10:45:13 -0700 Subject: [PATCH 111/910] fix: remove extra logging --- testnet/stacks-node/src/nakamoto_node/miner.rs | 16 ---------------- .../src/nakamoto_node/sign_coordinator.rs | 9 +-------- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 3 files changed, 2 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 29b2195af98..18862630da4 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -357,14 +357,6 @@ impl BlockMinerThread { .block_height_to_reward_cycle(burn_election_height) .expect("FATAL: no reward cycle for sortition"); - #[cfg(test)] - { - info!( - "---- Fetching reward info at height {} for cycle {} ----", - burn_election_height, reward_cycle - ); - } - let reward_info = match load_nakamoto_reward_set( reward_cycle, &self.burn_election_block.sortition_id, @@ -393,14 +385,6 @@ impl BlockMinerThread { )); }; - #[cfg(test)] - { - info!( - "---- New reward set has {} signers ----", - reward_set.clone().signers.unwrap_or(vec![]).len(), - ); - } - self.signer_set_cache = Some(reward_set.clone()); Ok(reward_set) } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b933c8f7e25..db442ac46b7 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -642,13 +642,6 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result<Vec<MessageSignature>, NakamotoNodeError> { - #[cfg(test)] - { - info!( - "---- Sign coordinator starting. Burn tip height: {} ----", - burn_tip.block_height - ); - } let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; let reward_cycle_id = burnchain .block_height_to_reward_cycle(burn_tip.block_height) .expect("FATAL: tried to initialize coordinator before first burn block height"); @@ -742,7 +735,7 @@ impl SignCoordinator { continue; }; if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - info!("Received signer event for other reward cycle. Ignoring."); + debug!("Received signer event for other reward cycle. 
Ignoring."); continue; }; let slot_ids = modified_slots diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c2a4d8ce67c..dd298d27ea8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2080,6 +2080,7 @@ fn empty_sortition() { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); found_rejection = true; } else { + error!("Unexpected message type: {:?}", message); panic!("Unexpected message type"); } } From 6682ce811cd7b04cf2e1d4a7b8f0bcf602ac1b07 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 5 Aug 2024 13:05:35 -0700 Subject: [PATCH 112/910] fix: increase timeout while waiting for mock miner to sync --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 23270867b8b..5642017dfd6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7254,7 +7254,7 @@ fn mock_mining() { let mock_miner_timeout = Instant::now(); while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before { - if mock_miner_timeout.elapsed() >= Duration::from_secs(30) { + if mock_miner_timeout.elapsed() >= Duration::from_secs(60) { panic!( "Timed out waiting for mock miner block {}", follower_naka_mined_blocks_before + 1 From 946769a6de59a1b7a6588ed36a2f79fadbf339e8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:51:04 -0400 Subject: [PATCH 113/910] chore: pass sender reference to avoid a clone --- libsigner/src/runloop.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index b0f026f35fd..f423c9a8dde 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -53,7 +53,7 @@ pub trait SignerRunLoop { &mut self, event: Option>, cmd: Option, - res: Sender, + res: &Sender, ) -> Option; /// This is the main loop body for the signer. 
It continuously receives events from @@ -70,6 +70,7 @@ pub trait SignerRunLoop { result_send: Sender<R>, mut event_stop_signaler: EVST, ) -> Option<R> { + info!("Signer runloop begin"); loop { let poll_timeout = self.get_event_timeout(); let next_event_opt = match event_recv.recv_timeout(poll_timeout) { @@ -83,7 +84,7 @@ pub trait SignerRunLoop { // Do not block for commands let next_command_opt = command_recv.try_recv().ok(); if let Some(final_state) = - self.run_one_pass(next_event_opt, next_command_opt, result_send.clone()) + self.run_one_pass(next_event_opt, next_command_opt, &result_send) { info!("Runloop exit; signaling event-receiver to stop"); event_stop_signaler.send(); From 66889deee2e54be0ba5f43f446ec6b6d4ef0e1d3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:51:24 -0400 Subject: [PATCH 114/910] chore: pass a reference to the sender end of the runloop result channel to avoid clones --- stacks-signer/src/lib.rs | 2 +- stacks-signer/src/runloop.rs | 4 ++-- stacks-signer/src/v0/signer.rs | 3 ++- stacks-signer/src/v1/signer.rs | 18 +++++++++--------- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 15c0a25c3d5..dd94b8f3bb6 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -71,7 +71,7 @@ pub trait Signer<T: SignerEventTrait>: Debug + Display { stacks_client: &StacksClient, sortition_state: &mut Option<SortitionsView>, event: Option<&SignerEvent<T>>, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, current_reward_cycle: u64, ); /// Process a command diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 3e2ff53438f..284ea6ce19a 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -419,7 +419,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> &mut self, event: Option<SignerEvent<T>>, cmd: Option<RunLoopCommand>, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, ) -> Option<Vec<SignerResult>> { debug!( "Running one pass for the signer. 
state={:?}, cmd={cmd:?}, event={event:?}", @@ -452,7 +452,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> &self.stacks_client, &mut self.sortition_state, event.as_ref(), - res.clone(), + res, current_reward_cycle, ); // After processing event, run the next command for each signer diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 919864f09d7..985ddd4a268 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -92,7 +92,7 @@ impl SignerTrait<SignerMessage> for Signer { stacks_client: &StacksClient, sortition_state: &mut Option<SortitionsView>, event: Option<&SignerEvent<SignerMessage>>, - _res: Sender<Vec<SignerResult>>, + _res: &Sender<Vec<SignerResult>>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -676,6 +676,7 @@ impl Signer { let mut block = block_info.block; block.header.signer_signature = signatures; + debug!("{self}: Broadcasting Stacks block {} to node", &block.block_id()); let broadcasted = stacks_client .post_block(&block) .map_err(|e| { diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 0d2834ca7e3..476bb4feabc 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -164,7 +164,7 @@ impl SignerTrait<SignerMessage> for Signer { stacks_client: &StacksClient, _sortition_state: &mut Option<SortitionsView>, event: Option<&SignerEvent<SignerMessage>>, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -182,13 +182,13 @@ impl SignerTrait<SignerMessage> for Signer { return; } if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) { + if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { error!("{self}: failed to refresh DKG: {e}"); } } self.refresh_coordinator(); if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) { + if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { error!("{self}: failed to refresh DKG: {e}"); } } @@ -366,7 +366,7 @@ impl Signer { pub fn read_dkg_stackerdb_messages( &mut self, stacks_client: &StacksClient, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, current_reward_cycle: u64, ) -> Result<(), ClientError> { if self.state != State::Uninitialized { @@ -626,7 +626,7 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, current_reward_cycle: u64, ) { let mut block_info = match block_validate_response { @@ -718,7 +718,7 @@ impl Signer { fn handle_signer_messages( &mut self, stacks_client: &StacksClient, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, messages: &[SignerMessage], current_reward_cycle: u64, ) { @@ -761,7 +761,7 @@ impl Signer { fn handle_packets( &mut self, stacks_client: &StacksClient, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, packets: &[Packet], current_reward_cycle: u64, ) { @@ -1435,7 +1435,7 @@ impl Signer { /// Send any operation results across the provided channel fn send_operation_results( &mut self, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, operation_results: Vec<OperationResult>, ) { let nmb_results = operation_results.len(); @@ -1469,7 +1469,7 @@ impl Signer { pub fn refresh_dkg( &mut self, stacks_client: &StacksClient, - res: Sender<Vec<SignerResult>>, + res: &Sender<Vec<SignerResult>>, current_reward_cycle: u64, ) -> Result<(), ClientError> { // First attempt to retrieve the aggregate key from the contract. 
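The two patches above ([PATCH 113/910] and [PATCH 114/910]) make the signer run loop borrow the result channel (`&Sender<...>`) instead of taking an owned `Sender<...>` that had to be cloned on every pass. A minimal, self-contained sketch of this borrow-the-sender pattern — with illustrative names only, not the actual libsigner/stacks-signer types — might look like this:

```rust
use std::sync::mpsc::{channel, Sender};

// Hypothetical stand-in for a run loop; the real trait is libsigner's SignerRunLoop.
struct DemoRunLoop {
    passes_left: u32,
}

impl DemoRunLoop {
    // Borrowing `res` avoids cloning the channel's sender (a refcount bump on
    // shared channel state) on every iteration of the run loop.
    fn run_one_pass(&mut self, event: Option<u32>, res: &Sender<String>) -> Option<()> {
        if let Some(ev) = event {
            // A real signer would process the event here.
            let _ = res.send(format!("processed event {ev}"));
        }
        self.passes_left -= 1;
        // Returning Some(..) signals the loop to exit, mirroring run_one_pass.
        if self.passes_left == 0 {
            Some(())
        } else {
            None
        }
    }
}

fn main() {
    let (result_send, result_recv) = channel();
    let mut runloop = DemoRunLoop { passes_left: 3 };
    let mut event = 0u32;
    // One Sender lives for the whole loop; each pass only borrows it.
    while runloop.run_one_pass(Some(event), &result_send).is_none() {
        event += 1;
    }
    drop(result_send); // close the channel so the receiver iteration ends
    for msg in result_recv {
        println!("{msg}");
    }
}
```

Cloning an mpsc `Sender` is cheap but not free, and doing it once per pass is pointless when the loop can simply lend out the one sender it owns; borrowing also keeps ownership with the loop, which still drops the sender to close the channel on exit.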
From cd4be4c132e77d536fb1ca3e1b800466563d1834 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:51:48 -0400 Subject: [PATCH 115/910] fix: pass coordinator communication channels to relayer when processing an uploaded block, so the coordinator can be woken up if the block is new --- stackslib/src/net/api/postblock_v3.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index bcf994d4884..df7a7eae73e 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -121,7 +121,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { .ok_or(NetError::SendError("`block` not set".into()))?; let response = node - .with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + .with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { let mut handle_conn = sortdb.index_handle_at_tip(); let stacks_tip = network.stacks_tip.block_id(); Relayer::process_new_nakamoto_block( @@ -131,7 +131,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { chainstate, &stacks_tip, &block, - None, + rpc_args.coord_comms, NakamotoBlockObtainMethod::Uploaded, ) }) From 8657a28c35d6b4a5811aed8fda32b6f89f1ca310 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:12 -0400 Subject: [PATCH 116/910] chore: remove commented-out code --- stackslib/src/net/inv/nakamoto.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 9fa43e448da..0a2ea4dc639 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -125,12 +125,7 @@ impl InvGenerator { tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result<Option<InvTenureInfo>, NetError> { - /* - TODO: marf'ed cache? - if let Some(info_opt) = self.processed_tenures.get(&tenure_id_consensus_hash) { - return Ok((*info_opt).clone()); - }; - */ + // TODO: MARF-aware cache // not cached so go load it let loaded_info_opt = InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; From 0f2a1d8525d1732ff7700b79b37936a5437b9321 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:22 -0400 Subject: [PATCH 117/910] fix: pass coordinator comms to RPCHandlerArgs --- stackslib/src/net/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index da323be3e75..65a598afff2 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -96,6 +96,8 @@ use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; use crate::util_lib::strings::UrlString; +use crate::chainstate::coordinator::comm::CoordinatorChannels; + /// Implements RPC API pub mod api; /// Implements `ASEntry4` object, which is used in db.rs to store the AS number of an IP address. 
@@ -631,6 +633,8 @@ pub struct RPCHandlerArgs<'a> { pub fee_estimator: Option<&'a dyn FeeEstimator>, /// tx runtime cost metric pub cost_metric: Option<&'a dyn CostMetric>, + /// coordinator channels + pub coord_comms: Option<&'a CoordinatorChannels> } impl<'a> RPCHandlerArgs<'a> { From d108bece302a5f0bd827017c363f70dfbd08002d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:41 -0400 Subject: [PATCH 118/910] chore: more information on handling a nakamoto block --- stackslib/src/net/relay.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 32dc7d065ab..92a1ebb4801 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -805,9 +805,10 @@ impl Relayer { obtained_method: NakamotoBlockObtainMethod, ) -> Result { debug!( - "Handle incoming Nakamoto block {}/{}", + "Handle incoming Nakamoto block {}/{} obtained via {}", &block.header.consensus_hash, - &block.header.block_hash() + &block.header.block_hash(), + &obtained_method, ); // do we have this block? don't lock the DB needlessly if so. From d1e33c630fdd6199770764319f462dbbef7a034e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:59 -0400 Subject: [PATCH 119/910] feat: StacksTipChanged and SignersRejected error variants --- testnet/stacks-node/src/nakamoto_node.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index d9f44cc67bd..cde9c1da1f5 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -76,6 +76,10 @@ pub enum Error { SnapshotNotFoundForChainTip, /// The burnchain tip changed while this operation was in progress BurnchainTipChanged, + /// The Stacks tip changed while this operation was in progress + StacksTipChanged, + /// Signers rejected a block + SignersRejected, /// Error while spawning a subordinate thread SpawnError(std::io::Error), /// Injected testing errors From eb3041eca5f117d72eca3f8318d2558b67f2dccd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:53:18 -0400 Subject: [PATCH 120/910] feat: signers rejecting or the signers choosing a different stacks tip are reasons to abort mining --- testnet/stacks-node/src/nakamoto_node/miner.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 42b2de79667..7e4e3408fa2 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -308,8 +308,20 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Error while gathering signatures: {e:?}. Will try mining again."); - continue; + match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"); + return Err(e); + } + _ => { + error!("Error while gathering signatures: {e:?}. 
Will try mining again."); + continue; + } + } } }; @@ -661,7 +673,7 @@ impl BlockMinerThread { .node .fault_injection_block_push_fail_probability .unwrap_or(0) - .max(100); + .min(100); let will_drop = if drop_prob > 0 { let throw: u8 = thread_rng().gen_range(0..100); throw < drop_prob From c2d61dd2bdd4fa50d91d37f82f8e8e02b531c65b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:53:40 -0400 Subject: [PATCH 121/910] chore: pass coordinator comms to RPCHandlerArgs --- testnet/stacks-node/src/nakamoto_node/peer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 1fd53256237..7483ce5115f 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -266,6 +266,7 @@ impl PeerThread { cost_estimator: Some(cost_estimator.as_ref()), cost_metric: Some(cost_metric.as_ref()), fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), + coord_comms: Some(&self.globals.coord_comms), ..RPCHandlerArgs::default() }; self.net.run( From e8a0db6912d5db2fcd0281f725cd39636386ed97 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:54:00 -0400 Subject: [PATCH 122/910] fix: partial fix for #5046 --- .../stacks-node/src/nakamoto_node/relayer.rs | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8a5b75463b3..3ee862364c5 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -416,8 +416,30 @@ impl RelayerThread { MinerDirective::StopTenure } } else { - MinerDirective::ContinueTenure { - new_burn_view: consensus_hash, + let ih = self.sortdb.index_handle(&sn.sortition_id); + let parent_sn = ih.get_last_snapshot_with_sortition(sn.block_height).expect( + "FATAL: failed to query sortition DB for last snapshot with non-empty tenure", + ); + + let parent_epoch = + SortitionDB::get_stacks_epoch(self.sortdb.conn(), parent_sn.block_height) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); + + let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), sn.block_height) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); + + if parent_epoch.epoch_id != cur_epoch.epoch_id { + // this is the first-ever sortition, so definitely mine + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::ContinueTenure { + new_burn_view: consensus_hash, + } } }; Ok(directive) @@ -748,7 +770,10 @@ impl RelayerThread { )?; let new_miner_handle = std::thread::Builder::new() - .name(format!("miner.{parent_tenure_start}")) + .name(format!( + "miner.{parent_tenure_start} (bound ({},{})", + &self.config.node.p2p_bind, &self.config.node.rpc_bind + )) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) .map_err(|e| { From c499dc324d4c0ff5efa5da78c42814798c2ae9e2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:54:50 -0400 Subject: [PATCH 123/910] feat: if enough signers reject a block, abandon attempts to wait for signatures --- .../src/nakamoto_node/sign_coordinator.rs | 59 ++++++++++++++++--- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git 
a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 5f2aba5b8f7..d53dfa5319b 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -71,6 +71,7 @@ pub struct SignCoordinator { signing_round_timeout: Duration, signer_entries: HashMap<u32, NakamotoSignerEntry>, weight_threshold: u32, + total_weight: u32, pub next_signer_bitvec: BitVec<4000>, } @@ -300,6 +301,7 @@ impl SignCoordinator { next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, + total_weight, }; return Ok(sign_coordinator); } @@ -321,6 +323,7 @@ impl SignCoordinator { next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, + total_weight, }) } @@ -409,6 +412,7 @@ impl SignCoordinator { } #[cfg_attr(test, mutants::skip)] + #[cfg(any(test, feature = "testing"))] pub fn begin_sign_v1( &mut self, block: &NakamotoBlock, @@ -703,6 +707,7 @@ impl SignCoordinator { }; let mut total_weight_signed: u32 = 0; + let mut total_reject_weight: u32 = 0; let mut gathered_signatures = BTreeMap::new(); info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; @@ -726,9 +731,10 @@ impl SignCoordinator { )) } }; + // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold - if let Ok(Some((block, _sz))) = chain_state + if let Ok(Some((stored_block, _sz))) = chain_state .nakamoto_blocks_db() .get_nakamoto_block(&block.block_id()) .map_err(|e| { @@ -741,7 +747,7 @@ impl SignCoordinator { }) { debug!("SignCoordinator: Found signatures in relayed block"); - return Ok(block.header.signer_signature); + return Ok(stored_block.header.signer_signature); } // we don't have the block we ostensibly mined, but perhaps the tenure has advanced @@ -759,13 +765,20 @@ impl SignCoordinator { NakamotoNodeError::SignerSignatureError(msg) })?; - if canonical_stacks_header.anchored_header.height() > block.header.chain_length { + debug!( + "run_sign_v0: our canonical tip is currently {}/{}", + &canonical_stacks_header.consensus_hash, + &canonical_stacks_header.anchored_header.block_hash() + ); + if canonical_stacks_header.anchored_header.height() >= block.header.chain_length + && canonical_stacks_header.index_block_hash() != block.header.block_id() + { info!( - "SignCoordinator: our block {} is superseded by block {}", + "SignCoordinator: our block {} is superseded by block {}", block.header.block_id(), canonical_stacks_header.index_block_hash() ); - break; + return Err(NakamotoNodeError::StacksTipChanged); } // check to see if this event we got is a signer event @@ -809,8 +822,40 @@ impl SignCoordinator { response_hash, signature, ))) => (response_hash, signature), - SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { - debug!("Received rejected block response. 
Ignoring."); + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + if rejected_data.signer_signature_hash + == block.header.signer_signature_hash() + { + debug!( + "Signer {} rejected our block {}/{}", + slot_id, + &block.header.consensus_hash, + &block.header.block_hash() + ); + total_reject_weight = total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + if total_reject_weight.saturating_add(self.weight_threshold) + > self.total_weight + { + debug!( + "{}/{} signers vote to reject our block {}/{}", + total_reject_weight, + self.total_weight, + &block.header.consensus_hash, + &block.header.block_hash() + ); + return Err(NakamotoNodeError::SignersRejected); + } + } else { + debug!("Received rejected block response for a block besides my own. Ignoring."); + } continue; } SignerMessageV0::BlockProposal(_) => { From 7c43e3e92e061b2704f086080de7320280cdd7f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:55:07 -0400 Subject: [PATCH 124/910] chore: debug signer tests --- testnet/stacks-node/src/tests/signer/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 7fe508407b4..7ba63d4738a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -220,6 +220,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { - debug!("Waiting for Status..."); + debug!( + "Waiting for Status from {} signers...", + self.spawned_signers.len() + ); let now = std::time::Instant::now(); let mut states = Vec::with_capacity(self.spawned_signers.len()); for signer in self.spawned_signers.iter() { @@ -251,6 +256,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { + debug!( + "wait_for_states: got {}-th status, {:?}", + states.len(), + &state_info + ); states.push(state_info); } } From bd0ecd8bfc95e31fa456fbfa27f3e6d473dfd103 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:55:18 -0400 Subject: [PATCH 125/910] chore: longer timeout for waiting for signers to respond to status checks --- testnet/stacks-node/src/tests/signer/v0.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 351620f36ad..07b0e895289 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -182,7 +182,7 @@ impl SignerTest { ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(30); + let short_timeout = Duration::from_secs(60); let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event @@ -253,7 +253,7 @@ impl SignerTest { ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(30); + let short_timeout = Duration::from_secs(60); let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or 
signers may not // recognize that they are registered signers in the subsequent burn block event @@ -1265,7 +1265,15 @@ fn multiple_miners() { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + + info!( + "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", + &info_1, &info_2 + ); signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure From 3f776a8a6acc570f2444ecb024a4cf88e560a8bc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:58:00 -0400 Subject: [PATCH 126/910] chore: cargo fmt --- stacks-signer/src/v0/signer.rs | 5 ++++- stackslib/src/net/mod.rs | 5 ++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 985ddd4a268..df42315635b 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -676,7 +676,10 @@ impl Signer { let mut block = block_info.block; block.header.signer_signature = signatures; - debug!("{self}: Broadcasting Stacks block {} to node", &block.block_id()); + debug!( + "{self}: Broadcasting Stacks block {} to node", + &block.block_id() + ); let broadcasted = stacks_client .post_block(&block) .map_err(|e| { diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 65a598afff2..3ba4292f1c2 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -64,6 +64,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::{Error as burnchain_error, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::{ConsensusHash, Opcodes}; +use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::Error as coordinator_error; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::boot::{ @@ -96,8 +97,6 @@ use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; use crate::util_lib::strings::UrlString; -use crate::chainstate::coordinator::comm::CoordinatorChannels; - /// Implements RPC API pub mod api; /// Implements `ASEntry4` object, which is used in db.rs to store the AS number of an IP address. 
@@ -634,7 +633,7 @@ pub struct RPCHandlerArgs<'a> { /// tx runtime cost metric pub cost_metric: Option<&'a dyn CostMetric>, /// coordinator channels - pub coord_comms: Option<&'a CoordinatorChannels> + pub coord_comms: Option<&'a CoordinatorChannels>, } impl<'a> RPCHandlerArgs<'a> { From bef8c905d69996b18d7fe92db95bb1b2cccee85e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 5 Aug 2024 23:59:33 -0400 Subject: [PATCH 127/910] refactor: pull stalling logic out into functions --- .../chainstate/nakamoto/coordinator/tests.rs | 7 +++-- stackslib/src/chainstate/nakamoto/mod.rs | 31 +++++++++++++------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index aecf8c62a2c..ae6ce991120 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -47,7 +47,8 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, + disable_process_block_stall, enable_process_block_stall, NakamotoBlock, + NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, TEST_PROCESS_BLOCK_STALL, }; use crate::chainstate::stacks::address::PoxAddress; @@ -2505,7 +2506,7 @@ fn process_next_nakamoto_block_deadlock() { .reopen() .unwrap(); - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + enable_process_block_stall(); let miner_thread = std::thread::spawn(move || { info!(" ------------------------------- MINING TENURE"); @@ -2523,7 +2524,7 @@ fn process_next_nakamoto_block_deadlock() { info!(" ------------------------------- SORTDB LOCKED"); // Un-stall the block processing - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + disable_process_block_stall(); // Wait a bit, to ensure the tenure will have grabbed any locks it needs std::thread::sleep(std::time::Duration::from_secs(10)); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0674c49b527..b2106d8e755 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -274,6 +274,25 @@ lazy_static! { #[cfg(any(test, feature = "testing"))] pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); +fn stall_block_processing() { + if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. 
+ warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); + } +} + +pub fn enable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); +} + +pub fn disable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); +} + /// Trait for common MARF getters between StacksDBConn and StacksDBTx pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result<Option<String>, DBError>; @@ -1745,8 +1746,8 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result<Option<StacksEpochReceipt>, ChainstateError> { #[cfg(any(test, feature = "testing"))] - { - if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block processing is stalled due to testing directive."); - while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block processing is no longer stalled due to testing directive."); - } - } + stall_block_processing(); + let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = From 54ec4f5a67e130d21f644e99311c60a81cad7aaa Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 6 Aug 2024 07:34:59 -0700 Subject: [PATCH 128/910] fix: remove unnecessary clones --- stacks-signer/src/lib.rs | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index abc2db331be..13b14bd3583 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -147,7 +147,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner cmd_send, res_recv, _phantom: std::marker::PhantomData, - config: config.clone(), + config, } } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5642017dfd6..13de8a350c8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -105,7 +105,7 @@ use crate::tests::neon_integrations::{ }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, - set_random_binds, to_addr, + to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index dd298d27ea8..2bd54ecff70 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2393,7 +2393,7 @@ fn signer_set_rollover() { let short_timeout = Duration::from_secs(20); // Verify that naka_conf has our new signer's event observers - for toml in new_signer_configs.clone() { + for toml in &new_signer_configs { let signer_config = SignerConfig::load_from_str(&toml).unwrap(); let endpoint = format!("{}", signer_config.endpoint); assert!(signer_test From a04a5133ea797d65a5b05fd32ebf8f810a217950 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Aug 2024 10:48:23 -0400 Subject: [PATCH 129/910] fix: fix build error --- 
libsigner/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index c584572ba7b..fbe1e590897 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -76,7 +76,7 @@ impl SignerRunLoop>, Command, T> for Sim &mut self, event: Option>, _cmd: Option, - _res: Sender>>, + _res: &Sender>>, ) -> Option>> { debug!("Got event: {:?}", &event); if let Some(event) = event { From 7dbeeea4e603ac4045cb137ad013f03850c389f2 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 6 Aug 2024 10:31:06 -0700 Subject: [PATCH 130/910] default to localhost for burnchain config samples --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index ba42fb66579..143c604a123 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -8,7 +8,7 @@ prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoin.hiro.so" +peer_host = "127.0.0.1" # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 75785454dc6..9c3f609c7c2 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -10,4 +10,4 @@ prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoin.hiro.so" +peer_host = "127.0.0.1" From 588ec07c1e0f1279cc112468e29791bdcd35c370 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 6 Aug 2024 12:46:51 -0700 Subject: [PATCH 131/910] update pgp key for encrypting messages --- SECURITY.md | 65 +++++++++++------------------------------------------ 1 file changed, 13 insertions(+), 52 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index e9001abe0ac..d3d4ada23dd 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -21,61 +21,22 @@ You may also contact us with any questions or to report a vulnerabilty using the | Name | Fingerprint | | ------- | ------------------ | -| security@stacks.org | 8A8B 3C3B 803A 0603 8FB5 3F69 357C 80AB 0885 87A | +| security@stacks.org | ABA3 7FA3 6DBB A591 B0E5 5949 0E94 D065 B32E C7E6 | ``` -----BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBGSBJLgBEACb6bnuvchM5wzmCBh8tvb5Fc90AGmUC9Bfiw85kTNr5N+6Y+fj -Gcyy2ohUEh+5hQE2pJLYzWcEM8ZFomxuwuDkjEkwJHnMofTwPgeP5P9CJUgcOHDa -v/mzzSXze8nhcADiez6QMP1f1m32FoeLpjFyAPkxSzGDv1L8gMHCJn/d1lZyyl52 -1VO6kN6eazEuA9fCCK+ZjUWz5pZCs6QVQ2+3clOoEX+ycomult4/yJhwMHucIPbL -uUGJvpKXkHEi05G2H57mz8sHvz0euRNGTiEUQeVIzaLUmUuViij9KsKk0DSGj3yq -kI+zOcgjAGTMSK00i6bdBV+XZfZlg0uIATr7EGHnb3Lzbvn8lfo/3jaJlQu5elEf -ZlA2nE2dPUfhzY3t8GoroHrbqJaJFd9eZtfTMzwW11KdOzqa0V5FRUkxcBIb81+p -jb2o/YKGWPExX2cHOTYmUdQFM6AtLpif4pMeV11d52vy8LCsjZDwUSZM6lmcg+rL -o2dbBgLvBblHXRtS4UFvx7tHitl5DOk5ZZik3r3jWQmAUXVDBBpq2gaVkponliYv -iVeG+mRLoe+qpvQRMCaw5Rlth0MhqQ26tmpGUIavaFbDqARC8FeIfdov6bUP5/sJ -gaktJrED5T5hNks/N661/AJ8N7JCHJx1exW4TK052PZ2/hHxNSuUEm96VwARAQAB -tClzZWN1cml0eUBzdGFja3Mub3JnIDxzZWN1cml0eUBzdGFja3Mub3JnPokCVAQT -AQgAPhYhBIqLPDuAOgYDj7U/aTV8gKsIhYegBQJkgSS4AhsDBQkHhh87BQsJCAcC 
-BhUKCQgLAgQWAgMBAh4BAheAAAoJEDV8gKsIhYegWg8P/RsoODRC8QWYnc5oq2Yb -cJSR/0uRcWZVZC/guC553ax89Aro50YsWvd8Z2uakuKKRoc8aPfC4SL1Mufrncwo -9/pIoiB9NQhTAbnp7hBnF5dnIX+Jq4lQIqwG5E36juNiU23qglx3ZZxM5wZrkRi0 -5lsFHpjU4XRkaNgNs6vyiHmtzyR+iESEBY9szfWCRTK8DgOJPLrfDAnc5JWTq7iL -H8pUpClo5p0XFN39lgdhbEISRXaMqY0HJqAI9JKE5UxxRG2uuGbdeHTYu6ji+gz+ -g+gooyVYIVzXVAJHgD9tDsazD+n61epglF0qK0hb+NaRL/2F6KBpmpzY+iDmDkPu -5TTybS52Cm8zSUAsk5w/GSnknep929Cj5hhaD9ijHcLEV0VKSiN0edIPe+Nd57KK -sfggS4l8deD1OjcTxhawRiaKcthdWjm15DzP9WuYEURSpJZAmdSd5Cqx3bSconhW -iYjxAlgZb7t/OJr6N6YQZjga14kwjxia94WNiTz2UQLr/vYAJWQj9RypxL0IrFwr -pJcFnLKec68jLk8opg4LrY9O/gKHQuPDT1EEQ4ssknJAzKxCwrOwCrDvuIzeqzIx -L1mBAtCzF4Q/J1BlmFEIZ7022BycpzPL0VuOrgmyQ6FzEqiKme7Vy/PVWN7H7HhC -qmL2/g9lYt0+gPZazsc8f3NGuQINBGSBJLgBEADTnQe5mX60ClQqigDjAhypkFZY -6k1V850Gp93YsfMYMgzLcyywMo25RT904AF0X72mjF82YZmzOE/b1oSF4ns3nBIg -vCIiEsWTtFMZgerWKcHlYPE0VWR4iGC5DiOLbmrECPQ0JucEErJZWvypgot2R3p/ -hAkEV0CjZp8qObgBf+ViZmfMAkclVtJ5AFB0SQjx6m4ounpKV9euO2db302oMIbM -ssM1F2Dsl7oicAreHOdVZ5OLUkk5nrXmLHtIt6QppPVbWkJA9ArBwAHZ39vLQTBZ -YbehZxWDxzW/HK00CEzb70BwK0HZYFYt9lQwGRUou8dvtk3+nFRsfpAlFoHSLXjp -N+uZBnqQhUeyzT81PkavHpAGTq5ExgT13nyE9vJCPuf5lpthuWiUQYBHu5tUym6G -vHRpT1OyqcbUQUlS+iK24dwxglk2S/NYYOsKyRJ8AhLFQGqMHxlpqNsQ5wxFthZo -ayiP7CwaJFfB5TUe4zWpbMM545BPNQodcB8Njb62tj0ZoAgEbhXerMGrVfUGf6AL -FxcyGhGpjkRI4+e8HfDpiObMw2notIUMXJoYQv3Yf7X/n8QPX2EZDaB8dG43r2Hh -EeEDi6+WOI77LtdVDck71ZXqLukCrusO9HZ6GlB0ohqndRgueGztP82Af3W74Ohj -dEOcK0HC26dKPWhk2wARAQABiQI8BBgBCAAmFiEEios8O4A6BgOPtT9pNXyAqwiF -h6AFAmSBJLgCGwwFCQeGHzsACgkQNXyAqwiFh6CT4A//aOMVH/XIXngvfC/xOdDy -3JnZLtu4kmLfcvxbqEGrNhz1AW4t0Uivt9dgBb4VemgQajhYZyjdLgFhYGvCf446 -V1C79qWa1lwESmSWL63+rXNZMNV+siqnVhICrXw4FhCKP2tfnZ5uT03qTbu0S+9N -4bARjXkfYSxhVqeGmO/ZwuuHXQUojt/XNWBFbbKKM1Y6PlvfWrmX/S2cDAf0QgBd -MMLu7phbUjMzQDsenwiueWaRvDnsQB5GzwOiJheQuKLS1rYlJGnW2cwqjQtQnnC3 -YVb4iCialhAL/GWwjR/r7a6ZxuAB0j2zjKsaxtEMoTaVX3EW3Aoy73dvew0wyakq -OCchiIIJVvB6uXGufqAVVBJAgG7MQIEZLt7M6YSu0gYTdsEnNo7WZYMsX+/NGQ8G -5hguIJZl3MRtax1yPK0e0ergaDaetAhfWwQH2ltAVQColm3LfuLpcyoxYMhdiN86 -ggy4c1t0dS8owuAEdoKScOkOdENYEGF4mkd7nLkU5miaOMxg2NO9prCSpwwxDtt3 -XLkl0yw+0W0rM2Wu5pC0Xw21Cva+uBm3+kfyIRqrtc1Vb3ZrGKzCNQcAvvxq9XM5 -VeE6JLwVj8OP1TFuwmpJJeD5LTZDT0SvmjRB8OuxLwEHHjYtdm0ae0n2Cbou9Y0X -hmf6grobEcyS0PCsLHn3r7Y= -=/YN2 +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEZrJ2wBYJKwYBBAHaRw8BAQdADVWSZGbVgc0SE8XmXkRonl85wXrPHkl9bN0B +jKFBIRS0KXNlY3VyaXR5QHN0YWNrcy5vcmcgPHNlY3VyaXR5QHN0YWNrcy5vcmc+ +iJAEExYIADgWIQSro3+jbbulkbDlWUkOlNBlsy7H5gUCZrJ2wAIbAwULCQgHAgYV +CgkICwIEFgIDAQIeAQIXgAAKCRAOlNBlsy7H5tznAQC6iKqtjCqn2RjtCkr2V6xe +kCe92RfwWsG0415jVpVlDgEA350TCqIT1Jwyqz2aNT2TQ9F6fyKzAiNpLVRImOLH +4Aq4OARmsnbAEgorBgEEAZdVAQUBAQdAvwusRitvUX9hSC8NKS48VTT3LVvZvn87 +JQXRc2CngAEDAQgHiHgEGBYIACAWIQSro3+jbbulkbDlWUkOlNBlsy7H5gUCZrJ2 +wAIbDAAKCRAOlNBlsy7H5oCNAQDae9VhB98HMOvZ99ZuSEyLqXxKjK7xT2P0y1Tm +GuUnNAEAhI+1BjFvO/Hy50DcZTmHWvHJ6/dzibw5Ah+oE458IQo= +=yhSO -----END PGP PUBLIC KEY BLOCK----- ``` From d165ed38eafe8426928b9eef9960f3beea67e91b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 6 Aug 2024 14:57:21 -0500 Subject: [PATCH 132/910] ci: fix the empty_sortitions signers test in CI --- testnet/stacks-node/src/tests/signer/v0.rs | 45 ++++++++++++---------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 351620f36ad..c48e5bcfc8c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2054,35 +2054,38 @@ fn 
empty_sortition() { .collect(); assert_eq!(signer_slot_ids.len(), num_signers); - // The miner's proposed block should get rejected by the signers - let start_polling = Instant::now(); - let mut found_rejection = false; - while !found_rejection { - std::thread::sleep(Duration::from_secs(1)); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { + // The miner's proposed block should get rejected by all the signers + let mut found_rejections = Vec::new(); + wait_for(short_timeout.as_secs(), || { + for slot_id in signer_slot_ids.iter() { + if found_rejections.contains(slot_id) { + continue; + } + let mut latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[*slot_id] + ).expect("Failed to get message from stackerdb"); + assert!(latest_msgs.len() <= 1); + let Some(latest_msg) = latest_msgs.pop() else { + info!("No message yet from slot #{slot_id}, will wait to try again"); + continue; + }; if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code, .. - })) = message + })) = latest_msg { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); - found_rejection = true; + found_rejections.push(*slot_id); } else { - panic!("Unexpected message type"); + info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); } } - assert!( - start_polling.elapsed() <= short_timeout, - "Timed out after waiting for response from signer" - ); - } + // wait until we've found rejections for all the signers + Ok(found_rejections.len() == signer_slot_ids.len()) + }).unwrap(); signer_test.shutdown(); } From 571dfee691e1eb8a6f5978331296156104d62c4a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 6 Aug 2024 20:59:51 -0400 Subject: [PATCH 133/910] chore: resolve feature flag issue --- .../chainstate/nakamoto/coordinator/tests.rs | 5 +-- stackslib/src/chainstate/nakamoto/mod.rs | 40 ++++++++++--------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index ae6ce991120..569114aa124 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -44,12 +44,11 @@ use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; +use crate::chainstate::nakamoto::test_stall::*; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ - disable_process_block_stall, enable_process_block_stall, NakamotoBlock, - NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, - TEST_PROCESS_BLOCK_STALL, + NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs 
b/stackslib/src/chainstate/nakamoto/mod.rs index b2106d8e755..d059a96cb63 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -270,27 +270,29 @@ lazy_static! { ]; } -// Cause an artificial stall in block-processing, for testing. -#[cfg(any(test, feature = "testing"))] -pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); - -fn stall_block_processing() { - if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block processing is stalled due to testing directive."); - while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); +#[cfg(test)] +mod test_stall { + pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex<Option<bool>> = + std::sync::Mutex::new(None); + + pub fn stall_block_processing() { + if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); } - info!("Block processing is no longer stalled due to testing directive."); } -} -pub fn enable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); -} + pub fn enable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + } -pub fn disable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + pub fn disable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + } } /// Trait for common MARF getters @@ -1745,8 +1747,8 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result<Option<StacksEpochReceipt>, ChainstateError> { - #[cfg(any(test, feature = "testing"))] - stall_block_processing(); + #[cfg(test)] + test_stall::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = From e7bd86c6aad868a0c98f25c0e221ef300899d07f Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Wed, 7 Aug 2024 17:41:47 +0200 Subject: [PATCH 134/910] fix: handle empty type_args in define-trait definition --- clarity/src/vm/types/signatures.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 293c36fd5aa..9ba833ba7f5 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1655,6 +1655,9 @@ impl TypeSignature { epoch: StacksEpochId, clarity_version: ClarityVersion, ) -> Result<BTreeMap<ClarityName, FunctionSignature>> { + if type_args.is_empty() { + return Err(CheckErrors::InvalidTypeDescription); + } let mut trait_signature: BTreeMap<ClarityName, FunctionSignature> = BTreeMap::new(); let functions_types = type_args[0] .match_list() From 9ab4f034737e9bdf585ad95531271a2d42cc7601 Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Wed, 7 Aug 2024 20:05:29 +0300 Subject: [PATCH 135/910] feat: remove `unit-tests` from the `check-tests` job --- .github/workflows/stacks-core-tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 70ef457ce79..98eb5cf92c0 100644 
--- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -187,7 +187,6 @@ jobs: if: always() needs: - full-genesis - - unit-tests - open-api-validation - core-contracts-clarinet-test steps: From 530b6894d1c85a267e61e9e8008971cbca285247 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 7 Aug 2024 21:16:15 +0300 Subject: [PATCH 136/910] docs & script for running cargo mutants locally on CI limitation --- contrib/tools/local-mutation-testing.sh | 84 ++++++++++++++++++ docs/mutation-testing.md | 109 ++++++++++++++++++++++++ 2 files changed, 193 insertions(+) create mode 100644 contrib/tools/local-mutation-testing.sh create mode 100644 docs/mutation-testing.md diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh new file mode 100644 index 00000000000..a3d563682cd --- /dev/null +++ b/contrib/tools/local-mutation-testing.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +set -euo pipefail + +# Install cargo-mutants +cargo install --version 24.7.1 cargo-mutants --locked + +# Create diff file between current branch and develop branch +git diff origin/develop...HEAD > git.diff + +# Remove git diff files about removed/renamed files +awk ' + /^diff --git/ { + diff_line = $0 + getline + if ($0 !~ /^(deleted file mode|similarity index)/) { + print diff_line + print + } + } + !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} +' git.diff > processed.diff + +# Extract mutants based on the processed diff +cargo mutants --in-diff processed.diff --list > all_mutants.txt + +# Create a directory for organizing mutants +mkdir -p mutants_by_package + +# Organize mutants into files based on their main folder +while IFS= read -r line; do + package=$(echo "$line" | cut -d'/' -f1) + + case $package in + "stackslib") + echo "$line" >> "mutants_by_package/stackslib.txt" + ;; + "testnet") + echo "$line" >> "mutants_by_package/stacks-node.txt" + ;; + "stacks-signer") + echo "$line" >> "mutants_by_package/stacks-signer.txt" + ;; + *) + echo "$line" >> "mutants_by_package/small-packages.txt" + ;; + esac +done < all_mutants.txt + +# Function to run mutants for a package +run_mutants() { + local package=$1 + local threshold=$2 + local output_dir=$3 + local mutant_file="mutants_by_package/${package}.txt" + + if [ ! -f "$mutant_file" ]; then + echo "No mutants found for $package" + return + fi + + local regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "$mutant_file" | paste -sd'|' -) + local mutant_count=$(cargo mutants -F "$regex_pattern" -E ": replace .{1,2} with .{1,2} in " --list | wc -l) + + if [ "$mutant_count" -gt "$threshold" ]; then + echo "Running mutants for $package ($mutant_count mutants)" + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "$output_dir" \ + --test-tool=nextest \ + --package "$package" \ + -- --all-targets --test-threads 1 + + echo $? 
> "${output_dir}/exit_code.txt" + else + echo "Skipping $package, only $mutant_count mutants (threshold: $threshold)" + fi +} + +# Run mutants for each wanted package +run_mutants "stacks-signer" 500 "./stacks-signer_mutants" +run_mutants "stacks-node" 540 "./stacks-node_mutants" +run_mutants "stackslib" 72 "./stackslib_mutants" diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md new file mode 100644 index 00000000000..7c635f3915f --- /dev/null +++ b/docs/mutation-testing.md @@ -0,0 +1,109 @@ +# Mutation Testing + +This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation it provides by timing out after 6 hours. +[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation locally running the mutants created by the changes between the current branch and develop. It does automatically all the steps explained below. + +From the root level of the stacks-core repository run +```sh +./contrib/tools/local-mutation-testing.sh +``` + +## Prerequirements + +Install the cargo mutants library +```sh +cargo install --version 24.7.1 cargo-mutants --locked +``` + + +## Steps +1. be on source branch you would use for the PR. +2. create diff file comparing this branch with the `develop` branch + ```sh + git diff origin/develop..HEAD > git.diff + ``` +3. clean up the diff file and create auxiliary files + ```sh + awk ' + /^diff --git/ { + diff_line = $0 + getline + if ($0 !~ /^(deleted file mode|similarity index)/) { + print diff_line + print + } + } + !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} + ' git.diff > processed.diff + + # Extract mutants based on the processed diff + cargo mutants --in-diff processed.diff --list > all_mutants.txt + + # Create a directory for organizing mutants + mkdir -p mutants_by_package + + # Organize mutants into files based on their main folder + while IFS= read -r line; do + package=$(echo "$line" | cut -d'/' -f1) + + case $package in + "stackslib") + echo "$line" >> "mutants_by_package/stackslib.txt" + ;; + "testnet") + echo "$line" >> "mutants_by_package/stacks-node.txt" + ;; + "stacks-signer") + echo "$line" >> "mutants_by_package/stacks-signer.txt" + ;; + *) + echo "$line" >> "mutants_by_package/small-packages.txt" + ;; + esac + done < all_mutants.txt + ``` +4. based on the package required to run the mutants for + a. stackslib package + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./stackslib_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` + b. stacks-node (testnet) package + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./testnet_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` + c. stacks-signer + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./stacks-signer_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` + d. 
+   d. all other packages combined
+   ```sh
+   regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/small-packages.txt" | paste -sd'|' -)
+
+   cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \
+       -F "$regex_pattern" \
+       -E ": replace .{1,2} with .{1,2} in " \
+       --output "./small-packages_mutants" \
+       --test-tool=nextest \
+       -- --all-targets --test-threads 1
+   ```

From 14594ba59e9c95964704b37c15bc3a3189fe1a29 Mon Sep 17 00:00:00 2001
From: ASuciuX <151519329+ASuciuX@users.noreply.github.com>
Date: Wed, 7 Aug 2024 21:31:13 +0300
Subject: [PATCH 137/910] Update docs/mutation-testing.md

Co-authored-by: wileyj <2847772+wileyj@users.noreply.github.com>
---
 docs/mutation-testing.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md
index 7c635f3915f..c94b5ca36cf 100644
--- a/docs/mutation-testing.md
+++ b/docs/mutation-testing.md
@@ -1,7 +1,7 @@
 # Mutation Testing
 
 This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation of timing out after 6 hours.
-[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation testing locally, running the mutants created by the changes between the current branch and develop. It automatically does all the steps explained below.
+[Here is the script](../contrib/tools/local-mutation-testing.sh) to run the tests locally by running the mutants created by the changes between `HEAD` and develop. It automatically does all the steps explained below.
 
 From the root level of the stacks-core repository run
 ```sh

From e21a0f1940f9aef73783e387448237e9e847d097 Mon Sep 17 00:00:00 2001
From: ASuciuX
Date: Wed, 7 Aug 2024 22:08:34 +0300
Subject: [PATCH 138/910] changed script to be executable and added capitalization to the numbered lists

---
 contrib/tools/local-mutation-testing.sh |  0
 docs/mutation-testing.md                | 19 ++++++++++---------
 2 files changed, 10 insertions(+), 9 deletions(-)
 mode change 100644 => 100755 contrib/tools/local-mutation-testing.sh

diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh
old mode 100644
new mode 100755
diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md
index 7c635f3915f..383b3ff8eb3 100644
--- a/docs/mutation-testing.md
+++ b/docs/mutation-testing.md
@@ -1,7 +1,8 @@
 # Mutation Testing
 
 This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation of timing out after 6 hours.
-[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation testing locally, running the mutants created by the changes between the current branch and develop. It automatically does all the steps explained below.
+[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation testing locally, running the mutants created by the changes between the current branch and develop.
+It automatically does all the steps explained below.
 
 From the root level of the stacks-core repository run
 ```sh
@@ -17,12 +18,12 @@ cargo install --version 24.7.1 cargo-mutants --locked
 
 ## Steps
-1. be on source branch you would use for the PR.
-2. create diff file comparing this branch with the `develop` branch
+1. Be on the source branch you would use for the PR.
+2. Create a diff file comparing this branch with the `develop` branch
    ```sh
    git diff origin/develop..HEAD > git.diff
    ```
-3. clean up the diff file and create auxiliary files
+3.
Clean up the diff file and create auxiliary files ```sh awk ' /^diff --git/ { @@ -62,8 +63,8 @@ cargo install --version 24.7.1 cargo-mutants --locked esac done < all_mutants.txt ``` -4. based on the package required to run the mutants for - a. stackslib package +4. Based on the package required to run the mutants for + a. Stackslib package ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) @@ -74,7 +75,7 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` - b. stacks-node (testnet) package + b. Stacks-node (testnet) package ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) @@ -85,7 +86,7 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` - c. stacks-signer + c. Stacks-signer ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) @@ -96,7 +97,7 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` - d. all other packages combined + d. All other packages combined ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/small-packages.txt" | paste -sd'|' -) From f1dcde6939ab04f8683b597c5fa278548d08c26e Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 7 Aug 2024 13:22:03 -0700 Subject: [PATCH 139/910] removing chain directive from sample configs --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 1 - testnet/stacks-node/conf/mainnet-miner-conf.toml | 1 - testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 1 - testnet/stacks-node/conf/testnet-follower-conf.toml | 1 - testnet/stacks-node/conf/testnet-miner-conf.toml | 1 - 5 files changed, 5 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 143c604a123..c3094633895 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -6,7 +6,6 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 prometheus_bind = "0.0.0.0:9153" [burnchain] -chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index fc526f08781..4d258b33f0f 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -10,7 +10,6 @@ miner = true mine_microblocks = false # Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) [burnchain] -chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" username = "" diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 9c3f609c7c2..8e966a8a0fc 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -8,6 +8,5 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 prometheus_bind = "0.0.0.0:9153" [burnchain] -chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index f5fb2c04b00..de0973f2c70 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml 
+++ b/testnet/stacks-node/conf/testnet-follower-conf.toml
@@ -6,7 +6,6 @@ bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a9568
 prometheus_bind = "0.0.0.0:9153"
 
 [burnchain]
-chain = "bitcoin"
 mode = "krypton"
 peer_host = "bitcoin.regtest.hiro.so"
 peer_port = 18444

diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml
index e565fd0ee20..9b0d88ad422 100644
--- a/testnet/stacks-node/conf/testnet-miner-conf.toml
+++ b/testnet/stacks-node/conf/testnet-miner-conf.toml
@@ -6,7 +6,6 @@ bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a9568
 prometheus_bind = "0.0.0.0:9153"
 
 [burnchain]
-chain = "bitcoin"
 mode = "krypton"
 peer_host = "127.0.0.1"
 username = ""

From 1c79018e40f9b093ab918bdbb15cbab7018e3606 Mon Sep 17 00:00:00 2001
From: Hugo C <911307+hugocaillard@users.noreply.github.com>
Date: Thu, 8 Aug 2024 14:52:00 +0200
Subject: [PATCH 140/910] refactor: address review

Co-authored-by: Aaron Blankstein
---
 clarity/src/vm/types/signatures.rs | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs
index 9ba833ba7f5..a214c79b403 100644
--- a/clarity/src/vm/types/signatures.rs
+++ b/clarity/src/vm/types/signatures.rs
@@ -1655,11 +1655,10 @@ impl TypeSignature {
         epoch: StacksEpochId,
         clarity_version: ClarityVersion,
     ) -> Result<BTreeMap<ClarityName, FunctionSignature>> {
-        if type_args.is_empty() {
-            return Err(CheckErrors::InvalidTypeDescription);
-        }
         let mut trait_signature: BTreeMap<ClarityName, FunctionSignature> = BTreeMap::new();
-        let functions_types = type_args[0]
+        let functions_types = type_args
+            .get(0)
+            .ok_or_else(|| CheckErrors::InvalidTypeDescription)?
             .match_list()
             .ok_or(CheckErrors::DefineTraitBadSignature)?;

From 66793b11aea36e0dbe8460b409c18e90aa5659bb Mon Sep 17 00:00:00 2001
From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com>
Date: Thu, 8 Aug 2024 14:52:58 +0200
Subject: [PATCH 141/910] refactor: format

---
 clarity/src/vm/types/signatures.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs
index a214c79b403..280258e0266 100644
--- a/clarity/src/vm/types/signatures.rs
+++ b/clarity/src/vm/types/signatures.rs
@@ -1657,8 +1657,8 @@ impl TypeSignature {
     ) -> Result<BTreeMap<ClarityName, FunctionSignature>> {
         let mut trait_signature: BTreeMap<ClarityName, FunctionSignature> = BTreeMap::new();
         let functions_types = type_args
-        .get(0)
-        .ok_or_else(|| CheckErrors::InvalidTypeDescription)?
+            .get(0)
+            .ok_or_else(|| CheckErrors::InvalidTypeDescription)?
             .match_list()
             .ok_or(CheckErrors::DefineTraitBadSignature)?;

From 09a8427d96d32235cccb524f85e2bdfd522a4840 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:46:18 -0400
Subject: [PATCH 142/910] chore: fault-injection to disable block broadcast

---
 stacks-signer/src/config.rs    | 2 ++
 stacks-signer/src/runloop.rs   | 1 +
 stacks-signer/src/v0/signer.rs | 8 ++++++++
 3 files changed, 11 insertions(+)

diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs
index 66cf5a5f7d5..68f6141ee82 100644
--- a/stacks-signer/src/config.rs
+++ b/stacks-signer/src/config.rs
@@ -157,6 +157,8 @@ pub struct SignerConfig {
     pub first_proposal_burn_block_timing: Duration,
     /// How much time to wait for a miner to propose a block following a sortition
     pub block_proposal_timeout: Duration,
+    /// Broadcast a block to the node if we gather enough signatures from other signers
+    pub broadcast_signed_blocks: bool,
 }
 
 /// The parsed configuration for the signer
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index 284ea6ce19a..b3d467fb000 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -271,6 +271,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLo
             max_tx_fee_ustx: self.config.max_tx_fee_ustx,
             db_path: self.config.db_path.clone(),
             block_proposal_timeout: self.config.block_proposal_timeout,
+            broadcast_signed_blocks: true,
         })
     }
 
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index df42315635b..4aac1b11df2 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -63,6 +63,8 @@ pub struct Signer {
     pub signer_db: SignerDb,
     /// Configuration for proposal evaluation
     pub proposal_config: ProposalEvalConfig,
+    /// Whether or not to broadcast signed blocks if we gather all signatures
+    pub broadcast_signed_blocks: bool,
 }
 
 impl std::fmt::Display for Signer {
@@ -276,6 +278,7 @@ impl From<SignerConfig> for Signer {
             reward_cycle: signer_config.reward_cycle,
             signer_db,
             proposal_config,
+            broadcast_signed_blocks: signer_config.broadcast_signed_blocks,
         }
     }
 }
@@ -580,6 +583,11 @@ impl Signer {
         block_hash: &Sha512Trunc256Sum,
         signature: &MessageSignature,
     ) {
+        if !self.broadcast_signed_blocks {
+            debug!("{self}: Will ignore block-accept signature, since configured not to broadcast signed blocks");
+            return;
+        }
+
         debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})");
 
         // authenticate the signature -- it must be signed by one of the stacking set

From 2e9541e89fac7569172ef9ed12b5e5c0cbe0186d Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:46:39 -0400
Subject: [PATCH 143/910] chore: copyright statements and docstrings for event dispatcher

---
 testnet/stacks-node/src/config.rs           | 16 +++++++++
 testnet/stacks-node/src/event_dispatcher.rs | 36 +++++++++++++++++++++
 2 files changed, 52 insertions(+)

diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index 0f13943b1c4..f5c7c7bfbd5 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -1,3 +1,19 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 use std::collections::{HashMap, HashSet};
 use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs};
 use std::path::PathBuf;
diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs
index f0ae639e276..0a60bd3593a 100644
--- a/testnet/stacks-node/src/event_dispatcher.rs
+++ b/testnet/stacks-node/src/event_dispatcher.rs
@@ -1,3 +1,19 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 use std::collections::hash_map::Entry;
 use std::collections::{HashMap, HashSet};
 use std::sync::mpsc::{channel, Receiver, Sender};
@@ -673,19 +689,39 @@ impl EventObserver {
     }
 }
 
+/// Events received from block-processing.
+/// Stacks events are structured as JSON, and are grouped by topic.  An event observer can
+/// subscribe to one or more specific event streams, or the "any" stream to receive all of them.
 #[derive(Clone)]
 pub struct EventDispatcher {
+    /// List of configured event observers to which events will be posted.
+    /// The fields below this contain indexes into this list.
     registered_observers: Vec<EventObserver>,
+    /// Smart contract-specific events, keyed by (contract-id, event-name). Values are indexes into `registered_observers`.
     contract_events_observers_lookup: HashMap<(QualifiedContractIdentifier, String), HashSet<u16>>,
+    /// Asset event observers, keyed by fully-qualified asset identifier. Values are indexes into
+    /// `registered_observers`.
     assets_observers_lookup: HashMap<AssetIdentifier, HashSet<u16>>,
+    /// Index into `registered_observers` that will receive burn block events
     burn_block_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive mempool events
     mempool_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive microblock events
     microblock_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive STX events
     stx_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive all events
     any_event_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive block miner events (Stacks 2.5 and
+    /// lower)
     miner_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive microblock miner events (Stacks 2.5 and
+    /// lower)
     mined_microblocks_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive StackerDB events
     stackerdb_observers_lookup: HashSet<u16>,
+    /// Index into `registered_observers` that will receive block proposal events (Nakamoto and
+    /// later)
     block_proposal_observers_lookup: HashSet<u16>,
 }

From ca66c0609162dc89d636b8332bd386026a9b292d Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:46:56 -0400
Subject: [PATCH 144/910] feat: allow the initiative-raiser to list their caller ID

---
 testnet/stacks-node/src/globals.rs | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs
index 675a7474809..b1ddf2e82b1 100644
--- a/testnet/stacks-node/src/globals.rs
+++ b/testnet/stacks-node/src/globals.rs
@@ -71,7 +71,7 @@ pub struct Globals {
     previous_best_tips: Arc<Mutex<BTreeMap<u64, BlockSnapshot>>>,
     /// Initiative flag.
     /// Raised when the main loop should wake up and do something.
-    initiative: Arc<Mutex<bool>>,
+    initiative: Arc<Mutex<Option<String>>>,
 }
 
 // Need to manually implement Clone, because [derive(Clone)] requires
@@ -123,7 +123,7 @@ impl Globals {
             start_mining_height: Arc::new(Mutex::new(start_mining_height)),
             estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())),
             previous_best_tips: Arc::new(Mutex::new(BTreeMap::new())),
-            initiative: Arc::new(Mutex::new(false)),
+            initiative: Arc::new(Mutex::new(None)),
         }
     }
 
@@ -435,10 +435,10 @@ impl Globals {
     }
 
     /// Raise the initiative flag
-    pub fn raise_initiative(&self) {
+    pub fn raise_initiative(&self, raiser: String) {
         match self.initiative.lock() {
             Ok(mut initiative) => {
-                *initiative = true;
+                *initiative = Some(raiser);
             }
             Err(_e) => {
                 error!("FATAL: failed to lock initiative");
@@ -448,11 +448,10 @@ impl Globals {
     }
 
     /// Clear the initiative flag and return its value
-    pub fn take_initiative(&self) -> bool {
+    pub fn take_initiative(&self) -> Option<String> {
         match self.initiative.lock() {
             Ok(mut initiative) => {
-                let ret = *initiative;
-                *initiative = false;
+                let ret = (*initiative).take();
                 ret
             }
             Err(_e) => {

From daf0257e0de66fd9eaa74c08858c29265ec1dc3d Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:47:26 -0400
Subject: [PATCH 145/910] fix: don't raise initiative needlessly

---
 testnet/stacks-node/src/nakamoto_node.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs
index cde9c1da1f5..6e57b8023e8 100644
--- a/testnet/stacks-node/src/nakamoto_node.rs
+++ b/testnet/stacks-node/src/nakamoto_node.rs
@@ -275,7 +275,6 @@ impl StacksNode {
             ))
             .map_err(|_| Error::ChannelClosed)?;
 
-        self.globals.raise_initiative();
         Ok(())
     }
 
From fbf83379fb5a970b74942a0c0ff56cc5852ed16a Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:47:46 -0400
Subject: [PATCH 146/910] chore: refactor fault-injection code into its own
 methods; add fault-injection for block-processing after block-broadcast

---
 .../stacks-node/src/nakamoto_node/miner.rs | 104 +++++++++++++-----
 1 file changed, 77 insertions(+), 27 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 7e4e3408fa2..cd9d821d6b1 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -63,6 +63,8 @@ use crate::run_loop::RegisteredKey;
 #[cfg(test)]
 pub static TEST_BROADCAST_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
 #[cfg(test)]
+pub static TEST_BLOCK_ANNOUNCE_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
+#[cfg(test)]
 pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
 
 /// If the miner was interrupted while mining a block, how long should the
@@ -181,6 +183,67 @@ impl BlockMinerThread {
         }
     }
 
+    #[cfg(test)]
+    fn fault_injection_block_broadcast_stall(new_block: &NakamotoBlock) {
+        if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) {
+            // Do an extra check just so we don't log EVERY time.
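+            // (the stall flag is re-read inside the loop below; this outer check
+            // only gates the one-time log messages so they are not spammed)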
+ warn!("Fault injection: Broadcasting is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Fault injection: Broadcasting is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + } + } + + #[cfg(not(test))] + fn fault_injection_block_broadcast_stall(_ignored: &NakamotoBlock) {} + + #[cfg(test)] + fn fault_injection_block_announce_stall(new_block: &NakamotoBlock) { + if *TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Fault injection: Block announcement is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while *TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Fault injection: Block announcement is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + } + } + + #[cfg(not(test))] + fn fault_injection_block_announce_stall(_ignored: &NakamotoBlock) {} + + #[cfg(test)] + fn fault_injection_skip_block_broadcast() -> bool { + if *TEST_SKIP_P2P_BROADCAST.lock().unwrap() == Some(true) { + return true; + } + false + } + + #[cfg(not(test))] + fn fault_injection_skip_block_broadcast() -> bool { + false + } + /// Stop a miner tenure by blocking the miner and then joining the tenure thread pub fn stop_miner( globals: &Globals, @@ -279,27 +342,7 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - #[cfg(test)] - { - if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. 
- warn!("Broadcasting is stalled due to testing directive."; - "stacks_block_id" => %new_block.block_id(), - "stacks_block_hash" => %new_block.header.block_hash(), - "height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash - ); - while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Broadcasting is no longer stalled due to testing directive."; - "block_id" => %new_block.block_id(), - "height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash - ); - } - } - + Self::fault_injection_block_broadcast_stall(&new_block); let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, @@ -338,14 +381,20 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - self.globals.coord().announce_new_stacks_block(); } + // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); if self.mined_blocks.is_empty() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } + + // wake up chains coordinator + Self::fault_injection_block_announce_stall(&new_block); + self.globals.coord().announce_new_stacks_block(); + + // store mined block self.mined_blocks.push(new_block); } @@ -638,11 +687,12 @@ impl BlockMinerThread { block: &NakamotoBlock, reward_set: RewardSet, ) -> Result<(), ChainstateError> { - #[cfg(test)] - { - if *TEST_SKIP_P2P_BROADCAST.lock().unwrap() == Some(true) { - return Ok(()); - } + if Self::fault_injection_skip_block_broadcast() { + warn!( + "Fault injection: Skipping block broadcast for {}", + block.block_id() + ); + return Ok(()); } let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; From 497c773be31f7ad0d71fd382f82e49885f028810 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:48:18 -0400 Subject: [PATCH 147/910] chore: API sync --- testnet/stacks-node/src/nakamoto_node/peer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 7483ce5115f..b825cfe46f3 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -352,7 +352,9 @@ impl PeerThread { "P2P: Dispatched result to Relayer! {} results remaining", self.results_with_data.len() ); - self.globals.raise_initiative(); + self.globals.raise_initiative( + "PeerThread::run_one_pass() with data-bearing network result".to_string(), + ); } } From ee0f56bb019861bf536bf0f7a274ba0e113145aa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:49:38 -0400 Subject: [PATCH 148/910] chore: some refactoring to separate miner directive choice from sortition handling (so the former can be tested in a unit test) --- .../stacks-node/src/nakamoto_node/relayer.rs | 184 ++++++++++++------ 1 file changed, 125 insertions(+), 59 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 3ee862364c5..888f0548bb6 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -375,39 +375,16 @@ impl RelayerThread { } } - /// Given the pointer to a recently processed sortition, see if we won the sortition. 
-    ///
-    /// Returns a directive to the relayer thread to either start, stop, or continue a tenure.
-    pub fn process_sortition(
-        &mut self,
-        consensus_hash: ConsensusHash,
-        burn_hash: BurnchainHeaderHash,
+    /// Choose a miner directive based on the outcome of a sortition
+    pub(crate) fn choose_miner_directive(
+        config: &Config,
+        sortdb: &SortitionDB,
+        sn: BlockSnapshot,
+        won_sortition: bool,
         committed_index_hash: StacksBlockId,
-    ) -> Result<MinerDirective, NakamotoNodeError> {
-        let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash)
-            .expect("FATAL: failed to query sortition DB")
-            .expect("FATAL: unknown consensus hash");
-
-        self.globals.set_last_sortition(sn.clone());
-
-        let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid);
-
-        info!(
-            "Relayer: Process sortition";
-            "sortition_ch" => %consensus_hash,
-            "burn_hash" => %burn_hash,
-            "burn_height" => sn.block_height,
-            "winning_txid" => %sn.winning_block_txid,
-            "committed_parent" => %committed_index_hash,
-            "won_sortition?" => won_sortition,
-        );
-
-        if won_sortition {
-            increment_stx_blocks_mined_counter();
-        }
-
+    ) -> MinerDirective {
         let directive = if sn.sortition {
-            if won_sortition || self.config.get_node_config(false).mock_mining {
+            if won_sortition || config.get_node_config(false).mock_mining {
                 MinerDirective::BeginTenure {
                     parent_tenure_start: committed_index_hash,
                     burnchain_tip: sn,
@@ -416,17 +393,16 @@ impl RelayerThread {
                 MinerDirective::StopTenure
             }
         } else {
-            let ih = self.sortdb.index_handle(&sn.sortition_id);
+            let ih = sortdb.index_handle(&sn.sortition_id);
             let parent_sn = ih.get_last_snapshot_with_sortition(sn.block_height).expect(
                 "FATAL: failed to query sortition DB for last snapshot with non-empty tenure",
             );
 
-            let parent_epoch =
-                SortitionDB::get_stacks_epoch(self.sortdb.conn(), parent_sn.block_height)
-                    .expect("FATAL: failed to query sortiiton DB for epoch")
-                    .expect("FATAL: no epoch defined for existing sortition");
+            let parent_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)
+                .expect("FATAL: failed to query sortition DB for epoch")
+                .expect("FATAL: no epoch defined for existing sortition");
 
-            let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), sn.block_height)
+            let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height)
                 .expect("FATAL: failed to query sortition DB for epoch")
                 .expect("FATAL: no epoch defined for existing sortition");
 
@@ -438,11 +414,65 @@ impl RelayerThread {
             }
         } else {
             MinerDirective::ContinueTenure {
-                new_burn_view: consensus_hash,
+                new_burn_view: sn.consensus_hash,
             }
         };
-        Ok(directive)
+        directive
+    }
+
+    /// Given the pointer to a recently processed sortition, see if we won the sortition, and
+    /// determine what miner action (if any) to take.
+    ///
+    /// Returns a directive to the relayer thread to either start, stop, or continue a tenure, if
+    /// this sortition matches the sortition tip.
+    ///
+    /// Otherwise, returns None, meaning no action will be taken.
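+    ///
+    /// Even when the sortition is stale, the last-commit bookkeeping is still
+    /// cleared and initiative is re-raised, so a fresh block-commit can be
+    /// issued for the canonical tip.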
+    fn process_sortition(
+        &mut self,
+        consensus_hash: ConsensusHash,
+        burn_hash: BurnchainHeaderHash,
+        committed_index_hash: StacksBlockId,
+    ) -> Result<Option<MinerDirective>, NakamotoNodeError> {
+        let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash)
+            .expect("FATAL: failed to query sortition DB")
+            .expect("FATAL: unknown consensus hash");
+
+        // always clear this even if this isn't the latest sortition
+        let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid);
+        if won_sortition {
+            increment_stx_blocks_mined_counter();
+        }
+        self.globals.set_last_sortition(sn.clone());
+
+        info!(
+            "Relayer: Process sortition";
+            "sortition_ch" => %consensus_hash,
+            "burn_hash" => %burn_hash,
+            "burn_height" => sn.block_height,
+            "winning_txid" => %sn.winning_block_txid,
+            "committed_parent" => %committed_index_hash,
+            "won_sortition?" => won_sortition,
+        );
+
+        let cur_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())
+            .expect("FATAL: failed to query sortition DB");
+
+        if cur_sn.consensus_hash != consensus_hash {
+            info!("Relayer: Current sortition {} is ahead of processed sortition {}; taking no action", &cur_sn.consensus_hash, consensus_hash);
+            self.globals
+                .raise_initiative("process_sortition".to_string());
+            return Ok(None);
+        }
+
+        let directive = Self::choose_miner_directive(
+            &self.config,
+            &self.sortdb,
+            sn,
+            won_sortition,
+            committed_index_hash,
+        );
+        Ok(Some(directive))
     }
 
     /// Constructs and returns a LeaderKeyRegisterOp out of the provided params
@@ -899,11 +929,17 @@ impl RelayerThread {
         burn_hash: BurnchainHeaderHash,
         committed_index_hash: StacksBlockId,
     ) -> bool {
-        let Ok(miner_instruction) =
-            self.process_sortition(consensus_hash, burn_hash, committed_index_hash)
-        else {
-            return false;
-        };
+        let miner_instruction =
+            match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) {
+                Ok(Some(miner_instruction)) => miner_instruction,
+                Ok(None) => {
+                    return true;
+                }
+                Err(e) => {
+                    warn!("Relayer: process_sortition returned {:?}", &e);
+                    return false;
+                }
+            };
 
         match miner_instruction {
             MinerDirective::BeginTenure {
@@ -946,6 +982,22 @@ impl RelayerThread {
         true
     }
 
+    #[cfg(test)]
+    fn fault_injection_skip_block_commit(&self) -> bool {
+        self.globals
+            .counters
+            .naka_skip_commit_op
+            .0
+            .lock()
+            .unwrap()
+            .unwrap_or(false)
+    }
+
+    #[cfg(not(test))]
+    fn fault_injection_skip_block_commit(&self) -> bool {
+        false
+    }
+
     /// Generate and submit the next block-commit, and record it locally
     fn issue_block_commit(
         &mut self,
@@ -953,20 +1005,25 @@ impl RelayerThread {
         tip_block_bh: BlockHeaderHash,
     ) -> Result<(), NakamotoNodeError> {
         let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?;
-        #[cfg(test)]
-        {
-            if self
-                .globals
-                .counters
-                .naka_skip_commit_op
-                .0
-                .lock()
-                .unwrap()
-                .unwrap_or(false)
-            {
-                warn!("Relayer: not submitting block-commit to bitcoin network due to test directive.");
-                return Ok(());
-            }
+        if self.fault_injection_skip_block_commit() {
+            warn!("Relayer: not submitting block-commit to bitcoin network due to test directive.");
+            return Ok(());
+        }
+
+        // last chance -- is this still the stacks tip?
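+        // Re-check the canonical Stacks tip against the tip this commit was
+        // built from; if it moved while the commit was being constructed, the
+        // commit is stale and must not be broadcast.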
+ let (cur_stacks_tip_ch, cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap_or_else( + |e| { + panic!("Failed to load canonical stacks tip: {:?}", &e); + }, + ); + + if cur_stacks_tip_ch != tip_block_ch || cur_stacks_tip_bh != tip_block_bh { + info!( + "Stacks tip changed prior to commit: {}/{} != {}/{}", + &cur_stacks_tip_ch, &cur_stacks_tip_bh, &tip_block_ch, &tip_block_bh + ); + return Err(NakamotoNodeError::StacksTipChanged); } // sign and broadcast @@ -990,6 +1047,7 @@ impl RelayerThread { "Relayer: Submitted block-commit"; "tip_consensus_hash" => %tip_block_ch, "tip_block_hash" => %tip_block_bh, + "tip_block_id" => %StacksBlockId::new(&tip_block_ch, &tip_block_bh), "txid" => %txid, ); @@ -1005,6 +1063,7 @@ impl RelayerThread { /// Determine what the relayer should do to advance the chain. /// * If this isn't a miner, then it's always nothing. /// * Otherwise, if we haven't done so already, go register a VRF public key + /// * If the stacks chain tip or burnchain tip has changed, then issue a block-commit fn initiative(&mut self) -> Option { if !self.is_miner { return None; @@ -1066,6 +1125,8 @@ impl RelayerThread { debug!("Relayer: initiative to commit"; "sortititon tip" => %sort_tip.consensus_hash, "stacks tip" => %stacks_tip, + "stacks_tip_ch" => %stacks_tip_ch, + "stacks_tip_bh" => %stacks_tip_bh, "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_tip().consensus_hash.to_string()).unwrap_or("(not set)".to_string()), "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()), "burnchain view changed?" => %burnchain_changed, @@ -1095,7 +1156,7 @@ impl RelayerThread { while self.globals.keep_running() { let raised_initiative = self.globals.take_initiative(); let timed_out = Instant::now() >= self.next_initiative; - let directive = if raised_initiative || timed_out { + let directive = if raised_initiative.is_some() || timed_out { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1119,6 +1180,11 @@ impl RelayerThread { } }; + debug!("Relayer: main loop directive"; + "directive" => %directive, + "raised_initiative" => %raised_initiative.unwrap_or("relay_rcv".to_string()), + "timed_out" => %timed_out); + if !self.handle_directive(directive) { break; } From 45333547bc9d6a840f1ac0252396a4fd055d0291 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:50:48 -0400 Subject: [PATCH 149/910] feat: get event dispatcher --- testnet/stacks-node/src/run_loop/boot_nakamoto.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 97bf8dd4e00..0d509b297ca 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -26,6 +26,7 @@ use stacks::core::StacksEpochExtension; use stacks::net::p2p::PeerNetwork; use stacks_common::types::{StacksEpoch, StacksEpochId}; +use crate::event_dispatcher::EventDispatcher; use crate::globals::NeonGlobals; use crate::neon::Counters; use crate::neon_node::LeaderKeyRegistrationState; @@ -124,6 +125,14 @@ impl BootRunLoop { } } + /// Get the event dispatcher + pub fn get_event_dispatcher(&self) -> EventDispatcher { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_event_dispatcher(), + InnerLoops::Epoch3(x) => x.get_event_dispatcher(), + } + } + 
    /// The main entry point for the run loop. This starts either a 2.x-neon or 3.x-nakamoto
     /// node depending on the current burnchain height.
     pub fn start(&mut self, burnchain_opt: Option<Burnchain>, mine_start: u64) {

From aa9144622d3efaccf1659e98f5c314d6d38930b5 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:51:05 -0400
Subject: [PATCH 150/910] chore: API sync

---
 testnet/stacks-node/src/run_loop/nakamoto.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs
index 3ecd4f1e7de..44a6c0fba90 100644
--- a/testnet/stacks-node/src/run_loop/nakamoto.rs
+++ b/testnet/stacks-node/src/run_loop/nakamoto.rs
@@ -711,7 +711,7 @@ impl RunLoop {
                         sortition_db_height
                     );
                     last_tenure_sortition_height = sortition_db_height;
-                    globals.raise_initiative();
+                    globals.raise_initiative("runloop-synced".to_string());
                 }
             }
         }

From f845f8b7bf5572303ccc349ac63647e5e575b974 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 8 Aug 2024 10:51:14 -0400
Subject: [PATCH 151/910] fix: fix forked-tenure-is-ignored test

---
 .../src/tests/nakamoto_integrations.rs | 99 ++++++++++++++++---
 1 file changed, 84 insertions(+), 15 deletions(-)

diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index b8cb47d579b..9534daf1580 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -94,7 +94,7 @@ use wsts::net::Message;
 
 use super::bitcoin_regtest::BitcoinCoreController;
 use crate::config::{EventKeyType, EventObserverConfig, InitialBalance};
-use crate::nakamoto_node::miner::TEST_BROADCAST_STALL;
+use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL};
 use crate::neon::{Counters, RunLoopCounter};
 use crate::operations::BurnchainOpSigner;
 use crate::run_loop::boot_nakamoto;
@@ -3774,16 +3774,26 @@ fn forked_tenure_is_ignored() {
     info!("Nakamoto miner started...");
     blind_signer(&naka_conf, &signers, proposals_submitted);
 
-    info!("Starting tenure A.");
+    info!("Starting Tenure A.");
     wait_for_first_naka_block_commit(60, &commits_submitted);
 
     // In the next block, the miner should win the tenure and submit a stacks block
     let commits_before = commits_submitted.load(Ordering::SeqCst);
     let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let blocks_processed_before = coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .get_stacks_blocks_processed();
     next_block_and(&mut btc_regtest_controller, 60, || {
         let commits_count = commits_submitted.load(Ordering::SeqCst);
         let blocks_count = mined_blocks.load(Ordering::SeqCst);
-        Ok(commits_count > commits_before && blocks_count > blocks_before)
+        let blocks_processed = coord_channel
+            .lock()
+            .expect("Mutex poisoned")
+            .get_stacks_blocks_processed();
+        Ok(commits_count > commits_before + 1
+            && blocks_count > blocks_before
+            && blocks_processed > blocks_processed_before)
     })
     .unwrap();
 
     let block_tenure_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
         .unwrap()
         .unwrap();
 
-    // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted
+    info!("Tenure A block: {}", &block_tenure_a.index_block_hash());
+
+    // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted.
+    // Stall the miner thread; only wait until the number of submitted commits increases.
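+    // TEST_BLOCK_ANNOUNCE_STALL (set below) additionally keeps the chains
+    // coordinator from being woken up, so the mined block will be stored but
+    // not processed until the stall is lifted.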
     TEST_BROADCAST_STALL.lock().unwrap().replace(true);
+    TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(true);
     let blocks_before = mined_blocks.load(Ordering::SeqCst);
     let commits_before = commits_submitted.load(Ordering::SeqCst);
-    info!("Starting tenure B.");
+
+    info!("Starting Tenure B.");
+
     next_block_and(&mut btc_regtest_controller, 60, || {
         let commits_count = commits_submitted.load(Ordering::SeqCst);
         Ok(commits_count > commits_before)
     })
     .unwrap();
+
     signer_vote_if_needed(
         &btc_regtest_controller,
         &naka_conf,
@@ -3808,13 +3825,15 @@ fn forked_tenure_is_ignored() {
         &signers,
     );
 
-    info!("Commit op is submitted; unpause tenure B's block");
+    info!("Commit op is submitted; unpause Tenure B's block");
 
-    // Unpause the broadcast of Tenure B's block, do not submit commits.
+    // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to
+    // be processed
     test_skip_commit_op.0.lock().unwrap().replace(true);
     TEST_BROADCAST_STALL.lock().unwrap().replace(false);
 
-    // Wait for a stacks block to be broadcasted
+    // Wait for a stacks block to be broadcasted.
+    // However, it will not be processed.
     let start_time = Instant::now();
     while mined_blocks.load(Ordering::SeqCst) <= blocks_before {
         assert!(
@@ -3824,31 +3843,65 @@ fn forked_tenure_is_ignored() {
         thread::sleep(Duration::from_secs(1));
     }
 
-    info!("Tenure B broadcasted a block. Issue the next bitcon block and unstall block commits.");
-    let block_tenure_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
+    sleep_ms(1000);
+
+    info!("Tenure B broadcasted but did not process a block. Issue the next bitcoin block and unstall block commits.");
+
+    // the block will be stored, not processed, so load it out of staging
+    let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+        .expect("Failed to get sortition tip");
+
+    let block_tenure_b = chainstate
+        .nakamoto_blocks_db()
+        .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash)
         .unwrap()
+        .get(0)
+        .cloned()
         .unwrap();
+
     let blocks = test_observer::get_mined_nakamoto_blocks();
     let block_b = blocks.last().unwrap();
+    info!("Tenure B tip block: {}", &block_tenure_b.block_id());
+    info!("Tenure B last block: {}", &block_b.block_id);
 
-    info!("Starting tenure C.");
-    // Submit a block commit op for tenure C
+    // Block B was built atop block A
+    assert_eq!(
+        block_tenure_b.header.chain_length,
+        block_tenure_a.stacks_block_height + 1
+    );
+
+    info!("Starting Tenure C.");
+
+    // Submit a block commit op for tenure C.
+    // It should also build on block A, since the node has paused processing of block B.
let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); next_block_and(&mut btc_regtest_controller, 60, || { test_skip_commit_op.0.lock().unwrap().replace(false); + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); Ok(commits_count > commits_before && blocks_count > blocks_before) }) .unwrap(); + // allow blocks B and C to be processed + sleep_ms(1000); + info!("Tenure C produced a block!"); let block_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); let blocks = test_observer::get_mined_nakamoto_blocks(); let block_c = blocks.last().unwrap(); + info!("Tenure C tip block: {}", &block_tenure_c.index_block_hash()); + info!("Tenure C last block: {}", &block_c.block_id); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted (processed), so it should be built off of Block A + assert_eq!( + block_tenure_c.stacks_block_height, + block_tenure_a.stacks_block_height + 1 + ); // Now let's produce a second block for tenure C and ensure it builds off of block C. let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -3869,6 +3922,9 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } + // give C's second block a moment to process + sleep_ms(1000); + info!("Tenure C produced a second block!"); let block_2_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -3877,6 +3933,12 @@ fn forked_tenure_is_ignored() { let blocks = test_observer::get_mined_nakamoto_blocks(); let block_2_c = blocks.last().unwrap(); + info!( + "Tenure C tip block: {}", + &block_2_tenure_c.index_block_hash() + ); + info!("Tenure C last block: {}", &block_2_c.block_id); + info!("Starting tenure D."); // Submit a block commit op for tenure D and mine a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); @@ -3888,18 +3950,25 @@ fn forked_tenure_is_ignored() { }) .unwrap(); + // give tenure D's block a moment to process + sleep_ms(1000); + let block_tenure_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); let blocks = test_observer::get_mined_nakamoto_blocks(); let block_d = blocks.last().unwrap(); - assert_ne!(block_tenure_b, block_tenure_a); - assert_ne!(block_tenure_b, block_tenure_c); + + info!("Tenure D tip block: {}", block_tenure_d.index_block_hash()); + info!("Tenure D last block: {}", block_d.block_id); + + assert_ne!(block_tenure_b.block_id(), block_tenure_a.index_block_hash()); + assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash()); assert_ne!(block_tenure_c, block_tenure_a); // Block B was built atop block A assert_eq!( - block_tenure_b.stacks_block_height, + block_tenure_b.header.chain_length, block_tenure_a.stacks_block_height + 1 ); assert_eq!( From 7b483f5e60cb1b36ccc0e07010ca4b339d4ef32b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 11:06:24 -0400 Subject: [PATCH 152/910] chore: cargo fmt --- .../stacks-node/src/nakamoto_node/miner.rs | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 0525701bfe0..43a5c510408 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -349,22 +349,22 @@ impl 
BlockMinerThread { &mut attempts, ) { Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"); - return Err(e); - } - _ => { - error!( - "Error while gathering signatures: {e:?}. Will try mining again." - ); - continue; + Err(e) => { + match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"); + return Err(e); + } + _ => { + error!("Error while gathering signatures: {e:?}. Will try mining again."); + continue; + } } - }, + } }; new_block.header.signer_signature = signer_signature; From 1ad9e5e383d40f8a8461d7ecca6f8de24ccb9bf8 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 18:53:53 +0300 Subject: [PATCH 153/910] add `||true` so it runs all packages, not just the first one found --- contrib/tools/local-mutation-testing.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh index a3d563682cd..80f3e16e3fa 100755 --- a/contrib/tools/local-mutation-testing.sh +++ b/contrib/tools/local-mutation-testing.sh @@ -56,7 +56,7 @@ run_mutants() { if [ ! -f "$mutant_file" ]; then echo "No mutants found for $package" - return + return 0 fi local regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "$mutant_file" | paste -sd'|' -) @@ -70,15 +70,17 @@ run_mutants() { --output "$output_dir" \ --test-tool=nextest \ --package "$package" \ - -- --all-targets --test-threads 1 + -- --all-targets --test-threads 1 || true echo $? 
> "${output_dir}/exit_code.txt" else echo "Skipping $package, only $mutant_count mutants (threshold: $threshold)" fi + + return 0 } # Run mutants for each wanted package -run_mutants "stacks-signer" 500 "./stacks-signer_mutants" -run_mutants "stacks-node" 540 "./stacks-node_mutants" -run_mutants "stackslib" 72 "./stackslib_mutants" +run_mutants "stacks-signer" 500 "./stacks-signer_mutants" || true +run_mutants "stacks-node" 540 "./stacks-node_mutants" || true +run_mutants "stackslib" 72 "./stackslib_mutants" || true From 9d137db19ebb5ffa8107fbace17c0253d2655067 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 18:57:02 +0300 Subject: [PATCH 154/910] rust backtrace and bitcoind_test added --- contrib/tools/local-mutation-testing.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh index 80f3e16e3fa..41592b50302 100755 --- a/contrib/tools/local-mutation-testing.sh +++ b/contrib/tools/local-mutation-testing.sh @@ -64,7 +64,7 @@ run_mutants() { if [ "$mutant_count" -gt "$threshold" ]; then echo "Running mutants for $package ($mutant_count mutants)" - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ --output "$output_dir" \ From a68c4c951a1fbe9df5220838798c9029412ea980 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 12:50:19 -0400 Subject: [PATCH 155/910] fix: include broadcast_signed_blocks --- stacks-signer/src/client/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 71720a015be..ba68976d909 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -567,6 +567,7 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, + broadcast_signed_blocks: true } } From f129669465da76626c57f4045b5d39fa09610b58 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 20:31:53 +0300 Subject: [PATCH 156/910] add example to run one mutant --- docs/mutation-testing.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md index 45110ce7aab..e75ba75bdf7 100644 --- a/docs/mutation-testing.md +++ b/docs/mutation-testing.md @@ -108,3 +108,20 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` + +## How to run one specific mutant to test it + +Example of output which had a missing mutant +```sh +MISSED stacks-signer/src/runloop.rs:424:9: replace >::run_one_pass -> Option> with None in 3.0s build + 9.3s test +``` + +Example of fix for it +```sh +RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants -vV -F "replace process_stackerdb_event" -E ": replace >::run_one_pass -> Option> with None in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +``` + +General command to run +```sh +RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants -vV -F "replace process_stackerdb_event" -E ": replace [modify this] with [modify this] in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +``` From 151878bdfd8cfb3ae50ce98e0432754b8b8a993e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 13:50:56 -0400 Subject: [PATCH 157/910] fix: cargo 
fmt --- stacks-signer/src/client/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index ba68976d909..e62fff0d5ee 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -567,7 +567,7 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, - broadcast_signed_blocks: true + broadcast_signed_blocks: true, } } From 5715f6796ab8457831752af510bf88a076afbd0d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 8 Aug 2024 14:06:26 -0500 Subject: [PATCH 158/910] fix: a handful of issues causing timing-related test failures in CI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * make `not_available_try_again` error in `GetStackers`, and make it transient in the signer binary * make signer binary timeout on retries in client * update signer outer runloop to differentiate between 'not in signer set' and 'have not loaded info yet' * update signer outer runloop to handle errors and non-presence differently in the signer config refresh * update signer outer runloop to perform signer config refresh on the current cycle (if not loaded yet) and on the next cycle (if in prepare phase for the next cycle). This was causing an issue on exactly the first cycle of Nakamoto, because the signer set cannot be loaded for the first cycle until after the prepare phase * update the signer outer runloop to check the node’s block height on event receipt as well * update the testing harnesses to wait and check more appropriately for status checks from signers, etc. --- libsigner/src/runloop.rs | 5 +- stacks-signer/src/client/mod.rs | 3 + stacks-signer/src/client/stacks_client.rs | 38 ++- stacks-signer/src/lib.rs | 2 - stacks-signer/src/runloop.rs | 250 ++++++++++++------ stacks-signer/src/v0/signer.rs | 4 - stacks-signer/src/v1/signer.rs | 22 +- stackslib/src/net/api/getstackers.rs | 51 +++- .../src/nakamoto_node/sign_coordinator.rs | 3 +- .../src/tests/nakamoto_integrations.rs | 139 ++++++---- testnet/stacks-node/src/tests/signer/mod.rs | 127 ++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 75 ++---- 12 files changed, 450 insertions(+), 269 deletions(-) diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index e548db89e3a..bf786888c16 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -246,13 +246,14 @@ impl< let (event_send, event_recv) = channel(); event_receiver.add_consumer(event_send); + let bind_port = bind_addr.port(); event_receiver.bind(bind_addr)?; let stop_signaler = event_receiver.get_stop_signaler()?; let mut ret_stop_signaler = event_receiver.get_stop_signaler()?; // start a thread for the event receiver let event_thread = thread::Builder::new() - .name("event_receiver".to_string()) + .name(format!("event_receiver:{bind_port}")) .stack_size(THREAD_STACK_SIZE) .spawn(move || event_receiver.main_loop()) .map_err(|e| { @@ -262,7 +263,7 @@ impl< // start receiving events and doing stuff with them let runloop_thread = thread::Builder::new() - .name(format!("signer_runloop:{}", bind_addr.port())) + .name(format!("signer_runloop:{bind_port}")) .stack_size(THREAD_STACK_SIZE) .spawn(move || { signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 71720a015be..d2afbeb1751 
100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -34,6 +34,8 @@ use stacks_common::debug; const BACKOFF_INITIAL_INTERVAL: u64 = 128; /// Backoff timer max interval in milliseconds const BACKOFF_MAX_INTERVAL: u64 = 16384; +/// Backoff timer max elapsed seconds +const BACKOFF_MAX_ELAPSED: u64 = 5; #[derive(thiserror::Error, Debug)] /// Client error type @@ -109,6 +111,7 @@ where let backoff_timer = backoff::ExponentialBackoffBuilder::new() .with_initial_interval(Duration::from_millis(BACKOFF_INITIAL_INTERVAL)) .with_max_interval(Duration::from_millis(BACKOFF_MAX_INTERVAL)) + .with_max_elapsed_time(Some(Duration::from_secs(BACKOFF_MAX_ELAPSED))) .build(); backoff::retry_notify(backoff_timer, request_fn, notify).map_err(|_| ClientError::RetryTimeout) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b6337364dbd..e41485ea40a 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -44,6 +44,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; +use serde::Deserialize; use serde_json::json; use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; @@ -80,6 +81,12 @@ pub struct StacksClient { auth_password: String, } +#[derive(Deserialize)] +struct GetStackersErrorResp { + err_type: String, + err_msg: String, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -514,23 +521,38 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result>, ClientError> { - debug!("Getting reward set for reward cycle {reward_cycle}..."); let timer = crate::monitoring::new_rpc_call_timer( &self.reward_set_path(reward_cycle), &self.http_origin, ); let send_request = || { - self.stacks_node_client + let response = self + .stacks_node_client .get(self.reward_set_path(reward_cycle)) .send() - .map_err(backoff::Error::transient) + .map_err(|e| backoff::Error::transient(e.into()))?; + let status = response.status(); + if status.is_success() { + return response + .json() + .map_err(|e| backoff::Error::permanent(e.into())); + } + let error_data = response.json::().map_err(|e| { + warn!("Failed to parse the GetStackers error response: {e}"); + backoff::Error::permanent(e.into()) + })?; + if error_data.err_type == "not_available_try_again" { + return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + } else { + warn!("Got error response ({status}): {}", error_data.err_msg); + return Err(backoff::Error::permanent(ClientError::RequestFailure( + status, + ))); + } }; - let response = retry_with_exponential_backoff(send_request)?; + let stackers_response = + retry_with_exponential_backoff::<_, ClientError, GetStackersResponse>(send_request)?; timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let stackers_response = response.json::()?; Ok(stackers_response.stacker_set.signers) } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 13b14bd3583..2cbdc579c92 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -61,8 +61,6 @@ use crate::runloop::{RunLoop, RunLoopCommand}; pub trait Signer: Debug + Display { /// Create a new `Signer` instance fn new(config: SignerConfig) -> Self; - /// Update the `Signer` instance's with the next 
reward cycle data `SignerConfig` - fn update_signer(&mut self, next_signer_config: &SignerConfig); /// Get the reward cycle of the signer fn reward_cycle(&self) -> u64; /// Process an event diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 3e2ff53438f..cd8bf5972df 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -138,6 +138,58 @@ impl RewardCycleInfo { } } +/// The configuration state for a reward cycle. +/// Allows us to track if we've registered a signer for a cycle or not +/// and to differentiate between being unregistered and simply not configured +pub enum ConfiguredSigner +where + Signer: SignerTrait, + T: StacksMessageCodec + Clone + Send + Debug, +{ + /// Signer is registered for the cycle and ready to process messages + RegisteredSigner(Signer), + /// The signer runloop isn't registered for this cycle (i.e., we've checked the + /// the signer set and we're not in it) + NotRegistered { + /// the cycle number we're not registered for + cycle: u64, + /// Phantom data for the message codec + _phantom_state: std::marker::PhantomData, + }, +} + +impl, T: StacksMessageCodec + Clone + Send + Debug> std::fmt::Display + for ConfiguredSigner +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::RegisteredSigner(s) => write!(f, "{s}"), + Self::NotRegistered { cycle, .. } => write!(f, "NotRegistered in Cycle #{cycle}"), + } + } +} + +impl, T: StacksMessageCodec + Clone + Send + Debug> + ConfiguredSigner +{ + /// Create a `NotRegistered` instance of the enum (so that callers do not need + /// to supply phantom_state data). + pub fn not_registered(cycle: u64) -> Self { + Self::NotRegistered { + cycle, + _phantom_state: std::marker::PhantomData, + } + } + + /// The reward cycle this signer is configured for + pub fn reward_cycle(&self) -> u64 { + match self { + ConfiguredSigner::RegisteredSigner(s) => s.reward_cycle(), + ConfiguredSigner::NotRegistered { cycle, .. } => *cycle, + } + } +} + /// The runloop for the stacks signer pub struct RunLoop where @@ -150,7 +202,7 @@ where pub stacks_client: StacksClient, /// The internal signer for an odd or even reward cycle /// Keyed by reward cycle % 2 - pub stacks_signers: HashMap, + pub stacks_signers: HashMap>, /// The state of the runloop pub state: State, /// The commands received thus far @@ -159,8 +211,6 @@ where pub current_reward_cycle_info: Option, /// Cache sortitin data from `stacks-node` pub sortition_state: Option, - /// Phantom data for the message codec - _phantom_data: std::marker::PhantomData, } impl, T: StacksMessageCodec + Clone + Send + Debug> RunLoop { @@ -175,7 +225,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo commands: VecDeque::new(), current_reward_cycle_info: None, sortition_state: None, - _phantom_data: std::marker::PhantomData, } } /// Get the registered signers for a specific reward cycle @@ -222,25 +271,40 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(signer_slot_ids) } /// Get a signer configuration for a specific reward cycle from the stacks node - fn get_signer_config(&mut self, reward_cycle: u64) -> Option { + fn get_signer_config( + &mut self, + reward_cycle: u64, + ) -> Result, ClientError> { // We can only register for a reward cycle if a reward set exists. 
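// The hunk below widens this lookup's return type from Option<SignerConfig> to
// Result<Option<SignerConfig>, ClientError>: Ok(Some(_)) means this signer is
// registered for the cycle; Ok(None) means the reward set is known but this
// signer is not in it (or the set is not yet calculated); Err(_) means the
// stacks-node could not be queried, in which case refresh_signer_config leaves
// its state unchanged and retries on a later burn block event.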
- let signer_entries = self.get_parsed_reward_set(reward_cycle).ok()??; - let signer_slot_ids = self - .get_parsed_signer_slots(&self.stacks_client, reward_cycle) - .ok()?; + let signer_entries = match self.get_parsed_reward_set(reward_cycle) { + Ok(Some(x)) => x, + Ok(None) => return Ok(None), + Err(e) => { + warn!("Error while fetching reward set {reward_cycle}: {e:?}"); + return Err(e); + } + }; + let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) + { + Ok(x) => x, + Err(e) => { + warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); + return Err(e); + } + }; let current_addr = self.stacks_client.get_signer_address(); let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { warn!( "Signer {current_addr} was not found in stacker db. Must not be registered for this reward cycle {reward_cycle}." ); - return None; + return Ok(None); }; let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else { warn!( "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." ); - return None; + return Ok(None); }; info!( "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." @@ -250,7 +314,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo .get(signer_id) .cloned() .unwrap_or_default(); - Some(SignerConfig { + Ok(Some(SignerConfig { reward_cycle, signer_id: *signer_id, signer_slot_id: *signer_slot_id, @@ -271,32 +335,30 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, - }) + })) } /// Refresh signer configuration for a specific reward cycle fn refresh_signer_config(&mut self, reward_cycle: u64) { let reward_index = reward_cycle % 2; - if let Some(new_signer_config) = self.get_signer_config(reward_cycle) { - let signer_id = new_signer_config.signer_id; - debug!("Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initializing signer state."); - if reward_cycle != 0 { - let prior_reward_cycle = reward_cycle.saturating_sub(1); - let prior_reward_set = prior_reward_cycle % 2; - if let Some(signer) = self.stacks_signers.get_mut(&prior_reward_set) { - if signer.reward_cycle() == prior_reward_cycle { - // The signers have been calculated for the next reward cycle. Update the current one - debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer."); - signer.update_signer(&new_signer_config); - } - } + let new_signer_config = match self.get_signer_config(reward_cycle) { + Ok(Some(new_signer_config)) => { + let signer_id = new_signer_config.signer_id; + let new_signer = Signer::new(new_signer_config); + info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initialized signer state."); + ConfiguredSigner::RegisteredSigner(new_signer) } - let new_signer = Signer::new(new_signer_config); - info!("{new_signer} initialized."); - self.stacks_signers.insert(reward_index, new_signer); - } else { - warn!("Signer is not registered for reward cycle {reward_cycle}. Waiting for confirmed registration..."); - } + Ok(None) => { + warn!("Signer is not registered for reward cycle {reward_cycle}"); + ConfiguredSigner::not_registered(reward_cycle) + } + Err(e) => { + warn!("Failed to get the reward set info: {e}. 
Will try again later."); + return; + } + }; + + self.stacks_signers.insert(reward_index, new_signer_config); } fn initialize_runloop(&mut self) -> Result<(), ClientError> { @@ -322,7 +384,11 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(()) } - fn refresh_runloop(&mut self, current_burn_block_height: u64) -> Result<(), ClientError> { + fn refresh_runloop(&mut self, ev_burn_block_height: u64) -> Result<(), ClientError> { + let current_burn_block_height = std::cmp::max( + self.stacks_client.get_peer_info()?.burn_block_height, + ev_burn_block_height, + ); let reward_cycle_info = self .current_reward_cycle_info .as_mut() @@ -332,48 +398,44 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo // First ensure we refresh our view of the current reward cycle information if block_reward_cycle != current_reward_cycle { - let new_reward_cycle_info = retry_with_exponential_backoff(|| { - let info = self - .stacks_client - .get_current_reward_cycle_info() - .map_err(backoff::Error::transient)?; - if info.reward_cycle < block_reward_cycle { - // If the stacks-node is still processing the burn block, the /v2/pox endpoint - // may return the previous reward cycle. In this case, we should retry. - return Err(backoff::Error::transient(ClientError::InvalidResponse( - format!("Received reward cycle ({}) does not match the expected reward cycle ({}) for block {}.", - info.reward_cycle, - block_reward_cycle, - current_burn_block_height - ), - ))); - } - Ok(info) - })?; + let new_reward_cycle_info = RewardCycleInfo { + reward_cycle: block_reward_cycle, + reward_cycle_length: reward_cycle_info.reward_cycle_length, + prepare_phase_block_length: reward_cycle_info.prepare_phase_block_length, + first_burnchain_block_height: reward_cycle_info.first_burnchain_block_height, + last_burnchain_block_height: current_burn_block_height, + }; *reward_cycle_info = new_reward_cycle_info; } + let reward_cycle_before_refresh = current_reward_cycle; let current_reward_cycle = reward_cycle_info.reward_cycle; - // We should only attempt to refresh the signer if we are not configured for the next reward cycle yet and we received a new burn block for its prepare phase - if reward_cycle_info.is_in_next_prepare_phase(current_burn_block_height) { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if self - .stacks_signers - .get(&(next_reward_cycle % 2)) - .map(|signer| signer.reward_cycle() != next_reward_cycle) - .unwrap_or(true) - { - info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). 
Checking for signer registration..."); + let is_in_next_prepare_phase = + reward_cycle_info.is_in_next_prepare_phase(current_burn_block_height); + let next_reward_cycle = current_reward_cycle.saturating_add(1); + + info!( + "Refreshing runloop with new burn block event"; + "latest_node_burn_ht" => current_burn_block_height, + "event_ht" => ev_burn_block_height, + "reward_cycle_before_refresh" => reward_cycle_before_refresh, + "current_reward_cycle" => current_reward_cycle, + "configured_for_current" => Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle), + "configured_for_next" => Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle), + "is_in_next_prepare_phase" => is_in_next_prepare_phase, + ); + + // Check if we need to refresh the signers: + // need to refresh the current signer if we are not configured for the current reward cycle + // need to refresh the next signer if we're not configured for the next reward cycle, and we're in the prepare phase + if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) { + self.refresh_signer_config(current_reward_cycle); + } + if is_in_next_prepare_phase { + if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) { self.refresh_signer_config(next_reward_cycle); } - } else { - info!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; - "reward_cycle" => reward_cycle_info.reward_cycle, - "reward_cycle_length" => reward_cycle_info.reward_cycle_length, - "prepare_phase_block_length" => reward_cycle_info.prepare_phase_block_length, - "first_burnchain_block_height" => reward_cycle_info.first_burnchain_block_height, - "last_burnchain_block_height" => reward_cycle_info.last_burnchain_block_height, - ); } + self.cleanup_stale_signers(current_reward_cycle); if self.stacks_signers.is_empty() { self.state = State::NoRegisteredSigners; @@ -383,6 +445,16 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(()) } + fn is_configured_for_cycle( + stacks_signers: &HashMap>, + reward_cycle: u64, + ) -> bool { + let Some(signer) = stacks_signers.get(&(reward_cycle % 2)) else { + return false; + }; + signer.reward_cycle() == reward_cycle + } + fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { @@ -390,7 +462,13 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let next_reward_cycle = reward_cycle.wrapping_add(1); let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale - std::cmp::Ordering::Equal => !signer.has_pending_blocks(), // We are the next reward cycle, so check if we have any pending blocks to process + std::cmp::Ordering::Equal => { + // We are the next reward cycle, so check if we were registered and have any pending blocks to process + match signer { + ConfiguredSigner::RegisteredSigner(signer) => !signer.has_pending_blocks(), + _ => true, + } + } std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale }; if stale { @@ -425,6 +503,19 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> "Running one pass for the signer. 
state={:?}, cmd={cmd:?}, event={event:?}", self.state ); + // This is the only event that we respond to from the outer signer runloop + if let Some(SignerEvent::StatusCheck) = event { + info!("Signer status check requested: {:?}.", self.state); + if let Err(e) = res.send(vec![StateInfo { + runloop_state: self.state, + reward_cycle_info: self.current_reward_cycle_info, + } + .into()]) + { + error!("Failed to send status check result: {e}."); + } + } + if let Some(cmd) = cmd { self.commands.push_back(cmd); } @@ -447,7 +538,12 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> .as_ref() .expect("FATAL: cannot be an initialized signer with no reward cycle info.") .reward_cycle; - for signer in self.stacks_signers.values_mut() { + for configured_signer in self.stacks_signers.values_mut() { + let ConfiguredSigner::RegisteredSigner(ref mut signer) = configured_signer else { + debug!("{configured_signer}: Not configured for cycle, ignoring events for cycle"); + continue; + }; + signer.process_event( &self.stacks_client, &mut self.sortition_state, @@ -466,18 +562,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> let next_reward_cycle = current_reward_cycle.saturating_add(1); info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); } - // This is the only event that we respond to from the outer signer runloop - if let Some(SignerEvent::StatusCheck) = event { - info!("Signer status check requested: {:?}.", self.state); - if let Err(e) = res.send(vec![StateInfo { - runloop_state: self.state, - reward_cycle_info: self.current_reward_cycle_info, - } - .into()]) - { - error!("Failed to send status check result: {e}."); - } - } None } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d81fbbd8148..c32af06f3fd 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -71,10 +71,6 @@ impl SignerTrait for Signer { Self::from(config) } - /// Refresh the next signer data from the given configuration data - fn update_signer(&mut self, _new_signer_config: &SignerConfig) { - // do nothing - } /// Return the reward cycle of the signer fn reward_cycle(&self) -> u64 { self.reward_cycle diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 18e31946c02..8212586beb7 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -143,16 +143,6 @@ impl SignerTrait for Signer { Self::from(config) } - /// Refresh the next signer data from the given configuration data - fn update_signer(&mut self, new_signer_config: &SignerConfig) { - self.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); - } /// Return the reward cycle of the signer fn reward_cycle(&self) -> u64 { self.reward_cycle @@ -356,6 +346,18 @@ impl Signer { } } + /// Refresh the next signer data from the given configuration data + #[allow(dead_code)] + fn update_signer(&mut self, new_signer_config: &SignerConfig) { + self.next_signer_addresses = new_signer_config + .signer_entries + .signer_ids + .keys() + .copied() + .collect(); + self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); + } + /// Get the current coordinator for executing DKG /// This will always use the coordinator selector to determine the coordinator fn get_coordinator_dkg(&self) -> 
(u32, PublicKey) { diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4546b66fc93..afcea6b5515 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -51,6 +51,35 @@ pub struct GetStackersResponse { pub stacker_set: RewardSet, } +pub enum GetStackersErrors { + NotAvailableYet(crate::chainstate::coordinator::Error), + Other(String), +} + +impl GetStackersErrors { + pub fn error_type_string(&self) -> &'static str { + match self { + GetStackersErrors::NotAvailableYet(_) => "not_available_try_again", + GetStackersErrors::Other(_) => "other", + } + } +} + +impl From<&str> for GetStackersErrors { + fn from(value: &str) -> Self { + GetStackersErrors::Other(value.into()) + } +} + +impl std::fmt::Display for GetStackersErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GetStackersErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet. Err = {e:?}"), + GetStackersErrors::Other(msg) => write!(f, "{msg}") + } + } +} + impl GetStackersResponse { pub fn load( sortdb: &SortitionDB, @@ -58,7 +87,7 @@ impl GetStackersResponse { tip: &StacksBlockId, burnchain: &Burnchain, cycle_number: u64, - ) -> Result { + ) -> Result { let cycle_start_height = burnchain.reward_cycle_to_block_height(cycle_number); let pox_contract_name = burnchain @@ -74,16 +103,9 @@ impl GetStackersResponse { } let provider = OnChainRewardSetProvider::new(); - let stacker_set = provider.read_reward_set_nakamoto( - cycle_start_height, - chainstate, - burnchain, - sortdb, - tip, - true, - ).map_err( - |e| format!("Could not read reward set. Prepare phase may not have started for this cycle yet. Cycle = {cycle_number}, Err = {e:?}") - )?; + let stacker_set = provider + .read_reward_set_nakamoto(cycle_start_height, chainstate, burnchain, sortdb, tip, true) + .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) } @@ -173,10 +195,13 @@ impl RPCRequestHandler for GetStackersRequestHandler { let response = match stacker_response { Ok(response) => response, - Err(err_str) => { + Err(error) => { return StacksHttpResponse::new_error( &preamble, - &HttpBadRequest::new_json(json!({"response": "error", "err_msg": err_str})), + &HttpBadRequest::new_json(json!({ + "response": "error", + "err_type": error.error_type_string(), + "err_msg": error.to_string()})), ) .try_into_contents() .map_err(NetError::from) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index db442ac46b7..914c2efb1a0 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -775,9 +775,10 @@ impl SignCoordinator { let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { warn!( - "Processed signature but didn't validate over the expected block. Returning error."; + "Processed signature for a different block. 
Will try to continue."; "signature" => %signature, "block_signer_signature_hash" => %block_sighash, + "response_hash" => %response_hash, "slot_id" => slot_id, "reward_cycle_id" => reward_cycle_id, "response_hash" => %response_hash diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 13de8a350c8..d07f6b91d6c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -673,54 +673,95 @@ pub fn next_block_and_mine_commit( coord_channels: &Arc>, commits_submitted: &Arc, ) -> Result<(), String> { - let commits_submitted = commits_submitted.clone(); - let blocks_processed_before = coord_channels - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let commits_before = commits_submitted.load(Ordering::SeqCst); - let mut block_processed_time: Option = None; - let mut commit_sent_time: Option = None; + next_block_and_wait_for_commits( + btc_controller, + timeout_secs, + &[coord_channels], + &[commits_submitted], + ) +} + +/// Mine a bitcoin block, and wait until: +/// (1) a new block has been processed by the coordinator +/// (2) 2 block commits have been issued ** or ** more than 10 seconds have +/// passed since (1) occurred +/// This waits for this check to pass on *all* supplied channels +pub fn next_block_and_wait_for_commits( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + coord_channels: &[&Arc>], + commits_submitted: &[&Arc], +) -> Result<(), String> { + let commits_submitted: Vec<_> = commits_submitted.iter().cloned().collect(); + let blocks_processed_before: Vec<_> = coord_channels + .iter() + .map(|x| { + x.lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed() + }) + .collect(); + let commits_before: Vec<_> = commits_submitted + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + + let mut block_processed_time: Vec> = + (0..commits_before.len()).map(|_| None).collect(); + let mut commit_sent_time: Vec> = + (0..commits_before.len()).map(|_| None).collect(); next_block_and(btc_controller, timeout_secs, || { - let commits_sent = commits_submitted.load(Ordering::SeqCst); - let blocks_processed = coord_channels - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before && block_processed_time.is_none() { - block_processed_time.replace(now); - } - if commits_sent > commits_before && commit_sent_time.is_none() { - commit_sent_time.replace(now); - } - if blocks_processed > blocks_processed_before { - let block_processed_time = block_processed_time - .as_ref() - .ok_or("TEST-ERROR: Processed time wasn't set")?; - if commits_sent <= commits_before { - return Ok(false); - } - let commit_sent_time = commit_sent_time - .as_ref() - .ok_or("TEST-ERROR: Processed time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - return Ok(true); + for i in 0..commits_submitted.len() { + let commits_sent = commits_submitted[i].load(Ordering::SeqCst); + let blocks_processed = coord_channels[i] + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let now = Instant::now(); + if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { + block_processed_time[i].replace(now); } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before + 2 { - 
return Ok(true); + if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { + commit_sent_time[i].replace(now); } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - return Ok(true); + } + + for i in 0..commits_submitted.len() { + let blocks_processed = coord_channels[i] + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let commits_sent = commits_submitted[i].load(Ordering::SeqCst); + + if blocks_processed > blocks_processed_before[i] { + let block_processed_time = block_processed_time[i] + .as_ref() + .ok_or("TEST-ERROR: Processed time wasn't set")?; + if commits_sent <= commits_before[i] { + return Ok(false); + } + let commit_sent_time = commit_sent_time[i] + .as_ref() + .ok_or("TEST-ERROR: Processed time wasn't set")?; + // try to ensure the commit was sent after the block was processed + if commit_sent_time > block_processed_time { + continue; + } + // if two commits have been sent, one of them must have been after + if commits_sent >= commits_before[i] + 2 { + continue; + } + // otherwise, just timeout if the commit was sent and its been long enough + // for a new commit pass to have occurred + if block_processed_time.elapsed() > Duration::from_secs(10) { + continue; + } + return Ok(false); + } else { + return Ok(false); } - Ok(false) - } else { - Ok(false) } + Ok(true) }) } @@ -1196,15 +1237,11 @@ pub fn boot_to_epoch_3_reward_set( btc_regtest_controller, num_stacking_cycles, ); - let epoch_3_reward_set_calculation = - btc_regtest_controller.get_headers_height().wrapping_add(1); - run_until_burnchain_height( - btc_regtest_controller, - &blocks_processed, - epoch_3_reward_set_calculation, - &naka_conf, + next_block_and_wait(btc_regtest_controller, &blocks_processed); + info!( + "Bootstrapped to Epoch 3.0 reward set calculation height: {}", + get_chain_info(naka_conf).burn_block_height ); - info!("Bootstrapped to Epoch 3.0 reward set calculation height: {epoch_3_reward_set_calculation}."); } /// Wait for a block commit, without producing a block diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 31afcf1300b..fe7bf771041 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -15,6 +15,7 @@ mod v0; mod v1; +use std::collections::HashSet; // Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify @@ -51,17 +52,19 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::{SignerResult, StateInfo}; +use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use wsts::state_machine::PublicKeys; +use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::neon::{Counters, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ - naka_neon_integration_conf, next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE, + 
naka_neon_integration_conf, next_block_and_mine_commit, next_block_and_wait_for_commits, + POX_4_DEFAULT_STACKER_BALANCE, }; use crate::tests::neon_integrations::{ get_chain_info, next_block_and_wait, run_until_burnchain_height, test_observer, @@ -222,8 +225,12 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest) { + for signer_ix in 0..self.spawned_signers.len() { + if exclude.contains(&signer_ix) { + continue; + } + let port = 3000 + signer_ix; let endpoint = format!("http://localhost:{}", port); let path = format!("{endpoint}/status"); let client = reqwest::blocking::Client::new(); @@ -235,39 +242,78 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { - debug!("Waiting for Status..."); - let now = std::time::Instant::now(); - let mut states = Vec::with_capacity(self.spawned_signers.len()); - for signer in self.spawned_signers.iter() { - let old_len = states.len(); - loop { - assert!( - now.elapsed() < timeout, - "Timed out waiting for state checks" - ); - let results = signer - .res_recv - .recv_timeout(timeout) - .expect("failed to recv state results"); - for result in results { - match result { - SignerResult::OperationResult(_operation) => { - panic!("Recieved an operation result."); - } - SignerResult::StatusCheck(state_info) => { - states.push(state_info); - } - } + pub fn wait_for_registered(&mut self, timeout_secs: u64) { + let mut finished_signers = HashSet::new(); + wait_for(timeout_secs, || { + self.send_status_request(&finished_signers); + thread::sleep(Duration::from_secs(1)); + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.iter().enumerate() { + let Some(state) = state else { continue; }; + if state.runloop_state == State::RegisteredSigners { + finished_signers.insert(ix); + } else { + warn!("Signer #{ix} returned state = {:?}, will try to wait for a registered signers state from them.", state.runloop_state); + } + } + info!("Finished signers: {:?}", finished_signers.iter().collect::>()); + Ok(finished_signers.len() == self.spawned_signers.len()) + }).unwrap(); + } + + pub fn wait_for_cycle(&mut self, timeout_secs: u64, reward_cycle: u64) { + let mut finished_signers = HashSet::new(); + wait_for(timeout_secs, || { + self.send_status_request(&finished_signers); + thread::sleep(Duration::from_secs(1)); + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.iter().enumerate() { + let Some(state) = state else { continue; }; + let Some(reward_cycle_info) = state.reward_cycle_info else { continue; }; + if reward_cycle_info.reward_cycle == reward_cycle { + finished_signers.insert(ix); + } else { + warn!("Signer #{ix} returned state = {:?}, will try to wait for a cycle = {} state from them.", state, reward_cycle); + } + } + info!("Finished signers: {:?}", finished_signers.iter().collect::>()); + Ok(finished_signers.len() == self.spawned_signers.len()) + }).unwrap(); + } + + /// Get status check results (if returned) from each signer without blocking + /// Returns Some() or None() for each signer, in order of `self.spawned_signers` + pub fn get_states(&mut self, exclude: &HashSet) -> Vec> { + let mut output = Vec::new(); + for (ix, signer) in self.spawned_signers.iter().enumerate() { + if exclude.contains(&ix) { + output.push(None); + continue; + } + let Ok(mut results) = signer.res_recv.try_recv() else { + debug!("Could not receive latest state from signer #{ix}"); + output.push(None); + continue; + }; + if results.len() > 1 { + warn!("Received multiple states 
from the signer receiver: this test function assumes it should only ever receive 1"); + panic!(); + } + let Some(result) = results.pop() else { + debug!("Could not receive latest state from signer #{ix}"); + output.push(None); + continue; + }; + match result { + SignerResult::OperationResult(_operation) => { + panic!("Recieved an operation result."); } - if states.len() > old_len { - break; + SignerResult::StatusCheck(state_info) => { + output.push(Some(state_info)); } } } - debug!("Finished waiting for state checks!"); - states + output } fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 { @@ -337,18 +383,21 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>], + commits_submitted: &[&Arc], + timeout: Duration, + ) { let blocks_len = test_observer::get_blocks().len(); let mined_block_time = Instant::now(); - next_block_and_mine_commit( + next_block_and_wait_for_commits( &mut self.running_nodes.btc_regtest_controller, timeout.as_secs(), - &self.running_nodes.coord_channel, - &commits_submitted, + coord_channels, + commits_submitted, ) .unwrap(); - let t_start = Instant::now(); while test_observer::get_blocks().len() <= blocks_len { assert!( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ee2e417eeb..92c1c782876 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -212,22 +212,7 @@ impl SignerTest { &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, ); - let now = std::time::Instant::now(); - loop { - self.send_status_request(); - let states = self.wait_for_states(short_timeout); - if states - .iter() - .all(|state_info| state_info.runloop_state == State::RegisteredSigners) - { - break; - } - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for signers to be registered" - ); - std::thread::sleep(Duration::from_secs(1)); - } + self.wait_for_registered(30); debug!("Signers initialized"); info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); @@ -255,7 +240,7 @@ impl SignerTest { &mut self.running_nodes.btc_regtest_controller, Some(self.num_stacking_cycles), ); - debug!("Waiting for signer set calculation."); + info!("Waiting for signer set calculation."); let mut reward_set_calculated = false; let short_timeout = Duration::from_secs(30); let now = std::time::Instant::now(); @@ -277,31 +262,16 @@ impl SignerTest { "Timed out waiting for reward set calculation" ); } - debug!("Signer set calculated"); + info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state - debug!("Waiting for signers to initialize."); + info!("Waiting for signers to initialize."); next_block_and_wait( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, ); - let now = std::time::Instant::now(); - loop { - self.send_status_request(); - let states = self.wait_for_states(short_timeout); - if states - .iter() - .all(|state_info| state_info.runloop_state == State::RegisteredSigners) - { - break; - } - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for signers to be registered" - ); - std::thread::sleep(Duration::from_secs(1)); - } - debug!("Singers initialized"); + self.wait_for_registered(30); + info!("Signers initialized"); self.run_until_epoch_3_boundary(); @@ -1244,6 +1214,11 @@ fn multiple_miners() { ); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = 
run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + .. + } = run_loop_2.counters(); let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -1260,6 +1235,8 @@ fn multiple_miners() { // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); @@ -1270,7 +1247,11 @@ fn multiple_miners() { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } - signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure @@ -1785,25 +1766,7 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - let now = std::time::Instant::now(); - // Wait for the signer to process the burn blocks and fully enter the next reward cycle - loop { - signer_test.send_status_request(); - let states = signer_test.wait_for_states(short_timeout); - if states.iter().all(|state_info| { - state_info - .reward_cycle_info - .map(|info| info.reward_cycle == final_reward_cycle) - .unwrap_or(false) - }) { - break; - } - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for signers to be in the next reward cycle" - ); - std::thread::sleep(Duration::from_millis(100)); - } + signer_test.wait_for_cycle(30, final_reward_cycle); info!("Block proposed and burn blocks consumed. 
Verifying that stacks block is still not processed"); From 5cc974e2ddce40d4fa25a8a30ac71572b71fc2d6 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 22:12:34 +0300 Subject: [PATCH 159/910] adjust the code blocks with line breaks for readability --- contrib/tools/local-mutation-testing.sh | 3 ++- docs/mutation-testing.md | 23 +++++++++++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh index 41592b50302..11da6810e54 100755 --- a/contrib/tools/local-mutation-testing.sh +++ b/contrib/tools/local-mutation-testing.sh @@ -64,7 +64,8 @@ run_mutants() { if [ "$mutant_count" -gt "$threshold" ]; then echo "Running mutants for $package ($mutant_count mutants)" - RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ --output "$output_dir" \ diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md index e75ba75bdf7..85fcd89a7f6 100644 --- a/docs/mutation-testing.md +++ b/docs/mutation-testing.md @@ -68,6 +68,7 @@ cargo install --version 24.7.1 cargo-mutants --locked ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ @@ -79,6 +80,7 @@ cargo install --version 24.7.1 cargo-mutants --locked ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ @@ -90,6 +92,7 @@ cargo install --version 24.7.1 cargo-mutants --locked ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ @@ -118,10 +121,26 @@ MISSED stacks-signer/src/runloop.rs:424:9: replace >::run_one_pass -> Option> with None in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +RUST_BACKTRACE=1 BITCOIND_TEST=1 \ +cargo mutants -vV \ + -F "replace process_stackerdb_event" \ + -E ": replace >::run_one_pass -> Option> with None in " \ + --test-tool=nextest \ + -- \ + --run-ignored all \ + --fail-fast \ + --test-threads 1 ``` General command to run ```sh -RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants -vV -F "replace process_stackerdb_event" -E ": replace [modify this] with [modify this] in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +RUST_BACKTRACE=1 BITCOIND_TEST=1 \ +cargo mutants -vV \ + -F "replace process_stackerdb_event" \ + -E ": replace [modify this] with [modify this] in " \ + --test-tool=nextest \ + -- \ + --run-ignored all \ + --fail-fast \ + --test-threads 1 ``` From 98d05a4b7d2c7b0111b76c0ec94b67851ddb47b7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 8 Aug 2024 16:00:01 -0500 Subject: [PATCH 160/910] test: add test for reloading signer config when reward set unavailable at start of prepare phase --- .github/workflows/bitcoin-tests.yml | 1 + .../src/tests/nakamoto_integrations.rs | 61 ++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 118 
+++++++++++++++++- 3 files changed, 163 insertions(+), 17 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index cd0fda66654..e14934558a5 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -95,6 +95,7 @@ jobs: - tests::signer::v0::mock_sign_epoch_25 - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking + - tests::signer::v0::reloads_signer_set_in - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d07f6b91d6c..62afa03ac42 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1073,11 +1073,7 @@ fn signer_vote_if_needed( } } -/// -/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order -/// for pox-4 to activate -/// * `signer_pks` - must be the same size as `stacker_sks` -pub fn boot_to_epoch_3_reward_set_calculation_boundary( +pub fn setup_epoch_3_reward_set( naka_conf: &Config, blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], @@ -1099,9 +1095,6 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( ); let epoch_3_reward_cycle_boundary = epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); - let epoch_3_reward_set_calculation_boundary = epoch_3_reward_cycle_boundary - .saturating_sub(prepare_phase_len) - .wrapping_add(1); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); next_block_and_wait(btc_regtest_controller, &blocks_processed); next_block_and_wait(btc_regtest_controller, &blocks_processed); @@ -1115,13 +1108,13 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( .block_height_to_reward_cycle(block_height) .unwrap(); let lock_period: u128 = num_stacking_cycles.unwrap_or(12_u64).into(); - debug!("Test Cycle Info"; - "prepare_phase_len" => {prepare_phase_len}, - "reward_cycle_len" => {reward_cycle_len}, - "block_height" => {block_height}, - "reward_cycle" => {reward_cycle}, - "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, - "epoch_3_start_height" => {epoch_3_start_height}, + info!("Test Cycle Info"; + "prepare_phase_len" => {prepare_phase_len}, + "reward_cycle_len" => {reward_cycle_len}, + "block_height" => {block_height}, + "reward_cycle" => {reward_cycle}, + "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, + "epoch_3_start_height" => {epoch_3_start_height}, ); for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( @@ -1165,6 +1158,44 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( ); submit_tx(&http_origin, &stacking_tx); } +} + +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_3_reward_set_calculation_boundary( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option, +) { + setup_epoch_3_reward_set( + naka_conf, + blocks_processed, + stacker_sks, + signer_sks, + btc_regtest_controller, + num_stacking_cycles, + ); + + let epochs = 
naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_3_start_height = epoch_3.start_height; + assert!( + epoch_3_start_height > 0, + "Epoch 3.0 start height must be greater than 0" + ); + let epoch_3_reward_cycle_boundary = + epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); + let epoch_3_reward_set_calculation_boundary = epoch_3_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .saturating_add(1); run_until_burnchain_height( btc_regtest_controller, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 92c1c782876..f589416746c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -61,8 +61,9 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, wait_for, - POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, + next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -744,6 +745,119 @@ struct TenureForkingResult { mined_d: MinedNakamotoBlockEvent, } +#[test] +#[ignore] +/// Test to make sure that the signers are capable of reloading their reward set +/// if the stacks-node doesn't have it available at the first block of a prepare phase (e.g., if there was no block) +fn reloads_signer_set_in() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + |_| {}, + &[], + ); + + setup_epoch_3_reward_set( + &signer_test.running_nodes.conf, + &signer_test.running_nodes.blocks_processed, + &signer_test.signer_stacks_private_keys, + &signer_test.signer_stacks_private_keys, + &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), + ); + + let naka_conf = &signer_test.running_nodes.conf; + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_3_start_height = epoch_3.start_height; + assert!( + epoch_3_start_height > 0, + "Epoch 3.0 start height must be greater than 0" + ); + let epoch_3_reward_cycle_boundary = + epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); + let before_epoch_3_reward_set_calculation = + 
epoch_3_reward_cycle_boundary.saturating_sub(prepare_phase_len); + run_until_burnchain_height( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + before_epoch_3_reward_set_calculation, + naka_conf, + ); + + info!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = signer_test.get_current_reward_cycle() + 1; + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + while !reward_set_calculated { + let reward_set = signer_test + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + info!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + info!("Signer set calculated"); + + // Manually consume one more block to ensure signers refresh their state + info!("Waiting for signers to initialize."); + next_block_and_wait( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + ); + signer_test.wait_for_registered(30); + info!("Signers initialized"); + + signer_test.run_until_epoch_3_boundary(); + + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + + info!("Waiting 1 burnchain block for miner VRF key confirmation"); + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }, + ) + .unwrap(); + info!("Ready to mine Nakamoto blocks!"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + signer_test.shutdown(); +} + /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches /// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). 
The BootLoop

From 7d82eefde8d51df91a839c17c162ea8db5c5c164 Mon Sep 17 00:00:00 2001
From: wileyj <2847772+wileyj@users.noreply.github.com>
Date: Thu, 8 Aug 2024 14:41:36 -0700
Subject: [PATCH 161/910] Update signer changelog

---
 stacks-signer/CHANGELOG.md | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md
index 6b28b15e8f6..1476d56ad01 100644
--- a/stacks-signer/CHANGELOG.md
+++ b/stacks-signer/CHANGELOG.md
@@ -7,6 +7,25 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
 
 ## [Unreleased]
 
+### Added
+
+### Changed
+
+## [2.5.0.0.5.2-rc1]
+
+### Added
+
+- Signer set handoff integration test (#5037)
+- Add mock signing (#5020)
+- Add versioning info set at build-time (#5016)
+
+### Changed
+
+- Fix out of sync `RPCPeerInfo` with stacks-node (#5033, #5014, #4999)
+- Logging Improvements (#5025)
+- Timeout empty sortition (#5003)
+- Enum for version specific data (#4981)
+
 ## [2.5.0.0.5.1]
 
 ### Added

From 9f4b56ebc6eb088c0fa1234f0d55f75d1e95b006 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Fri, 9 Aug 2024 09:03:23 -0500
Subject: [PATCH 162/910] test: add coverage for mutants

---
 stacks-signer/src/client/stacks_client.rs |  4 +--
 stackslib/src/net/api/getstackers.rs      | 35 +++++++++++++++++--
 .../src/nakamoto_node/sign_coordinator.rs  |  3 ++
 3 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs
index e41485ea40a..223455c72d6 100644
--- a/stacks-signer/src/client/stacks_client.rs
+++ b/stacks-signer/src/client/stacks_client.rs
@@ -33,7 +33,7 @@ use blockstack_lib::net::api::get_tenures_fork_info::{
 use blockstack_lib::net::api::getaccount::AccountEntryResponse;
 use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData;
 use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH};
-use blockstack_lib::net::api::getstackers::GetStackersResponse;
+use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersResponse};
 use blockstack_lib::net::api::postblock::StacksBlockAcceptedData;
 use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal;
 use blockstack_lib::net::api::postblock_v3;
@@ -541,7 +541,7 @@ impl StacksClient {
                 warn!("Failed to parse the GetStackers error response: {e}");
                 backoff::Error::permanent(e.into())
             })?;
-            if error_data.err_type == "not_available_try_again" {
+            if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE {
                 return Err(backoff::Error::transient(ClientError::NoSortitionOnChain));
             } else {
                 warn!("Got error response ({status}): {}", error_data.err_msg);
diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs
index afcea6b5515..4fd42340708 100644
--- a/stackslib/src/net/api/getstackers.rs
+++ b/stackslib/src/net/api/getstackers.rs
@@ -57,10 +57,13 @@ pub enum GetStackersErrors {
 }
 
 impl GetStackersErrors {
+    pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again";
+    pub const OTHER_ERR_TYPE: &'static str = "other";
+
     pub fn error_type_string(&self) -> &'static str {
         match self {
-            GetStackersErrors::NotAvailableYet(_) => "not_available_try_again",
-            GetStackersErrors::Other(_) => "other",
+            Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE,
+            Self::Other(_) => Self::OTHER_ERR_TYPE,
         }
     }
 }
@@ -252,3 +255,31 @@ impl StacksHttpResponse {
         Ok(response)
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::GetStackersErrors;
+
+    #[test]
+    // Test the formatting and
error type strings of GetStackersErrors + fn get_stackers_errors() { + let not_available_err = GetStackersErrors::NotAvailableYet( + crate::chainstate::coordinator::Error::PoXNotProcessedYet, + ); + let other_err = GetStackersErrors::Other("foo".into()); + + assert_eq!( + not_available_err.error_type_string(), + GetStackersErrors::NOT_AVAILABLE_ERR_TYPE + ); + assert_eq!( + other_err.error_type_string(), + GetStackersErrors::OTHER_ERR_TYPE + ); + + assert!(not_available_err + .to_string() + .starts_with("Could not read reward set")); + assert_eq!(other_err.to_string(), "foo".to_string()); + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 914c2efb1a0..6a5f026a16c 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -631,6 +631,9 @@ impl SignCoordinator { /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond /// with their signatures. + // Mutants skip here: this function is covered via integration tests, + // which the mutation testing does not see. + #[cfg_attr(test, mutants::skip)] pub fn begin_sign_v0( &mut self, block: &NakamotoBlock, From e6e89552d4dc1dab00c16b7207724b6a1e8f5e33 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:24:36 -0400 Subject: [PATCH 163/910] fix: broadcast-signed-block fault injection in the global signer config, so we can set it in tests --- stacks-signer/src/config.rs | 3 +++ stacks-signer/src/runloop.rs | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 68f6141ee82..037e8af7730 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -203,6 +203,8 @@ pub struct GlobalConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// Broadcast a block to the node if we gather enough signatures from other signers + pub broadcast_signed_blocks: bool, } /// Internal struct for loading up the config file @@ -359,6 +361,7 @@ impl TryFrom for GlobalConfig { metrics_endpoint, first_proposal_burn_block_timing, block_proposal_timeout, + broadcast_signed_blocks: true, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 58502314bb0..9e1083047b5 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -335,7 +335,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, - broadcast_signed_blocks: true, + broadcast_signed_blocks: self.config.broadcast_signed_blocks, })) } From 1fe36734885a4dda7f9cfa125ff5dc2ba2a67825 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:25:05 -0400 Subject: [PATCH 164/910] fix: address #5046 completely --- .../stacks-node/src/nakamoto_node/relayer.rs | 77 +++++++++++-------- 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index d5e873a54dd..add33424ad2 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -375,47 +375,56 @@ impl RelayerThread { } } - /// Choose a miner 
directive based on the outcome of a sortition + /// Choose a miner directive based on the outcome of a sortition. + /// We won't always be able to mine -- for example, this could be an empty sortition, but the + /// parent block could be an epoch 2 block. In this case, the right thing to do is to wait for + /// the next block-commit. pub(crate) fn choose_miner_directive( config: &Config, sortdb: &SortitionDB, sn: BlockSnapshot, won_sortition: bool, committed_index_hash: StacksBlockId, - ) -> MinerDirective { + ) -> Option<MinerDirective> { let directive = if sn.sortition { - if won_sortition || config.get_node_config(false).mock_mining { - MinerDirective::BeginTenure { - parent_tenure_start: committed_index_hash, - burnchain_tip: sn, - } - } else { - MinerDirective::StopTenure - } + Some( + if won_sortition || config.get_node_config(false).mock_mining { + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::StopTenure + }, + ) } else { - let ih = sortdb.index_handle(&sn.sortition_id); - let parent_sn = ih.get_last_snapshot_with_sortition(sn.block_height).expect( - "FATAL: failed to query sortition DB for last snapshot with non-empty tenure", - ); - - let parent_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height) - .expect("FATAL: failed to query sortiiton DB for epoch") - .expect("FATAL: no epoch defined for existing sortition"); - - let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height) - .expect("FATAL: failed to query sortition DB for epoch") - .expect("FATAL: no epoch defined for existing sortition"); - - if parent_epoch.epoch_id != cur_epoch.epoch_id { - // this is the first-ever sortition, so definitely mine - MinerDirective::BeginTenure { - parent_tenure_start: committed_index_hash, - burnchain_tip: sn, - } + // find out what epoch the Stacks tip is in. + // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so + // right now since this sortition has no winner. + let (cur_stacks_tip_ch, _cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + + let stacks_tip_sn = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cur_stacks_tip_ch) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no sortition for canonical stacks tip"); + + let cur_epoch = + SortitionDB::get_stacks_epoch(sortdb.conn(), stacks_tip_sn.block_height) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); + + if cur_epoch.epoch_id != StacksEpochId::Epoch30 { + debug!( + "As of sortition {}, there has not yet been a Nakamoto tip. Cannot mine.", + &stacks_tip_sn.consensus_hash + ); + None } else { - MinerDirective::ContinueTenure { + Some(MinerDirective::ContinueTenure { new_burn_view: sn.consensus_hash, - } + }) } }; directive @@ -425,7 +434,7 @@ impl RelayerThread { /// determine what miner action (if any) to take. /// /// Returns a directive to the relayer thread to either start, stop, or continue a tenure, if - /// this sortition matches the sortition tip. + /// this sortition matches the sortition tip and we have a parent to build atop. /// /// Otherwise, returns None, meaning no action will be taken. 
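The heart of the change above is the new return type: `choose_miner_directive` now returns `Option<MinerDirective>`, and an empty sortition whose Stacks tip is still pre-Nakamoto maps to `None` ("wait for the next block-commit") rather than a forced directive. A minimal sketch of that decision shape -- hypothetical names, with booleans standing in for the node's real `Config` and `SortitionDB` queries:

```rust
// Simplified sketch of the Option-returning directive pattern above.
// The real enum variants carry block hashes and snapshots, and the
// epoch test queries the sortition DB; both are reduced here.
#[derive(Debug)]
enum MinerDirective {
    BeginTenure,
    StopTenure,
    ContinueTenure,
}

fn choose_miner_directive(
    sortition_has_winner: bool,
    we_won: bool,
    stacks_tip_is_nakamoto: bool,
) -> Option<MinerDirective> {
    if sortition_has_winner {
        // A winner exists: either it is us (begin a tenure) or it is not (stop).
        Some(if we_won {
            MinerDirective::BeginTenure
        } else {
            MinerDirective::StopTenure
        })
    } else if stacks_tip_is_nakamoto {
        // Empty sortition, but there is already a Nakamoto tip to extend.
        Some(MinerDirective::ContinueTenure)
    } else {
        // Empty sortition atop an epoch 2.x tip: nothing to build on yet,
        // so emit no directive and wait for the next block-commit.
        None
    }
}

fn main() {
    // Empty sortition plus a pre-Nakamoto tip yields no directive.
    assert!(choose_miner_directive(false, false, false).is_none());
}
```

`process_sortition` in the next hunk then forwards this `Option` unchanged (`Ok(directive_opt)`), which is why its doc comment now reads "and we have a parent to build atop".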
fn process_sortition( @@ -465,14 +474,14 @@ impl RelayerThread { return Ok(None); } - let directive = Self::choose_miner_directive( + let directive_opt = Self::choose_miner_directive( &self.config, &self.sortdb, sn, won_sortition, committed_index_hash, ); - Ok(Some(directive)) + Ok(directive_opt) } /// Constructs and returns a LeaderKeyRegisterOp out of the provided params From f7f12932a76263847c9a10f0e547ff941664d398 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:26:06 -0400 Subject: [PATCH 165/910] fix: counter for rejected blocks, and also, the canonical stacks tip doesn't have any bearing on whether or not we should keep mining, so don't check it --- .../src/nakamoto_node/sign_coordinator.rs | 33 ++----------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 97b64cbacd4..d57639cd1dc 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -683,6 +683,7 @@ impl SignCoordinator { ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; counters.bump_naka_proposed_blocks(); + #[cfg(test)] { info!( @@ -749,37 +750,6 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - // we don't have the block we ostensibly mined, but perhaps the tenure has advanced - // anyway? If so, then give up. - let canonical_stacks_header = - NakamotoChainState::get_canonical_block_header(chain_state.db(), sortdb) - .map_err(|e| { - let msg = format!("Failed to query canonical stacks tip: {:?}", &e); - warn!("{}", &msg); - NakamotoNodeError::SignerSignatureError(msg) - })? - .ok_or_else(|| { - let msg = "No canonical stacks tip".to_string(); - warn!("{}", &msg); - NakamotoNodeError::SignerSignatureError(msg) - })?; - - debug!( - "run_sign_v0: our canonical tip is currently {}/{}", - &canonical_stacks_header.consensus_hash, - &canonical_stacks_header.anchored_header.block_hash() - ); - if canonical_stacks_header.anchored_header.height() >= block.header.chain_length - && canonical_stacks_header.index_block_hash() != block.header.block_id() - { - info!( - "SignCoordinator: our block {} is superceded by block {}", - block.header.block_id(), - canonical_stacks_header.index_block_hash() - ); - return Err(NakamotoNodeError::StacksTipChanged); - } - // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); @@ -850,6 +820,7 @@ impl SignCoordinator { &block.header.consensus_hash, &block.header.block_hash() ); + counters.bump_naka_rejected_blocks(); return Err(NakamotoNodeError::SignersRejected); } } else { From 744bc37be88e423ec49b9ab933ef9a571b4840ea Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:39:24 -0400 Subject: [PATCH 166/910] feat: signer-rejected blocks counter --- testnet/stacks-node/src/run_loop/neon.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 663c14e27ba..ecf541a0dea 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -104,6 +104,7 @@ pub struct Counters { pub naka_submitted_vrfs: RunLoopCounter, pub naka_submitted_commits: RunLoopCounter, pub naka_mined_blocks: RunLoopCounter, + pub naka_rejected_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub 
naka_mined_tenures: RunLoopCounter, @@ -168,6 +169,10 @@ impl Counters { Counters::inc(&self.naka_proposed_blocks); } + pub fn bump_naka_rejected_blocks(&self) { + Counters::inc(&self.naka_rejected_blocks); + } + pub fn bump_naka_mined_tenures(&self) { Counters::inc(&self.naka_mined_tenures); } From 30603691824234fc7434db15790b96c1478e4bf3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:39:38 -0400 Subject: [PATCH 167/910] chore: don't crash if we ask for /v2/info while the node is re-binding on the nakamoto transition --- testnet/stacks-node/src/tests/neon_integrations.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ac6a3ea978c..8a1a08b5dc3 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -694,7 +694,10 @@ pub fn run_until_burnchain_height( if !next_result { return false; } - let tip_info = get_chain_info(&conf); + let Ok(tip_info) = get_chain_info_result(&conf) else { + sleep_ms(1000); + continue; + }; current_height = tip_info.burn_block_height; } From 1bd92ba3fb53e4bc6b43834aaf3432b1adaff5ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:39:57 -0400 Subject: [PATCH 168/910] chore: track rejected blocks --- testnet/stacks-node/src/tests/signer/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5d5f8d0c433..3c5aec785e2 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -85,6 +85,7 @@ pub struct RunningNodes { pub blocks_processed: Arc<AtomicU64>, pub nakamoto_blocks_proposed: Arc<AtomicU64>, pub nakamoto_blocks_mined: Arc<AtomicU64>, + pub nakamoto_blocks_rejected: Arc<AtomicU64>, pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc<Mutex<CoordinatorChannels>>, pub conf: NeonConfig, @@ -747,6 +748,7 @@ fn setup_stx_btc_node<G: FnMut(&mut NeonConfig) -> ()>( naka_submitted_commits: commits_submitted, naka_proposed_blocks: naka_blocks_proposed, naka_mined_blocks: naka_blocks_mined, + naka_rejected_blocks: naka_blocks_rejected, naka_skip_commit_op: nakamoto_test_skip_commit_op, .. 
} = run_loop.counters(); @@ -780,6 +782,7 @@ fn setup_stx_btc_node ()>( blocks_processed: blocks_processed.0, nakamoto_blocks_proposed: naka_blocks_proposed.0, nakamoto_blocks_mined: naka_blocks_mined.0, + nakamoto_blocks_rejected: naka_blocks_rejected.0, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, From 3ca62eec07fb5df86e03ea9f4d391034effd29a0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:40:11 -0400 Subject: [PATCH 169/910] fix: get some of the forking tests to work --- testnet/stacks-node/src/tests/signer/v0.rs | 127 ++++++++++++++++++--- 1 file changed, 112 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 69d4bc68da9..34aeafdc315 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -32,7 +32,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; -use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; @@ -46,6 +46,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::bitvec::BitVec; +use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; @@ -57,7 +58,7 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ @@ -776,6 +777,9 @@ fn forked_tenure_testing( |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; + // don't allow signers to post signed blocks (limits the amount of fault injection we + // need) + config.broadcast_signed_blocks = false; }, |_| {}, &[], @@ -783,6 +787,7 @@ fn forked_tenure_testing( let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); + sleep_ms(1000); info!("------------------------- Reached Epoch 3.0 -------------------------"); let naka_conf = signer_test.running_nodes.conf.clone(); @@ -799,8 +804,14 @@ fn forked_tenure_testing( let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let proposed_blocks = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); + let rejected_blocks = signer_test.running_nodes.nakamoto_blocks_rejected.clone(); + let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); - 
info!("Starting tenure A."); + info!("Starting Tenure A."); // In the next block, the miner should win the tenure and submit a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -810,20 +821,30 @@ fn forked_tenure_testing( || { let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }, ) .unwrap(); + sleep_ms(1000); + let tip_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted TEST_BROADCAST_STALL.lock().unwrap().replace(true); + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); let commits_before = commits_submitted.load(Ordering::SeqCst); - info!("Starting tenure B."); + + info!("Starting Tenure B."); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -837,6 +858,7 @@ fn forked_tenure_testing( info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. + // However, do not allow B to be processed just yet signer_test .running_nodes .nakamoto_test_skip_commit_op @@ -858,13 +880,52 @@ fn forked_tenure_testing( info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcon block, and un-stall block commits."); thread::sleep(post_btc_block_pause); - let tip_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + + // the block will be stored, not processed, so load it out of staging + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("Failed to get sortition tip"); + + let tip_b_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() + .get(0) + .cloned() .unwrap(); + + // synthesize a StacksHeaderInfo from this unprocessed block + let tip_b = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), + microblock_tail: None, + stacks_block_height: tip_b_block.header.chain_length.into(), + index_root: tip_b_block.header.state_index_root.clone(), + consensus_hash: tip_b_block.header.consensus_hash.clone(), + burn_header_hash: tip_sn.burn_header_hash.clone(), + burn_header_height: tip_sn.block_height as u32, + burn_header_timestamp: tip_sn.burn_header_timestamp, + anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, + burn_view: Some(tip_b_block.header.consensus_hash.clone()), + }; + let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_b = blocks.last().unwrap().clone(); - info!("Starting tenure C."); + // Block B was built atop block A + assert_eq!(tip_b.stacks_block_height, tip_a.stacks_block_height + 1); + assert_eq!( + mined_b.parent_block_id, + tip_a.index_block_hash().to_string() + ); + assert_ne!(tip_b, tip_a); + + if !expect_tenure_c { + // allow B to process, so it'll be distinct from C + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); + sleep_ms(1000); + } + + info!("Starting Tenure C."); + // Submit a block commit op for tenure C let 
commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = if expect_tenure_c { @@ -872,6 +933,8 @@ fn forked_tenure_testing( } else { proposed_blocks.load(Ordering::SeqCst) }; + let rejected_before = rejected_blocks.load(Ordering::SeqCst); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -883,17 +946,38 @@ fn forked_tenure_testing( .lock() .unwrap() .replace(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); - let blocks_count = if expect_tenure_c { - mined_blocks.load(Ordering::SeqCst) + if commits_count > commits_before { + // now allow block B to process. + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); + } + let rejected_count = rejected_blocks.load(Ordering::SeqCst); + let (blocks_count, rbf_count, has_reject_count) = if expect_tenure_c { + // if tenure C is going to be canonical, then we expect the miner to RBF its commit + // once (i.e. for the block it mines and gets signed), and we expect zero + // rejections. + (mined_blocks.load(Ordering::SeqCst), 1, true) } else { - proposed_blocks.load(Ordering::SeqCst) + // if tenure C is NOT going to be canonical, then we expect no RBFs (since the + // miner can't get its block signed), and we expect at least one rejection + ( + proposed_blocks.load(Ordering::SeqCst), + 0, + rejected_count > rejected_before, + ) }; - Ok(commits_count > commits_before && blocks_count > blocks_before) + + Ok(commits_count > commits_before + rbf_count + && blocks_count > blocks_before + && has_reject_count) }, ) .unwrap(); + // allow blocks B and C to be processed + sleep_ms(1000); + info!("Tenure C produced (or proposed) a block!"); let tip_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() @@ -902,6 +986,9 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); + assert_eq!(tip_b, tip_c); + assert_ne!(tip_c, tip_a); + let (tip_c_2, mined_c_2) = if !expect_tenure_c { (None, None) } else { @@ -922,6 +1009,9 @@ fn forked_tenure_testing( thread::sleep(Duration::from_secs(1)); } + // give C's second block a moment to process + sleep_ms(1000); + info!("Tenure C produced a second block!"); let block_2_tenure_c = @@ -933,7 +1023,11 @@ fn forked_tenure_testing( (Some(block_2_tenure_c), Some(block_2_c)) }; - info!("Starting tenure D."); + // allow block C2 to be processed + sleep_ms(1000); + + info!("Starting Tenure D."); + // Submit a block commit op for tenure D and mine a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -948,6 +1042,9 @@ fn forked_tenure_testing( ) .unwrap(); + // allow block D to be processed + sleep_ms(1000); + let tip_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); @@ -1247,7 +1344,7 @@ fn multiple_miners() { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } - + let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); @@ -1255,13 +1352,13 @@ fn multiple_miners() { "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", &info_1, &info_2 ); - + signer_test.mine_block_wait_on_processing( &[&rl1_coord_channels, &rl2_coord_channels], &[&rl1_commits, &rl2_commits], Duration::from_secs(30), ); - + btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block 
per tenure From 40d96e839e88af57a09c0c24fb27f701c95fe805 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 11:31:16 -0400 Subject: [PATCH 170/910] fix: assert_ne, not assert_eq --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 34aeafdc315..11625e01bfd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -986,7 +986,7 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); - assert_eq!(tip_b, tip_c); + assert_ne!(tip_b, tip_c); assert_ne!(tip_c, tip_a); let (tip_c_2, mined_c_2) = if !expect_tenure_c { From e0773044a6135c7925f5358d5c488068bd50385b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 9 Aug 2024 13:39:18 -0500 Subject: [PATCH 171/910] ci: attempt to fix naka mock mining test --- .../stacks-node/src/run_loop/boot_nakamoto.rs | 7 +++- .../src/tests/nakamoto_integrations.rs | 40 +++++++++---------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 97bf8dd4e00..0f6c3d6388b 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -160,7 +160,12 @@ impl BootRunLoop { info!("Shutting down epoch-2/3 transition thread"); return; } - info!("Reached Epoch-3.0 boundary, starting nakamoto node"); + + info!( + "Reached Epoch-3.0 boundary, starting nakamoto node"; + "with_neon_data" => data_to_naka.is_some(), + "with_p2p_stack" => data_to_naka.as_ref().map(|x| x.peer_network.is_some()).unwrap_or(false) + ); termination_switch.store(true, Ordering::SeqCst); let naka = NakaRunLoop::new( self.config.clone(), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 62afa03ac42..f3cf76af04b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7127,19 +7127,31 @@ fn mock_mining() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - let tenure_count = 5; - let inter_blocks_per_tenure = 9; + let tenure_count = 3; + let inter_blocks_per_tenure = 3; // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let localhost = "127.0.0.1"; + naka_conf.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + naka_conf.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + naka_conf.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + naka_conf.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.add_initial_balance( 
PrincipalData::from(sender_addr.clone()).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, @@ -7212,11 +7224,7 @@ fn mock_mining() { blind_signer(&naka_conf, &signers, proposals_submitted); // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let mut follower_conf = naka_conf.clone(); follower_conf.node.mock_mining = true; follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + follower_conf.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + follower_conf.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + follower_conf.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( From b9d332b471424a8197746e2495f72da4ae9df0c0 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 5 Aug 2024 13:02:43 -0400 Subject: [PATCH 172/910] fix: Warnings after Rust 1.80 update --- clarity/Cargo.toml | 2 ++ clarity/src/vm/database/key_value_wrapper.rs | 16 ++++++++-------- stacks-common/Cargo.toml | 4 ++++ stacks-common/src/deps_common/bech32/mod.rs | 16 ++++++++-------- stacks-common/src/deps_common/httparse/mod.rs | 1 - stackslib/src/clarity_cli.rs | 3 --- testnet/stacks-node/Cargo.toml | 1 + 7 files changed, 23 insertions(+), 20 deletions(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 2e35c064733..284e856e498 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -55,3 +55,5 @@ developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] testing = ["canonical"] devtools = [] +rollback_value_check = [] +disable-costs = [] diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 69eb74b39ed..3fd845f92ff 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -31,15 +31,15 @@ use crate::vm::types::{ }; use crate::vm::{StacksEpoch, Value}; -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] type RollbackValueCheck = String; -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] type RollbackValueCheck = (); -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] fn rollback_value_check(_value: &str, _check: &RollbackValueCheck) {} -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] fn rollback_edits_push<T>(edits: &mut 
Vec<(T, RollbackValueCheck)>, key: T, _value: &str) { edits.push((key, ())); } @@ -47,7 +47,7 @@ fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, _val // wrapper -- i.e., when committing to the underlying store. for the _unchecked_ implementation // this is used to get the edit _value_ out of the lookupmap, for used in the subsequent `put_all` // command. -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] fn rollback_check_pre_bottom_commit<T>( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap<T, Vec<String>>, @@ -71,11 +71,11 @@ where output } -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] fn rollback_value_check(value: &String, check: &RollbackValueCheck) { assert_eq!(value, check) } -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] fn rollback_edits_push<T>(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &String) where T: Eq + Hash + Clone, { } // this function is used to check the lookup map when committing at the "bottom" of the // wrapper -- i.e., when committing to the underlying store. -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] fn rollback_check_pre_bottom_commit<T>( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap<T, Vec<String>>, diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 57c1407fa8c..b91f63ff999 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -75,6 +75,10 @@ canonical = ["rusqlite"] developer-mode = [] slog_json = ["slog-json"] testing = ["canonical"] +serde = [] +clippy = [] +bech32_std = [] +bech32_strict = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stacks-common/src/deps_common/bech32/mod.rs b/stacks-common/src/deps_common/bech32/mod.rs index 99f95e9cd6b..655f2b1a822 100644 --- a/stacks-common/src/deps_common/bech32/mod.rs +++ b/stacks-common/src/deps_common/bech32/mod.rs @@ -30,7 +30,7 @@ //! has more details. //! 
#![cfg_attr( - feature = "std", + feature = "bech32_std", doc = " # Examples ``` @@ -54,20 +54,20 @@ assert_eq!(variant, Variant::Bech32); #![deny(non_camel_case_types)] #![deny(non_snake_case)] #![deny(unused_mut)] -#![cfg_attr(feature = "strict", deny(warnings))] +#![cfg_attr(feature = "bech32_strict", deny(warnings))] -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] extern crate alloc; -#[cfg(any(test, feature = "std"))] +#[cfg(any(test, feature = "bech32_std"))] extern crate core; -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] use alloc::borrow::Cow; -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] use alloc::{string::String, vec::Vec}; use core::{fmt, mem}; -#[cfg(any(feature = "std", test))] +#[cfg(any(feature = "bech32_std", test))] use std::borrow::Cow; /// Integer in the range `0..32` @@ -690,7 +690,7 @@ impl fmt::Display for Error { } } -#[cfg(any(feature = "std", test))] +#[cfg(any(feature = "bech32_std", test))] impl std::error::Error for Error { fn description(&self) -> &str { match *self { diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index 90a08bf2f1c..67ca2c52cdd 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -22,7 +22,6 @@ #![cfg_attr(test, deny(warnings))] // we can't upgrade while supporting Rust 1.3 #![allow(deprecated)] -#![cfg_attr(httparse_min_2018, allow(rust_2018_idioms))] //! # httparse //! diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 53b53c36b78..21cf55dea6d 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -106,7 +106,6 @@ macro_rules! 
panic_test { }; } -#[cfg_attr(tarpaulin, skip)] fn print_usage(invoked_by: &str) { eprintln!( "Usage: {} [command] @@ -129,7 +128,6 @@ where command is one of: panic_test!() } -#[cfg_attr(tarpaulin, skip)] fn friendly_expect(input: Result, msg: &str) -> A { input.unwrap_or_else(|e| { eprintln!("{}\nCaused by: {}", msg, e); @@ -137,7 +135,6 @@ fn friendly_expect(input: Result, msg: &str) -> A }) } -#[cfg_attr(tarpaulin, skip)] fn friendly_expect_opt(input: Option, msg: &str) -> A { input.unwrap_or_else(|| { eprintln!("{}", msg); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index ba674dbaac1..aa72f814db8 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -63,3 +63,4 @@ monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stack slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] +testing = [] From d64e443d6ba2f2bf6a2247a4cf24a1ff5ff96096 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 10:43:15 -0400 Subject: [PATCH 173/910] chore: Remove `clippy` feature flag --- stacks-common/Cargo.toml | 1 - stacks-common/src/deps_common/bitcoin/mod.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index b91f63ff999..d5bfeb44e9e 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -76,7 +76,6 @@ developer-mode = [] slog_json = ["slog-json"] testing = ["canonical"] serde = [] -clippy = [] bech32_std = [] bech32_strict = [] diff --git a/stacks-common/src/deps_common/bitcoin/mod.rs b/stacks-common/src/deps_common/bitcoin/mod.rs index 890825ea987..b70da5deb21 100644 --- a/stacks-common/src/deps_common/bitcoin/mod.rs +++ b/stacks-common/src/deps_common/bitcoin/mod.rs @@ -26,8 +26,8 @@ //! // Clippy flags -#![cfg_attr(feature = "clippy", allow(needless_range_loop))] // suggests making a big mess of array newtypes -#![cfg_attr(feature = "clippy", allow(extend_from_slice))] // `extend_from_slice` only available since 1.6 +#![allow(clippy::needless_range_loop)] // suggests making a big mess of array newtypes +#![allow(clippy::extend_from_slice)] // `extend_from_slice` only available since 1.6 // Coding conventions #![deny(non_upper_case_globals)] From b800565f53cd345c47f3b6231eaa57aa02d3ad43 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 9 Aug 2024 14:43:11 -0500 Subject: [PATCH 174/910] ci: add mutants skip --- testnet/stacks-node/src/run_loop/boot_nakamoto.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 0f6c3d6388b..b78d857d59b 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -140,6 +140,10 @@ impl BootRunLoop { naka_loop.start(burnchain_opt, mine_start, None) } + // configuring mutants::skip -- this function is covered through integration tests (this function + // is pretty definitionally an integration, so thats unavoidable), and the integration tests + // do not get counted in mutants coverage. 
+ #[cfg_attr(test, mutants::skip)] fn start_from_neon(&mut self, burnchain_opt: Option<Burnchain>, mine_start: u64) { let InnerLoops::Epoch2(ref mut neon_loop) = self.active_loop else { panic!("FATAL: unexpectedly invoked start_from_neon when active loop wasn't neon"); From 8430618aff74a2c1a1365fba885358a55723c9bf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 16:54:02 -0400 Subject: [PATCH 175/910] fix: fix remaining integration tests (all of which failed due to not waiting around long enough for a Nakamoto block to process) --- .../src/tests/nakamoto_integrations.rs | 6 ++++ testnet/stacks-node/src/tests/signer/v0.rs | 34 +++++++++++++++---- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 22fde687931..9a72b7b57e7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5720,6 +5720,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); info!("Pausing commit ops to trigger a tenure extend."); test_skip_commit_op.0.lock().unwrap().replace(true); @@ -5732,6 +5733,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); // Submit a TX let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); @@ -5766,6 +5768,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -5775,6 +5778,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); info!("Resuming commit ops to mine regular tenures."); test_skip_commit_op.0.lock().unwrap().replace(false); @@ -5802,6 +5806,8 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + + sleep_ms(5_000); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1d1f3bc6387..041e7f373a9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -46,6 +46,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::TrieHash; use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; @@ -541,6 +542,9 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); + // give the system a chance to mine a Nakamoto block + sleep_ms(30_000); + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -626,8 +630,14 @@ fn forked_tenure_invalid() { } let result = forked_tenure_testing(Duration::from_secs(5), Duration::from_secs(7), false); - assert_ne!(result.tip_b, result.tip_a); - assert_eq!(result.tip_b, result.tip_c); + assert_ne!( + result.tip_b.index_block_hash(), + result.tip_a.index_block_hash() + ); + assert_eq!( + result.tip_b.index_block_hash(), + result.tip_c.index_block_hash() + ); assert_ne!(result.tip_c, result.tip_a); // Block B was built atop block A @@ -661,7 +671,10 @@ fn forked_tenure_invalid() { // Tenure D should 
continue progress assert_ne!(result.tip_c, result.tip_d); - assert_ne!(result.tip_b, result.tip_d); + assert_ne!( + result.tip_b.index_block_hash(), + result.tip_d.index_block_hash() + ); assert_ne!(result.tip_a, result.tip_d); // Tenure D builds off of Tenure B @@ -1012,7 +1025,7 @@ fn forked_tenure_testing( anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), microblock_tail: None, stacks_block_height: tip_b_block.header.chain_length.into(), - index_root: tip_b_block.header.state_index_root.clone(), + index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed consensus_hash: tip_b_block.header.consensus_hash.clone(), burn_header_hash: tip_sn.burn_header_hash.clone(), burn_header_height: tip_sn.block_height as u32, @@ -1063,7 +1076,7 @@ fn forked_tenure_testing( let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { - // now allow block B to process. + // now allow block B to process if it hasn't already. TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); } let rejected_count = rejected_blocks.load(Ordering::SeqCst); @@ -1100,7 +1113,11 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); - assert_ne!(tip_b, tip_c); + if expect_tenure_c { + assert_ne!(tip_b.index_block_hash(), tip_c.index_block_hash()); + } else { + assert_eq!(tip_b.index_block_hash(), tip_c.index_block_hash()); + } assert_ne!(tip_c, tip_a); let (tip_c_2, mined_c_2) = if !expect_tenure_c { @@ -1893,6 +1910,9 @@ fn end_of_tenure() { .reward_cycle_to_block_height(final_reward_cycle) - 2; + // give the system a chance to mine a Nakamoto block + sleep_ms(30_000); + info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( long_timeout, @@ -1969,6 +1989,8 @@ fn end_of_tenure() { ) .unwrap(); } + + sleep_ms(10_000); assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); while test_observer::get_burn_blocks() From 6e6e69d47b097454371a9da4a5b22fdf11ad8141 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:31:38 -0400 Subject: [PATCH 176/910] feat: enable test for #4998 --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e14934558a5..84acefd639d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -96,6 +96,7 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in + - tests::signer::v0::signers_broadcast_signed_blocks - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state From f2a3a9998b24fea877329916a7e14bd2c67ae4d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:31:51 -0400 Subject: [PATCH 177/910] chore: make block accept/reject an INFO-level log item --- stackslib/src/net/relay.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 92a1ebb4801..dca8738d3df 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -936,14 +936,14 @@ impl Relayer { staging_db_tx.commit()?; if accepted { - debug!("{}", &accept_msg); + info!("{}", &accept_msg); if let 
Some(coord_comms) = coord_comms { if !coord_comms.announce_new_stacks_block() { return Err(chainstate_error::NetError(net_error::CoordinatorClosed)); } } } else { - debug!("{}", &reject_msg); + info!("{}", &reject_msg); } Ok(accepted) From 87940805270f2a3af182e3f950d9a2f13d3e3e97 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:09 -0400 Subject: [PATCH 178/910] feat: fault injection to ignore a signer signature --- .../src/nakamoto_node/sign_coordinator.rs | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 98090b2082f..87afd617faf 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -52,6 +52,11 @@ use crate::event_dispatcher::STACKER_DB_CHANNEL; use crate::neon::Counters; use crate::Config; +/// Fault injection flag to prevent the miner from seeing enough signer signatures. +/// Used to test that the signers will broadcast a block if it gets enough signatures +#[cfg(test)] +pub static TEST_IGNORE_SIGNERS: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); + /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); @@ -631,6 +636,20 @@ impl SignCoordinator { )) } + /// Do we ignore signer signatures? + #[cfg(test)] + fn fault_injection_ignore_signatures() -> bool { + if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { + return true; + } + false + } + + #[cfg(not(test))] + fn fault_injection_ignore_signatures() -> bool { + false + } + /// Start gathering signatures for a Nakamoto block. /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond @@ -750,6 +769,7 @@ impl SignCoordinator { }) { debug!("SignCoordinator: Found signatures in relayed block"); + counters.bump_naka_signer_pushed_blocks(); return Ok(stored_block.header.signer_signature); } @@ -887,6 +907,21 @@ impl SignCoordinator { .checked_add(signer_entry.weight) .expect("FATAL: total weight signed exceeds u32::MAX"); } + + if Self::fault_injection_ignore_signatures() { + debug!("SignCoordinator: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + continue; + } + debug!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), From aa0cb7441f1155cc09fd8554a73818724074ec65 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:25 -0400 Subject: [PATCH 179/910] feat: counter for signer-pushed blocks --- testnet/stacks-node/src/run_loop/neon.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index ecf541a0dea..5d5ff3653d4 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -107,6 +107,7 @@ pub struct Counters { pub naka_rejected_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub naka_mined_tenures: RunLoopCounter, + 
pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] pub naka_skip_commit_op: TestFlag, @@ -173,6 +174,10 @@ impl Counters { Counters::inc(&self.naka_rejected_blocks); } + pub fn bump_naka_signer_pushed_blocks(&self) { + Counters::inc(&self.naka_signer_pushed_blocks); + } + pub fn bump_naka_mined_tenures(&self) { Counters::inc(&self.naka_mined_tenures); } From 9ea5e1d32811c74069bc810fdd6d5244ce62a480 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:39 -0400 Subject: [PATCH 180/910] chore: push through counter for signer-pushed blocks --- testnet/stacks-node/src/tests/signer/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 3c5aec785e2..a5973569a1d 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -86,6 +86,7 @@ pub struct RunningNodes { pub nakamoto_blocks_proposed: Arc<AtomicU64>, pub nakamoto_blocks_mined: Arc<AtomicU64>, pub nakamoto_blocks_rejected: Arc<AtomicU64>, + pub nakamoto_blocks_signer_pushed: Arc<AtomicU64>, pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc<Mutex<CoordinatorChannels>>, pub conf: NeonConfig, @@ -750,6 +751,7 @@ fn setup_stx_btc_node<G: FnMut(&mut NeonConfig) -> ()>( naka_mined_blocks: naka_blocks_mined, naka_rejected_blocks: naka_blocks_rejected, naka_skip_commit_op: nakamoto_test_skip_commit_op, + naka_signer_pushed_blocks, .. } = run_loop.counters(); @@ -783,6 +785,7 @@ fn setup_stx_btc_node<G: FnMut(&mut NeonConfig) -> ()>( nakamoto_blocks_proposed: naka_blocks_proposed.0, nakamoto_blocks_mined: naka_blocks_mined.0, nakamoto_blocks_rejected: naka_blocks_rejected.0, + nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks.0, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, From 69e1a7649ce0016d7eccff67cb4bc400b59cce59 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:54 -0400 Subject: [PATCH 181/910] feat: add test coverage for #4998 and #5048 --- testnet/stacks-node/src/tests/signer/v0.rs | 111 ++++++++++++++++++++- 1 file changed, 109 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 041e7f373a9..5e366ba488b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -60,6 +60,7 @@ use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ @@ -2134,10 +2135,105 @@ fn retry_on_timeout() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test checks that the signers will broadcast a block once they receive enough signatures. 
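The test that follows drives the `TEST_IGNORE_SIGNERS` flag introduced in PATCH 178 above. Isolated from the node, the fault-injection mechanism is just a process-global `Mutex<Option<bool>>` that tests arm and production paths consult -- a minimal sketch, with the static's name taken from the patch and everything else simplified:

```rust
use std::sync::Mutex;

// Global fault-injection flag: `None` means "not configured", which the
// checker treats the same as `Some(false)` (behave normally).
static TEST_IGNORE_SIGNERS: Mutex<Option<bool>> = Mutex::new(None);

fn fault_injection_ignore_signatures() -> bool {
    *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true)
}

fn main() {
    assert!(!fault_injection_ignore_signatures());
    // A test arms the flag exactly as the patch does, via `Option::replace`.
    let _prev = TEST_IGNORE_SIGNERS.lock().unwrap().replace(true);
    assert!(fault_injection_ignore_signatures());
}
```

In the real coordinator the checking function is additionally compiled to a constant `false` outside `cfg(test)`, so release builds cannot trip the flag.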
+fn signers_broadcast_signed_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + sleep_ms(10_000); + + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + sleep_ms(10_000); + + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let signer_pushed_before = signer_test + .running_nodes + .nakamoto_blocks_signer_pushed + .load(Ordering::SeqCst); + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + + // submit a tx so that the miner will mine a block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + debug!("Transaction sent; waiting for block-mining"); + + let start = Instant::now(); + let duration = 60; + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let signer_pushed = signer_test + .running_nodes + .nakamoto_blocks_signer_pushed + .load(Ordering::SeqCst); + + let info = get_chain_info(&signer_test.running_nodes.conf); + if blocks_mined > blocks_before + && signer_pushed > signer_pushed_before + && info.stacks_tip_height > info_before.stacks_tip_height + { + break; + } + + debug!( + "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", + blocks_mined, + blocks_before, + signer_pushed, + signer_pushed_before, + info.stacks_tip_height, + info_before.stacks_tip_height + ); + + std::thread::sleep(Duration::from_millis(100)); + if start.elapsed() >= Duration::from_secs(duration) { + panic!("Timed out"); + } + } + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behaviour of signers when a sortition is empty. Specifically: /// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - The miner will stop trying to mine once it sees a threshold of signers reject the block /// - The empty sortition will trigger the miner to attempt a tenure extend. 
/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition fn empty_sortition() { @@ -2238,6 +2334,11 @@ fn empty_sortition() { .load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before); + let rejected_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = @@ -2294,8 +2395,14 @@ fn empty_sortition() { info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); } } - // wait until we've found rejections for all the signers - Ok(found_rejections.len() == signer_slot_ids.len()) + let rejections = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + + // wait until we've found rejections for all the signers, and the miner has confirmed that + // the signers have rejected the block + Ok(found_rejections.len() == signer_slot_ids.len() && rejections > rejected_before) }).unwrap(); signer_test.shutdown(); } From c1b6e6061f1a03a00688235e7a3cc03d43ccb42f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 14:22:45 -0400 Subject: [PATCH 182/910] chore: more debug output --- stackslib/src/net/chat.rs | 153 +++++++++++++-------------- stackslib/src/net/connection.rs | 14 +-- stackslib/src/net/neighbors/comms.rs | 7 ++ 3 files changed, 90 insertions(+), 84 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 52622d1e597..5b5f06a6653 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -713,10 +713,9 @@ impl ConversationP2P { } }; if bhh != their_burn_header_hash { - test_debug!( + debug!( "Burn header hash mismatch in preamble: {} != {}", - bhh, - their_burn_header_hash + bhh, their_burn_header_hash ); return true; } @@ -742,18 +741,16 @@ impl ConversationP2P { if my_epoch <= remote_epoch { // remote node supports same epochs we do - test_debug!( - "Remote peer has epoch {}, which is newer than our epoch {}", - remote_epoch, - my_epoch + debug!( + "Remote peer has epoch {}, which is at least as new as our epoch {}", + remote_epoch, my_epoch ); return true; } - test_debug!( + debug!( "Remote peer has old network version {} (epoch {})", - remote_peer_version, - remote_epoch + remote_peer_version, remote_epoch ); // what epoch are we in? @@ -764,10 +761,9 @@ impl ConversationP2P { if cur_epoch <= remote_epoch { // epoch shift hasn't happened yet, and this peer supports the current epoch - test_debug!( + debug!( "Remote peer has epoch {} and current epoch is {}, so still valid", - remote_epoch, - cur_epoch + remote_epoch, cur_epoch ); return true; } @@ -806,11 +802,9 @@ impl ConversationP2P { } if (msg.preamble.peer_version & 0xff000000) != (self.version & 0xff000000) { // major version mismatch - test_debug!( + debug!( "{:?}: Preamble invalid: wrong peer version: {:x} != {:x}", - &self, - msg.preamble.peer_version, - self.version + &self, msg.preamble.peer_version, self.version ); return Err(net_error::InvalidMessage); } @@ -1344,11 +1338,6 @@ impl ConversationP2P { }; if let Some(stackerdb_accept) = stackerdb_accept { - test_debug!( - "{} =?= {}", - &stackerdb_accept.rc_consensus_hash, - &burnchain_view.rc_consensus_hash - ); if stackerdb_accept.rc_consensus_hash == burnchain_view.rc_consensus_hash { // remote peer is in the same reward cycle as us. 
self.update_from_stacker_db_handshake_data(stackerdb_accept); @@ -1435,7 +1424,7 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_chat_neighbors { // never report neighbors if this is disabled by a test - test_debug!( + debug!( "{:?}: Neighbor crawl is disabled; reporting 0 neighbors", &local_peer ); @@ -1672,7 +1661,7 @@ impl ConversationP2P { if self.connection.options.disable_inv_chat { // never reply that we have blocks - test_debug!( + debug!( "{:?}: Disable inv chat -- pretend like we have nothing", network.get_local_peer() ); @@ -1768,7 +1757,7 @@ impl ConversationP2P { if self.connection.options.disable_inv_chat { // never reply that we have blocks - test_debug!( + debug!( "{:?}: Disable inv chat -- pretend like we have nothing", network.get_local_peer() ); @@ -1807,10 +1796,9 @@ impl ConversationP2P { Ok(Some(sn)) => { if !sn.pox_valid { // invalid consensus hash - test_debug!( + debug!( "{:?}: Snapshot {:?} is not on a valid PoX fork", - local_peer, - sn.burn_header_hash + local_peer, sn.burn_header_hash ); return Ok(StacksMessageType::Nack(NackData::new( NackErrorCodes::InvalidPoxFork, @@ -1822,7 +1810,7 @@ impl ConversationP2P { % (burnchain.pox_constants.reward_cycle_length as u64) != 1 { - test_debug!( + debug!( "{:?}: block height ({} - {}) % {} != 1", local_peer, sn.block_height, @@ -1866,10 +1854,9 @@ impl ConversationP2P { } } Ok(None) | Err(db_error::NotFoundError) => { - test_debug!( + debug!( "{:?}: snapshot for consensus hash {} not found", - local_peer, - getpoxinv.consensus_hash + local_peer, getpoxinv.consensus_hash ); Ok(StacksMessageType::Nack(NackData::new( NackErrorCodes::InvalidPoxFork, @@ -1969,9 +1956,29 @@ impl ConversationP2P { ) { Ok(Some(chunk)) => chunk, Ok(None) => { - // request for a stale chunk + // TODO: this is racey + if let Ok(Some(actual_version)) = + stacker_dbs.get_slot_version(&getchunk.contract_id, getchunk.slot_id) + { + // request for a stale chunk + debug!("{:?}: NACK StackerDBGetChunk; version mismatch for requested slot {}.{} for {}. 
Expected {}", local_peer, getchunk.slot_id, getchunk.slot_version, &getchunk.contract_id, actual_version); + if actual_version > getchunk.slot_version { + return Ok(StacksMessageType::Nack(NackData::new( + NackErrorCodes::StaleVersion, + ))); + } else { + return Ok(StacksMessageType::Nack(NackData::new( + NackErrorCodes::FutureVersion, + ))); + } + } + // if we hit a DB error, just treat it as if the DB doesn't exist + debug!( + "{:?}: NACK StackerDBGetChunk; unloadable slot {}.{} for {}", + local_peer, getchunk.slot_id, getchunk.slot_version, &getchunk.contract_id + ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleVersion, + NackErrorCodes::NoSuchDB, ))); } Err(e) => { @@ -2332,14 +2339,16 @@ impl ConversationP2P { Ok(num_recved) => { total_recved += num_recved; if num_recved > 0 { + debug!("{:?}: received {} bytes", self, num_recved); self.stats.last_recv_time = get_epoch_time_secs(); self.stats.bytes_rx += num_recved as u64; } else { + debug!("{:?}: received {} bytes, stopping", self, num_recved); break; } } Err(net_error::PermanentlyDrained) => { - trace!( + debug!( "{:?}: failed to recv on P2P conversation: PermanentlyDrained", self ); @@ -2351,7 +2360,7 @@ impl ConversationP2P { } } } - test_debug!("{:?}: received {} bytes", self, total_recved); + debug!("{:?}: received {} bytes", self, total_recved); Ok(total_recved) } @@ -2379,7 +2388,7 @@ impl ConversationP2P { } } } - test_debug!("{:?}: sent {} bytes", self, total_sent); + debug!("{:?}: sent {} bytes", self, total_sent); Ok(total_sent) } @@ -2470,12 +2479,12 @@ impl ConversationP2P { Ok(handshake_opt) } StacksMessageType::HandshakeAccept(ref data) => { - test_debug!("{:?}: Got HandshakeAccept", &self); + debug!("{:?}: Got HandshakeAccept", &self); self.handle_handshake_accept(network.get_chain_view(), &msg.preamble, data, None) .and_then(|_| Ok(None)) } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { - test_debug!("{:?}: Got StackerDBHandshakeAccept", &self); + debug!("{:?}: Got StackerDBHandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2485,21 +2494,21 @@ impl ConversationP2P { .and_then(|_| Ok(None)) } StacksMessageType::Ping(_) => { - test_debug!("{:?}: Got Ping", &self); + debug!("{:?}: Got Ping", &self); // consume here if unsolicited consume = true; self.handle_ping(network.get_chain_view(), msg) } StacksMessageType::Pong(_) => { - test_debug!("{:?}: Got Pong", &self); + debug!("{:?}: Got Pong", &self); Ok(None) } StacksMessageType::NatPunchRequest(ref nonce) => { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!("{:?}: Got NatPunchRequest({})", &self, nonce); + debug!("{:?}: Got NatPunchRequest({})", &self, nonce); consume = true; let msg = self.handle_natpunch_request(network.get_chain_view(), *nonce); @@ -2509,11 +2518,11 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce); + debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce); Ok(None) } _ => { - test_debug!( + debug!( "{:?}: Got a data-plane message (type {})", &self, msg.payload.get_message_name() @@ -2542,14 +2551,14 @@ impl ConversationP2P { let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { monitoring::increment_msg_counter("p2p_unauthenticated_handshake".to_string()); - test_debug!("{:?}: Got unauthenticated Handshake", &self); + 
debug!("{:?}: Got unauthenticated Handshake", &self); let (reply_opt, handled) = self.handle_handshake(network, msg, false, ibd)?; consume = handled; Ok(reply_opt) } StacksMessageType::HandshakeAccept(ref data) => { if solicited { - test_debug!("{:?}: Got unauthenticated HandshakeAccept", &self); + debug!("{:?}: Got unauthenticated HandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2558,7 +2567,7 @@ impl ConversationP2P { ) .and_then(|_| Ok(None)) } else { - test_debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); + debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); // don't update stats or state, and don't pass back consume = true; @@ -2567,7 +2576,7 @@ impl ConversationP2P { } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { if solicited { - test_debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); + debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2576,7 +2585,7 @@ impl ConversationP2P { ) .and_then(|_| Ok(None)) } else { - test_debug!( + debug!( "{:?}: Unsolicited unauthenticated StackerDBHandshakeAccept", &self ); @@ -2587,14 +2596,14 @@ impl ConversationP2P { } } StacksMessageType::HandshakeReject => { - test_debug!("{:?}: Got unauthenticated HandshakeReject", &self); + debug!("{:?}: Got unauthenticated HandshakeReject", &self); // don't NACK this back just because we were rejected. // But, it's okay to forward this back (i.e. don't consume). Ok(None) } StacksMessageType::Nack(_) => { - test_debug!("{:?}: Got unauthenticated Nack", &self); + debug!("{:?}: Got unauthenticated Nack", &self); // don't NACK back. // But, it's okay to forward this back (i.e. don't consume). @@ -2604,10 +2613,9 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!( + debug!( "{:?}: Got unauthenticated NatPunchRequest({})", - &self, - *nonce + &self, *nonce ); consume = true; let msg = self.handle_natpunch_request(network.get_chain_view(), *nonce); @@ -2617,10 +2625,9 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!( + debug!( "{:?}: Got unauthenticated NatPunchReply({})", - &self, - _m.nonce + &self, _m.nonce ); // it's okay to forward this back (i.e. don't consume) @@ -2855,7 +2862,7 @@ impl ConversationP2P { ibd: bool, ) -> Result, net_error> { let num_inbound = self.connection.inbox_len(); - test_debug!("{:?}: {} messages pending", &self, num_inbound); + debug!("{:?}: {} messages pending", &self, num_inbound); let mut unsolicited = vec![]; for _ in 0..num_inbound { @@ -2888,7 +2895,7 @@ impl ConversationP2P { if let Some(mut reply) = reply_opt.take() { // handler generated a reply. // send back this message to the remote peer. - test_debug!( + debug!( "{:?}: Send control-plane reply type {}", &self, reply.payload.get_message_name() @@ -2904,11 +2911,9 @@ impl ConversationP2P { let _relayers = format!("{:?}", &msg.relayers); let _seq = msg.request_id(); - test_debug!( + debug!( "{:?}: Received message {}, relayed by {}", - &self, - &_msgtype, - &_relayers + &self, &_msgtype, &_relayers ); // Is there someone else waiting for this message? If so, pass it along. 
@@ -2920,33 +2925,27 @@ impl ConversationP2P { &self, _msgtype, _seq ); } else { - test_debug!( + debug!( "{:?}: Try handling message (type {} seq {})", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); if let Some(msg) = self.handle_data_message(network, sortdb, chainstate, msg)? { // this message was unsolicited - test_debug!( + debug!( "{:?}: Did not handle message (type {} seq {}); passing upstream", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); unsolicited.push(msg); } else { // expected and handled the message - test_debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq); + debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq); } } } else { - // no one was waiting for this reply, so just drop it - test_debug!( + // message was passed to the relevant message handle + debug!( "{:?}: Fulfilled pending message request (type {} seq {})", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); } } diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 7986c9294d1..3fbfe408505 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -167,8 +167,9 @@ impl NetworkReplyHandle
{ /// is destroyed in the process). pub fn try_recv(mut self) -> Result, net_error>> { if self.deadline > 0 && self.deadline < get_epoch_time_secs() { - test_debug!( - "Reply deadline {} exceeded (now = {})", + debug!( + "Reply deadline for event {} at {} exceeded (now = {})", + self.socket_event_id, self.deadline, get_epoch_time_secs() ); @@ -234,10 +235,9 @@ impl NetworkReplyHandle
{ None } else { // still have data to send, or we will send more. - test_debug!( + debug!( "Still have data to send, drop_on_success = {}, ret = {}", - drop_on_success, - ret + drop_on_success, ret ); Some(fd) } @@ -957,7 +957,7 @@ impl ConnectionInbox
{ || e.kind() == io::ErrorKind::ConnectionReset { // write endpoint is dead - test_debug!("reader was reset: {:?}", &e); + debug!("reader was reset: {:?}", &e); socket_closed = true; blocked = true; Ok(0) @@ -971,7 +971,7 @@ impl ConnectionInbox
{ total_read += num_read; if num_read > 0 || total_read > 0 { - trace!("read {} bytes; {} total", num_read, total_read); + debug!("read {} bytes; {} total", num_read, total_read); } if num_read > 0 { diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 31c62a1f8f0..8fdf38d87b3 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -466,12 +466,19 @@ impl PeerNetworkComms { Ok(None) => { if let Some(rh) = req_opt { // keep trying + debug!("{:?}: keep polling {}", network.get_local_peer(), naddr); inflight.insert(naddr, rh); } continue; } Err(_e) => { // peer was already marked as dead in the given network set + debug!( + "{:?}: peer {} is dead: {:?}", + network.get_local_peer(), + naddr, + &_e + ); continue; } }; From e167337786b7956ae97045ef08ff255384980a88 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 14:23:29 -0400 Subject: [PATCH 183/910] chore: pass coordinator comms --- stackslib/src/net/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c56..1db6e3ff798 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -65,6 +65,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::{Error as burnchain_error, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::{ConsensusHash, Opcodes}; +use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::Error as coordinator_error; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::boot::{ @@ -626,6 +627,8 @@ pub struct RPCHandlerArgs<'a> { pub fee_estimator: Option<&'a dyn FeeEstimator>, /// tx runtime cost metric pub cost_metric: Option<&'a dyn CostMetric>, + /// coordinator channels + pub coord_comms: Option<&'a CoordinatorChannels>, } impl<'a> RPCHandlerArgs<'a> { @@ -1026,6 +1029,7 @@ pub mod NackErrorCodes { pub const NoSuchDB: u32 = 6; pub const StaleVersion: u32 = 7; pub const StaleView: u32 = 8; + pub const FutureVersion: u32 = 9; } #[derive(Debug, Clone, PartialEq)] From 99f0736d45d0648ea3a6f7c25d696890c57a0cc1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 14:23:44 -0400 Subject: [PATCH 184/910] chore: more debug output --- stackslib/src/net/neighbors/rpc.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stackslib/src/net/neighbors/rpc.rs b/stackslib/src/net/neighbors/rpc.rs index 3a5378803f5..7257b9f4756 100644 --- a/stackslib/src/net/neighbors/rpc.rs +++ b/stackslib/src/net/neighbors/rpc.rs @@ -109,16 +109,22 @@ impl NeighborRPC { Ok(Some(response)) => response, Ok(None) => { // keep trying + debug!("Still waiting for next reply from {}", &naddr); inflight.insert(naddr, (event_id, request_opt)); continue; } Err(NetError::WaitingForDNS) => { // keep trying + debug!( + "Could not yet poll next reply from {}: waiting for DNS", + &naddr + ); inflight.insert(naddr, (event_id, request_opt)); continue; } Err(_e) => { // declare this neighbor as dead by default + debug!("Failed to poll next reply from {}: {:?}", &naddr, &_e); dead.push(naddr); continue; } @@ -201,6 +207,10 @@ impl NeighborRPC { }) })?; + debug!( + "Send request to {} on event {}: {:?}", + &naddr, event_id, &request + ); self.state.insert(naddr, (event_id, Some(request))); Ok(()) } From b3645c7e83827f7e397554545e3db7576dc76e29 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 14:24:03 -0400 
Subject: [PATCH 185/910] chore: more debug output --- stackslib/src/net/p2p.rs | 114 ++++++++++++++++++++------------------- 1 file changed, 59 insertions(+), 55 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795aa..ad75835f269 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -644,11 +644,9 @@ impl PeerNetwork { let (p2p_handle, bound_p2p_addr) = net.bind(my_addr)?; let (http_handle, bound_http_addr) = net.bind(http_addr)?; - test_debug!( + debug!( "{:?}: bound on p2p {:?}, http {:?}", - &self.local_peer, - bound_p2p_addr, - bound_http_addr + &self.local_peer, bound_p2p_addr, bound_http_addr ); self.network = Some(net); @@ -898,6 +896,12 @@ impl PeerNetwork { return Err(e); } Ok(sz) => { + if sz > 0 { + debug!( + "Sent {} bytes on p2p socket {:?} for conversation {:?}", + sz, client_sock, convo + ); + } total_sent += sz; if sz == 0 { break; @@ -1185,7 +1189,7 @@ impl PeerNetwork { let next_event_id = match self.network { None => { - test_debug!("{:?}: network not connected", &self.local_peer); + debug!("{:?}: network not connected", &self.local_peer); return Err(net_error::NotConnected); } Some(ref mut network) => { @@ -1481,7 +1485,7 @@ impl PeerNetwork { (convo.to_neighbor_key(), Some(neighbor)) } None => { - test_debug!( + debug!( "No such neighbor in peer DB, but will ban nevertheless: {:?}", convo.to_neighbor_key() ); @@ -1645,11 +1649,9 @@ impl PeerNetwork { // already connected? if let Some(event_id) = self.get_event_id(&neighbor_key) { - test_debug!( + debug!( "{:?}: already connected to {:?} on event {}", - &self.local_peer, - &neighbor_key, - event_id + &self.local_peer, &neighbor_key, event_id ); return Err(net_error::AlreadyConnected(event_id, neighbor_key.clone())); } @@ -1927,7 +1929,7 @@ impl PeerNetwork { match self.events.get(&peer_key) { None => { // not connected - test_debug!("Could not sign for peer {:?}: not connected", peer_key); + debug!("Could not sign for peer {:?}: not connected", peer_key); Err(net_error::PeerNotConnected) } Some(event_id) => self.sign_for_p2p(*event_id, message_payload), @@ -1947,7 +1949,7 @@ impl PeerNetwork { message_payload, ); } - test_debug!("Could not sign for peer {}: not connected", event_id); + debug!("Could not sign for peer {}: not connected", event_id); Err(net_error::PeerNotConnected) } @@ -1968,7 +1970,7 @@ impl PeerNetwork { message_payload, ); } - test_debug!("Could not sign for peer {}: not connected", event_id); + debug!("Could not sign for peer {}: not connected", event_id); Err(net_error::PeerNotConnected) } @@ -2042,7 +2044,7 @@ impl PeerNetwork { match (self.peers.remove(&event_id), self.sockets.remove(&event_id)) { (Some(convo), Some(sock)) => (convo, sock), (Some(convo), None) => { - test_debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); + debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); self.peers.insert(event_id, convo); return Err(net_error::PeerNotConnected); } @@ -2055,7 +2057,7 @@ impl PeerNetwork { return Err(net_error::PeerNotConnected); } (None, None) => { - test_debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); + debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); return Err(net_error::PeerNotConnected); } }; @@ -2184,7 +2186,7 @@ impl PeerNetwork { ) { Ok((convo_unhandled, alive)) => (convo_unhandled, alive), Err(_e) => { - test_debug!( + debug!( "{:?}: Connection to {:?} failed: {:?}", &self.local_peer, self.get_p2p_convo(*event_id), @@ -2196,7 +2198,7 @@ impl PeerNetwork { }; 
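Most hunks in this patch series swap test_debug! for debug!. The practical difference: test_debug! compiles to a no-op outside test builds, so these messages never appeared in production debug logs, whereas debug! is always compiled in and filtered at runtime (for example via STACKS_LOG_DEBUG=1). Roughly, the macro being retired has this shape (a sketch of its stacks-common definition, not a verbatim copy):

```rust
// Forwards to debug! only under cfg(test) or the "testing" feature;
// in release builds the expansion is empty.
#[macro_export]
macro_rules! test_debug {
    ($($arg:tt)*) => {{
        #[cfg(any(test, feature = "testing"))]
        {
            debug!($($arg)*);
        }
    }};
}
```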
if !alive { - test_debug!( + debug!( "{:?}: Connection to {:?} is no longer alive", &self.local_peer, self.get_p2p_convo(*event_id), @@ -2383,11 +2385,9 @@ impl PeerNetwork { } }; if neighbor.allowed < 0 || (neighbor.allowed as u64) > now { - test_debug!( + debug!( "{:?}: event {} is allowed: {:?}", - &self.local_peer, - event_id, - &nk + &self.local_peer, event_id, &nk ); safe.insert(*event_id); } @@ -2474,17 +2474,19 @@ impl PeerNetwork { let mut relay_handles = std::mem::replace(&mut self.relay_handles, HashMap::new()); for (event_id, handle_list) in relay_handles.iter_mut() { if handle_list.len() == 0 { + debug!("No handles for event {}", event_id); drained.push(*event_id); continue; } - test_debug!( + debug!( "Flush {} relay handles to event {}", handle_list.len(), event_id ); while handle_list.len() > 0 { + debug!("Flush {} relay handles", handle_list.len()); let res = self.with_p2p_convo(*event_id, |_network, convo, client_sock| { if let Some(handle) = handle_list.front_mut() { let (num_sent, flushed) = @@ -2496,12 +2498,9 @@ impl PeerNetwork { } }; - test_debug!( + debug!( "Flushed relay handle to {:?} ({:?}): sent={}, flushed={}", - client_sock, - convo, - num_sent, - flushed + client_sock, convo, num_sent, flushed ); return Ok((num_sent, flushed)); } @@ -2512,6 +2511,7 @@ impl PeerNetwork { Ok(Ok(x)) => x, Ok(Err(_)) | Err(_) => { // connection broken; next list + debug!("Relay handle broken to event {}", event_id); broken.push(*event_id); break; } @@ -2519,7 +2519,7 @@ impl PeerNetwork { if !flushed && num_sent == 0 { // blocked on this peer's socket - test_debug!("Relay handle to event {} is blocked", event_id); + debug!("Relay handle to event {} is blocked", event_id); break; } @@ -2553,7 +2553,7 @@ impl PeerNetwork { /// Return true if we finish, and true if we're throttled fn do_network_neighbor_walk(&mut self, ibd: bool) -> bool { if cfg!(test) && self.connection_opts.disable_neighbor_walk { - test_debug!("neighbor walk is disabled"); + debug!("neighbor walk is disabled"); return true; } @@ -2800,7 +2800,7 @@ impl PeerNetwork { fn need_public_ip(&mut self) -> bool { if !self.public_ip_learned { // IP was given, not learned. 
nothing to do - test_debug!("{:?}: IP address was given to us", &self.local_peer); + debug!("{:?}: IP address was given to us", &self.local_peer); return false; } if self.local_peer.public_ip_address.is_some() @@ -2808,7 +2808,7 @@ impl PeerNetwork { >= get_epoch_time_secs() { // still fresh - test_debug!("{:?}: learned IP address is still fresh", &self.local_peer); + debug!("{:?}: learned IP address is still fresh", &self.local_peer); return false; } let throttle_timeout = if self.local_peer.public_ip_address.is_none() { @@ -2871,7 +2871,7 @@ impl PeerNetwork { match self.do_learn_public_ip() { Ok(b) => { if !b { - test_debug!("{:?}: try do_learn_public_ip again", &self.local_peer); + debug!("{:?}: try do_learn_public_ip again", &self.local_peer); return false; } } @@ -2958,7 +2958,7 @@ impl PeerNetwork { for (_, block, _) in network_result.blocks.iter() { if block_set.contains(&block.block_hash()) { - test_debug!("Duplicate block {}", block.block_hash()); + debug!("Duplicate block {}", block.block_hash()); } block_set.insert(block.block_hash()); } @@ -2966,7 +2966,7 @@ impl PeerNetwork { for (_, mblocks, _) in network_result.confirmed_microblocks.iter() { for mblock in mblocks.iter() { if microblock_set.contains(&mblock.block_hash()) { - test_debug!("Duplicate microblock {}", mblock.block_hash()); + debug!("Duplicate microblock {}", mblock.block_hash()); } microblock_set.insert(mblock.block_hash()); } @@ -4209,7 +4209,7 @@ impl PeerNetwork { } None => { // skip this step -- no DNS client available - test_debug!( + debug!( "{:?}: no DNS client provided; skipping block download", &self.local_peer ); @@ -4315,7 +4315,7 @@ impl PeerNetwork { } None => { // skip this step -- no DNS client available - test_debug!( + debug!( "{:?}: no DNS client provided; skipping block download", &self.local_peer ); @@ -4364,7 +4364,11 @@ impl PeerNetwork { convo.to_neighbor_key(), ), None => { - test_debug!("No such neighbor event={}", event_id); + debug!( + "{:?}: No such neighbor event={}", + self.get_local_peer(), + event_id + ); return None; } }; @@ -4373,10 +4377,9 @@ impl PeerNetwork { let reciprocal_event_id = match self.find_reciprocal_event(event_id) { Some(re) => re, None => { - test_debug!( + debug!( "{:?}: no reciprocal conversation for {:?}", - &self.local_peer, - &neighbor_key + &self.local_peer, &neighbor_key ); return None; } @@ -4390,32 +4393,26 @@ impl PeerNetwork { convo.to_neighbor_key(), ), None => { - test_debug!( + debug!( "{:?}: No reciprocal conversation for {} (event={})", - &self.local_peer, - &neighbor_key, - event_id + &self.local_peer, &neighbor_key, event_id ); return None; } }; if !is_authenticated && !reciprocal_is_authenticated { - test_debug!( + debug!( "{:?}: {:?} and {:?} are not authenticated", - &self.local_peer, - &neighbor_key, - &reciprocal_neighbor_key + &self.local_peer, &neighbor_key, &reciprocal_neighbor_key ); return None; } if !is_outbound && !reciprocal_is_outbound { - test_debug!( + debug!( "{:?}: {:?} and {:?} are not outbound", - &self.local_peer, - &neighbor_key, - &reciprocal_neighbor_key + &self.local_peer, &neighbor_key, &reciprocal_neighbor_key ); return None; } @@ -5196,7 +5193,7 @@ impl PeerNetwork { /// for. 
Add them to our network pingbacks fn schedule_network_pingbacks(&mut self, event_ids: Vec) { if cfg!(test) && self.connection_opts.disable_pingbacks { - test_debug!("{:?}: pingbacks are disabled for testing", &self.local_peer); + debug!("{:?}: pingbacks are disabled for testing", &self.local_peer); return; } @@ -5278,7 +5275,7 @@ impl PeerNetwork { } } - test_debug!( + debug!( "{:?}: have {} pingbacks scheduled", &self.local_peer, self.walk_pingbacks.len() @@ -5580,6 +5577,13 @@ impl PeerNetwork { }; // update cached burnchain view for /v2/info + debug!( + "{:?}: chain view for burn block {} has stacks tip consensus {}", + &self.local_peer, + new_chain_view.burn_block_height, + &new_chain_view.rc_consensus_hash + ); + self.chain_view = new_chain_view; self.chain_view_stable_consensus_hash = new_chain_view_stable_consensus_hash; } @@ -5649,7 +5653,7 @@ impl PeerNetwork { .get_last_selected_anchor_block_txid()? .unwrap_or(Txid([0x00; 32])); - test_debug!( + debug!( "{:?}: chain view is {:?}", &self.get_local_peer(), &self.chain_view From 7c37f89dce9771b774ea7505d76913916873832b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 14:24:13 -0400 Subject: [PATCH 186/910] chore: info accept/reject --- stackslib/src/net/relay.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 5d7ef500994..ff2361cc014 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -663,7 +663,7 @@ impl Relayer { debug!( "Handle incoming Nakamoto block {}/{}", &block.header.consensus_hash, - &block.header.block_hash() + &block.header.block_hash(), ); // do we have this block? don't lock the DB needlessly if so. @@ -745,14 +745,14 @@ impl Relayer { staging_db_tx.commit()?; if accepted { - debug!("{}", &accept_msg); + info!("{}", &accept_msg); if let Some(coord_comms) = coord_comms { if !coord_comms.announce_new_stacks_block() { return Err(chainstate_error::NetError(net_error::CoordinatorClosed)); } } } else { - debug!("{}", &reject_msg); + info!("{}", &reject_msg); } Ok(accepted) From c443f82fbfe45f1669262ac34747e13aae79c386 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 14:24:29 -0400 Subject: [PATCH 187/910] fix: immediately retry sync if a getchunk/putchunk fails due to stale inventory data --- stackslib/src/net/stackerdb/db.rs | 2 + stackslib/src/net/stackerdb/mod.rs | 14 ++++++- stackslib/src/net/stackerdb/sync.rs | 60 +++++++++++++++++++++-------- 3 files changed, 59 insertions(+), 17 deletions(-) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 6cdebb69d96..907ce29cc8e 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -323,6 +323,8 @@ impl<'a> StackerDBTx<'a> { } } + debug!("Reset slot {} of {}", slot_id, smart_contract); + // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; let mut stmt = self.sql_tx.prepare(&qry)?; diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index da3ffa45559..2cf0e0ddfac 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -388,6 +388,8 @@ pub struct StackerDBSync { /// whether or not we should immediately re-fetch chunks because we learned about new chunks /// from our peers when they replied to our chunk-pushes with new inventory state need_resync: 
bool, + /// whether or not the fetched inventory was determined to be stale + stale_inv: bool, /// Track stale neighbors pub(crate) stale_neighbors: HashSet, /// How many attempted connections have been made in the last pass (gets reset) @@ -466,7 +468,9 @@ impl PeerNetwork { Err(e) => { debug!( "{:?}: failed to get chunk versions for {}: {:?}", - self.local_peer, contract_id, &e + self.get_local_peer(), + contract_id, + &e ); // most likely indicates that this DB doesn't exist @@ -475,6 +479,14 @@ impl PeerNetwork { }; let num_outbound_replicas = self.count_outbound_stackerdb_replicas(contract_id) as u32; + + debug!( + "{:?}: inventory for {} has {} outbound replicas; versions are {:?}", + self.get_local_peer(), + contract_id, + num_outbound_replicas, + &slot_versions + ); StacksMessageType::StackerDBChunkInv(StackerDBChunkInvData { slot_versions, num_outbound_replicas, diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index c3e61acbc47..3eb0d86ae63 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -71,6 +71,7 @@ impl StackerDBSync { total_pushed: 0, last_run_ts: 0, need_resync: false, + stale_inv: false, stale_neighbors: HashSet::new(), num_connections: 0, num_attempted_connections: 0, @@ -210,6 +211,7 @@ impl StackerDBSync { self.write_freq = config.write_freq; self.need_resync = false; + self.stale_inv = false; self.last_run_ts = get_epoch_time_secs(); self.state = StackerDBSyncState::ConnectBegin; @@ -253,7 +255,7 @@ impl StackerDBSync { .get_slot_write_timestamps(&self.smart_contract_id)?; if local_slot_versions.len() != local_write_timestamps.len() { - let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}); abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len()); + let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}) for {}; abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len(), &self.smart_contract_id); warn!("{}", &msg); return Err(net_error::Transient(msg)); } @@ -267,12 +269,13 @@ impl StackerDBSync { let write_ts = local_write_timestamps[i]; if write_ts + self.write_freq > now { debug!( - "{:?}: Chunk {} was written too frequently ({} + {} >= {}), so will not fetch chunk", + "{:?}: Chunk {} was written too frequently ({} + {} >= {}) in {}, so will not fetch chunk", network.get_local_peer(), i, write_ts, self.write_freq, - now + now, + &self.smart_contract_id, ); continue; } @@ -340,10 +343,11 @@ impl StackerDBSync { schedule.reverse(); debug!( - "{:?}: Will request up to {} chunks for {}", + "{:?}: Will request up to {} chunks for {}. Schedule: {:?}", network.get_local_peer(), &schedule.len(), &self.smart_contract_id, + &schedule ); Ok(schedule) } @@ -507,12 +511,13 @@ impl StackerDBSync { if *old_version < new_inv.slot_versions[old_slot_id] { // remote peer indicated that it has a newer version of this chunk. 
debug!( - "{:?}: peer {:?} has a newer version of slot {} ({} < {})", + "{:?}: peer {:?} has a newer version of slot {} ({} < {}) in {}", _network.get_local_peer(), &naddr, old_slot_id, old_version, - new_inv.slot_versions[old_slot_id] + new_inv.slot_versions[old_slot_id], + &self.smart_contract_id, ); resync = true; break; @@ -621,9 +626,10 @@ impl StackerDBSync { self.replicas = replicas; } debug!( - "{:?}: connect_begin: establish StackerDB sessions to {} neighbors", + "{:?}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", network.get_local_peer(), - self.replicas.len() + self.replicas.len(), + network.get_num_p2p_convos() ); if self.replicas.len() == 0 { // nothing to do @@ -820,9 +826,10 @@ impl StackerDBSync { } StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us with code {}", + "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us (on {}) with code {}", &network.get_local_peer(), &naddr, + &self.smart_contract_id, data.error_code ); self.connected_replicas.remove(&naddr); @@ -838,9 +845,10 @@ impl StackerDBSync { } }; debug!( - "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}", + "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}", network.get_local_peer(), - &naddr + &naddr, + &chunk_inv_opt ); if let Some(chunk_inv) = chunk_inv_opt { @@ -956,14 +964,17 @@ impl StackerDBSync { StacksMessageType::StackerDBChunk(data) => data, StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk with code {}", + "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk (on {}) with code {}", network.get_local_peer(), &naddr, + &self.smart_contract_id, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView { self.stale_neighbors.insert(naddr); + } else if data.error_code == NackErrorCodes::StaleVersion { + // try again immediately, without throttling + self.stale_inv = true; } continue; } @@ -1068,7 +1079,6 @@ impl StackerDBSync { &selected_neighbor, &e ); - self.connected_replicas.remove(&selected_neighbor); continue; } @@ -1107,7 +1117,6 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView { self.stale_neighbors.insert(naddr); } @@ -1199,8 +1208,11 @@ impl StackerDBSync { let done = self.connect_begin(network)?; if done { self.state = StackerDBSyncState::ConnectFinish; - blocked = false; + } else { + // no replicas; try again + self.state = StackerDBSyncState::Finished; } + blocked = false; } StackerDBSyncState::ConnectFinish => { let done = self.connect_try_finish(network)?; @@ -1248,6 +1260,11 @@ impl StackerDBSync { { // someone pushed newer chunk data to us, and getting chunks is // enabled, so immediately go request them + debug!( + "{:?}: immediately retry StackerDB GetChunks on {} due to PushChunk NACK", + network.get_local_peer(), + &self.smart_contract_id + ); self.recalculate_chunk_request_schedule(network)?; self.state = StackerDBSyncState::GetChunks; } else { @@ -1259,8 +1276,19 @@ impl StackerDBSync { } } StackerDBSyncState::Finished => { + let stale_inv = self.stale_inv; + let result = self.reset(Some(network), config); self.state = StackerDBSyncState::ConnectBegin; + + if stale_inv { + debug!( + "{:?}: immediately retry StackerDB sync on {} due to stale inventory", + network.get_local_peer(), + &self.smart_contract_id + ); + self.wakeup(); + } return 
Ok(Some(result)); } }; From 545f61b56bab2351ccef7ebb2c7f53cef2ad4703 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 16:01:07 -0400 Subject: [PATCH 188/910] fix: upgrade time crate to 0.3.36 to fix compatibility with rust 1.80 --- Cargo.lock | 14 +++++++------- stacks-common/src/address/c32.rs | 2 +- stacks-common/src/address/c32_old.rs | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa89992f8e5..284bda2e999 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3294,7 +3294,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.34", + "time 0.3.36", ] [[package]] @@ -3307,7 +3307,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.34", + "time 0.3.36", ] [[package]] @@ -3776,9 +3776,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -3788,7 +3788,7 @@ dependencies = [ "powerfmt", "serde", "time-core", - "time-macros 0.2.17", + "time-macros 0.2.18", ] [[package]] @@ -3809,9 +3809,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", diff --git a/stacks-common/src/address/c32.rs b/stacks-common/src/address/c32.rs index a8c26632f88..a31d0722feb 100644 --- a/stacks-common/src/address/c32.rs +++ b/stacks-common/src/address/c32.rs @@ -360,7 +360,7 @@ fn c32_check_decode(check_data_unsanitized: &str) -> Result<(u8, Vec), Error } pub fn c32_address_decode(c32_address_str: &str) -> Result<(u8, Vec), Error> { - if c32_address_str.len() <= 5 { + if !c32_address_str.is_ascii() || c32_address_str.len() <= 5 { Err(Error::InvalidCrockford32) } else { c32_check_decode(&c32_address_str[1..]) diff --git a/stacks-common/src/address/c32_old.rs b/stacks-common/src/address/c32_old.rs index 37cd599304c..29d441e5c0d 100644 --- a/stacks-common/src/address/c32_old.rs +++ b/stacks-common/src/address/c32_old.rs @@ -221,7 +221,7 @@ fn c32_check_decode(check_data_unsanitized: &str) -> Result<(u8, Vec), Error } pub fn c32_address_decode(c32_address_str: &str) -> Result<(u8, Vec), Error> { - if c32_address_str.len() <= 5 { + if !c32_address_str.is_ascii() || c32_address_str.len() <= 5 { Err(Error::InvalidCrockford32) } else { c32_check_decode(&c32_address_str[1..]) From 7638706004530655578f550b80a7ed97e80e086b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 16:14:04 -0400 Subject: [PATCH 189/910] chore: document all nack codes --- stackslib/src/net/mod.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 1db6e3ff798..776d5645ce1 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1021,14 +1021,23 @@ pub struct NackData { pub error_code: u32, } pub mod NackErrorCodes { + /// A handshake has not yet been completed with the requester pub const HandshakeRequired: u32 = 1; + /// The request depends on a burnchain block that this peer does not recognize pub const NoSuchBurnchainBlock: u32 = 2; + /// The remote peer has exceeded local per-peer 
bandwidth limits pub const Throttled: u32 = 3; + /// The request depends on a PoX fork that this peer does not recognize as canonical pub const InvalidPoxFork: u32 = 4; + /// The message received is not appropriate for the ongoing step in the protocol being executed pub const InvalidMessage: u32 = 5; + /// The StackerDB requested is not known to this node pub const NoSuchDB: u32 = 6; + /// The StackerDB chunk request referred to an older copy of the chunk than this node has pub const StaleVersion: u32 = 7; + /// The remote peer's view of the burnchain is too out-of-date for the protocol to continue pub const StaleView: u32 = 8; + /// The StackerDB chunk request referred to a newer copy of the chunk than this node has pub const FutureVersion: u32 = 9; } From 6f21d1e800c6caac4fa3e8ff32df58207bc89ed7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 12 Aug 2024 16:46:31 -0400 Subject: [PATCH 190/910] fix: remove std feature requirement --- stacks-common/src/deps_common/httparse/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index 5d572585b8e..90a08bf2f1c 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -1280,7 +1280,6 @@ mod tests { ); } - #[cfg(feature = "std")] #[test] fn test_std_error() { use std::error::Error as StdError; From 753a87ec453c0a0cc0a233c2abb630def2284978 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 12 Aug 2024 16:46:38 -0400 Subject: [PATCH 191/910] test: add test with multiple miners mining multiple blocks per tenure --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 251 +++++++++++++++++++++ 2 files changed, 252 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e14934558a5..b46da01d6b6 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -105,6 +105,7 @@ jobs: - tests::nakamoto_integrations::continue_tenure_extend - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners + - tests::signer::v0::multiple_miners_with_nakamoto_blocks # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f589416746c..1ae75fdda33 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2643,3 +2643,254 @@ fn signer_set_rollover() { assert!(signer.stop().is_none()); } } + +#[test] +#[ignore] +// This test involves two miners, each mining tenures of 6 blocks: one tenure-change block plus five interim blocks (the height arithmetic is sketched below).
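The closing assertion of the test below reduces to simple bookkeeping: each bitcoin block (sortition) yields one tenure-change block plus inter_blocks_per_tenure interim blocks. A sketch of that arithmetic (illustrative helper, not part of the patch):

```rust
/// Expected Stacks tip after the run: each bitcoin block produces one
/// tenure-change block plus `inter_blocks_per_tenure` interim blocks.
fn expected_tip(pre_epoch3_height: u64, btc_blocks_mined: u64, inter_blocks_per_tenure: u64) -> u64 {
    pre_epoch3_height + btc_blocks_mined * (inter_blocks_per_tenure + 1)
}
```

With inter_blocks_per_tenure = 5, each tenure contributes 6 blocks, which is the figure in the comment above.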
+fn multiple_miners_with_nakamoto_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr.clone(), + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + 
assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + loop { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + info!( + "Mined interim block {}:{}", + 
btc_blocks_mined, interim_block_ix + ); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash.clone()); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!( + "Miner 1 tenures: {}, Miner 2 tenures: {}", + miner_1_tenures, miner_2_tenures + ); + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + btc_blocks_mined * (inter_blocks_per_tenure + 1) + ); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + signer_test.shutdown(); +} From a574768cadf3d6c3d2d307db273ad223df3a96ff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 13 Aug 2024 08:19:24 -0400 Subject: [PATCH 192/910] Add pre_nakamoto_miner_messaging option to MinerConfig Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 5 +++++ testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4eef0bbdd07..04def859d17 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2332,6 +2332,8 @@ pub struct MinerConfig { pub max_reorg_depth: u64, /// Amount of time while mining in nakamoto to wait for signers to respond to a proposed block pub wait_on_signers: Duration, + /// Whether to send miner messages in Epoch 2.5 through the .miners contract. This is used for testing. 
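The new option is read from an optional TOML field, defaulting to true, and is meant to gate the epoch 2.5 mock-signing reply path introduced in the next patch. A sketch of the two steps reduced to plain values (illustrative names, not the exact stacks-node code):

```rust
/// The file-layer value is optional; an absent field means enabled,
/// matching MinerConfig::default() above.
fn resolve_pre_nakamoto_miner_messaging(file_value: Option<bool>) -> bool {
    file_value.unwrap_or(true)
}

/// The miner replies to mock signatures only when the flag is on and the
/// next block is still in epoch 2.5.
fn should_mock_sign(pre_nakamoto_miner_messaging: bool, target_epoch_is_2_5: bool) -> bool {
    pre_nakamoto_miner_messaging && target_epoch_is_2_5
}
```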
+ pub pre_nakamoto_miner_messaging: bool, } impl Default for MinerConfig { @@ -2362,6 +2364,7 @@ impl Default for MinerConfig { max_reorg_depth: 3, // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), + pre_nakamoto_miner_messaging: true, } } } @@ -2693,6 +2696,7 @@ pub struct MinerConfigFile { pub filter_origins: Option, pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, + pub pre_nakamoto_miner_messaging: Option, } impl MinerConfigFile { @@ -2795,6 +2799,7 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), + pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true) }) } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f589416746c..705cb2e011e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -49,7 +49,6 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -61,7 +60,7 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; From 4e37d0bdaec28f27c2a252a1b6b9226cbc0a8e02 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 13 Aug 2024 15:23:58 -0400 Subject: [PATCH 193/910] Have miners respond to mock signature messages in epoch 2.5 via stackerdb Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 84 +++++++++- testnet/stacks-node/src/config.rs | 2 +- .../src/nakamoto_node/sign_coordinator.rs | 4 + testnet/stacks-node/src/neon_node.rs | 147 +++++++++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 5 +- 5 files changed, 232 insertions(+), 10 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7d411f89b5b..af7c38e22a3 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,9 +88,18 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1 + BlockPushed = 1, + /// Mock message from the miner + MockMinerMessage = 2 }); +#[cfg_attr(test, mutants::skip)] +impl Display for MinerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}({})", self, self.to_u8()) + } +} + impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) @@ -116,7 +125,9 @@ SignerMessageTypePrefix { /// Block Pushed message from miners BlockPushed = 2, /// Mock Signature message from Epoch 2.5 signers - MockSignature = 3 + MockSignature = 3, + /// Mock Pre-Nakamoto message from Epoch 2.5 miners + MockMinerMessage = 4 }); #[cfg_attr(test, mutants::skip)] @@ -160,6 +171,7 @@ impl 
From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, + SignerMessage::MockMinerMessage(_) => SignerMessageTypePrefix::MockMinerMessage, } } } @@ -175,6 +187,8 @@ pub enum SignerMessage { BlockPushed(NakamotoBlock), /// A mock signature from the epoch 2.5 signers MockSignature(MockSignature), + /// A mock message from the epoch 2.5 miners + MockMinerMessage(MockMinerMessage), } impl SignerMessage { @@ -184,7 +198,7 @@ impl SignerMessage { #[cfg_attr(test, mutants::skip)] pub fn msg_id(&self) -> Option { match self { - Self::BlockProposal(_) | Self::BlockPushed(_) => None, + Self::BlockProposal(_) | Self::BlockPushed(_) | Self::MockMinerMessage(_) => None, Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), Self::MockSignature(_) => Some(MessageSlotID::MockSignature), } @@ -201,6 +215,7 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd), SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), + SignerMessage::MockMinerMessage(message) => message.consensus_serialize(fd), }?; Ok(()) } @@ -226,6 +241,10 @@ impl StacksMessageCodec for SignerMessage { let signature = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::MockSignature(signature) } + SignerMessageTypePrefix::MockMinerMessage => { + let message = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockMinerMessage(message) + } }; Ok(message) } @@ -441,6 +460,43 @@ impl StacksMessageCodec for MockSignature { } } +/// A mock message for the stacks node to be used for mock mining messages +/// This is only used by Epoch 2.5 miners to simulate miners responding to mock signatures +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockMinerMessage { + /// The view of the stacks node peer information at the time of the mock signature + pub peer_info: PeerInfo, + /// The burn block height of the miner's tenure + pub tenure_burn_block_height: u64, + /// The chain id for the mock signature + pub chain_id: u32, + /// The mock signatures that the miner received + pub mock_signatures: Vec, +} + +impl StacksMessageCodec for MockMinerMessage { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + self.peer_info.consensus_serialize(fd)?; + write_next(fd, &self.tenure_burn_block_height)?; + write_next(fd, &self.chain_id)?; + write_next(fd, &self.mock_signatures)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let peer_info = PeerInfo::consensus_deserialize(fd)?; + let tenure_burn_block_height = read_next::(fd)?; + let chain_id = read_next::(fd)?; + let mock_signatures = read_next::, _>(fd)?; + Ok(Self { + peer_info, + tenure_burn_block_height, + chain_id, + mock_signatures, + }) + } +} + define_u8_enum!( /// Enum representing the reject code type prefix RejectCodeTypePrefix { @@ -940,4 +996,26 @@ mod test { .expect("Failed to deserialize MockSignData"); assert_eq!(sign_data, deserialized_data); } + + #[test] + fn serde_mock_miner_message() { + let mock_signature_1 = MockSignature { + signature: MessageSignature::empty(), + sign_data: random_mock_sign_data(), + }; + let mock_signature_2 = MockSignature { + signature: MessageSignature::empty(), + sign_data: 
random_mock_sign_data(), + }; + let mock_miner_message = MockMinerMessage { + peer_info: random_peer_data(), + tenure_burn_block_height: thread_rng().next_u64(), + chain_id: thread_rng().gen_range(0..=1), + mock_signatures: vec![mock_signature_1, mock_signature_2], + }; + let serialized_data = mock_miner_message.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize MockSignData"); + assert_eq!(mock_miner_message, deserialized_data); + } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 04def859d17..4528e072221 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2799,7 +2799,7 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), - pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true) + pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6a5f026a16c..b366d931320 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -774,6 +774,10 @@ impl SignCoordinator { debug!("Received mock signature message. Ignoring."); continue; } + SignerMessageV0::MockMinerMessage(_) => { + debug!("Received mock miner message. Ignoring."); + continue; + } }; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 8c3c4ed1799..f3170a1c009 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -147,9 +147,14 @@ use std::thread::JoinHandle; use std::time::Duration; use std::{fs, mem, thread}; +use clarity::boot_util::boot_code_id; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use libsigner::v0::messages::{ + MessageSlotID, MinerSlotID, MockMinerMessage, MockSignature, PeerInfo, SignerMessage, +}; +use libsigner::StackerDBSession; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; @@ -164,10 +169,11 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder, + signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -178,7 +184,6 @@ use stacks::core::mempool::MemPoolDB; use stacks::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; use 
stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; -use stacks::monitoring; use stacks::monitoring::{increment_stx_blocks_mined_counter, update_active_miners_count_gauge}; use stacks::net::atlas::{AtlasConfig, AtlasDB}; use stacks::net::db::{LocalPeer, PeerDB}; @@ -190,6 +195,7 @@ use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; use stacks::util_lib::strings::{UrlString, VecDisplay}; +use stacks::{monitoring, version_string}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, @@ -210,6 +216,7 @@ use crate::burnchains::make_bitcoin_indexer; use crate::chain_data::MinerStats; use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; +use crate::nakamoto_node::sign_coordinator::SignCoordinator; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -2255,6 +2262,133 @@ impl BlockMinerThread { return false; } + /// Read any mock signatures from stackerdb and respond to them + pub fn respond_to_mock_signatures(&mut self) -> Result<(), ChainstateError> { + let miner_config = self.config.get_miner_config(); + if !miner_config.pre_nakamoto_miner_messaging { + debug!("Pre-Nakamoto miner messaging is disabled"); + return Ok(()); + } + + let burn_db_path = self.config.get_burn_db_file_path(); + let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let target_epoch_id = + SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1)? + .expect("FATAL: no epoch defined") + .epoch_id; + if target_epoch_id != StacksEpochId::Epoch25 { + debug!("Mock signing is disabled for non-epoch 2.5 blocks."; + "target_epoch_id" => target_epoch_id.to_string() + ); + return Ok(()); + } + // Retrieve any MockSignatures from stackerdb + let mut mock_signatures = Vec::new(); + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(self.burn_block.block_height) + .expect("BUG: block commit exists before first block height"); + let signers_contract_id = MessageSlotID::MockSignature + .stacker_db_contract(self.config.is_mainnet(), reward_cycle); + // Get the slots for every signer + let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; + let slot_ids: Vec<_> = stackerdbs + .get_signers(&signers_contract_id) + .expect("FATAL: could not get signers from stacker DB") + .into_iter() + .enumerate() + .map(|(slot_id, _)| { + u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range") + }) + .collect(); + let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; + for chunk in chunks { + if let Some(chunk) = chunk { + match MockSignature::consensus_deserialize(&mut chunk.as_slice()) { + Ok(mock_signature) => { + if mock_signature.sign_data.event_burn_block_height + == self.burn_block.block_height + { + mock_signatures.push(mock_signature); + } + } + Err(e) => { + warn!("Failed to deserialize mock signature: {:?}", &e); + continue; + } + } + } + } + info!( + "Miner responding to {} mock signatures", + mock_signatures.len() + ); + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + + let p2p_net = StacksNode::setup_peer_network( + 
&self.config.atlas, + self.burnchain.clone(), + ); + + let server_version = version_string( + "stacks-node", + option_env!("STACKS_NODE_VERSION") + .or(option_env!("CARGO_PKG_VERSION")) + .unwrap_or("0.0.0.0"), + ); + let stacks_tip_height = p2p_net.stacks_tip.height; + let stacks_tip = p2p_net.stacks_tip.block_hash.clone(); + let stacks_tip_consensus_hash = p2p_net.stacks_tip.consensus_hash.clone(); + let pox_consensus = p2p_net.burnchain_tip.consensus_hash.clone(); + let burn_block_height = p2p_net.chain_view.burn_block_height; + + let peer_info = PeerInfo { + burn_block_height, + stacks_tip_consensus_hash, + stacks_tip, + stacks_tip_height, + pox_consensus, + server_version, + }; + + info!("Responding to mock signatures for burn block {:?}", &self.burn_block.block_height; + "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_info.stacks_tip.clone(), + "peer_burn_block_height" => peer_info.burn_block_height, + "pox_consensus" => ?peer_info.pox_consensus.clone(), + "server_version" => peer_info.server_version.clone(), + "chain_id" => self.config.burnchain.chain_id + ); + let message = MockMinerMessage { + peer_info, + tenure_burn_block_height: self.burn_block.block_height, + chain_id: self.config.burnchain.chain_id, + mock_signatures, + }; + let sort_db = SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: failed to open burnchain DB"); + + if let Err(e) = SignCoordinator::send_miners_message( + &miner_config.mining_key.expect("BUG: no mining key"), + &sort_db, + &self.burn_block, + &stackerdbs, + SignerMessage::MockMinerMessage(message), + MinerSlotID::MockMinerMessage, + self.config.is_mainnet(), + &mut miners_stackerdb, + &self.burn_block.consensus_hash, + ) { + warn!("Failed to send mock miner message: {:?}", &e); + } + Ok(()) + } + // TODO: add tests from mutation testing results #4871 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a @@ -3595,7 +3729,14 @@ impl RelayerThread { if let Ok(miner_handle) = thread::Builder::new() .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) - .spawn(move || miner_thread_state.run_tenure()) + .spawn(move || { + let result = miner_thread_state.run_tenure(); + if let Err(e) = miner_thread_state.respond_to_mock_signatures() { + warn!("Failed to respond to mock signatures: {}", e); + } + result + + }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); e diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 705cb2e011e..4b3fea46a04 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -60,9 +60,8 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, - next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, From c6c3aa4dafaecd87dd1af7deb02a16cb9a834dd1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 13 Aug 
2024 15:25:48 -0400 Subject: [PATCH 194/910] Rust fmt Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index f3170a1c009..6cbbd3b9f67 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -173,7 +173,7 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder + signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -3735,8 +3735,7 @@ impl RelayerThread { warn!("Failed to respond to mock signatures: {}", e); } result - - }) + }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); e From 8c68cb284bbf34c8c4537276bcccc80347436654 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 14 Aug 2024 12:57:21 -0400 Subject: [PATCH 195/910] test: add partial_tenure_fork test --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/net/relay.rs | 39 +++ testnet/stacks-node/src/tests/signer/v0.rs | 277 +++++++++++++++++++-- 3 files changed, 301 insertions(+), 16 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b46da01d6b6..52635b7abb6 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -106,6 +106,7 @@ jobs: - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners - tests::signer::v0::multiple_miners_with_nakamoto_blocks + - tests::signer::v0::partial_tenure_fork # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 32dc7d065ab..ab59632ea78 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -70,6 +70,40 @@ pub const MAX_RECENT_MESSAGES: usize = 256; pub const MAX_RECENT_MESSAGE_AGE: usize = 600; // seconds; equal to the expected epoch length pub const RELAY_DUPLICATE_INFERENCE_WARMUP: usize = 128; +#[cfg(any(test, feature = "testing"))] +pub mod fault_injection { + use std::path::Path; + + static IGNORE_BLOCK: std::sync::Mutex<Option<(u64, String)>> = std::sync::Mutex::new(None); + + pub fn ignore_block(height: u64, working_dir: &str) -> bool { + if let Some((ignore_height, ignore_dir)) = &*IGNORE_BLOCK.lock().unwrap() { + let working_dir_path = Path::new(working_dir); + let ignore_dir_path = Path::new(ignore_dir); + + let ignore = *ignore_height == height && working_dir_path.starts_with(ignore_dir_path); + if ignore { + warn!("Fault injection: ignore block at height {}", height); + } + return ignore; + } + false + } + + pub fn set_ignore_block(height: u64, working_dir: &str) { + warn!( + "Fault injection: set ignore block at height {} for working directory {}", + height, working_dir + ); + *IGNORE_BLOCK.lock().unwrap() = Some((height, working_dir.to_string())); + } + + pub fn clear_ignore_block() { + warn!("Fault injection: clear ignore block"); + *IGNORE_BLOCK.lock().unwrap() = None; + } +} + pub
struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, @@ -810,6 +844,11 @@ impl Relayer { &block.header.block_hash() ); + #[cfg(any(test, feature = "testing"))] + if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { + return Ok(false); + } + // do we have this block? don't lock the DB needlessly if so. if chainstate .nakamoto_blocks_db() diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1ae75fdda33..3cf7a148046 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -37,6 +37,7 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; +use stacks::net::relay::fault_injection::{clear_ignore_block, set_ignore_block}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; @@ -49,7 +50,6 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -61,9 +61,8 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, - next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -2797,14 +2796,12 @@ fn multiple_miners_with_nakamoto_blocks() { btc_blocks_mined += 1; // wait for the new block to be processed - loop { + wait_for(60, || { let blocks_processed = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!( "Nakamoto blocks mined: {}", @@ -2822,14 +2819,12 @@ fn multiple_miners_with_nakamoto_blocks() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - loop { + wait_for(60, || { let blocks_processed = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!( "Mined interim block {}:{}", btc_blocks_mined, interim_block_ix @@ -2894,3 +2889,253 @@ fn multiple_miners_with_nakamoto_blocks() { signer_test.shutdown(); } + +#[test] +#[ignore] +// This test involves two miners, 1 and 2. During miner 1's first tenure, miner +// 2 is forced to ignore one of the blocks in that tenure. 
The next time miner +// 2 mines a block, it should attempt to fork the chain at that point. The test +// verifies that the fork is not successful and that miner 1 is able to +// continue mining after this fork attempt. +fn partial_tenure_fork() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + + // All signers are listening to node 1 + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr.clone(), + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}",
&conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let Counters { + naka_mined_blocks: blocks_mined2, + naka_proposed_blocks: blocks_proposed2, + .. + } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0u64; + let mut miner_2_tenures = 0u64; + let mut fork_initiated = false; + let mut min_miner_1_tenures = u64::MAX; + let mut min_miner_2_tenures = u64::MAX; + let mut ignore_block = 0; + + while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + + // Mine a block and wait for it to be processed, unless we are in a + // forked tenure, in which case, just wait for the block proposal + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let miner = if mined_1 > mined_before_1 { 1 } else { 2 }; + + if miner == 1 && miner_1_tenures == 0 { + // Setup miner 2 to ignore a block in this tenure + ignore_block = pre_nakamoto_peer_1_height + + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + + 3; + set_ignore_block(ignore_block, &conf_node_2.node.working_dir); + + // Ensure that miner 2 runs at least one more tenure + min_miner_2_tenures = miner_2_tenures + 1; + fork_initiated = true; + } + if miner == 2 && miner_2_tenures == min_miner_2_tenures { + // This is the forking tenure. Ensure that miner 1 runs one more + // tenure after this to validate that it continues to build off of + // the proper block. 
+ min_miner_1_tenures = miner_1_tenures + 1; + } + + // mine (or attempt to mine) the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2) + }) + .unwrap(); + info!( + "Attempted to mine interim block {}:{}", + btc_blocks_mined, interim_block_ix + ); + } + + if miner == 1 { + miner_1_tenures += 1; + } else { + miner_2_tenures += 1; + } + info!( + "Miner 1 tenures: {}, Miner 2 tenures: {}", + miner_1_tenures, miner_2_tenures + ); + + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + if miner == 1 { + assert_eq!(mined_1, mined_before_1 + inter_blocks_per_tenure + 1); + } else { + if miner_2_tenures < min_miner_2_tenures { + assert_eq!(mined_2, mined_before_2 + inter_blocks_per_tenure + 1); + } else if miner_2_tenures == min_miner_2_tenures { + // If this is the forking tenure, miner 2 should have mined 0 blocks + assert_eq!(mined_2, mined_before_2); + + // Clear the ignore block + clear_ignore_block(); + } + } + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_2_height, ignore_block - 1); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) + ); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + signer_test.shutdown(); +} From dc5b1170c0136ed483b9fe9cf729a668ec0fc351 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 14 Aug 2024 12:58:14 -0400 Subject: [PATCH 196/910] refactor: clean up in block processing stall code --- .../src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 569114aa124..b9c994e3e0f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -41,10 +41,10 @@ use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; use 
crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::nakamoto::fault_injection::*; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::test_stall::*; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d059a96cb63..536819e72a8 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -271,15 +271,14 @@ lazy_static! { } #[cfg(test)] -mod test_stall { - pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex<Option<bool>> = - std::sync::Mutex::new(None); +mod fault_injection { + static PROCESS_BLOCK_STALL: std::sync::Mutex<bool> = std::sync::Mutex::new(false); pub fn stall_block_processing() { - if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + if *PROCESS_BLOCK_STALL.lock().unwrap() { // Do an extra check just so we don't log EVERY time. warn!("Block processing is stalled due to testing directive."); - while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + while *PROCESS_BLOCK_STALL.lock().unwrap() { std::thread::sleep(std::time::Duration::from_millis(10)); } info!("Block processing is no longer stalled due to testing directive."); @@ -287,11 +286,11 @@ mod test_stall { } pub fn enable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + *PROCESS_BLOCK_STALL.lock().unwrap() = true; } pub fn disable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + *PROCESS_BLOCK_STALL.lock().unwrap() = false; } } @@ -1748,7 +1747,7 @@ impl NakamotoChainState { dispatcher_opt: Option<&'a T>, ) -> Result<Option<StacksEpochReceipt>, ChainstateError> { #[cfg(test)] - test_stall::stall_block_processing(); + fault_injection::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = From 78e3189c5780345283083c22f71ca208137b6ffa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:00:24 -0400 Subject: [PATCH 197/910] fix: /v3/block_proposal --- docs/rpc-endpoints.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6163f27b753..eea916a7812 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -427,7 +427,7 @@ Determine whether a given trait is implemented within the specified contract (ei See OpenAPI [spec](./rpc/openapi.yaml) for details. -### POST /v2/block_proposal +### POST /v3/block_proposal Used by miner to validate a proposed Stacks block using JSON encoding.
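The `test_stall` to `fault_injection` refactor in patch 196 reduces the stall switch to a single process-wide `Mutex<bool>`: the `None`/`Some(true)`/`Some(false)` states collapse into plain `true`/`false`, so the default is a valid state and no `== Some(true)` comparisons are needed. Below is a minimal standalone sketch of that gate; the function and static names mirror the patch, but the `main` driver and prints are illustrative assumptions, not stackslib code.

```rust
// Sketch of the fault-injection stall gate, outside stackslib.
use std::sync::Mutex;
use std::thread;
use std::time::Duration;

// One process-wide flag; Mutex::new is const, so no lazy_static is needed.
static PROCESS_BLOCK_STALL: Mutex<bool> = Mutex::new(false);

fn stall_block_processing() {
    // Check once before logging so the common (unstalled) path stays quiet.
    if *PROCESS_BLOCK_STALL.lock().unwrap() {
        println!("Block processing is stalled due to testing directive.");
        // The lock is taken and released on each condition check, so the
        // test thread can flip the flag while this thread sleeps.
        while *PROCESS_BLOCK_STALL.lock().unwrap() {
            thread::sleep(Duration::from_millis(10));
        }
        println!("Block processing is no longer stalled.");
    }
}

fn main() {
    *PROCESS_BLOCK_STALL.lock().unwrap() = true; // enable_process_block_stall()
    let worker = thread::spawn(|| {
        stall_block_processing(); // parks here until the flag is cleared
        println!("block processed");
    });
    thread::sleep(Duration::from_millis(50));
    *PROCESS_BLOCK_STALL.lock().unwrap() = false; // disable_process_block_stall()
    worker.join().unwrap();
}
```

Using `Mutex<bool>` also makes "disabled" the default value (`false`), which is why the refactor can drop the `.replace(true)` calls in favor of plain assignments.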
From 8dcd82b245b6508dedc34d90b7f0f56e58e7daf5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:00:44 -0400 Subject: [PATCH 198/910] feat: use broadcast=1 on block upload and use /v3/ endpoint --- stacks-signer/src/client/stacks_client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 223455c72d6..a2e995ae9cd 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -685,8 +685,9 @@ impl StacksClient { pub fn post_block(&self, block: &NakamotoBlock) -> Result<bool, ClientError> { let response = self .stacks_node_client - .post(format!("{}{}", self.http_origin, postblock_v3::PATH)) + .post(format!("{}{}?broadcast=1", self.http_origin, postblock_v3::PATH)) .header("Content-Type", "application/octet-stream") + .header(AUTHORIZATION, self.auth_password.clone()) .body(block.serialize_to_vec()) .send()?; if !response.status().is_success() { @@ -789,7 +790,7 @@ impl StacksClient { } fn block_proposal_path(&self) -> String { - format!("{}/v2/block_proposal", self.http_origin) + format!("{}/v3/block_proposal", self.http_origin) } fn sortition_info_path(&self) -> String { @@ -814,7 +815,7 @@ impl StacksClient { } fn reward_set_path(&self, reward_cycle: u64) -> String { - format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) + format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } fn fees_transaction_path(&self) -> String { From f3fe79155873640303539d740d47614468a0474a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:01:05 -0400 Subject: [PATCH 199/910] fix: store broadcast timestamp and don't allow it to be overwritten --- stacks-signer/src/signerdb.rs | 65 +++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 86e9928c26c..4964f549d4c 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -242,7 +242,7 @@ CREATE TABLE IF NOT EXISTS blocks ( block_info TEXT NOT NULL, consensus_hash TEXT NOT NULL, signed_over INTEGER NOT NULL, - broadcasted INTEGER NOT NULL, + broadcasted INTEGER, stacks_height INTEGER NOT NULL, burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) @@ -261,7 +261,7 @@ CREATE TABLE IF NOT EXISTS block_signatures ( ) STRICT;"#; static CREATE_INDEXES_2: &str = r#" -CREATE INDEX IF NOT EXISTS block_reward_cycle_and_signature ON block_signatures(signer_signature_hash); +CREATE INDEX IF NOT EXISTS block_signatures_on_signer_signature_hash ON block_signatures(signer_signature_hash); "#; static SCHEMA_1: &[&str] = &[ @@ -479,18 +479,18 @@ impl SignerDb { } /// Insert or replace a block into the database. - /// `hash` is the `signer_signature_hash` of the block. + /// Preserves the `broadcasted` column if replacing an existing block.
pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { let block_json = serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); let signed_over = &block_info.signed_over; - let broadcasted = false; let vote = block_info .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); + let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, @@ -498,7 +498,7 @@ impl SignerDb { "sighash" => %hash, "block_id" => %block_id, "signed" => %signed_over, - "broadcasted" => %broadcasted, + "broadcasted" => ?broadcasted, "vote" => vote ); self.db @@ -553,12 +553,12 @@ impl SignerDb { let qry = "SELECT signature FROM block_signatures WHERE signer_signature_hash = ?1"; let args = params![block_sighash]; let sigs_txt: Vec<String> = query_rows(&self.db, qry, args)?; - let mut sigs = vec![]; - for sig_txt in sigs_txt.into_iter() { - let sig = serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError)?; - sigs.push(sig); - } - Ok(sigs) + sigs_txt + .into_iter() + .map(|sig_txt| { + serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError) + }) + .collect() } /// Mark a block as having been broadcasted @@ -566,27 +566,33 @@ impl SignerDb { &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, + ts: u64 ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = 1 WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; - let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; + let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; - debug!("Marking block {} as broadcasted", block_sighash); + debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; Ok(()) } - /// Is a block broadcasted already - pub fn is_block_broadcasted( + /// Get the timestamp at which the block was broadcasted. + pub fn get_block_broadcasted( &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, - ) -> Result<bool, DBError> { + ) -> Result<Option<u64>, DBError> { let qry = - "SELECT broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; + "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; let args = params![u64_to_sql(reward_cycle)?, block_sighash]; - let broadcasted: i64 = query_row(&self.db, qry, args)?.unwrap_or(0); - Ok(broadcasted != 0) + let Some(broadcasted): Option<i64> = query_row(&self.db, qry, args)?
else { + return Ok(None); + }; + if broadcasted == 0 { + return Ok(None); + } + Ok(u64::try_from(broadcasted).ok()) } } @@ -901,22 +907,29 @@ mod tests { db.insert_block(&block_info_1) .expect("Unable to insert block into db"); - assert!(!db - .is_block_broadcasted( + assert!(db + .get_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash() ) - .unwrap()); + .unwrap() + .is_none()); db.set_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash(), + 12345 ) .unwrap(); - assert!(db - .is_block_broadcasted( + db.insert_block(&block_info_1) + .expect("Unable to insert block into db a second time"); + + assert_eq!(db + .get_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash() ) - .unwrap()); + .unwrap() + .unwrap(), + 12345); } } From c41cf4b455d7039ea31c73db4fc5fe308f417766 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:01:23 -0400 Subject: [PATCH 200/910] refactor: separate signature weights and total weights calculations --- stacks-signer/src/v0/signer.rs | 129 ++++++++++++++++++--------------- 1 file changed, 71 insertions(+), 58 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 78fb181a0f8..1bf94e38b53 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -31,6 +31,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::util::get_epoch_time_secs; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -452,8 +453,7 @@ impl Signer { block_validate_response: &BlockValidateResponse, ) { info!("{self}: Received a block validate response: {block_validate_response:?}"); - let mut signature_opt = None; - let (response, block_info) = match block_validate_response { + let (response, block_info, signature_opt) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; @@ -479,10 +479,10 @@ impl Signer { .sign(&signer_signature_hash.0) .expect("Failed to sign block"); - signature_opt = Some(signature.clone()); ( BlockResponse::accepted(signer_signature_hash, signature), block_info, + Some(signature.clone()) ) } BlockValidateResponse::Reject(block_validate_reject) => { @@ -507,6 +507,7 @@ impl Signer { ( BlockResponse::from(block_validate_reject.clone()), block_info, + None ) } }; @@ -541,35 +542,27 @@ impl Signer { } } - /// Compute the signing weight and total weight, given a list of signatures - fn compute_signature_weight( + /// Compute the signing weight, given a list of signatures + fn compute_signature_signing_weight<'a>( &self, - block_hash: &Sha512Trunc256Sum, - sigs: &[MessageSignature], - ) -> (u32, u32) { - let signing_weight = sigs.iter().fold(0usize, |signing_weight, sig| { - let weight = if let Ok(public_key) = - Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), sig) - { - let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); - let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); - *stacker_weight - } else { - 0 - }; - signing_weight.saturating_add(weight) + addrs: impl Iterator<Item = &'a StacksAddress>, + ) -> u32 { + let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { +
let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + signing_weight.saturating_add(*stacker_weight) }); + u32::try_from(signing_weight) + .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")) + } + /// Compute the total signing weight + fn compute_signature_total_weight(&self) -> u32 { let total_weight = self .signer_weights .values() .fold(0usize, |acc, val| acc.saturating_add(*val)); - ( - u32::try_from(signing_weight) - .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")), - u32::try_from(total_weight) - .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) - ) + u32::try_from(total_weight) + .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) } /// Handle an observed signature from another signer @@ -586,16 +579,34 @@ impl Signer { debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); + // have we broadcasted before? + if let Some(ts) = self + .signer_db + .get_block_broadcasted(self.reward_cycle, block_hash) + .unwrap_or_else(|_| { + panic!("{self}: failed to determine if block {block_hash} was broadcasted") + }) + { + debug!("{self}: have already broadcasted block {} at {}, so will not re-attempt", block_hash, ts); + return; + } + + // recover public key + let Ok(public_key) = + Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) + else { + debug!("{self}: Received unrecoverable signature. Will not store."; + "signature" => %signature, + "block_hash" => %block_hash); + + return; + }; + // authenticate the signature -- it must be signed by one of the stacking set let is_valid_sig = self .signer_addresses .iter() .find(|addr| { - let Ok(public_key) = - Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) - else { - return false; - }; let stacker_address = StacksAddress::p2pkh(true, &public_key); // it only matters that the address hash bytes match @@ -608,18 +619,34 @@ impl Signer { return; } + // signature is valid! store it self.signer_db .add_block_signature(block_hash, signature) .unwrap_or_else(|_| panic!("{self}: Failed to save block signature")); // do we have enough signatures to broadcast? + // i.e. is the threshold reached? let signatures = self .signer_db .get_block_signatures(block_hash) .unwrap_or_else(|_| panic!("{self}: Failed to load block signatures")); + // put signatures in order by signer address (i.e. reward cycle order) + let addrs_to_sigs: HashMap<_, _> = signatures + .into_iter() + .filter_map(|sig| { + let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), &sig) + else { + return None; + }; + let addr = StacksAddress::p2pkh(self.mainnet, &public_key); + Some((addr, sig)) + }) + .collect(); + + let signature_weight = self.compute_signature_signing_weight(addrs_to_sigs.keys()); + let total_weight = self.compute_signature_total_weight(); + let min_weight = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight) .unwrap_or_else(|_| { panic!("{self}: Failed to compute threshold weight for {total_weight}") @@ -634,19 +661,7 @@ impl Signer { } // have enough signatures to broadcast!
- if self - .signer_db - .is_block_broadcasted(self.reward_cycle, block_hash) - .unwrap_or_else(|_| { - panic!("{self}: failed to determine if block {block_hash} was broadcasted") - }) - { - debug!("{self}: will not re-broadcast block {}", block_hash); - return; - } - - let Ok(Some(block_info)) = self + let Ok(Some(mut block_info)) = self .signer_db .block_lookup(self.reward_cycle, block_hash) .map_err(|e| { @@ -658,19 +673,17 @@ impl Signer { return; }; - // put signatures in order by signer address (i.e. reward cycle order) - let addrs_to_sigs: HashMap<_, _> = signatures - .into_iter() - .filter_map(|sig| { - let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), &sig) - else { - return None; - }; - let addr = StacksAddress::p2pkh(self.mainnet, &public_key); - Some((addr, sig)) - }) - .collect(); + // record time at which we reached the threshold + block_info.signed_group = Some(get_epoch_time_secs()); + let _ = self + .signer_db + .insert_block(&block_info) + .map_err(|e| { + warn!("Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e); + e + }); + // collect signatures for the block let signatures: Vec<_> = self .signer_addresses .iter() @@ -697,7 +710,7 @@ impl Signer { if broadcasted { self.signer_db - .set_block_broadcasted(self.reward_cycle, block_hash) + .set_block_broadcasted(self.reward_cycle, block_hash, get_epoch_time_secs()) .unwrap_or_else(|_| { panic!("{self}: failed to determine if block {block_hash} was broadcasted") }); From b9f5291bc7cd7b2503f110f166ad2acd752e3c97 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:01:44 -0400 Subject: [PATCH 201/910] refactor: log errors in deserialization within a closure --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 1d7b2a84142..931a00777bd 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -339,7 +339,16 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { let block_data: Vec<Vec<u8>> = query_rows(self, qry, args)?; Ok(block_data .into_iter() - .filter_map(|block_vec| NakamotoBlock::consensus_deserialize(&mut &block_vec[..]).ok()) + .filter_map(|block_vec| { + NakamotoBlock::consensus_deserialize(&mut &block_vec[..]) + .map_err(|e| { + error!("Failed to deserialize block from DB, likely database corruption"; + "consensus_hash" => %consensus_hash, + "error" => ?e); + e + }) + .ok() + }) .collect()) } From 3aabc4db4e9c2c6619e5ccf66d690985034311a8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:02:10 -0400 Subject: [PATCH 202/910] fix: /v3/ for 3.0-only endpoints --- stackslib/src/net/api/getstackers.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4fd42340708..69961dbe143 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -121,11 +121,11 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/stacker_set/(?P<cycle_num>[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P<cycle_num>[0-9]{1,20})$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/stacker_set/:cycle_num" + "/v3/stacker_set/:cycle_num" } /// Try to decode this request.
@@ -239,7 +239,7 @@ impl StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v2/stacker_set/{cycle_num}"), + format!("/v3/stacker_set/{cycle_num}"), HttpRequestContents::new().for_tip(tip_req), ) .expect("FATAL: failed to construct request from infallible data") From 3dc662115441eee3ed528869562e5f0220794c5e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:02:30 -0400 Subject: [PATCH 203/910] refactor: auth_token --- stackslib/src/net/api/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index d256c15b975..4405f49a25c 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -129,9 +129,9 @@ impl StacksHttp { ); self.register_rpc_endpoint(postblock::RPCPostBlockRequestHandler::new()); self.register_rpc_endpoint(postblock_proposal::RPCBlockProposalRequestHandler::new( - self.block_proposal_token.clone(), + self.auth_token.clone(), )); - self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::default()); + self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::new(self.auth_token.clone())); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); From 2ac05375a4eea4ebbfcf16eeed2e64a7155970b7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:02:46 -0400 Subject: [PATCH 204/910] fix: /v3/ for v3-only endpoints --- stackslib/src/net/api/postblock_proposal.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6c1d5526b5d..043c3165652 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -163,7 +163,7 @@ impl From> for BlockValidateRespons } } -/// Represents a block proposed to the `v2/block_proposal` endpoint for validation +/// Represents a block proposed to the `v3/block_proposal` endpoint for validation #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NakamotoBlockProposal { /// Proposed block @@ -431,11 +431,11 @@ impl HttpRequest for RPCBlockProposalRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/block_proposal$"#).unwrap() + Regex::new(r#"^/v3/block_proposal$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal" + "/v3/block_proposal" } /// Try to decode this request. 
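All of the `/v2` to `/v3` renames in patches 202 through 204 are mechanical, so the easy failure mode is a path regex that silently stops matching. A quick sanity check is to run a handler's pattern against known-good and known-stale paths; below is a sketch assuming the `regex` crate (which the handlers already use), with the stacker-set pattern taken from the getstackers.rs hunk. The driver loop itself is illustrative only.

```rust
// Sketch: exercising the migrated /v3 route pattern. The pattern string is
// the one from getstackers.rs; everything else here is an assumption.
use regex::Regex;

fn main() {
    let re = Regex::new(r"^/v3/stacker_set/(?P<cycle_num>[0-9]{1,20})$").unwrap();
    for path in ["/v3/stacker_set/42", "/v2/stacker_set/42", "/v3/stacker_set/"] {
        match re.captures(path) {
            Some(caps) => println!("{path} -> reward cycle {}", &caps["cycle_num"]),
            None => println!("{path} -> no match (the node would 404)"),
        }
    }
}
```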
From d14466b56c8be2f3333ed9fcbbff0656f613df06 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:03 -0400 Subject: [PATCH 205/910] feat: if broadcast=1 is given, and the auth token is set, then broadcast the block on the p2p network even if we already have it locally --- stackslib/src/net/api/postblock_v3.rs | 57 ++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index df7a7eae73e..4eeb68750e3 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -31,14 +31,26 @@ use crate::net::httpcore::{ use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; +use url::form_urlencoded; + pub static PATH: &'static str = "/v3/blocks/upload/"; #[derive(Clone, Default)] pub struct RPCPostBlockRequestHandler { pub block: Option<NakamotoBlock>, + pub auth: Option<String>, + pub broadcast: Option<bool> } impl RPCPostBlockRequestHandler { + pub fn new(auth: Option<String>) -> Self { + Self { + block: None, + auth, + broadcast: None + } + } + /// Decode a bare block from the body fn parse_postblock_octets(mut body: &[u8]) -> Result<NakamotoBlock, Error> { let block = NakamotoBlock::consensus_deserialize(&mut body).map_err(|e| { @@ -87,6 +99,31 @@ impl HttpRequest for RPCPostBlockRequestHandler { )); } + // if broadcast=1 is set, then the requester must be authenticated + let mut broadcast = false; + let mut authenticated = false; + + // look for authorization header + if let Some(password) = &self.auth { + if let Some(auth_header) = preamble.headers.get("authorization") { + if auth_header != password { + return Err(Error::Http(401, "Unauthorized".into())); + } + authenticated = true; + } + } + + // see if broadcast=1 is set + for (key, value) in form_urlencoded::parse(query.as_ref().unwrap_or(&"").as_bytes()) { + if key == "broadcast" { + broadcast = broadcast || value == "1"; + } + } + + if broadcast && !authenticated { + return Err(Error::Http(401, "Unauthorized".into())); + } + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() { return Err(Error::DecodeError( @@ -97,6 +134,7 @@ impl HttpRequest for RPCPostBlockRequestHandler { let block = Self::parse_postblock_octets(body)?; self.block = Some(block); + self.broadcast = Some(broadcast); Ok(HttpRequestContents::new().query_string(query)) } } @@ -105,6 +143,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { /// Reset internal state fn restart(&mut self) { self.block = None; + self.broadcast = None; } /// Make the response @@ -124,7 +163,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { .with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { let mut handle_conn = sortdb.index_handle_at_tip(); let stacks_tip = network.stacks_tip.block_id(); - Relayer::process_new_nakamoto_block( + Relayer::process_new_nakamoto_block_ext( &network.burnchain, &sortdb, &mut handle_conn, @@ -133,6 +172,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { &block, rpc_args.coord_comms, NakamotoBlockObtainMethod::Uploaded, + self.broadcast.unwrap_or(false) ) }) .map_err(|e| { @@ -186,4 +226,19 @@ impl StacksHttpRequest { ) .expect("FATAL: failed to construct request from infallible data") } + + /// Make a new post-block request, with intent to broadcast + pub fn new_post_block_v3_broadcast(host: PeerHost, block: &NakamotoBlock, auth: &str) -> StacksHttpRequest { + let mut request = StacksHttpRequest::new_for_peer( +
host, + "POST".into(), + PATH.into(), + HttpRequestContents::new() + .query_arg("broadcast".into(), "1".into()) + .payload_stacks(block), + ) + .expect("FATAL: failed to construct request from infallible data"); + request.add_header("authorization".into(), auth.into()); + request + } } From 00d6ca4a7fda46ae1e105429dd19ebd24187f22e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:31 -0400 Subject: [PATCH 206/910] fix: wrong API path --- stackslib/src/net/api/poststackerdbchunk.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs index b3c94206025..affcc8dc1b0 100644 --- a/stackslib/src/net/api/poststackerdbchunk.rs +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -83,7 +83,7 @@ impl HttpRequest for RPCPostStackerDBChunkRequestHandler { } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal/:principal/:contract_name/chunks" + "/v2/stackerdb/:principal/:contract_name/chunks" } /// Try to decode this request. From 0b8bb80a0b965d5b13165316ac2e85835135e0db Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:42 -0400 Subject: [PATCH 207/910] refactor: auth_token --- stackslib/src/net/api/tests/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index f0a537d045e..b02bb53bb80 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -265,7 +265,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_1_config.connection_opts.maximum_call_argument_size = 4096; - peer_1_config.connection_opts.block_proposal_token = Some("password".to_string()); + peer_1_config.connection_opts.auth_token = Some("password".to_string()); peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, @@ -275,7 +275,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_2_config.connection_opts.maximum_call_argument_size = 4096; - peer_2_config.connection_opts.block_proposal_token = Some("password".to_string()); + peer_2_config.connection_opts.auth_token = Some("password".to_string()); // stacker DBs get initialized thru reconfiguration when the above block gets processed peer_1_config.add_stacker_db( From 9d649ac9aa9455fdf85b83313c80919099ef68d6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:56 -0400 Subject: [PATCH 208/910] fix: /v2 to /v3 --- stackslib/src/net/api/tests/postblock_proposal.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 6ab465a683c..391afc949f1 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -69,7 +69,7 @@ fn test_try_parse_request() { let mut request = StacksHttpRequest::new_for_peer( addr.into(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -320,7 +320,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -340,7 +340,7 @@ fn 
test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -360,7 +360,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); From d9170614dde7450b5d3970259c6cf03545589516 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:04:12 -0400 Subject: [PATCH 209/910] feat: test coverage for new broadcast=1 directive --- stackslib/src/net/api/tests/postblock_v3.rs | 81 ++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index e68d334239d..0764953c6ee 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -44,7 +44,7 @@ fn parse_request() { let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); + let mut handler = postblock_v3::RPCPostBlockRequestHandler::new(Some("12345".to_string())); let mut parsed_request = http .handle_try_parse_request( &mut handler, @@ -60,9 +60,36 @@ fn parse_request() { let (preamble, _contents) = parsed_request.destruct(); assert_eq!(&preamble, request.preamble()); + assert_eq!(handler.broadcast, Some(false)); handler.restart(); assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); + + // try to authenticate + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &miner_sk); + let request = StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "12345"); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + parsed_request.clear_headers(); + parsed_request.add_header("authorization".into(), "12345".into()); + let (preamble, _contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + assert_eq!(handler.broadcast, Some(true)); + + handler.restart(); + assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); // try to deal with an invalid block let mut bad_block = block.clone(); @@ -72,7 +99,6 @@ fn parse_request() { let request = StacksHttpRequest::new_post_block_v3(addr.into(), &bad_block); let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); match http.handle_try_parse_request( &mut handler, &parsed_preamble.expect_request(), @@ -83,6 +109,57 @@ fn parse_request() { panic!("worked with bad block"); } } + + handler.restart(); + assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); + + // deal with bad authentication + let request = StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "wrong password"); + let bytes = request.try_serialize().unwrap(); + let 
(parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let bad_response = http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ); + match bad_response { + Err(crate::net::Error::Http(crate::net::http::Error::Http(err_code, message))) => { + assert_eq!(err_code, 401); + assert_eq!(message, "Unauthorized"); + } + x => { + error!("Expected HTTP 401, got {:?}", &x); + panic!("expected error"); + } + } + + handler.restart(); + assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); + + // deal with missing authorization + let mut request = StacksHttpRequest::new_post_block_v3(addr.into(), &block); + let path = request.request_path(); + request.preamble_mut().path_and_query_str = format!("{}?broadcast=1", &path); + + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let bad_response = http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ); + match bad_response { + Err(crate::net::Error::Http(crate::net::http::Error::Http(err_code, message))) => { + assert_eq!(err_code, 401); + assert_eq!(message, "Unauthorized"); + } + x => { + error!("Expected HTTP 401, got {:?}", &x); + panic!("expected error"); + } + } } #[test] From b76641e3f65b94d984d37825ae199a5a024328b9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:04:28 -0400 Subject: [PATCH 210/910] fix: auth_token --- stackslib/src/net/connection.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index d3a77ebc8d9..36b1fc18ff0 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -398,8 +398,8 @@ pub struct ConnectionOptions { /// maximum number of confirmations for a nakamoto block's sortition for which it will be /// pushed pub max_nakamoto_block_relay_age: u64, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option<String>, + /// The authorization token to enable privileged RPC endpoints + pub auth_token: Option<String>, // fault injection /// Disable neighbor walk and discovery @@ -521,7 +521,7 @@ impl std::default::Default for ConnectionOptions { socket_send_buffer_size: 16384, // Linux default private_neighbors: true, max_nakamoto_block_relay_age: 6, - block_proposal_token: None, + auth_token: None, // no faults on by default disable_neighbor_walk: false, From 4b1e50b5416b478b1fef233eaa33fbbb3b2255b9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:04:48 -0400 Subject: [PATCH 211/910] fix: off-by-one calculation in next reward cycle --- .../src/net/download/nakamoto/download_state_machine.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index d1510af9c19..9d30fa50e31 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -774,7 +774,7 @@ impl NakamotoDownloadStateMachine { let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); let sort_rc = sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height) + .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height.saturating_add(1)) .expect("FATAL: burnchain tip is before system start");
let next_sort_rc = if last_sort_height == sort_tip.block_height { @@ -782,13 +782,13 @@ .pox_constants .block_height_to_reward_cycle( sortdb.first_block_height, - sort_tip.block_height.saturating_add(1), + sort_tip.block_height.saturating_add(2), ) .expect("FATAL: burnchain tip is before system start") } else { sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) + .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height.saturating_add(1)) .expect("FATAL: burnchain tip is before system start") }; From 2ac0332b3dc235a38348316643e94caf3cfa31ed Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:06 -0400 Subject: [PATCH 212/910] refactor: set self.idle in one place --- .../src/net/download/nakamoto/tenure_downloader.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 7197adf0b26..74b7084a842 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -643,7 +643,7 @@ impl NakamotoTenureDownloader { &mut self, response: StacksHttpResponse, ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { - match self.state { + let handle_result = match self.state { NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { debug!( "Got download response for tenure-start block {}", &_block_id ); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Invalid tenure-start block: {}", &e); e })?; self.try_accept_tenure_start_block(block)?; - self.idle = true; Ok(None) } NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { debug!("Invalid state -- Got download response for WaitForTenureBlock"); - self.idle = true; Err(NetError::InvalidState) } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { let block = response.decode_nakamoto_block().map_err(|e| { warn!("Invalid tenure-end block: {}", &e); e })?; self.try_accept_tenure_end_block(&block)?; - self.idle = true; Ok(None) } NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { let blocks = response.decode_nakamoto_tenure().map_err(|e| { warn!("Invalid tenure blocks: {}", &e); e })?; let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - self.idle = true; Ok(blocks_opt) } NakamotoTenureDownloadState::Done => { - self.idle = true; Err(NetError::InvalidState) } - } + }; + self.idle = true; + handle_result } pub fn is_done(&self) -> bool { From c1fd6fee874f378144da6512ce97edeac7bff653 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:22 -0400 Subject: [PATCH 213/910] fix: structured logging --- .../nakamoto/tenure_downloader_set.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 337c8d1cd6f..28a40e7eb50 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -214,6 +214,7 @@ impl NakamotoTenureDownloaderSet { /// Returns true if the peer gets scheduled. /// Returns false if not.
pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { + debug!("Try resume {}", &naddr); if let Some(idx) = self.peers.get(&naddr) { let Some(Some(_downloader)) = self.downloaders.get(*idx) else { return false; @@ -426,16 +427,14 @@ impl NakamotoTenureDownloaderSet { count: usize, current_reward_cycles: &BTreeMap, ) { - debug!("schedule: {:?}", schedule); - debug!("available: {:?}", &available); - debug!("tenure_block_ids: {:?}", &tenure_block_ids); - debug!("inflight: {}", self.inflight()); - debug!( - "count: {}, running: {}, scheduled: {}", - count, - self.num_downloaders(), - self.num_scheduled_downloaders() - ); + debug!("make_tenure_downloaders"; + "schedule" => ?schedule, + "available" => ?available, + "tenure_block_ids" => ?tenure_block_ids, + "inflight" => %self.inflight(), + "count" => count, + "running" => self.num_downloaders(), + "scheduled" => self.num_scheduled_downloaders()); self.clear_finished_downloaders(); self.clear_available_peers(); From cddf696d4e0cb2739d58aa094e4f8f88b87c3109 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:38 -0400 Subject: [PATCH 214/910] chore: auth_token --- stackslib/src/net/httpcore.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index dec51df42ae..88ee0365b27 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -888,8 +888,8 @@ pub struct StacksHttp { pub maximum_call_argument_size: u32, /// Maximum execution budget of a read-only call pub read_only_call_limit: ExecutionCost, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option, + /// The authorization token to enable access to privileged features, such as the block proposal RPC endpoint + pub auth_token: Option, } impl StacksHttp { @@ -905,7 +905,7 @@ impl StacksHttp { request_handlers: vec![], maximum_call_argument_size: conn_opts.maximum_call_argument_size, read_only_call_limit: conn_opts.read_only_call_limit.clone(), - block_proposal_token: conn_opts.block_proposal_token.clone(), + auth_token: conn_opts.auth_token.clone(), }; http.register_rpc_methods(); http From b41846b8b004cd48494c255bc7f0ac684930c07a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:50 -0400 Subject: [PATCH 215/910] fix: restart inv sync if burnchain tip changes --- stackslib/src/net/inv/nakamoto.rs | 50 +++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 0a2ea4dc639..3a27c1072d5 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -397,13 +397,19 @@ impl NakamotoTenureInv { if self.start_sync_time + inv_sync_interval <= now && (self.cur_reward_cycle >= cur_rc || !self.online) { - debug!("Reset inv comms for {}", &self.neighbor_address); - self.online = true; - self.start_sync_time = now; - self.cur_reward_cycle = start_rc; + self.reset_comms(start_rc); } } + /// Reset synchronization state for this peer in the last reward cycle. 
+ /// Called as part of processing a new burnchain block + pub fn reset_comms(&mut self, start_rc: u64) { + debug!("Reset inv comms for {}", &self.neighbor_address); + self.online = true; + self.start_sync_time = get_epoch_time_secs(); + self.cur_reward_cycle = start_rc; + } + /// Get the reward cycle we're sync'ing for pub fn reward_cycle(&self) -> u64 { self.cur_reward_cycle @@ -506,6 +512,19 @@ impl NakamotoTenureInv { } } } + + /// Get the burnchain tip reward cycle for purposes of inv sync + fn get_current_reward_cycle(tip: &BlockSnapshot, sortdb: &SortitionDB) -> u64 { + // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but + // .block_height_to_reward_cycle does not account for this. + sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + tip.block_height.saturating_sub(1), + ) + .expect("FATAL: snapshot occurred before system start") + } } /// Nakamoto inventory state machine @@ -593,15 +612,7 @@ impl NakamotoInvStateMachine { .map(|(highest_rc, _)| *highest_rc) .unwrap_or(0); - // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but - // .block_height_to_reward_cycle does not account for this. - let tip_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - tip.block_height.saturating_sub(1), - ) - .expect("FATAL: snapshot occurred before system start"); + let tip_rc = NakamotoTenureInv::get_current_reward_cycle(tip, sortdb); debug!( "Load all reward cycle consensus hashes from {} to {}", @@ -794,7 +805,20 @@ impl NakamotoInvStateMachine { Ok((num_msgs, learned)) } + /// Top-level state machine execution pub fn run(&mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, ibd: bool) -> bool { + // if the burnchain tip has changed, then force all communications to reset for the current + // reward cycle in order to hasten block download + if let Some(last_sort_tip) = self.last_sort_tip.as_ref() { + if last_sort_tip.consensus_hash != network.burnchain_tip.consensus_hash { + debug!("Forcibly restarting all Nakamoto inventory comms due to burnchain tip change ({} != {})", &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash); + let tip_rc = NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); + for inv_state in self.inventories.values_mut() { + inv_state.reset_comms(tip_rc.saturating_sub(1)); + } + } + } + if let Err(e) = self.process_getnakamotoinv_begins(network, sortdb, ibd) { warn!( "{:?}: Failed to begin Nakamoto tenure inventory sync: {:?}", From 21fc76d4d95d9ccdfff0c3871161f0507d8b39eb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:05 -0400 Subject: [PATCH 216/910] feat: allow forced block broadcast --- stackslib/src/net/relay.rs | 51 +++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index dca8738d3df..ff71feda9d2 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -790,11 +790,44 @@ impl Relayer { } Ok(res) } + + /// Wrapper around inner_process_new_nakamoto_block + pub fn process_new_nakamoto_block( + burnchain: &Burnchain, + sortdb: &SortitionDB, + sort_handle: &mut SortitionHandleConn, + chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, + block: &NakamotoBlock, + coord_comms: Option<&CoordinatorChannels>, + obtained_method: NakamotoBlockObtainMethod, + ) -> Result { + Self::process_new_nakamoto_block_ext( + burnchain, + sortdb, 
+ sort_handle, + chainstate, + stacks_tip, + block, + coord_comms, + obtained_method, + false + ) + } /// Insert a staging Nakamoto block that got relayed to us somehow -- e.g. uploaded via http, /// downloaded by us, or pushed via p2p. - /// Return Ok(true) if we stored it, Ok(false) if we didn't - pub fn process_new_nakamoto_block( + /// Return Ok(true) if we should broadcast the block. If force_broadcast is true, then this + /// function will return Ok(true) even if we already have the block. + /// Return Ok(false) if we should not broadcast it (e.g. we already have it, it was invalid, + /// etc.) + /// Return Err(..) in the following cases, beyond DB errors: + /// * If the block is from a tenure we don't recognize + /// * If we're not in the Nakamoto epoch + /// * If the reward cycle info could not be determined + /// * If there was an unrecognized signer + /// * If the coordinator is closed, and `coord_comms` is Some(..) + pub fn process_new_nakamoto_block_ext( burnchain: &Burnchain, sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, @@ -803,6 +836,7 @@ impl Relayer { block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, + force_broadcast: bool ) -> Result { debug!( "Handle incoming Nakamoto block {}/{} obtained via {}", @@ -825,8 +859,16 @@ impl Relayer { e })? { - debug!("Already have Nakamoto block {}", &block.header.block_id()); - return Ok(false); + if force_broadcast { + // it's possible that the signer sent this block to us, in which case, we should + // broadcast it + debug!("Already have Nakamoto block {}, but broadcasting anyway", &block.header.block_id()); + return Ok(true); + } + else { + debug!("Already have Nakamoto block {}", &block.header.block_id()); + return Ok(false); + } } let block_sn = @@ -2541,6 +2583,7 @@ impl Relayer { accepted_blocks: Vec, force_send: bool, ) { + // TODO: we don't relay HTTP-uploaded blocks :( debug!( "{:?}: relay {} sets of Nakamoto blocks", _local_peer, From 62004f92f0f0e4779a834276f57a78be13da4426 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:23 -0400 Subject: [PATCH 217/910] refactor: auth_token --- stackslib/src/net/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 05477bb08c0..8372116ced5 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -352,7 +352,7 @@ impl NakamotoBootPlan { peer_config .initial_balances .append(&mut self.initial_balances.clone()); - peer_config.connection_opts.block_proposal_token = Some("password".to_string()); + peer_config.connection_opts.auth_token = Some("password".to_string()); // Create some balances for test Stackers // They need their stacking amount + enough to pay fees From c9ec4fb9871e0809710088c2258ddd5e52d5254b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:40 -0400 Subject: [PATCH 218/910] chore: auth_token, and fix poll timeout --- testnet/stacks-node/src/config.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f5c7c7bfbd5..256011001d5 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -221,12 +221,12 @@ mod tests { } #[test] - fn should_load_block_proposal_token() { + fn should_load_auth_token() { let config = Config::from_config_file( ConfigFile::from_str( r#" [connection_options] - block_proposal_token = 
"password" + auth_token = "password" "#, ) .unwrap(), @@ -235,7 +235,7 @@ mod tests { .expect("Expected to be able to parse block proposal token from file"); assert_eq!( - config.connection_options.block_proposal_token, + config.connection_options.auth_token, Some("password".to_string()) ); } @@ -1368,7 +1368,7 @@ impl Config { let poll_timeout = if self.node.miner { cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { - 5000 + 1000 }; poll_timeout } @@ -2425,7 +2425,7 @@ pub struct ConnectionOptionsFile { pub force_disconnect_interval: Option, pub antientropy_public: Option, pub private_neighbors: Option, - pub block_proposal_token: Option, + pub auth_token: Option, pub antientropy_retry: Option, } @@ -2551,7 +2551,7 @@ impl ConnectionOptionsFile { max_sockets: self.max_sockets.unwrap_or(800) as usize, antientropy_public: self.antientropy_public.unwrap_or(true), private_neighbors: self.private_neighbors.unwrap_or(true), - block_proposal_token: self.block_proposal_token, + auth_token: self.auth_token, antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), ..default }) From f5df7efe46c3d0dc63885ae509fd779af4e95bbf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:55 -0400 Subject: [PATCH 219/910] refactor: separate fault injection into its own function --- .../stacks-node/src/nakamoto_node/miner.rs | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 43a5c510408..8036389d536 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -678,6 +678,24 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + /// Fault injection -- possibly fail to broadcast + /// Return true to drop the block + fn fault_injection_broadcast_fail(&self) -> bool { + let drop_prob = self + .config + .node + .fault_injection_block_push_fail_probability + .unwrap_or(0) + .min(100); + let will_drop = if drop_prob > 0 { + let throw: u8 = thread_rng().gen_range(0..100); + throw < drop_prob + } else { + false + }; + will_drop + } + /// Store a block to the chainstate, and if successful (it should be since we mined it), /// broadcast it via the p2p network. 
fn broadcast_p2p( @@ -717,25 +735,12 @@ impl BlockMinerThread { } // forward to p2p thread, but do fault injection - let block_id = block.block_id(); - let drop_prob = self - .config - .node - .fault_injection_block_push_fail_probability - .unwrap_or(0) - .min(100); - let will_drop = if drop_prob > 0 { - let throw: u8 = thread_rng().gen_range(0..100); - throw < drop_prob - } else { - false - }; - - if will_drop { - info!("Fault injection: drop block {}", &block_id); + if self.fault_injection_broadcast_fail() { + info!("Fault injection: drop block {}", &block.block_id()); return Ok(()); } + let block_id = block.block_id(); debug!("Broadcasting block {}", &block_id); if let Err(e) = self.p2p_handle.broadcast_message( vec![], From e1046fd8f7a2cdfaec7a4ca86c24df686944f454 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:07:11 -0400 Subject: [PATCH 220/910] fix: use get_poll_time() --- testnet/stacks-node/src/nakamoto_node/peer.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index b825cfe46f3..facb1dd8357 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -190,8 +190,7 @@ impl PeerThread { info!("`PeerNetwork::bind()` skipped, already bound"); } - let poll_timeout = cmp::min(1000, config.miner.first_attempt_time_ms / 2); - + let poll_timeout = config.get_poll_time(); PeerThread { config, net, From 3150d4c092b5922638fed1c26672ccaf64684b70 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:07:35 -0400 Subject: [PATCH 221/910] chore: awaken coordinator thread on new sortition since a stacks block may be buffered --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index add33424ad2..148c80d030e 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -415,7 +415,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no epoch defined for existing sortition"); - if cur_epoch.epoch_id != StacksEpochId::Epoch30 { + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { debug!( "As of sortition {}, there has not yet been a Nakamoto tip. 
Cannot mine.", &stacks_tip_sn.consensus_hash @@ -454,6 +454,9 @@ impl RelayerThread { } self.globals.set_last_sortition(sn.clone()); + // there may be a bufferred stacks block to process, so wake up the coordinator to check + self.globals.coord_comms.announce_new_stacks_block(); + info!( "Relayer: Process sortition"; "sortition_ch" => %consensus_hash, From c436e0e6aca4673314880873ac547f8222de4acf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:01 -0400 Subject: [PATCH 222/910] refactor: info/warn instead of debug, and short return for invalid sig --- .../src/nakamoto_node/sign_coordinator.rs | 81 ++++++++++--------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 87afd617faf..0bba347795b 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -738,22 +738,6 @@ impl SignCoordinator { let start_ts = Instant::now(); while start_ts.elapsed() <= self.signing_round_timeout { - // one of two things can happen: - // * we get enough signatures from stackerdb from the signers, OR - // * we see our block get processed in our chainstate (meaning, the signers broadcasted - // the block and our node got it and processed it) - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), - )) - } - }; - // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold if let Ok(Some((stored_block, _sz))) = chain_state @@ -773,6 +757,22 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } + // one of two things can happen: + // * we get enough signatures from stackerdb from the signers, OR + // * we see our block get processed in our chainstate (meaning, the signers broadcasted + // the block and our node got it and processed it) + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )) + } + }; + // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); @@ -821,33 +821,34 @@ impl SignCoordinator { )); }; if rejected_data.signer_signature_hash - == block.header.signer_signature_hash() + != block.header.signer_signature_hash() + { + debug!("Received rejected block response for a block besides my own. 
Ignoring."); + continue; + } + + debug!( + "Signer {} rejected our block {}/{}", + slot_id, + &block.header.consensus_hash, + &block.header.block_hash() + ); + total_reject_weight = total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + if total_reject_weight.saturating_add(self.weight_threshold) + > self.total_weight { debug!( - "Signer {} rejected our block {}/{}", - slot_id, + "{}/{} signers vote to reject our block {}/{}", + total_reject_weight, + self.total_weight, &block.header.consensus_hash, &block.header.block_hash() ); - total_reject_weight = total_reject_weight - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); - - if total_reject_weight.saturating_add(self.weight_threshold) - > self.total_weight - { - debug!( - "{}/{} signers vote to reject our block {}/{}", - total_reject_weight, - self.total_weight, - &block.header.consensus_hash, - &block.header.block_hash() - ); - counters.bump_naka_rejected_blocks(); - return Err(NakamotoNodeError::SignersRejected); - } - } else { - debug!("Received rejected block response for a block besides my own. Ignoring."); + counters.bump_naka_rejected_blocks(); + return Err(NakamotoNodeError::SignersRejected); } continue; } @@ -909,7 +910,7 @@ impl SignCoordinator { } if Self::fault_injection_ignore_signatures() { - debug!("SignCoordinator: fault injection: ignoring well-formed signature for block"; + warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, @@ -922,7 +923,7 @@ impl SignCoordinator { continue; } - debug!("SignCoordinator: Signature Added to block"; + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, From 5341074571d1b21072faced75dabcab624cf88b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:20 -0400 Subject: [PATCH 223/910] refactor: auth_token --- .../stacks-node/src/tests/nakamoto_integrations.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 1596a0bad9d..df8c5127564 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -231,7 +231,7 @@ impl TestSigningChannel { pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{http_origin}/v2/stacker_set/{cycle}"); + let path = format!("{http_origin}/v3/stacker_set/{cycle}"); let res = client .get(&path) .send() @@ -2302,7 +2302,7 @@ fn correct_burn_outs() { run_loop_thread.join().unwrap(); } -/// Test `/v2/block_proposal` API endpoint +/// Test `/v3/block_proposal` API endpoint /// /// This endpoint allows miners to propose Nakamoto blocks to a node, /// and test if they would be accepted or rejected @@ -2315,7 +2315,7 @@ fn block_proposal_api_endpoint() { let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); - conf.connection_options.block_proposal_token = Some(password.clone()); + conf.connection_options.auth_token = Some(password.clone()); let account_keys = add_initial_balances(&mut conf, 10, 1_000_000); let stacker_sk = setup_stacker(&mut 
conf); let sender_signer_sk = Secp256k1PrivateKey::new(); @@ -2539,7 +2539,7 @@ fn block_proposal_api_endpoint() { .expect("Failed to build `reqwest::Client`"); // Build URL let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{http_origin}/v2/block_proposal"); + let path = format!("{http_origin}/v3/block_proposal"); let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in @@ -4494,7 +4494,7 @@ fn nakamoto_attempt_time() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); - naka_conf.connection_options.block_proposal_token = Some(password.clone()); + naka_conf.connection_options.auth_token = Some(password.clone()); // Use fixed timing params for this test let nakamoto_attempt_time_ms = 20_000; naka_conf.miner.nakamoto_attempt_time_ms = nakamoto_attempt_time_ms; @@ -5165,7 +5165,7 @@ fn signer_chainstate() { socket, naka_conf .connection_options - .block_proposal_token + .auth_token .clone() .unwrap_or("".into()), false, From 3fe4266784ebf8a58e93a69115711d02333804f4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:31 -0400 Subject: [PATCH 224/910] refactor: /v3 --- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8a1a08b5dc3..ee4aff890cc 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1397,7 +1397,7 @@ pub fn get_contract_src( pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/stacker_set/{}", http_origin, reward_cycle); + let path = format!("{}/v3/stacker_set/{}", http_origin, reward_cycle); let res = client.get(&path).send().unwrap(); info!("Got stacker_set response {:?}", &res); From e9bf54d97ed15266fb99bdd3c9b38937d0ac0482 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:40 -0400 Subject: [PATCH 225/910] refactor: auth_token --- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a5973569a1d..91371578cb9 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -155,7 +155,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Wed, 14 Aug 2024 14:13:01 -0400 Subject: [PATCH 226/910] chore: cargo fmt --- stacks-signer/src/client/stacks_client.rs | 6 +++- stacks-signer/src/signerdb.rs | 19 ++++++------ stacks-signer/src/v0/signer.rs | 30 ++++++++++--------- stackslib/src/net/api/mod.rs | 4 ++- stackslib/src/net/api/postblock_v3.rs | 17 ++++++----- stackslib/src/net/api/tests/postblock_v3.rs | 7 +++-- .../nakamoto/download_state_machine.rs | 10 +++++-- .../download/nakamoto/tenure_downloader.rs | 4 +-- stackslib/src/net/inv/nakamoto.rs | 3 +- stackslib/src/net/relay.rs | 14 +++++---- 10 files changed, 66 insertions(+), 48 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index a2e995ae9cd..0aeb30bb6e0 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ 
b/stacks-signer/src/client/stacks_client.rs @@ -685,7 +685,11 @@ impl StacksClient { pub fn post_block(&self, block: &NakamotoBlock) -> Result { let response = self .stacks_node_client - .post(format!("{}{}?broadcast=1", self.http_origin, postblock_v3::PATH)) + .post(format!( + "{}{}?broadcast=1", + self.http_origin, + postblock_v3::PATH + )) .header("Content-Type", "application/octet-stream") .header(AUTHORIZATION, self.auth_password.clone()) .body(block.serialize_to_vec()) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 4964f549d4c..2d2e9cc22a7 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -554,11 +554,9 @@ impl SignerDb { let args = params![block_sighash]; let sigs_txt: Vec = query_rows(&self.db, qry, args)?; sigs_txt - .into_iter() - .map(|sig_txt| { - serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError) - }) - .collect() + .into_iter() + .map(|sig_txt| serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError)) + .collect() } /// Mark a block as having been broadcasted @@ -566,7 +564,7 @@ impl SignerDb { &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, - ts: u64 + ts: u64, ) -> Result<(), DBError> { let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; @@ -917,19 +915,20 @@ mod tests { db.set_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash(), - 12345 + 12345, ) .unwrap(); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); - assert_eq!(db - .get_block_broadcasted( + assert_eq!( + db.get_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash() ) .unwrap() .unwrap(), - 12345); + 12345 + ); } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 1bf94e38b53..e51d96d9331 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -29,9 +29,9 @@ use libsigner::v0::messages::{ use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::util::get_epoch_time_secs; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -482,7 +482,7 @@ impl Signer { ( BlockResponse::accepted(signer_signature_hash, signature), block_info, - Some(signature.clone()) + Some(signature.clone()), ) } BlockValidateResponse::Reject(block_validate_reject) => { @@ -507,7 +507,7 @@ impl Signer { ( BlockResponse::from(block_validate_reject.clone()), block_info, - None + None, ) } }; @@ -545,7 +545,7 @@ impl Signer { /// Compute the signing weight, given a list of signatures fn compute_signature_signing_weight<'a>( &self, - addrs: impl Iterator + addrs: impl Iterator, ) -> u32 { let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); @@ -587,13 +587,15 @@ impl Signer { panic!("{self}: failed to determine if block {block_hash} was broadcasted") }) { - debug!("{self}: have already broadcasted block {} at {}, so will not re-attempt", block_hash, ts); + debug!( + "{self}: have already broadcasted block {} at {}, so will not re-attempt", + 
block_hash, ts + ); return; } // recover public key - let Ok(public_key) = - Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) + let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) else { debug!("{self}: Received unrecovarable signature. Will not store."; "signature" => %signature, @@ -675,13 +677,13 @@ impl Signer { // record time at which we reached the threshold block_info.signed_group = Some(get_epoch_time_secs()); - let _ = self - .signer_db - .insert_block(&block_info) - .map_err(|e| { - warn!("Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e); - e - }); + let _ = self.signer_db.insert_block(&block_info).map_err(|e| { + warn!( + "Failed to set group threshold signature timestamp for {}: {:?}", + block_hash, &e + ); + e + }); // collect signatures for the block let signatures: Vec<_> = self diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 4405f49a25c..5bbc6281a24 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -131,7 +131,9 @@ impl StacksHttp { self.register_rpc_endpoint(postblock_proposal::RPCBlockProposalRequestHandler::new( self.auth_token.clone(), )); - self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::new(self.auth_token.clone())); + self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::new( + self.auth_token.clone(), + )); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 4eeb68750e3..39ff26087f7 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -16,6 +16,7 @@ use regex::{Captures, Regex}; use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; use stacks_common::types::net::PeerHost; +use url::form_urlencoded; use super::postblock::StacksBlockAcceptedData; use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; @@ -31,15 +32,13 @@ use crate::net::httpcore::{ use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; -use url::form_urlencoded; - pub static PATH: &'static str = "/v3/blocks/upload/"; #[derive(Clone, Default)] pub struct RPCPostBlockRequestHandler { pub block: Option, pub auth: Option, - pub broadcast: Option + pub broadcast: Option, } impl RPCPostBlockRequestHandler { @@ -47,7 +46,7 @@ impl RPCPostBlockRequestHandler { Self { block: None, auth, - broadcast: None + broadcast: None, } } @@ -172,7 +171,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { &block, rpc_args.coord_comms, NakamotoBlockObtainMethod::Uploaded, - self.broadcast.unwrap_or(false) + self.broadcast.unwrap_or(false), ) }) .map_err(|e| { @@ -226,9 +225,13 @@ impl StacksHttpRequest { ) .expect("FATAL: failed to construct request from infallible data") } - + /// Make a new post-block request, with intent to broadcast - pub fn new_post_block_v3_broadcast(host: PeerHost, block: &NakamotoBlock, auth: &str) -> StacksHttpRequest { + pub fn new_post_block_v3_broadcast( + host: PeerHost, + block: &NakamotoBlock, + auth: &str, + ) -> StacksHttpRequest { let mut request = StacksHttpRequest::new_for_peer( host, "POST".into(), diff --git 
a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index 0764953c6ee..5cc652fc83d 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -79,14 +79,14 @@ fn parse_request() { &bytes[offset..], ) .unwrap(); - + parsed_request.clear_headers(); parsed_request.add_header("authorization".into(), "12345".into()); let (preamble, _contents) = parsed_request.destruct(); assert_eq!(&preamble, request.preamble()); assert_eq!(handler.broadcast, Some(true)); - + handler.restart(); assert!(handler.block.is_none()); assert!(handler.broadcast.is_none()); @@ -115,7 +115,8 @@ fn parse_request() { assert!(handler.broadcast.is_none()); // deal with bad authentication - let request = StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "wrong password"); + let request = + StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "wrong password"); let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); let bad_response = http.handle_try_parse_request( diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 9d30fa50e31..6e298470e01 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -774,7 +774,10 @@ impl NakamotoDownloadStateMachine { let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); let sort_rc = sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height.saturating_add(1)) + .block_height_to_reward_cycle( + sortdb.first_block_height, + last_sort_height.saturating_add(1), + ) .expect("FATAL: burnchain tip is before system start"); let next_sort_rc = if last_sort_height == sort_tip.block_height { @@ -788,7 +791,10 @@ impl NakamotoDownloadStateMachine { } else { sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height.saturating_add(1)) + .block_height_to_reward_cycle( + sortdb.first_block_height, + sort_tip.block_height.saturating_add(1), + ) .expect("FATAL: burnchain tip is before system start") }; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 74b7084a842..f7fb970bb6f 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -681,9 +681,7 @@ impl NakamotoTenureDownloader { let blocks_opt = self.try_accept_tenure_blocks(blocks)?; Ok(blocks_opt) } - NakamotoTenureDownloadState::Done => { - Err(NetError::InvalidState) - } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), }; self.idle = true; handle_result diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3a27c1072d5..d01e8625a1a 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -812,7 +812,8 @@ impl NakamotoInvStateMachine { if let Some(last_sort_tip) = self.last_sort_tip.as_ref() { if last_sort_tip.consensus_hash != network.burnchain_tip.consensus_hash { debug!("Forcibly restarting all Nakamoto inventory comms due to burnchain tip change ({} != {})", &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash); - let tip_rc = NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); + let tip_rc = + 
NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); for inv_state in self.inventories.values_mut() { inv_state.reset_comms(tip_rc.saturating_sub(1)); } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index ff71feda9d2..04969099732 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -790,7 +790,7 @@ impl Relayer { } Ok(res) } - + /// Wrapper around inner_process_new_nakamoto_block pub fn process_new_nakamoto_block( burnchain: &Burnchain, @@ -811,7 +811,7 @@ impl Relayer { block, coord_comms, obtained_method, - false + false, ) } @@ -836,7 +836,7 @@ impl Relayer { block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, - force_broadcast: bool + force_broadcast: bool, ) -> Result { debug!( "Handle incoming Nakamoto block {}/{} obtained via {}", @@ -862,10 +862,12 @@ impl Relayer { if force_broadcast { // it's possible that the signer sent this block to us, in which case, we should // broadcast it - debug!("Already have Nakamoto block {}, but broadcasting anyway", &block.header.block_id()); + debug!( + "Already have Nakamoto block {}, but broadcasting anyway", + &block.header.block_id() + ); return Ok(true); - } - else { + } else { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); } From 5d77eabf6c33cf1fed539cbb9bf8afdf236db742 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 15:20:53 -0400 Subject: [PATCH 227/910] WIP: add test for mock miner messages. Failing to write to miner slot Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 11 +- stackslib/src/chainstate/nakamoto/mod.rs | 1 - testnet/stacks-node/src/neon_node.rs | 72 +++++++----- testnet/stacks-node/src/tests/signer/v0.rs | 122 ++++++++++++++++++++- 4 files changed, 167 insertions(+), 39 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index af7c38e22a3..117a8c4912e 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,18 +88,9 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1, - /// Mock message from the miner - MockMinerMessage = 2 + BlockPushed = 1 }); -#[cfg_attr(test, mutants::skip)] -impl Display for MinerSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}({})", self, self.to_u8()) - } -} - impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d059a96cb63..7e70c9940c4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4210,7 +4210,6 @@ impl NakamotoChainState { "stackerdb_slots" => ?stackerdb_config.signers, "queried_sortition" => %election_sortition, "sortition_hashes" => ?miners_info.get_sortitions()); - return Ok(None); } let slot_id_range = signer_ranges.swap_remove(signer_ix); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 6cbbd3b9f67..6e17328f6d7 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -154,7 +154,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ MessageSlotID, 
MinerSlotID, MockMinerMessage, MockSignature, PeerInfo, SignerMessage, }; -use libsigner::StackerDBSession; +use libsigner::{SignerSession, StackerDBSession}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; @@ -190,7 +190,7 @@ use stacks::net::db::{LocalPeer, PeerDB}; use stacks::net::dns::{DNSClient, DNSResolver}; use stacks::net::p2p::PeerNetwork; use stacks::net::relay::Relayer; -use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; +use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs, MINER_SLOT_COUNT}; use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; @@ -2263,10 +2263,11 @@ impl BlockMinerThread { } /// Read any mock signatures from stackerdb and respond to them - pub fn respond_to_mock_signatures(&mut self) -> Result<(), ChainstateError> { + pub fn send_mock_miner_message(&mut self) -> Result<(), ChainstateError> { + let new_burn_block_height = self.burn_block.block_height + 1; let miner_config = self.config.get_miner_config(); - if miner_config.pre_nakamoto_miner_messaging { - debug!("Pre-Nakamoto miner messaging is disabled"); + if !miner_config.pre_nakamoto_miner_messaging { + debug!("Pre-Nakamoto mock miner messaging is disabled"); return Ok(()); } @@ -2274,16 +2275,43 @@ impl BlockMinerThread { let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let target_epoch_id = - SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1)? - .expect("FATAL: no epoch defined") - .epoch_id; + let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), new_burn_block_height)? 
+ .expect("FATAL: no epoch defined") + .epoch_id; if target_epoch_id != StacksEpochId::Epoch25 { - debug!("Mock signing is disabled for non-epoch 2.5 blocks."; + debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; "target_epoch_id" => target_epoch_id.to_string() ); return Ok(()); } + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + let slot_id = MinerSlotID::BlockProposal.to_u8().into(); + if let Ok(messages) = + miners_stackerdb.get_latest_chunks(&[slot_id, slot_id * MINER_SLOT_COUNT]) + { + debug!("Miner got messages: {:?}", messages.len()); + for message in messages { + if let Some(message) = message { + if message.is_empty() { + continue; + } + let Ok(SignerMessage::MockMinerMessage(miner_message)) = + SignerMessage::consensus_deserialize(&mut message.as_slice()) + else { + continue; + }; + if miner_message.peer_info.burn_block_height == new_burn_block_height { + debug!( + "Already sent mock miner message for tenure burn block height {:?}", + self.burn_block.block_height + ); + return Ok(()); + } + } + } + } // Retrieve any MockSignatures from stackerdb let mut mock_signatures = Vec::new(); let reward_cycle = self @@ -2321,13 +2349,6 @@ impl BlockMinerThread { } } } - info!( - "Miner responding to {} mock signatures", - mock_signatures.len() - ); - let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); - let mut miners_stackerdb = - StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); let p2p_net = StacksNode::setup_peer_network( &self.config, @@ -2356,30 +2377,29 @@ impl BlockMinerThread { server_version, }; - info!("Responding to mock signatures for burn block {:?}", &self.burn_block.block_height; + info!("Sending mock miner message in response to mock signatures for burn block {:?}", &self.burn_block.block_height; "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), "stacks_tip" => ?peer_info.stacks_tip.clone(), "peer_burn_block_height" => peer_info.burn_block_height, "pox_consensus" => ?peer_info.pox_consensus.clone(), "server_version" => peer_info.server_version.clone(), - "chain_id" => self.config.burnchain.chain_id + "chain_id" => self.config.burnchain.chain_id, + "num_mock_signatures" => mock_signatures.len(), ); let message = MockMinerMessage { peer_info, - tenure_burn_block_height: self.burn_block.block_height, chain_id: self.config.burnchain.chain_id, mock_signatures, + tenure_burn_block_height: new_burn_block_height, }; - let sort_db = SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) - .expect("FATAL: failed to open burnchain DB"); if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), - &sort_db, + &burn_db, &self.burn_block, &stackerdbs, SignerMessage::MockMinerMessage(message), - MinerSlotID::MockMinerMessage, + MinerSlotID::BlockProposal, // We are sending a mock miner message NOT a block proposal, but since we do not propose blocks in epoch 2.5, it is fine self.config.is_mainnet(), &mut miners_stackerdb, &self.burn_block.consensus_hash, @@ -3731,8 +3751,8 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { let result = miner_thread_state.run_tenure(); - if let Err(e) = miner_thread_state.respond_to_mock_signatures() { - warn!("Failed to respond to mock signatures: {}", e); + if let Err(e) = miner_thread_state.send_mock_miner_message() { + warn!("Failed 
to send mock miner message: {}", e); } result }) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4b3fea46a04..92cd23a3af2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; @@ -184,7 +184,7 @@ impl SignerTest { ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(30); + let short_timeout = Duration::from_secs(60); let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event @@ -2359,6 +2359,124 @@ fn mock_sign_epoch_25() { assert_eq!(old_signatures, new_signatures); } +#[test] +#[ignore] +/// This test checks that Epoch 2.5 miners will issue a MockMinerMessage per burn block they receive +/// including the mock signature from the signers. +fn mock_miner_message_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + }, + &[], + ); + + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + let main_poll_time = Instant::now(); + let mut mock_miner_message = None; + while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_start_height + { + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let mock_poll_time = Instant::now(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + + while mock_miner_message.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != 
miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() + { + if chunk.slot_id == MinerSlotID::BlockProposal.to_u8() as u32 { + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockMinerMessage(message) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize MockMinerMessage") + else { + continue; + }; + if message.peer_info.burn_block_height == current_burn_block_height { + mock_miner_message = Some(message); + break; + } + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); + } + test_observer::clear(); + mock_miner_message = None; + assert!( + main_poll_time.elapsed() <= Duration::from_secs(45), + "Timed out waiting to advance epoch 3.0" + ); + } +} + #[test] #[ignore] /// This test asserts that signer set rollover works as expected. From 4c7598c418c996e2353695ee058ade2c4ff47027 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 15:35:38 -0400 Subject: [PATCH 228/910] WIP: Failing to write to miner slot Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 6e17328f6d7..9749b2625d1 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -152,7 +152,7 @@ use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ - MessageSlotID, MinerSlotID, MockMinerMessage, MockSignature, PeerInfo, SignerMessage, + MessageSlotID, MinerSlotID, MockMinerMessage, PeerInfo, SignerMessage, }; use libsigner::{SignerSession, StackerDBSession}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; @@ -2334,17 +2334,13 @@ impl BlockMinerThread { let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; for chunk in chunks { if let Some(chunk) = chunk { - match MockSignature::consensus_deserialize(&mut chunk.as_slice()) { - Ok(mock_signature) => { - if mock_signature.sign_data.event_burn_block_height - == self.burn_block.block_height - { - mock_signatures.push(mock_signature); - } - } - Err(e) => { - warn!("Failed to deserialize mock signature: {:?}", &e); - continue; + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.sign_data.event_burn_block_height + == self.burn_block.block_height + { + mock_signatures.push(mock_signature); } } } From edfaa10b524d65f73e9c759d371b4c1d4664bf15 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 15:57:23 -0400 Subject: [PATCH 229/910] WIP: No longer failing to write to .miners but failing to find appropro message Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 9749b2625d1..0fc40c135a4 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3746,11 +3746,10 @@ impl RelayerThread { .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { - let result = miner_thread_state.run_tenure(); if let Err(e) = 
miner_thread_state.send_mock_miner_message() { warn!("Failed to send mock miner message: {}", e); } - result + miner_thread_state.run_tenure() }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); From c590bcf42039771f0bc42a5b6fa160d4154b576c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 16:16:47 -0400 Subject: [PATCH 230/910] WIP: failing at 222 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 25 +++++++++------------- testnet/stacks-node/src/tests/signer/v0.rs | 1 - 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 0fc40c135a4..43eb1144141 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2264,7 +2264,6 @@ impl BlockMinerThread { /// Read any mock signatures from stackerdb and respond to them pub fn send_mock_miner_message(&mut self) -> Result<(), ChainstateError> { - let new_burn_block_height = self.burn_block.block_height + 1; let miner_config = self.config.get_miner_config(); if !miner_config.pre_nakamoto_miner_messaging { debug!("Pre-Nakamoto mock miner messaging is disabled"); @@ -2274,13 +2273,15 @@ impl BlockMinerThread { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - - let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), new_burn_block_height)? - .expect("FATAL: no epoch defined") - .epoch_id; - if target_epoch_id != StacksEpochId::Epoch25 { + let p2p_net = StacksNode::setup_peer_network( + &self.config, + &self.config.atlas, + self.burnchain.clone(), + ); + let epoch_id = p2p_net.get_current_epoch().epoch_id; + if epoch_id != StacksEpochId::Epoch25 { debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; - "target_epoch_id" => target_epoch_id.to_string() + "epoch_id" => epoch_id.to_string() ); return Ok(()); } @@ -2302,7 +2303,7 @@ impl BlockMinerThread { else { continue; }; - if miner_message.peer_info.burn_block_height == new_burn_block_height { + if miner_message.peer_info.burn_block_height == self.burn_block.block_height { debug!( "Already sent mock miner message for tenure burn block height {:?}", self.burn_block.block_height @@ -2346,12 +2347,6 @@ impl BlockMinerThread { } } - let p2p_net = StacksNode::setup_peer_network( - &self.config, - &self.config.atlas, - self.burnchain.clone(), - ); - let server_version = version_string( "stacks-node", option_env!("STACKS_NODE_VERSION") @@ -2386,7 +2381,7 @@ impl BlockMinerThread { peer_info, chain_id: self.config.burnchain.chain_id, mock_signatures, - tenure_burn_block_height: new_burn_block_height, + tenure_burn_block_height: self.burn_block.block_height, }; if let Err(e) = SignCoordinator::send_miners_message( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 92cd23a3af2..9034a8a5235 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2468,7 +2468,6 @@ fn mock_miner_message_epoch_25() { "Failed to find mock miner message within timeout" ); } - test_observer::clear(); mock_miner_message = None; assert!( main_poll_time.elapsed() <= Duration::from_secs(45), From d8d333733a5c4bd87a6c48048b813ece74770035 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 31 Jul 2024 15:12:04 -0400 Subject: [PATCH 231/910] chore: Add 
`mock_mining_output_dir` to node config --- testnet/stacks-node/src/config.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4eef0bbdd07..cae1fda5288 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1810,6 +1810,8 @@ pub struct NodeConfig { pub miner: bool, pub stacker: bool, pub mock_mining: bool, + /// Where to output blocks from mock mining + pub mock_mining_output_dir: Option, pub mine_microblocks: bool, pub microblock_frequency: u64, pub max_microblocks: u64, @@ -2102,6 +2104,7 @@ impl Default for NodeConfig { miner: false, stacker: false, mock_mining: false, + mock_mining_output_dir: None, mine_microblocks: true, microblock_frequency: 30_000, max_microblocks: u16::MAX as u64, @@ -2164,7 +2167,7 @@ impl NodeConfig { ) -> Neighbor { Neighbor { addr: NeighborKey { - peer_version: peer_version, + peer_version, network_id: chain_id, addrbytes: PeerAddress::from_socketaddr(&addr), port: addr.port(), @@ -2556,6 +2559,7 @@ pub struct NodeConfigFile { pub miner: Option, pub stacker: Option, pub mock_mining: Option, + pub mock_mining_output_dir: Option, pub mine_microblocks: Option, pub microblock_frequency: Option, pub max_microblocks: Option, @@ -2595,10 +2599,9 @@ impl NodeConfigFile { p2p_address: self.p2p_address.unwrap_or(rpc_bind.clone()), bootstrap_node: vec![], deny_nodes: vec![], - data_url: match self.data_url { - Some(data_url) => data_url, - None => format!("http://{}", rpc_bind), - }, + data_url: self + .data_url + .unwrap_or_else(|| format!("http://{rpc_bind}")), local_peer_seed: match self.local_peer_seed { Some(seed) => hex_bytes(&seed) .map_err(|_e| format!("node.local_peer_seed should be a hex encoded string"))?, @@ -2607,6 +2610,14 @@ impl NodeConfigFile { miner, stacker, mock_mining: self.mock_mining.unwrap_or(default_node_config.mock_mining), + mock_mining_output_dir: self + .mock_mining_output_dir + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| { + panic!("Failed to construct PathBuf from node.mock_mining_output_dir: {e}") + }), mine_microblocks: self .mine_microblocks .unwrap_or(default_node_config.mine_microblocks), From f949691c0c50776208f1440690f91a157044c15a Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 31 Jul 2024 17:12:21 -0400 Subject: [PATCH 232/910] feat: Neon mock miner can write blocks to files --- testnet/stacks-node/src/neon_node.rs | 81 ++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 8c3c4ed1799..fd3e240c9a7 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -140,12 +140,14 @@ use std::cmp; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::io::{Read, Write}; +use std::fs::{self, File}; +use std::io::{BufWriter, Read, Write}; use std::net::SocketAddr; +use std::path::Path; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; -use std::{fs, mem, thread}; +use std::{mem, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -237,7 +239,7 @@ pub(crate) enum MinerThreadResult { /// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was /// linked to the burnchain and what view(s) the miner had of the burnchain before 
and after /// completing the block. -#[derive(Clone)] +#[derive(Clone, Serialize)] pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, @@ -255,6 +257,28 @@ pub struct AssembledAnchorBlock { tenure_begin: u128, } +/// Write any `serde_json` object to a file +/// TODO: Move this somewhere else +pub fn serialize_json_to_file(json: &J, filepath: P) -> Result<(), std::io::Error> +where + J: ?Sized + serde::Serialize, + P: AsRef, +{ + let file = File::create(filepath)?; + let mut writer = BufWriter::new(file); + serde_json::to_writer(&mut writer, json)?; + writer.flush() +} + +impl AssembledAnchorBlock { + pub fn serialize_to_file
<P>
(&self, filepath: P) -> Result<(), std::io::Error> + where + P: AsRef, + { + serialize_json_to_file(self, filepath) + } +} + /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { @@ -2567,28 +2591,44 @@ impl BlockMinerThread { "attempt" => attempt ); + let NodeConfig { + mock_mining, + mock_mining_output_dir, + .. + } = self.config.get_node_config(false); + let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); + let assembled_block = AssembledAnchorBlock { + parent_consensus_hash: parent_block_info.parent_consensus_hash, + my_burn_hash: cur_burn_chain_tip.burn_header_hash, + my_block_height: cur_burn_chain_tip.block_height, + orig_burn_hash: self.burn_block.burn_header_hash, + anchored_block, + attempt, + tenure_begin, + }; if res.is_none() { self.failed_to_submit_last_attempt = true; - if !self.config.get_node_config(false).mock_mining { + if mock_mining { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + if let Some(dir) = mock_mining_output_dir { + let stacks_block_height = assembled_block.anchored_block.header.total_work.work; + let filename = format!("{stacks_block_height}.json"); + let filepath = dir.join(filename); + assembled_block + .serialize_to_file(&filepath) + .unwrap_or_else(|e| panic!("Failed to write to file '{filepath:?}': {e}")); + } + } else { warn!("Relayer: Failed to submit Bitcoin transaction"); return None; } - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); } else { self.failed_to_submit_last_attempt = false; } Some(MinerThreadResult::Block( - AssembledAnchorBlock { - parent_consensus_hash: parent_block_info.parent_consensus_hash, - my_burn_hash: cur_burn_chain_tip.burn_header_hash, - my_block_height: cur_burn_chain_tip.block_height, - orig_burn_hash: self.burn_block.burn_header_hash, - anchored_block, - attempt, - tenure_begin, - }, + assembled_block, microblock_private_key, bitcoin_controller.get_ongoing_commit(), )) @@ -3721,11 +3761,9 @@ impl RelayerThread { parent_consensus_hash, parent_block_hash ); - let mut microblock_thread_state = match MicroblockMinerThread::from_relayer_thread(self) { - Some(ts) => ts, - None => { - return false; - } + let Some(mut microblock_thread_state) = MicroblockMinerThread::from_relayer_thread(self) + else { + return false; }; if let Ok(miner_handle) = thread::Builder::new() @@ -3737,10 +3775,7 @@ impl RelayerThread { miner_tip, )) }) - .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); - e - }) + .inspect_err(|e| error!("Relayer: Failed to start tenure thread: {e:?}")) { // thread started! self.miner_thread = Some(miner_handle); From 7f12563b19214d5589517a7a4592463c01b46807 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 2 Aug 2024 09:37:49 -0400 Subject: [PATCH 233/910] feat: `stacks-inspect` reads mock mined blocks from files. No replay yet --- stacks-common/src/util/macros.rs | 22 +++++++ stacks-common/src/util/mod.rs | 26 +++++++++ stackslib/src/main.rs | 86 +++++++++++++++++++++++++++- testnet/stacks-node/src/neon_node.rs | 31 ++-------- 4 files changed, 138 insertions(+), 27 deletions(-) diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 4e15c5485bd..b1b26ee0148 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -617,6 +617,28 @@ macro_rules! impl_byte_array_serde { }; } +#[allow(unused_macros)] +#[macro_export] +macro_rules! 
impl_file_io_serde_json { + ($thing:ident) => { + impl $thing { + pub fn serialize_to_file
<P>
(&self, path: P) -> Result<(), std::io::Error> + where + P: AsRef, + { + $crate::util::serialize_json_to_file(self, path) + } + + pub fn deserialize_from_file
<P>
(path: P) -> Result + where + P: AsRef, + { + $crate::util::deserialize_json_from_file(path) + } + } + }; +} + // print debug statements while testing #[allow(unused_macros)] #[macro_export] diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index d4dfcda82f0..8575fee283d 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -28,6 +28,9 @@ pub mod uint; pub mod vrf; use std::collections::HashMap; +use std::fs::File; +use std::io::{BufReader, BufWriter, Write}; +use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; @@ -120,3 +123,26 @@ pub mod db_common { true } } + +/// Write any `serde_json` object directly to a file +pub fn serialize_json_to_file(json: &J, path: P) -> Result<(), std::io::Error> +where + J: ?Sized + serde::Serialize, + P: AsRef, +{ + let file = File::create(path)?; + let mut writer = BufWriter::new(file); + serde_json::to_writer(&mut writer, json)?; + writer.flush() +} + +/// Read any `serde_json` object directly from a file +pub fn deserialize_json_from_file(path: P) -> Result +where + J: serde::de::DeserializeOwned, + P: AsRef, +{ + let file = File::open(path)?; + let reader = BufReader::new(file); + serde_json::from_reader::<_, J>(reader).map_err(std::io::Error::from) +} diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 8660e0e9a74..97428a2dff0 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,6 +26,7 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use regex::Regex; use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -38,6 +39,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; +use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -92,7 +94,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; -use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; +use stacks_common::util::{deserialize_json_from_file, get_epoch_time_ms, log, sleep_ms}; #[cfg_attr(test, mutants::skip)] fn main() { @@ -1339,6 +1341,88 @@ simulating a miner. ); return; } + if argv[1] == "replay-mock-mining" { + let print_help_and_exit = || { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let dir = argv + .get(2) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(print_help_and_exit); + + if !dir.is_dir() { + panic!("Not a valid directory: {dir:?}"); + } + + // Read entries in directory + let dir_entries = dir + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read directory: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. 
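An aside on the filename regex a few lines below: as reproduced here, the pattern `^([0-9]+\.json)$` together with `cap.get(0)` hands `parse::<u64>()` the full `222.json` string, which cannot parse as an integer, so every file would be filtered out and the replay would panic with "No block files found". A minimal sketch, assuming only the `regex` crate, of a capture that isolates the digits:

```rust
use regex::Regex;

/// Extract the block height from a mock-mined block filename like `222.json`.
/// The digits must be captured *without* the `.json` suffix, otherwise the
/// `u64` parse fails and the file is silently dropped.
fn height_from_filename(filename: &str) -> Option<u64> {
    let re = Regex::new(r"^([0-9]+)\.json$").expect("static regex is valid");
    let caps = re.captures(filename)?;
    caps.get(1)?.as_str().parse::<u64>().ok()
}

fn main() {
    assert_eq!(height_from_filename("222.json"), Some(222));
    assert_eq!(height_from_filename("blocks.json"), None);
    assert_eq!(height_from_filename("222.json.bak"), None);
}
```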
We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = dir.join(filename); + info!("Replaying block from file"; + "block_height" => bh, + "filepath" => ?filepath + ); + // let block = AssembledAnchorBlock::deserialize_json_from_file(filepath) + // .unwrap_or_else(|e| panic!("Error reading block {block} from file: {e}")); + // debug!("Replaying block from {filepath:?}"; + // "block_height" => bh, + // "block" => %block + // ); + // TODO: Actually replay block + } + } if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fd3e240c9a7..0b48717d25e 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -140,14 +140,13 @@ use std::cmp; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::fs::{self, File}; -use std::io::{BufWriter, Read, Write}; +use std::io::{Read, Write}; use std::net::SocketAddr; use std::path::Path; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; -use std::{mem, thread}; +use std::{fs, mem, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -191,6 +190,7 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; +use stacks::util::{deserialize_json_from_file, serialize_json_to_file}; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ @@ -239,7 +239,7 @@ pub(crate) enum MinerThreadResult { /// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was /// linked to the burnchain and what view(s) the miner had of the burnchain before and after /// completing the block. -#[derive(Clone, Serialize)] +#[derive(Clone, Serialize, Deserialize)] pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, @@ -256,28 +256,7 @@ pub struct AssembledAnchorBlock { /// Epoch timestamp in milliseconds when we started producing the block. tenure_begin: u128, } - -/// Write any `serde_json` object to a file -/// TODO: Move this somewhere else -pub fn serialize_json_to_file(json: &J, filepath: P) -> Result<(), std::io::Error> -where - J: ?Sized + serde::Serialize, - P: AsRef, -{ - let file = File::create(filepath)?; - let mut writer = BufWriter::new(file); - serde_json::to_writer(&mut writer, json)?; - writer.flush() -} - -impl AssembledAnchorBlock { - pub fn serialize_to_file
<P>
(&self, filepath: P) -> Result<(), std::io::Error> - where - P: AsRef, - { - serialize_json_to_file(self, filepath) - } -} +impl_file_io_serde_json!(AssembledAnchorBlock); /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] From d4a92153f58be88f4ed770f176a2994429fef397 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 2 Aug 2024 15:40:37 -0400 Subject: [PATCH 234/910] chore: Move `replay-mock-mining` to stacks-node binary --- stackslib/src/main.rs | 85 +-------------------- testnet/stacks-node/src/main.rs | 110 +++++++++++++++++++++++---- testnet/stacks-node/src/neon_node.rs | 19 ++--- 3 files changed, 105 insertions(+), 109 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 97428a2dff0..90adf0bf1f8 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -39,7 +39,6 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; -use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -94,7 +93,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; -use stacks_common::util::{deserialize_json_from_file, get_epoch_time_ms, log, sleep_ms}; +use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; #[cfg_attr(test, mutants::skip)] fn main() { @@ -1341,88 +1340,6 @@ simulating a miner. ); return; } - if argv[1] == "replay-mock-mining" { - let print_help_and_exit = || { - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - process::exit(1); - }; - - // Process CLI args - let dir = argv - .get(2) - .map(PathBuf::from) - .map(fs::canonicalize) - .transpose() - .unwrap_or_else(|e| panic!("Not a valid path: {e}")) - .unwrap_or_else(print_help_and_exit); - - if !dir.is_dir() { - panic!("Not a valid directory: {dir:?}"); - } - - // Read entries in directory - let dir_entries = dir - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read directory: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. 
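The `.map_err(|e| { warn!(...); e })` to `.inspect_err(...)` conversions in the stacks-node changes below rely on `Result::inspect_err`, stabilized in Rust 1.76. A minimal illustration of the log-and-propagate pattern, independent of the stacks codebase:

```rust
fn parse_port(s: &str) -> Result<u16, std::num::ParseIntError> {
    s.parse::<u16>()
        // Observe the error by reference as it flows through; unlike map_err,
        // inspect_err cannot change the error's type or value.
        .inspect_err(|e| eprintln!("invalid port {s:?}: {e}"))
}

fn main() {
    assert_eq!(parse_port("8333"), Ok(8333));
    assert!(parse_port("bitcoin").is_err());
}
```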
We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = dir.join(filename); - info!("Replaying block from file"; - "block_height" => bh, - "filepath" => ?filepath - ); - // let block = AssembledAnchorBlock::deserialize_json_from_file(filepath) - // .unwrap_or_else(|e| panic!("Error reading block {block} from file: {e}")); - // debug!("Replaying block from {filepath:?}"; - // "block_height" => bh, - // "block" => %block - // ); - // TODO: Actually replay block - } - } if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 41b74262787..e3f191a946a 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -10,9 +10,12 @@ extern crate stacks; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use regex::Regex; pub use stacks_common::util; use stacks_common::util::hash::hex_bytes; +use crate::neon_node::AssembledAnchorBlock; + pub mod monitoring; pub mod burnchains; @@ -31,7 +34,8 @@ pub mod syncctl; pub mod tenure; use std::collections::HashMap; -use std::{env, panic, process}; +use std::path::PathBuf; +use std::{env, fs, panic, process}; use backtrace::Backtrace; use pico_args::Arguments; @@ -166,10 +170,8 @@ fn cli_get_miner_spend( return 0.0; }; let Ok(active_miners_and_commits) = - MinerStats::get_active_miners(&sortdb, Some(burn_block_height)).map_err(|e| { - warn!("Failed to get active miners: {:?}", &e); - e - }) + MinerStats::get_active_miners(&sortdb, Some(burn_block_height)) + .inspect_err(|e| warn!("Failed to get active miners: {e:?}")) else { return 0.0; }; @@ -187,10 +189,7 @@ fn cli_get_miner_spend( let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(burn_block_height + 1, &active_miners) - .map_err(|e| { - warn!("Failed to find unconfirmed block-commits: {}", &e); - e - }) + .inspect_err(|e| warn!("Failed to find unconfirmed block-commits: {e}")) else { return 0.0; }; @@ -229,10 +228,7 @@ fn cli_get_miner_spend( &commit_outs, at_burnchain_height, ) - .map_err(|e| { - warn!("Failed to get unconfirmed burn distribution: {:?}", &e); - e - }) + .inspect_err(|e| warn!("Failed to get unconfirmed burn distribution: {e:?}")) else { return 0.0; }; @@ -265,6 +261,82 @@ fn cli_get_miner_spend( spend_amount } +fn cli_replay_mock_mining(config_path: &str, path: &str) { + info!("Loading config at path {config_path}"); + let config = match ConfigFile::from_path(&config_path) { + Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), + Err(e) => { + warn!("Invalid config file: {e}"); + process::exit(1); + } + }; + + // Validate directory path + let dir = PathBuf::from(path); + let dir = fs::canonicalize(dir).unwrap_or_else(|e| panic!("{path} is not a 
valid path: {e}")); + + if !dir.is_dir() { + panic!("{path} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = dir + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {path}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = dir.join(filename); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + debug!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + // TODO: Actually replay block + } +} + fn main() { panic::set_hook(Box::new(|panic_info| { error!("Process abort due to thread panic: {}", panic_info); @@ -412,6 +484,13 @@ fn main() { println!("Will spend {}", spend_amount); process::exit(0); } + "replay-mock-mining" => { + let path: String = args.value_from_str("--path").unwrap(); + let config_path: String = args.value_from_str("--config").unwrap(); + args.finish(); + cli_replay_mock_mining(&config_path, &path); + process::exit(0); + } _ => { print_help(); return; @@ -502,6 +581,11 @@ key-for-seed\tOutput the associated secret key for a burnchain signer created wi \t\tCan be passed a config file for the seed via the `--config ` option *or* by supplying the hex seed on \t\tthe command line directly. +replay-mock-mining\tReplay mock mined blocks from
<dir>
+\t\tArguments: +\t\t --path: path to directory of mock mined blocks +\t\t --config: path to the config file + help\t\tDisplay this help. OPTIONAL ARGUMENTS: diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 0b48717d25e..58f3b877a62 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -239,7 +239,7 @@ pub(crate) enum MinerThreadResult { /// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was /// linked to the burnchain and what view(s) the miner had of the burnchain before and after /// completing the block. -#[derive(Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, @@ -3603,22 +3603,17 @@ impl RelayerThread { } } - let mut miner_thread_state = - match self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) { - Some(state) => state, - None => { - return false; - } - }; + let Some(mut miner_thread_state) = + self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) + else { + return false; + }; if let Ok(miner_handle) = thread::Builder::new() .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || miner_thread_state.run_tenure()) - .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); - e - }) + .inspect_err(|e| error!("Relayer: Failed to start tenure thread: {e:?}")) { self.miner_thread = Some(miner_handle); } From a6fd22bd44483c4be9e0fcc1d382acab7084d76a Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 09:45:35 -0400 Subject: [PATCH 235/910] chore: Move `replay-block` back to stacks-inspect --- stackslib/src/main.rs | 90 ++++++++++++++++++++++++++++ testnet/stacks-node/src/main.rs | 89 +-------------------------- testnet/stacks-node/src/neon_node.rs | 2 - 3 files changed, 91 insertions(+), 90 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 90adf0bf1f8..71ec309b1b6 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -39,6 +39,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; +use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -1341,6 +1342,11 @@ simulating a miner. return; } + if argv[1] == "replay-mock-mining" { + replay_mock_mining(argv); + process::exit(0); + } + if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); process::exit(1); @@ -1977,3 +1983,87 @@ fn analyze_sortition_mev(argv: Vec) { process::exit(0); } + +fn replay_mock_mining(argv: Vec) { + let print_help_and_exit = || -> ! 
{ + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let chainstate_path = argv + .get(2) + .unwrap_or_else(|| print_help_and_exit()); + + let blocks_path = argv + .get(3) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(|| print_help_and_exit()); + + // Validate directory path + if !blocks_path.is_dir() { + panic!("{blocks_path:?} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = blocks_path + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = blocks_path.join(filename); + // let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + // .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + debug!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + // TODO: Actually replay block + } +} diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index e3f191a946a..fcdc9f58474 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -10,12 +10,9 @@ extern crate stacks; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; -use regex::Regex; pub use stacks_common::util; use stacks_common::util::hash::hex_bytes; -use crate::neon_node::AssembledAnchorBlock; - pub mod monitoring; pub mod burnchains; @@ -34,8 +31,7 @@ pub mod syncctl; pub mod tenure; use std::collections::HashMap; -use std::path::PathBuf; -use std::{env, fs, panic, process}; +use std::{env, panic, process}; use backtrace::Backtrace; use pico_args::Arguments; @@ -261,82 +257,6 @@ fn cli_get_miner_spend( spend_amount } -fn cli_replay_mock_mining(config_path: &str, path: &str) { - info!("Loading config at path {config_path}"); - let config = match ConfigFile::from_path(&config_path) { - Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), - Err(e) => { - warn!("Invalid config file: {e}"); - process::exit(1); - } - }; - - // Validate directory path - let dir = PathBuf::from(path); - let dir = fs::canonicalize(dir).unwrap_or_else(|e| panic!("{path} is not a valid path: {e}")); - - if !dir.is_dir() { - panic!("{path} is not a valid directory"); - } - - // Read entries in directory - let dir_entries = dir - .read_dir() 
- .unwrap_or_else(|e| panic!("Failed to read {path}: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = dir.join(filename); - let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); - debug!("Replaying block from {filepath:?}"; - "block_height" => bh, - "block" => ?block - ); - // TODO: Actually replay block - } -} - fn main() { panic::set_hook(Box::new(|panic_info| { error!("Process abort due to thread panic: {}", panic_info); @@ -484,13 +404,6 @@ fn main() { println!("Will spend {}", spend_amount); process::exit(0); } - "replay-mock-mining" => { - let path: String = args.value_from_str("--path").unwrap(); - let config_path: String = args.value_from_str("--config").unwrap(); - args.finish(); - cli_replay_mock_mining(&config_path, &path); - process::exit(0); - } _ => { print_help(); return; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 58f3b877a62..f09287196f3 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -142,7 +142,6 @@ use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::io::{Read, Write}; use std::net::SocketAddr; -use std::path::Path; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; @@ -190,7 +189,6 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; -use stacks::util::{deserialize_json_from_file, serialize_json_to_file}; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ From f4b89f80805ed4bd39369271d75845efbfa3247d Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 12:38:56 -0400 Subject: [PATCH 236/910] refactor: Move `AssembledAnchorBlock` into stackslib --- stackslib/src/chainstate/stacks/miner.rs | 22 +++++++++++++++++++++ stackslib/src/main.rs | 10 ++++------ testnet/stacks-node/src/neon_node.rs | 25 ++---------------------- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f2dfdf5dffe..c41f8b50551 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ 
b/stackslib/src/chainstate/stacks/miner.rs @@ -66,6 +66,28 @@ use crate::monitoring::{ use crate::net::relay::Relayer; use crate::net::Error as net_error; +/// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was +/// linked to the burnchain and what view(s) the miner had of the burnchain before and after +/// completing the block. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AssembledAnchorBlock { + /// Consensus hash of the parent Stacks block + pub parent_consensus_hash: ConsensusHash, + /// Burnchain tip's block hash when we finished mining + pub my_burn_hash: BurnchainHeaderHash, + /// Burnchain tip's block height when we finished mining + pub my_block_height: u64, + /// Burnchain tip's block hash when we started mining (could be different) + pub orig_burn_hash: BurnchainHeaderHash, + /// The block we produced + pub anchored_block: StacksBlock, + /// The attempt count of this block (multiple blocks will be attempted per burnchain block) + pub attempt: u64, + /// Epoch timestamp in milliseconds when we started producing the block. + pub tenure_begin: u128, +} +impl_file_io_serde_json!(AssembledAnchorBlock); + /// System status for mining. /// The miner can be Ready, in which case a miner is allowed to run /// The miner can be Blocked, in which case the miner *should not start* and/or *should terminate* diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 71ec309b1b6..54c35eb7abf 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1988,14 +1988,12 @@ fn replay_mock_mining(argv: Vec) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); - eprintln!(" {n} "); + eprintln!(" {n} "); process::exit(1); }; // Process CLI args - let chainstate_path = argv - .get(2) - .unwrap_or_else(|| print_help_and_exit()); + let chainstate_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); let blocks_path = argv .get(3) @@ -2058,8 +2056,8 @@ fn replay_mock_mining(argv: Vec) { for (bh, filename) in indexed_files { let filepath = blocks_path.join(filename); - // let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - // .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); debug!("Replaying block from {filepath:?}"; "block_height" => bh, "block" => ?block diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index f09287196f3..c6be468a4d9 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -167,7 +167,8 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder, + signal_mining_blocked, signal_mining_ready, AssembledAnchorBlock, BlockBuilderSettings, + StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -234,28 +235,6 @@ pub(crate) enum MinerThreadResult { ), } -/// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was -/// linked to the burnchain and what view(s) the miner had of the burnchain before and after -/// 
completing the block. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AssembledAnchorBlock { - /// Consensus hash of the parent Stacks block - parent_consensus_hash: ConsensusHash, - /// Burnchain tip's block hash when we finished mining - my_burn_hash: BurnchainHeaderHash, - /// Burnchain tip's block height when we finished mining - my_block_height: u64, - /// Burnchain tip's block hash when we started mining (could be different) - orig_burn_hash: BurnchainHeaderHash, - /// The block we produced - anchored_block: StacksBlock, - /// The attempt count of this block (multiple blocks will be attempted per burnchain block) - attempt: u64, - /// Epoch timestamp in milliseconds when we started producing the block. - tenure_begin: u128, -} -impl_file_io_serde_json!(AssembledAnchorBlock); - /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { From 792e1c123889f23d42d01324cf0015b2d0d2302f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 15:21:47 -0400 Subject: [PATCH 237/910] refactor: Change args to `extract_connecting_microblocks()` --- stackslib/src/chainstate/stacks/db/blocks.rs | 22 +++++++++----------- stackslib/src/main.rs | 3 ++- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 63c22fafb68..0317db7b2f4 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5997,13 +5997,14 @@ impl StacksChainState { /// the given block. pub fn extract_connecting_microblocks( parent_block_header_info: &StacksHeaderInfo, - next_staging_block: &StagingBlock, + next_block_consensus_hash: &ConsensusHash, + next_block_hash: &BlockHeaderHash, block: &StacksBlock, mut next_microblocks: Vec, ) -> Result, Error> { // NOTE: since we got the microblocks from staging, where their signatures were already // validated, we don't need to validate them again. - let microblock_terminus = match StacksChainState::validate_parent_microblock_stream( + let Some((microblock_terminus, _)) = StacksChainState::validate_parent_microblock_stream( parent_block_header_info .anchored_header .as_stacks_epoch2() @@ -6011,15 +6012,11 @@ impl StacksChainState { &block.header, &next_microblocks, false, - ) { - Some((terminus, _)) => terminus, - None => { - debug!( - "Stopping at block {}/{} -- discontiguous header stream", - next_staging_block.consensus_hash, next_staging_block.anchored_block_hash, - ); - return Ok(vec![]); - } + ) else { + debug!( + "Stopping at block {next_block_consensus_hash}/{next_block_hash} -- discontiguous header stream" + ); + return Ok(vec![]); }; // do not consider trailing microblocks that this anchored block does _not_ confirm @@ -6214,7 +6211,8 @@ impl StacksChainState { // block's parent to this block. let next_microblocks = StacksChainState::extract_connecting_microblocks( &parent_header_info, - &next_staging_block, + &parent_header_info.consensus_hash, + &next_staging_block.anchored_block_hash, &block, next_microblocks, )?; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 54c35eb7abf..16a1319db21 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1701,7 +1701,8 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { // block's parent to this block. 
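A pattern note: alongside the argument change, this series keeps converting `match`-plus-early-return into `let ... else`, as in the `validate_parent_microblock_stream` hunk above and the thread-spawn sites earlier. The shape in miniature, as a standalone sketch rather than code from this repository:

```rust
/// `let ... else` binds the success case in the enclosing scope and forces
/// the else-branch to diverge (return, continue, panic), so the happy path
/// stays un-nested.
fn first_word(s: &str) -> Option<&str> {
    let Some(word) = s.split_whitespace().next() else {
        eprintln!("empty input; nothing to do");
        return None;
    };
    Some(word)
}

fn main() {
    assert_eq!(first_word("mock mining"), Some("mock"));
    assert_eq!(first_word("   "), None);
}
```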
let next_microblocks = StacksChainState::extract_connecting_microblocks( &parent_header_info, - &next_staging_block, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, &block, next_microblocks, ) From 91952ac541c1f48ab0df22a14d4c088625a371fe Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 8 Aug 2024 09:38:49 -0400 Subject: [PATCH 238/910] feat: Refactor `replay_block()` and use it to validate mock mined blocks --- stackslib/src/chainstate/stacks/block.rs | 8 + stackslib/src/chainstate/stacks/db/blocks.rs | 34 +- stackslib/src/chainstate/stacks/miner.rs | 2 + stackslib/src/main.rs | 405 ++++++++++++------- testnet/stacks-node/src/neon_node.rs | 1 + 5 files changed, 284 insertions(+), 166 deletions(-) diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 6ede2bc8e68..85bfcc55768 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -651,6 +651,14 @@ impl StacksBlock { pub fn has_microblock_parent(&self) -> bool { self.header.has_microblock_parent() } + + /// Returns size in bytes of `StacksMessageCodec` representation + /// Note that this will serialize the block, so don't call if there is a better way to get block size + pub fn block_size(&self) -> Result { + let mut buf = vec![]; + self.consensus_serialize(&mut buf)?; + Ok(buf.len()) + } } impl StacksMessageCodec for StacksMicroblockHeader { diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 0317db7b2f4..be05151c129 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -3716,9 +3716,26 @@ impl StacksChainState { blocks_conn: &DBConn, staging_block: &StagingBlock, ) -> Result>, Error> { - if staging_block.parent_microblock_hash == EMPTY_MICROBLOCK_PARENT_HASH - && staging_block.parent_microblock_seq == 0 - { + Self::inner_find_parent_microblock_stream( + blocks_conn, + &staging_block.anchored_block_hash, + &staging_block.parent_anchored_block_hash, + &staging_block.parent_consensus_hash, + &staging_block.parent_microblock_hash, + staging_block.parent_microblock_seq, + ) + } + + /// Allow `find_parent_microblock_stream()` to be called without `StagingBlock` + pub fn inner_find_parent_microblock_stream( + blocks_conn: &DBConn, + anchored_block_hash: &BlockHeaderHash, + parent_anchored_block_hash: &BlockHeaderHash, + parent_consensus_hash: &ConsensusHash, + parent_microblock_hash: &BlockHeaderHash, + parent_microblock_seq: u16, + ) -> Result>, Error> { + if *parent_microblock_hash == EMPTY_MICROBLOCK_PARENT_HASH && parent_microblock_seq == 0 { // no parent microblocks, ever return Ok(Some(vec![])); } @@ -3726,9 +3743,9 @@ impl StacksChainState { // find the microblock stream fork that this block confirms match StacksChainState::load_microblock_stream_fork( blocks_conn, - &staging_block.parent_consensus_hash, - &staging_block.parent_anchored_block_hash, - &staging_block.parent_microblock_hash, + parent_consensus_hash, + parent_anchored_block_hash, + parent_microblock_hash, )? 
{ Some(microblocks) => { return Ok(Some(microblocks)); @@ -3736,10 +3753,7 @@ impl StacksChainState { None => { // parent microblocks haven't arrived yet, or there are none debug!( - "No parent microblock stream for {}: expected a stream with tail {},{}", - staging_block.anchored_block_hash, - staging_block.parent_microblock_hash, - staging_block.parent_microblock_seq + "No parent microblock stream for {anchored_block_hash}: expected a stream with tail {parent_microblock_hash},{parent_microblock_seq}", ); return Ok(None); } diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index c41f8b50551..4cf3e1e65e4 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -73,6 +73,8 @@ use crate::net::Error as net_error; pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block pub parent_consensus_hash: ConsensusHash, + /// Consensus hash this Stacks block + pub consensus_hash: ConsensusHash, /// Burnchain tip's block hash when we finished mining pub my_burn_hash: BurnchainHeaderHash, /// Burnchain tip's block height when we finished mining diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 16a1319db21..fbf64655d29 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,6 +26,9 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use blockstack_lib::clarity_vm::clarity::ClarityInstance; +use clarity::types::chainstate::SortitionId; +use db::ChainstateTx; use regex::Regex; use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] @@ -52,7 +55,7 @@ use blockstack_lib::burnchains::{ Address, Burnchain, PoxConstants, Txid, BLOCKSTACK_MAGIC_MAINNET, }; use blockstack_lib::chainstate::burn::db::sortdb::{ - get_block_commit_by_txid, SortitionDB, SortitionHandle, + get_block_commit_by_txid, SortitionDB, SortitionHandle, SortitionHandleContext, }; use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; @@ -77,7 +80,7 @@ use blockstack_lib::net::db::LocalPeer; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; use blockstack_lib::net::StacksMessage; -use blockstack_lib::util_lib::db::sqlite_open; +use blockstack_lib::util_lib::db::{sqlite_open, IndexDBTx}; use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, util_lib}; use libstackerdb::StackerDBChunkData; @@ -882,20 +885,20 @@ simulating a miner. let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); - eprintln!(" {n} "); - eprintln!(" {n} prefix "); - eprintln!(" {n} index-range "); - eprintln!(" {n} range "); - eprintln!(" {n} "); + eprintln!(" {n} "); + eprintln!(" {n} prefix "); + eprintln!(" {n} index-range "); + eprintln!(" {n} range "); + eprintln!(" {n} "); process::exit(1); }; if argv.len() < 2 { print_help_and_exit(); } let start = Instant::now(); - let stacks_path = &argv[2]; + let db_path = &argv[2]; let mode = argv.get(3).map(String::as_str); - let staging_blocks_db_path = format!("{stacks_path}/mainnet/chainstate/vm/index.sqlite"); + let staging_blocks_db_path = format!("{db_path}/mainnet/chainstate/vm/index.sqlite"); let conn = Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) .unwrap(); @@ -913,7 +916,7 @@ simulating a miner. 
let arg4 = argv[4] .parse::() .expect(" not a valid u64"); - let arg5 = argv[5].parse::().expect(" not a valid u64"); + let arg5 = argv[5].parse::().expect(" not a valid u64"); let start = arg4.saturating_sub(1); let blocks = arg5.saturating_sub(arg4); format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") @@ -922,7 +925,7 @@ simulating a miner. let start = argv[4] .parse::() .expect(" not a valid u64"); - let end = argv[5].parse::().expect(" not a valid u64"); + let end = argv[5].parse::().expect(" not a valid u64"); let blocks = end.saturating_sub(start); format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") } @@ -949,7 +952,7 @@ simulating a miner. if i % 100 == 0 { println!("Checked {i}..."); } - replay_block(stacks_path, index_block_hash); + replay_staging_block(db_path, index_block_hash); } println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); process::exit(0); @@ -1590,11 +1593,94 @@ simulating a miner. process::exit(0); } -fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { - let index_block_hash = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); - let chain_state_path = format!("{stacks_path}/mainnet/chainstate/"); - let sort_db_path = format!("{stacks_path}/mainnet/burnchain/sortition"); - let burn_db_path = format!("{stacks_path}/mainnet/burnchain/burnchain.sqlite"); +fn replay_mock_mining(argv: Vec) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let db_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); + + let blocks_path = argv + .get(3) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(|| print_help_and_exit()); + + // Validate directory path + if !blocks_path.is_dir() { + panic!("{blocks_path:?} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = blocks_path + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. 
We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = blocks_path.join(filename); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + debug!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + replay_mock_mined_block(&db_path, block); + } +} + +/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { + let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); + let chain_state_path = format!("{db_path}/mainnet/chainstate/"); + let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); + let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); let (mut chainstate, _) = @@ -1611,14 +1697,14 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { true, ) .unwrap(); - let mut sort_tx = sortdb.tx_begin_at_tip(); + let sort_tx = sortdb.tx_begin_at_tip(); let blocks_path = chainstate.blocks_path.clone(); let (mut chainstate_tx, clarity_instance) = chainstate .chainstate_tx_begin() .expect("Failed to start chainstate tx"); let mut next_staging_block = - StacksChainState::load_staging_block_info(&chainstate_tx.tx, &index_block_hash) + StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) .expect("Failed to load staging block data") .expect("No such index block hash in block database"); @@ -1630,21 +1716,139 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { .unwrap() .unwrap_or_default(); - let Some(next_microblocks) = - StacksChainState::find_parent_microblock_stream(&chainstate_tx.tx, &next_staging_block) - .unwrap() + let Some(parent_header_info) = + StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() else { - println!("No microblock stream found for {index_block_hash_hex}"); + println!("Failed to load parent head info for block: {index_block_hash_hex}"); + return; + }; + + let block = + StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); + let block_size = next_staging_block.block_data.len() as u64; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &next_staging_block.parent_microblock_hash, + next_staging_block.parent_microblock_seq, + &block_id, + &block, + block_size, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + next_staging_block.commit_burn, + next_staging_block.sortition_burn, + ); +} + +/// Process a mock mined block and call `replay_block()` to validate +fn replay_mock_mined_block(db_path: &str, block: 
AssembledAnchorBlock) { + let chain_state_path = format!("{db_path}/mainnet/chainstate/"); + let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); + let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); + let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + + let (mut chainstate, _) = + StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + STACKS_EPOCHS_MAINNET.as_ref(), + PoxConstants::mainnet_default(), + None, + true, + ) + .unwrap(); + let sort_tx = sortdb.tx_begin_at_tip(); + + let (mut chainstate_tx, clarity_instance) = chainstate + .chainstate_tx_begin() + .expect("Failed to start chainstate tx"); + + let block_consensus_hash = &block.consensus_hash; + let block_hash = block.anchored_block.block_hash(); + let block_id = StacksBlockId::new(block_consensus_hash, &block_hash); + let block_size = block + .anchored_block + .block_size() + .map(u64::try_from) + .unwrap_or_else(|e| panic!("Error serializing block {block_hash}: {e}")) + .expect("u64 overflow"); + + let Some(parent_header_info) = StacksChainState::get_anchored_block_header_info( + &mut chainstate_tx, + &block.parent_consensus_hash, + &block.anchored_block.header.parent_block, + ) + .unwrap() else { + println!("Failed to load parent head info for block: {block_hash}"); + return; + }; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &block.anchored_block.header.parent_microblock, + block.anchored_block.header.parent_microblock_sequence, + &block_id, + &block.anchored_block, + block_size, + block_consensus_hash, + &block_hash, + // I think the burn is used for miner rewards but not necessary for validation + 0, + 0, + ); +} + +/// Validate a block against chainstate +fn replay_block( + mut sort_tx: IndexDBTx, + mut chainstate_tx: ChainstateTx, + clarity_instance: &mut ClarityInstance, + burnchain_blocks_db: &BurnchainDB, + parent_header_info: &StacksHeaderInfo, + parent_microblock_hash: &BlockHeaderHash, + parent_microblock_seq: u16, + block_id: &StacksBlockId, + block: &StacksBlock, + block_size: u64, + block_consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + block_commit_burn: u64, + block_sortition_burn: u64, +) { + let parent_block_header = match &parent_header_info.anchored_header { + StacksBlockHeaderTypes::Epoch2(bh) => bh, + StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), + }; + let parent_block_hash = parent_block_header.block_hash(); + + let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( + &chainstate_tx.tx, + &block_hash, + &parent_block_hash, + &parent_header_info.consensus_hash, + parent_microblock_hash, + parent_microblock_seq, + ) + .unwrap() else { + println!("No microblock stream found for {block_id}"); return; }; let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = - match SortitionDB::get_block_snapshot_consensus( - &sort_tx, - &next_staging_block.consensus_hash, - ) - .unwrap() - { + match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { Some(sn) => ( sn.burn_header_hash, sn.block_height as u32, @@ -1653,42 +1857,19 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) 
{ ), None => { // shouldn't happen - panic!( - "CORRUPTION: staging block {}/{} does not correspond to a burn block", - &next_staging_block.consensus_hash, &next_staging_block.anchored_block_hash - ); + panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); } }; info!( "Process block {}/{} = {} in burn block {}, parent microblock {}", - next_staging_block.consensus_hash, - next_staging_block.anchored_block_hash, - &index_block_hash, - &burn_header_hash, - &next_staging_block.parent_microblock_hash, + block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, ); - let Some(parent_header_info) = - StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() - else { - println!("Failed to load parent head info for block: {index_block_hash_hex}"); - return; - }; - - let block = - StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); - let block_size = next_staging_block.block_data.len() as u64; - - let parent_block_header = match &parent_header_info.anchored_header { - StacksBlockHeaderTypes::Epoch2(bh) => bh, - StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), - }; - if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { let msg = format!( "Invalid stacks block {}/{} -- does not attach to parent {}/{}", - &next_staging_block.consensus_hash, + &block_consensus_hash, block.block_hash(), parent_block_header.block_hash(), &parent_header_info.consensus_hash @@ -1701,9 +1882,9 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { // block's parent to this block. let next_microblocks = StacksChainState::extract_connecting_microblocks( &parent_header_info, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - &block, + &block_consensus_hash, + &block_hash, + block, next_microblocks, ) .unwrap(); @@ -1717,20 +1898,14 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { ) } }; - assert_eq!( - next_staging_block.parent_microblock_hash, - last_microblock_hash - ); - assert_eq!( - next_staging_block.parent_microblock_seq, - last_microblock_seq - ); + assert_eq!(*parent_microblock_hash, last_microblock_hash); + assert_eq!(parent_microblock_seq, last_microblock_seq); let block_am = StacksChainState::find_stacks_tip_affirmation_map( - &burnchain_blocks_db, + burnchain_blocks_db, sort_tx.tx(), - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, + block_consensus_hash, + block_hash, ) .unwrap(); @@ -1742,23 +1917,23 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { &mut sort_tx, &pox_constants, &parent_header_info, - &next_staging_block.consensus_hash, + block_consensus_hash, &burn_header_hash, burn_header_height, burn_header_timestamp, &block, block_size, &next_microblocks, - next_staging_block.commit_burn, - next_staging_block.sortition_burn, + block_commit_burn, + block_sortition_burn, block_am.weight(), true, ) { Ok((_receipt, _, _)) => { - info!("Block processed successfully! block = {index_block_hash}"); + info!("Block processed successfully! block = {block_id}"); } Err(e) => { - println!("Failed processing block! block = {index_block_hash}, error = {e:?}"); + println!("Failed processing block! 
block = {block_id}, error = {e:?}"); process::exit(1); } }; @@ -1984,85 +2159,3 @@ fn analyze_sortition_mev(argv: Vec) { process::exit(0); } - -fn replay_mock_mining(argv: Vec) { - let print_help_and_exit = || -> ! { - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - process::exit(1); - }; - - // Process CLI args - let chainstate_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); - - let blocks_path = argv - .get(3) - .map(PathBuf::from) - .map(fs::canonicalize) - .transpose() - .unwrap_or_else(|e| panic!("Not a valid path: {e}")) - .unwrap_or_else(|| print_help_and_exit()); - - // Validate directory path - if !blocks_path.is_dir() { - panic!("{blocks_path:?} is not a valid directory"); - } - - // Read entries in directory - let dir_entries = blocks_path - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = blocks_path.join(filename); - let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); - debug!("Replaying block from {filepath:?}"; - "block_height" => bh, - "block" => ?block - ); - // TODO: Actually replay block - } -} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c6be468a4d9..fa9cc4ad357 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2556,6 +2556,7 @@ impl BlockMinerThread { let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); let assembled_block = AssembledAnchorBlock { parent_consensus_hash: parent_block_info.parent_consensus_hash, + consensus_hash: cur_burn_chain_tip.consensus_hash, my_burn_hash: cur_burn_chain_tip.burn_header_hash, my_block_height: cur_burn_chain_tip.block_height, orig_burn_hash: self.burn_block.burn_header_hash, From 29ee7ed425b27bcf076625b25129ef88a596a7d9 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 8 Aug 2024 16:39:15 -0400 Subject: [PATCH 239/910] test: Add partial integration test for mock miner replay --- .github/workflows/bitcoin-tests.yml | 1 + stacks-common/src/types/chainstate.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 4 +- testnet/stacks-node/src/neon_node.rs | 86 +++++---- .../src/tests/neon_integrations.rs | 179 +++++++++++++++++- 5 files changed, 229 insertions(+), 43 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml 
b/.github/workflows/bitcoin-tests.yml index e14934558a5..3fde40ae47e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -72,6 +72,7 @@ jobs: - tests::neon_integrations::confirm_unparsed_ongoing_ops - tests::neon_integrations::min_txs - tests::neon_integrations::vote_for_aggregate_key_burn_op_test + - tests::neon_integrations::mock_miner_replay - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 47d6c3c499b..c5208d02f99 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -30,11 +30,11 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; -#[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); impl_array_hexstring_fmt!(BurnchainHeaderHash); impl_byte_array_newtype!(BurnchainHeaderHash, u8, 32); +impl_byte_array_serde!(BurnchainHeaderHash); pub struct BlockHeaderHash(pub [u8; 32]); impl_array_newtype!(BlockHeaderHash, u8, 32); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 4cf3e1e65e4..f0e4c96307f 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -76,9 +76,9 @@ pub struct AssembledAnchorBlock { /// Consensus hash this Stacks block pub consensus_hash: ConsensusHash, /// Burnchain tip's block hash when we finished mining - pub my_burn_hash: BurnchainHeaderHash, + pub burn_hash: BurnchainHeaderHash, /// Burnchain tip's block height when we finished mining - pub my_block_height: u64, + pub burn_block_height: u64, /// Burnchain tip's block hash when we started mining (could be different) pub orig_burn_hash: BurnchainHeaderHash, /// The block we produced diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fa9cc4ad357..afef51f47c1 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -140,7 +140,7 @@ use std::cmp; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::io::{Read, Write}; +use std::io::{ErrorKind, Read, Write}; use std::net::SocketAddr; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; @@ -1123,7 +1123,7 @@ impl BlockMinerThread { ) -> Vec<&AssembledAnchorBlock> { let mut ret = vec![]; for (_, (assembled_block, _)) in last_mined_blocks.iter() { - if assembled_block.my_block_height >= burn_height { + if assembled_block.burn_block_height >= burn_height { ret.push(assembled_block); } } @@ -1633,7 +1633,7 @@ impl BlockMinerThread { &prev_block.anchored_block.block_hash(), &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, - &prev_block.my_burn_hash, + &prev_block.burn_hash, &prev_block.anchored_block.txs.len() ); max_txs = cmp::max(max_txs, prev_block.anchored_block.txs.len()); @@ -1645,7 +1645,7 @@ impl BlockMinerThread { continue; } if prev_block.parent_consensus_hash == *parent_consensus_hash - && prev_block.my_burn_hash == self.burn_block.burn_header_hash + && prev_block.burn_hash == self.burn_block.burn_header_hash && prev_block.anchored_block.header.parent_block == stacks_parent_header.anchored_header.block_hash() { @@ -1677,7 +1677,7 @@ impl BlockMinerThread { // already have. 
info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); return None; } @@ -1688,7 +1688,7 @@ impl BlockMinerThread { // fee minus the old BTC fee info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } @@ -1697,20 +1697,20 @@ impl BlockMinerThread { // no microblock stream to confirm, and the stacks tip hasn't changed info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); return None; } } } else { - if self.burn_block.burn_header_hash == prev_block.my_burn_hash { + if self.burn_block.burn_header_hash == prev_block.burn_hash { // only try and re-mine if there was no sortition since the last chain tip info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.my_burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); + parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); } else { info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.my_burn_hash); + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } } @@ -2554,34 +2554,48 @@ impl BlockMinerThread { } = self.config.get_node_config(false); let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); + if res.is_none() { + self.failed_to_submit_last_attempt = true; + if !mock_mining { + warn!("Relayer: Failed to submit Bitcoin 
transaction"); + return None; + } + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + } else { + self.failed_to_submit_last_attempt = false; + } + let assembled_block = AssembledAnchorBlock { parent_consensus_hash: parent_block_info.parent_consensus_hash, consensus_hash: cur_burn_chain_tip.consensus_hash, - my_burn_hash: cur_burn_chain_tip.burn_header_hash, - my_block_height: cur_burn_chain_tip.block_height, + burn_hash: cur_burn_chain_tip.burn_header_hash, + burn_block_height: cur_burn_chain_tip.block_height, orig_burn_hash: self.burn_block.burn_header_hash, anchored_block, attempt, tenure_begin, }; - if res.is_none() { - self.failed_to_submit_last_attempt = true; - if mock_mining { - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - if let Some(dir) = mock_mining_output_dir { - let stacks_block_height = assembled_block.anchored_block.header.total_work.work; - let filename = format!("{stacks_block_height}.json"); - let filepath = dir.join(filename); - assembled_block - .serialize_to_file(&filepath) - .unwrap_or_else(|e| panic!("Failed to write to file '{filepath:?}': {e}")); - } - } else { - warn!("Relayer: Failed to submit Bitcoin transaction"); - return None; + + if mock_mining { + let stacks_block_height = assembled_block.anchored_block.header.total_work.work; + info!("Mock mined Stacks block {stacks_block_height}"); + if let Some(dir) = mock_mining_output_dir { + info!("Writing mock mined Stacks block {stacks_block_height} to file"); + fs::create_dir_all(&dir).unwrap_or_else(|e| match e.kind() { + ErrorKind::AlreadyExists => { /* This is fine */ } + _ => error!("Failed to create directory '{dir:?}': {e}"), + }); + let filename = format!("{stacks_block_height}.json"); + let filepath = dir.join(filename); + assembled_block + .serialize_to_file(&filepath) + .unwrap_or_else(|e| match e.kind() { + ErrorKind::AlreadyExists => { + error!("Failed to overwrite file '{filepath:?}'") + } + _ => error!("Failed to write to file '{filepath:?}': {e}"), + }); } - } else { - self.failed_to_submit_last_attempt = false; } Some(MinerThreadResult::Block( @@ -3010,7 +3024,7 @@ impl RelayerThread { let AssembledAnchorBlock { parent_consensus_hash, anchored_block: mined_block, - my_burn_hash: mined_burn_hash, + burn_hash: mined_burn_hash, attempt: _, .. 
} = last_mined_block_data; @@ -3423,16 +3437,16 @@ impl RelayerThread { fn clear_stale_mined_blocks(burn_height: u64, last_mined_blocks: MinedBlocks) -> MinedBlocks { let mut ret = HashMap::new(); for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() { - if assembled_block.my_block_height < burn_height { + if assembled_block.burn_block_height < burn_height { debug!( "Stale mined block: {} (as of {},{})", - &stacks_bhh, &assembled_block.my_burn_hash, assembled_block.my_block_height + &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height ); continue; } debug!( "Mined block in-flight: {} (as of {},{})", - &stacks_bhh, &assembled_block.my_burn_hash, assembled_block.my_block_height + &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height ); ret.insert(stacks_bhh, (assembled_block, microblock_privkey)); } @@ -3760,7 +3774,7 @@ impl RelayerThread { ) => { // finished mining a block if BlockMinerThread::find_inflight_mined_blocks( - last_mined_block.my_block_height, + last_mined_block.burn_block_height, &self.last_mined_blocks, ) .len() @@ -3769,7 +3783,7 @@ impl RelayerThread { // first time we've mined a block in this burnchain block debug!( "Bump block processed for burnchain block {}", - &last_mined_block.my_block_height + &last_mined_block.burn_block_height ); self.globals.counters.bump_blocks_processed(); } @@ -3779,7 +3793,7 @@ impl RelayerThread { &last_mined_block.anchored_block.block_hash() ); - let bhh = last_mined_block.my_burn_hash.clone(); + let bhh = last_mined_block.burn_hash.clone(); let orig_bhh = last_mined_block.orig_burn_hash.clone(); let tenure_begin = last_mined_block.tenure_begin; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ac6a3ea978c..4474a9e9918 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1,5 +1,5 @@ use std::collections::{HashMap, HashSet}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; @@ -12389,6 +12389,7 @@ fn next_block_and_wait_all( btc_controller: &mut BitcoinRegtestController, miner_blocks_processed: &Arc<AtomicU64>, follower_blocks_processed: &[&Arc<AtomicU64>], + timeout: Option<Duration>, ) -> bool { let followers_current: Vec<_> = follower_blocks_processed .iter() @@ -12400,17 +12401,26 @@ fn next_block_and_wait_all( } // wait for followers to catch up + let timer = Instant::now(); loop { let finished = follower_blocks_processed .iter() .zip(followers_current.iter()) - .map(|(blocks_processed, current)| blocks_processed.load(Ordering::SeqCst) <= *current) - .fold(true, |acc, loaded| acc && loaded); + .map(|(blocks_processed, start_count)| { + blocks_processed.load(Ordering::SeqCst) > *start_count + }) + .all(|b| b); if finished { break; } + if let Some(t) = timeout { + if timer.elapsed() > t { + panic!("next_block_and_wait_all() timed out after {t:?}") + } + } + thread::sleep(Duration::from_millis(100)); } @@ -12425,6 +12435,7 @@ fn bitcoin_reorg_flap_with_follower() { } let (conf, _miner_account) = neon_integration_test_conf(); + let timeout = None; let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); @@ -12485,13 +12496,19 @@ fn bitcoin_reorg_flap_with_follower() { eprintln!("Follower bootup complete!"); // first block wakes up the run loop - next_block_and_wait_all(&mut btc_regtest_controller,
&miner_blocks_processed, &[]); + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[], + timeout, + ); // first block will hold our VRF registration next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, &[&follower_blocks_processed], + timeout, ); let mut miner_sort_height = miner_channel.get_sortitions_processed(); @@ -12506,6 +12523,7 @@ fn bitcoin_reorg_flap_with_follower() { &mut btc_regtest_controller, &miner_blocks_processed, &[&follower_blocks_processed], + timeout, ); miner_sort_height = miner_channel.get_sortitions_processed(); follower_sort_height = miner_channel.get_sortitions_processed(); @@ -12583,3 +12601,156 @@ fn bitcoin_reorg_flap_with_follower() { miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +/// Tests the following: +/// - Mock miner output to file +/// - Test replay of mock mined blocks using `stacks-inspect replay-mock-mining` +#[test] +#[ignore] +fn mock_miner_replay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let timeout = Some(Duration::from_secs(30)); + // Had to add this so that mock miner makes an attempt on EVERY block + let block_gap = Duration::from_secs(1); + + let test_dir = PathBuf::from("/tmp/stacks-integration-test-mock_miner_replay"); + _ = fs::remove_dir_all(&test_dir); + + let (conf, _miner_account) = neon_integration_test_conf(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut miner_run_loop = neon::RunLoop::new(conf.clone()); + let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); + let miner_channel = miner_run_loop.get_coordinator_channel().unwrap(); + + let mut follower_conf = conf.clone(); + follower_conf.events_observers.clear(); + follower_conf.node.mock_mining = true; + follower_conf.node.mock_mining_output_dir = Some(test_dir.join("mock-miner-output")); + follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let mut rng = rand::thread_rng(); + + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b { + break (a, b); + } + }; + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); + + thread::spawn(move || miner_run_loop.start(None, 0)); + wait_for_runloop(&miner_blocks_processed); + + // figure out the started node's port + let node_info = get_chain_info(&conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone()); + let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); + let follower_channel =
follower_run_loop.get_coordinator_channel().unwrap(); + + let miner_blocks_processed_start = miner_channel.get_stacks_blocks_processed(); + let follower_blocks_processed_start = follower_channel.get_stacks_blocks_processed(); + + thread::spawn(move || follower_run_loop.start(None, 0)); + wait_for_runloop(&follower_blocks_processed); + + eprintln!("Follower bootup complete!"); + + // first block wakes up the run loop + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[], + timeout, + ); + + thread::sleep(block_gap); + + // first block will hold our VRF registration + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[&follower_blocks_processed], + timeout, + ); + + thread::sleep(block_gap); + + // Third block will be the first mined Stacks block. + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[&follower_blocks_processed], + timeout, + ); + + thread::sleep(block_gap); + + // ---------- Setup finished, start test ---------- + + // Mine some blocks for mock miner output + for _ in 0..10 { + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[&follower_blocks_processed], + timeout, + ); + thread::sleep(block_gap); + } + + let miner_blocks_processed_end = miner_channel.get_stacks_blocks_processed(); + let follower_blocks_processed_end = follower_channel.get_stacks_blocks_processed(); + + let blocks_dir = follower_conf.node.mock_mining_output_dir.clone().unwrap(); + let file_count = follower_conf + .node + .mock_mining_output_dir + .unwrap() + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read directory: {e}")) + .count(); + + // Check that expected output files exist + assert!(test_dir.is_dir()); + assert!(blocks_dir.is_dir()); + assert_eq!(file_count, 12); + assert_eq!(miner_blocks_processed_end, follower_blocks_processed_end); + + // ---------- Test finished, clean up ---------- + + btcd_controller.stop_bitcoind().unwrap(); + miner_channel.stop_chains_coordinator(); + follower_channel.stop_chains_coordinator(); +} From 674cb5f3aa7c9d544da43f41528a6d2bf94ba94f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 12 Aug 2024 23:06:56 -0400 Subject: [PATCH 240/910] refactor: Move parts of stackslib CLI out of main.rs for integration testing --- stackslib/src/cli.rs | 471 ++++++++++++++++++ stackslib/src/lib.rs | 9 +- stackslib/src/main.rs | 466 +---------------- .../src/tests/neon_integrations.rs | 22 +- 4 files changed, 508 insertions(+), 460 deletions(-) create mode 100644 stackslib/src/cli.rs diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs new file mode 100644 index 00000000000..27efb44bb99 --- /dev/null +++ b/stackslib/src/cli.rs @@ -0,0 +1,471 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Subcommands used by `stacks-inspect` binary + +use std::path::PathBuf; +use std::time::Instant; +use std::{env, fs, io, process, thread}; + +use clarity::types::chainstate::SortitionId; +use db::ChainstateTx; +use regex::Regex; +use rusqlite::{Connection, OpenFlags}; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; + +use crate::burnchains::db::BurnchainDB; +use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleContext}; +use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use crate::chainstate::stacks::db::blocks::StagingBlock; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::miner::*; +use crate::chainstate::stacks::*; +use crate::clarity_vm::clarity::ClarityInstance; +use crate::core::*; +use crate::util_lib::db::IndexDBTx; + +/// Replay blocks from chainstate database +/// Takes args in CLI format: `<command-name> [args...]` +/// Terminates on error using `process::exit()` +pub fn command_replay_block(argv: &[String]) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} <chainstate-path>"); + eprintln!(" {n} <chainstate-path> prefix <index-block-hash-prefix>"); + eprintln!(" {n} <chainstate-path> index-range <start-block> <end-block>"); + eprintln!(" {n} <chainstate-path> range <start-block> <end-block>"); + eprintln!(" {n} <chainstate-path> <first|last> <block-count>"); + process::exit(1); + }; + let start = Instant::now(); + let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); + let mode = argv.get(2).map(String::as_str); + let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite"); + let conn = + Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) + .unwrap(); + + let query = match mode { + Some("prefix") => format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", + argv[3] + ), + Some("first") => format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", + argv[3] + ), + Some("range") => { + let arg4 = argv[3] + .parse::<u64>() + .expect("<start-block> not a valid u64"); + let arg5 = argv[4].parse::<u64>().expect("<end-block> not a valid u64"); + let start = arg4.saturating_sub(1); + let blocks = arg5.saturating_sub(arg4); + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") + } + Some("index-range") => { + let start = argv[3] + .parse::<u64>() + .expect("<start-block> not a valid u64"); + let end = argv[4].parse::<u64>().expect("<end-block> not a valid u64"); + let blocks = end.saturating_sub(start); + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + } + Some("last") => format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", + argv[3] + ), + Some(_) => print_help_and_exit(), + // Default to ALL blocks + None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), + }; + + let mut stmt = conn.prepare(&query).unwrap(); + let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); + + let mut index_block_hashes: Vec<String> = vec![]; + while let Ok(Some(row)) = hashes_set.next() { + index_block_hashes.push(row.get(0).unwrap()); + } + + let total = index_block_hashes.len(); + println!("Will check {total} blocks"); + for (i, index_block_hash) in index_block_hashes.iter().enumerate() { + if i % 100 == 0 { + println!("Checked {i}..."); + } + replay_staging_block(db_path, index_block_hash); + }
+ println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); +} + +/// Replay mock mined blocks from JSON files +/// Takes args in CLI format: `<command-name> [args...]` +/// Terminates on error using `process::exit()` +pub fn command_replay_mock_mining(argv: &[String]) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} <chainstate-path> <blocks-dir>"); + process::exit(1); + }; + + // Process CLI args + let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); + + let blocks_path = argv + .get(2) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(|| print_help_and_exit()); + + // Validate directory path + if !blocks_path.is_dir() { + panic!("{blocks_path:?} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = blocks_path + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+)\.json$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + debug!("Regex capture failed on {filename}"); + return None; + }; + // cap.get(0) returns the entire filename + // cap.get(1) returns the block number + let i = 1; + let Some(m) = cap.get(i) else { + debug!("cap.get({i}) failed on {filename} match"); + return None; + }; + let Ok(bh) = m.as_str().parse::<u64>() else { + debug!("parse::<u64>() failed on '{}'", m.as_str()); + return None; + }; + Some((bh, filename)) + }) + .collect::<Vec<_>>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found in {blocks_path:?}"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = blocks_path.join(filename); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + info!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + replay_mock_mined_block(&db_path, block); + } +} + +/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { + let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); + let chain_state_path = format!("{db_path}/chainstate/"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); + let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + + let (mut chainstate, _) = + StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), +
STACKS_EPOCHS_MAINNET.as_ref(), + PoxConstants::mainnet_default(), + None, + true, + ) + .unwrap(); + let sort_tx = sortdb.tx_begin_at_tip(); + + let blocks_path = chainstate.blocks_path.clone(); + let (mut chainstate_tx, clarity_instance) = chainstate + .chainstate_tx_begin() + .expect("Failed to start chainstate tx"); + let mut next_staging_block = + StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) + .expect("Failed to load staging block data") + .expect("No such index block hash in block database"); + + next_staging_block.block_data = StacksChainState::load_block_bytes( + &blocks_path, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + ) + .unwrap() + .unwrap_or_default(); + + let Some(parent_header_info) = + StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() + else { + println!("Failed to load parent head info for block: {index_block_hash_hex}"); + return; + }; + + let block = + StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); + let block_size = next_staging_block.block_data.len() as u64; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &next_staging_block.parent_microblock_hash, + next_staging_block.parent_microblock_seq, + &block_id, + &block, + block_size, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + next_staging_block.commit_burn, + next_staging_block.sortition_burn, + ); +} + +/// Process a mock mined block and call `replay_block()` to validate +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { + let chain_state_path = format!("{db_path}/chainstate/"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); + let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + + let (mut chainstate, _) = + StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + STACKS_EPOCHS_MAINNET.as_ref(), + PoxConstants::mainnet_default(), + None, + true, + ) + .unwrap(); + let sort_tx = sortdb.tx_begin_at_tip(); + + let (mut chainstate_tx, clarity_instance) = chainstate + .chainstate_tx_begin() + .expect("Failed to start chainstate tx"); + + let block_consensus_hash = &block.consensus_hash; + let block_hash = block.anchored_block.block_hash(); + let block_id = StacksBlockId::new(block_consensus_hash, &block_hash); + let block_size = block + .anchored_block + .block_size() + .map(u64::try_from) + .unwrap_or_else(|e| panic!("Error serializing block {block_hash}: {e}")) + .expect("u64 overflow"); + + let Some(parent_header_info) = StacksChainState::get_anchored_block_header_info( + &mut chainstate_tx, + &block.parent_consensus_hash, + &block.anchored_block.header.parent_block, + ) + .unwrap() else { + println!("Failed to load parent head info for block: {block_hash}"); + return; + }; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &block.anchored_block.header.parent_microblock, + block.anchored_block.header.parent_microblock_sequence, + &block_id, + &block.anchored_block, + block_size, + block_consensus_hash, + &block_hash, + // I think the 
burn is used for miner rewards but not necessary for validation + 0, + 0, + ); +} + +/// Validate a block against chainstate +fn replay_block( + mut sort_tx: IndexDBTx, + mut chainstate_tx: ChainstateTx, + clarity_instance: &mut ClarityInstance, + burnchain_blocks_db: &BurnchainDB, + parent_header_info: &StacksHeaderInfo, + parent_microblock_hash: &BlockHeaderHash, + parent_microblock_seq: u16, + block_id: &StacksBlockId, + block: &StacksBlock, + block_size: u64, + block_consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + block_commit_burn: u64, + block_sortition_burn: u64, +) { + let parent_block_header = match &parent_header_info.anchored_header { + StacksBlockHeaderTypes::Epoch2(bh) => bh, + StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), + }; + let parent_block_hash = parent_block_header.block_hash(); + + let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( + &chainstate_tx.tx, + &block_hash, + &parent_block_hash, + &parent_header_info.consensus_hash, + parent_microblock_hash, + parent_microblock_seq, + ) + .unwrap() else { + println!("No microblock stream found for {block_id}"); + return; + }; + + let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = + match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { + Some(sn) => ( + sn.burn_header_hash, + sn.block_height as u32, + sn.burn_header_timestamp, + sn.winning_block_txid, + ), + None => { + // shouldn't happen + panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); + } + }; + + info!( + "Process block {}/{} = {} in burn block {}, parent microblock {}", + block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, + ); + + if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { + let msg = format!( + "Invalid stacks block {}/{} -- does not attach to parent {}/{}", + &block_consensus_hash, + block.block_hash(), + parent_block_header.block_hash(), + &parent_header_info.consensus_hash + ); + println!("{msg}"); + return; + } + + // validation check -- validate parent microblocks and find the ones that connect the + // block's parent to this block. + let next_microblocks = StacksChainState::extract_connecting_microblocks( + &parent_header_info, + &block_consensus_hash, + &block_hash, + block, + next_microblocks, + ) + .unwrap(); + let (last_microblock_hash, last_microblock_seq) = match next_microblocks.len() { + 0 => (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0), + _ => { + let l = next_microblocks.len(); + ( + next_microblocks[l - 1].block_hash(), + next_microblocks[l - 1].header.sequence, + ) + } + }; + assert_eq!(*parent_microblock_hash, last_microblock_hash); + assert_eq!(parent_microblock_seq, last_microblock_seq); + + let block_am = StacksChainState::find_stacks_tip_affirmation_map( + burnchain_blocks_db, + sort_tx.tx(), + block_consensus_hash, + block_hash, + ) + .unwrap(); + + let pox_constants = sort_tx.context.pox_constants.clone(); + + match StacksChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut sort_tx, + &pox_constants, + &parent_header_info, + block_consensus_hash, + &burn_header_hash, + burn_header_height, + burn_header_timestamp, + &block, + block_size, + &next_microblocks, + block_commit_burn, + block_sortition_burn, + block_am.weight(), + true, + ) { + Ok((_receipt, _, _)) => { + info!("Block processed successfully! 
block = {block_id}"); + } + Err(e) => { + println!("Failed processing block! block = {block_id}, error = {e:?}"); + process::exit(1); + } + }; +} diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index bd634cef647..31f97628a6e 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -59,18 +59,15 @@ pub extern crate libstackerdb; pub mod chainstate; pub mod burnchains; - +pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; +pub mod cli; pub mod core; +pub mod cost_estimates; pub mod deps; - pub mod monitoring; -pub mod cost_estimates; - -pub mod clarity_cli; - // set via _compile-time_ envars const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index fbf64655d29..479bfaa9b74 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused_imports)] #![allow(dead_code)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -23,14 +22,9 @@ #[macro_use] extern crate stacks_common; -#[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] +#[macro_use(slog_debug, slog_info, slog_warn)] extern crate slog; -use blockstack_lib::clarity_vm::clarity::ClarityInstance; -use clarity::types::chainstate::SortitionId; -use db::ChainstateTx; -use regex::Regex; -use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -39,23 +33,16 @@ use tikv_jemallocator::Jemalloc; static GLOBAL: Jemalloc = Jemalloc; use std::collections::{BTreeMap, HashMap, HashSet}; -use std::fs::{File, OpenOptions}; +use std::fs::File; use std::io::prelude::*; use std::io::BufReader; -use std::path::PathBuf; -use std::time::Instant; use std::{env, fs, io, process, thread}; -use blockstack_lib::burnchains::bitcoin::indexer::{ - BitcoinIndexer, BitcoinIndexerConfig, BitcoinIndexerRuntime, -}; use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; use blockstack_lib::burnchains::db::{BurnchainBlockData, BurnchainDB}; -use blockstack_lib::burnchains::{ - Address, Burnchain, PoxConstants, Txid, BLOCKSTACK_MAGIC_MAINNET, -}; +use blockstack_lib::burnchains::{Address, Burnchain, PoxConstants}; use blockstack_lib::chainstate::burn::db::sortdb::{ - get_block_commit_by_txid, SortitionDB, SortitionHandle, SortitionHandleContext, + get_block_commit_by_txid, SortitionDB, SortitionHandle, }; use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; @@ -63,7 +50,7 @@ use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewa use blockstack_lib::chainstate::nakamoto::NakamotoChainState; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ - ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, + ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, }; use blockstack_lib::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; use blockstack_lib::chainstate::stacks::index::ClarityMarfTrieId; @@ -72,7 +59,6 @@ use blockstack_lib::chainstate::stacks::{StacksBlockHeader, *}; use 
blockstack_lib::clarity::vm::costs::ExecutionCost; use blockstack_lib::clarity::vm::types::StacksAddressExtensions; use blockstack_lib::clarity::vm::ClarityVersion; -use blockstack_lib::clarity_cli::vm_execute; use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::cost_estimates::UnitEstimator; @@ -80,24 +66,24 @@ use blockstack_lib::net::db::LocalPeer; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; use blockstack_lib::net::StacksMessage; -use blockstack_lib::util_lib::db::{sqlite_open, IndexDBTx}; +use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; -use blockstack_lib::{clarity_cli, util_lib}; +use blockstack_lib::{clarity_cli, cli}; use libstackerdb::StackerDBChunkData; -use rusqlite::types::ToSql; use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; use serde_json::{json, Value}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; use stacks_common::types::net::PeerAddress; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; -use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; +use stacks_common::util::{get_epoch_time_ms, sleep_ms}; #[cfg_attr(test, mutants::skip)] fn main() { @@ -881,83 +867,6 @@ simulating a miner. return; } - if argv[1] == "replay-block" { - let print_help_and_exit = || -> ! 
{ - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - eprintln!(" {n} prefix "); - eprintln!(" {n} index-range "); - eprintln!(" {n} range "); - eprintln!(" {n} "); - process::exit(1); - }; - if argv.len() < 2 { - print_help_and_exit(); - } - let start = Instant::now(); - let db_path = &argv[2]; - let mode = argv.get(3).map(String::as_str); - let staging_blocks_db_path = format!("{db_path}/mainnet/chainstate/vm/index.sqlite"); - let conn = - Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) - .unwrap(); - - let query = match mode { - Some("prefix") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", - argv[4] - ), - Some("first") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", - argv[4] - ), - Some("range") => { - let arg4 = argv[4] - .parse::() - .expect(" not a valid u64"); - let arg5 = argv[5].parse::().expect(" not a valid u64"); - let start = arg4.saturating_sub(1); - let blocks = arg5.saturating_sub(arg4); - format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") - } - Some("index-range") => { - let start = argv[4] - .parse::() - .expect(" not a valid u64"); - let end = argv[5].parse::().expect(" not a valid u64"); - let blocks = end.saturating_sub(start); - format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") - } - Some("last") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", - argv[4] - ), - Some(_) => print_help_and_exit(), - // Default to ALL blocks - None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), - }; - - let mut stmt = conn.prepare(&query).unwrap(); - let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); - - let mut index_block_hashes: Vec = vec![]; - while let Ok(Some(row)) = hashes_set.next() { - index_block_hashes.push(row.get(0).unwrap()); - } - - let total = index_block_hashes.len(); - println!("Will check {total} blocks"); - for (i, index_block_hash) in index_block_hashes.iter().enumerate() { - if i % 100 == 0 { - println!("Checked {i}..."); - } - replay_staging_block(db_path, index_block_hash); - } - println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); - process::exit(0); - } - if argv[1] == "deserialize-db" { if argv.len() < 4 { eprintln!("Usage: {} clarity_sqlite_db [byte-prefix]", &argv[0]); @@ -1345,8 +1254,13 @@ simulating a miner. return; } + if argv[1] == "replay-block" { + cli::command_replay_block(&argv[1..]); + process::exit(0); + } + if argv[1] == "replay-mock-mining" { - replay_mock_mining(argv); + cli::command_replay_mock_mining(&argv[1..]); process::exit(0); } @@ -1357,7 +1271,7 @@ simulating a miner. } #[cfg_attr(test, mutants::skip)] -fn tip_mine() { +pub fn tip_mine() { let argv: Vec = env::args().collect(); if argv.len() < 6 { eprintln!( @@ -1593,352 +1507,6 @@ simulating a miner. process::exit(0); } -fn replay_mock_mining(argv: Vec) { - let print_help_and_exit = || -> ! 
{ - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - process::exit(1); - }; - - // Process CLI args - let db_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); - - let blocks_path = argv - .get(3) - .map(PathBuf::from) - .map(fs::canonicalize) - .transpose() - .unwrap_or_else(|e| panic!("Not a valid path: {e}")) - .unwrap_or_else(|| print_help_and_exit()); - - // Validate directory path - if !blocks_path.is_dir() { - panic!("{blocks_path:?} is not a valid directory"); - } - - // Read entries in directory - let dir_entries = blocks_path - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = blocks_path.join(filename); - let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); - debug!("Replaying block from {filepath:?}"; - "block_height" => bh, - "block" => ?block - ); - replay_mock_mined_block(&db_path, block); - } -} - -/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { - let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); - let chain_state_path = format!("{db_path}/mainnet/chainstate/"); - let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); - let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); - let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - - let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); - - let mut sortdb = SortitionDB::connect( - &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), - None, - true, - ) - .unwrap(); - let sort_tx = sortdb.tx_begin_at_tip(); - - let blocks_path = chainstate.blocks_path.clone(); - let (mut chainstate_tx, clarity_instance) = chainstate - .chainstate_tx_begin() - .expect("Failed to start chainstate tx"); - let mut next_staging_block = - StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) - .expect("Failed to load staging block data") - .expect("No such index block hash in block database"); - - next_staging_block.block_data = 
StacksChainState::load_block_bytes( - &blocks_path, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - ) - .unwrap() - .unwrap_or_default(); - - let Some(parent_header_info) = - StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() - else { - println!("Failed to load parent head info for block: {index_block_hash_hex}"); - return; - }; - - let block = - StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); - let block_size = next_staging_block.block_data.len() as u64; - - replay_block( - sort_tx, - chainstate_tx, - clarity_instance, - &burnchain_blocks_db, - &parent_header_info, - &next_staging_block.parent_microblock_hash, - next_staging_block.parent_microblock_seq, - &block_id, - &block, - block_size, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - next_staging_block.commit_burn, - next_staging_block.sortition_burn, - ); -} - -/// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { - let chain_state_path = format!("{db_path}/mainnet/chainstate/"); - let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); - let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); - let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - - let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); - - let mut sortdb = SortitionDB::connect( - &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), - None, - true, - ) - .unwrap(); - let sort_tx = sortdb.tx_begin_at_tip(); - - let (mut chainstate_tx, clarity_instance) = chainstate - .chainstate_tx_begin() - .expect("Failed to start chainstate tx"); - - let block_consensus_hash = &block.consensus_hash; - let block_hash = block.anchored_block.block_hash(); - let block_id = StacksBlockId::new(block_consensus_hash, &block_hash); - let block_size = block - .anchored_block - .block_size() - .map(u64::try_from) - .unwrap_or_else(|e| panic!("Error serializing block {block_hash}: {e}")) - .expect("u64 overflow"); - - let Some(parent_header_info) = StacksChainState::get_anchored_block_header_info( - &mut chainstate_tx, - &block.parent_consensus_hash, - &block.anchored_block.header.parent_block, - ) - .unwrap() else { - println!("Failed to load parent head info for block: {block_hash}"); - return; - }; - - replay_block( - sort_tx, - chainstate_tx, - clarity_instance, - &burnchain_blocks_db, - &parent_header_info, - &block.anchored_block.header.parent_microblock, - block.anchored_block.header.parent_microblock_sequence, - &block_id, - &block.anchored_block, - block_size, - block_consensus_hash, - &block_hash, - // I think the burn is used for miner rewards but not necessary for validation - 0, - 0, - ); -} - -/// Validate a block against chainstate -fn replay_block( - mut sort_tx: IndexDBTx, - mut chainstate_tx: ChainstateTx, - clarity_instance: &mut ClarityInstance, - burnchain_blocks_db: &BurnchainDB, - parent_header_info: &StacksHeaderInfo, - parent_microblock_hash: &BlockHeaderHash, - parent_microblock_seq: u16, - block_id: &StacksBlockId, - block: &StacksBlock, - block_size: u64, - block_consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - 
block_commit_burn: u64, - block_sortition_burn: u64, -) { - let parent_block_header = match &parent_header_info.anchored_header { - StacksBlockHeaderTypes::Epoch2(bh) => bh, - StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), - }; - let parent_block_hash = parent_block_header.block_hash(); - - let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( - &chainstate_tx.tx, - &block_hash, - &parent_block_hash, - &parent_header_info.consensus_hash, - parent_microblock_hash, - parent_microblock_seq, - ) - .unwrap() else { - println!("No microblock stream found for {block_id}"); - return; - }; - - let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = - match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { - Some(sn) => ( - sn.burn_header_hash, - sn.block_height as u32, - sn.burn_header_timestamp, - sn.winning_block_txid, - ), - None => { - // shouldn't happen - panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); - } - }; - - info!( - "Process block {}/{} = {} in burn block {}, parent microblock {}", - block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, - ); - - if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { - let msg = format!( - "Invalid stacks block {}/{} -- does not attach to parent {}/{}", - &block_consensus_hash, - block.block_hash(), - parent_block_header.block_hash(), - &parent_header_info.consensus_hash - ); - println!("{msg}"); - return; - } - - // validation check -- validate parent microblocks and find the ones that connect the - // block's parent to this block. - let next_microblocks = StacksChainState::extract_connecting_microblocks( - &parent_header_info, - &block_consensus_hash, - &block_hash, - block, - next_microblocks, - ) - .unwrap(); - let (last_microblock_hash, last_microblock_seq) = match next_microblocks.len() { - 0 => (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0), - _ => { - let l = next_microblocks.len(); - ( - next_microblocks[l - 1].block_hash(), - next_microblocks[l - 1].header.sequence, - ) - } - }; - assert_eq!(*parent_microblock_hash, last_microblock_hash); - assert_eq!(parent_microblock_seq, last_microblock_seq); - - let block_am = StacksChainState::find_stacks_tip_affirmation_map( - burnchain_blocks_db, - sort_tx.tx(), - block_consensus_hash, - block_hash, - ) - .unwrap(); - - let pox_constants = sort_tx.context.pox_constants.clone(); - - match StacksChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut sort_tx, - &pox_constants, - &parent_header_info, - block_consensus_hash, - &burn_header_hash, - burn_header_height, - burn_header_timestamp, - &block, - block_size, - &next_microblocks, - block_commit_burn, - block_sortition_burn, - block_am.weight(), - true, - ) { - Ok((_receipt, _, _)) => { - info!("Block processed successfully! block = {block_id}"); - } - Err(e) => { - println!("Failed processing block! block = {block_id}, error = {e:?}"); - process::exit(1); - } - }; -} - /// Perform an analysis of the anti-MEV algorithm in epoch 3.0, vis-a-vis the status quo. /// Results are printed to stdout. /// Exits with 0 on success, and 1 on failure. 
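
With this refactor, `replay-block` and `replay-mock-mining` live in `stackslib/src/cli.rs` and `main.rs` merely dispatches to them, forwarding `&argv[1..]`; inside each `command_*` function the subcommand name is therefore `argv[0]` and the chainstate directory is `argv[1]`. Note also that the relocated helpers build paths as `{db_path}/chainstate/` rather than the old hard-coded `{db_path}/mainnet/chainstate/`, which is what allows the integration test below to point them at a regtest working directory. A usage sketch of the resulting `stacks-inspect` subcommands (binary location and chainstate paths are illustrative placeholders, not taken from this patch):

```
# Replay a range of anchored blocks against a local chainstate
$ ./target/release/stacks-inspect replay-block /data/stacks-node range 100000 100100

# Replay blocks previously written to a mock miner's output directory
$ ./target/release/stacks-inspect replay-mock-mining /data/stacks-node /data/mock-miner-output
```
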
diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4474a9e9918..6dbc1fee22f 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -38,6 +38,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; +use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ @@ -12630,7 +12631,7 @@ fn mock_miner_replay() { btc_regtest_controller.bootstrap_chain(201); - eprintln!("Chain bootstrapped..."); + info!("Chain bootstrapped..."); let mut miner_run_loop = neon::RunLoop::new(conf.clone()); let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); @@ -12679,13 +12680,10 @@ fn mock_miner_replay() { let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); - let miner_blocks_processed_start = miner_channel.get_stacks_blocks_processed(); - let follower_blocks_processed_start = follower_channel.get_stacks_blocks_processed(); - thread::spawn(move || follower_run_loop.start(None, 0)); wait_for_runloop(&follower_blocks_processed); - eprintln!("Follower bootup complete!"); + info!("Follower bootup complete!"); // first block wakes up the run loop next_block_and_wait_all( @@ -12719,6 +12717,9 @@ fn mock_miner_replay() { // ---------- Setup finished, start test ---------- + // PART 1 + // Run mock miner configured to output to files + // Mine some blocks for mock miner output for _ in 0..10 { next_block_and_wait_all( @@ -12730,6 +12731,8 @@ fn mock_miner_replay() { thread::sleep(block_gap); } + info!("Mock mining finished"); + let miner_blocks_processed_end = miner_channel.get_stacks_blocks_processed(); let follower_blocks_processed_end = follower_channel.get_stacks_blocks_processed(); @@ -12748,6 +12751,15 @@ fn mock_miner_replay() { assert_eq!(file_count, 12); assert_eq!(miner_blocks_processed_end, follower_blocks_processed_end); + // PART 2 + // Run `replay-mock-mining` command + let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); + let db_path = format!("{}/neon", conf.node.working_dir); + let args: Vec<String> = vec!["replay-mock-mining".into(), db_path, blocks_dir]; + + info!("Replaying mock mined blocks..."); + cli::command_replay_mock_mining(&args); + // ---------- Test finished, clean up ---------- btcd_controller.stop_bitcoind().unwrap(); From e5ad64ff538535b2685e8bcecc7d340094a3e454 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 14 Aug 2024 12:14:17 -0400 Subject: [PATCH 241/910] chore: Allow optional config to `command_replay_mock_mining()` --- stackslib/src/cli.rs | 90 ++++++++++++++----- stackslib/src/main.rs | 4 +- .../src/tests/neon_integrations.rs | 26 +++++- 3 files changed, 95 insertions(+), 25 deletions(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 27efb44bb99..493ab18de58 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -16,6 +16,7 @@ //!
Subcommands used by `stacks-inspect` binary +use std::cell::LazyCell; use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -39,10 +40,40 @@ use crate::clarity_vm::clarity::ClarityInstance; use crate::core::*; use crate::util_lib::db::IndexDBTx; +/// Can be used with CLI commands to support non-mainnet chainstate +/// Allows integration testing of these functions +pub struct StacksChainConfig { + pub chain_id: u32, + pub first_block_height: u64, + pub first_burn_header_hash: BurnchainHeaderHash, + pub first_burn_header_timestamp: u64, + pub pox_constants: PoxConstants, + pub epochs: Vec<StacksEpoch>, +} + +impl StacksChainConfig { + pub fn default_mainnet() -> Self { + Self { + chain_id: CHAIN_ID_MAINNET, + first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH) + .unwrap(), + first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + pox_constants: PoxConstants::mainnet_default(), + epochs: STACKS_EPOCHS_MAINNET.to_vec(), + } + } +} + +const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell<StacksChainConfig> = + LazyCell::new(StacksChainConfig::default_mainnet); + /// Replay blocks from chainstate database -/// Takes args in CLI format: `<command-name> [args...]` /// Terminates on error using `process::exit()` -pub fn command_replay_block(argv: &[String]) { +/// +/// Arguments: +/// - `argv`: Args in CLI format: `<command-name> [args...]` +pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { let print_help_and_exit = || -> !
{ let n = &argv[0]; eprintln!("Usage:"); @@ -202,28 +236,36 @@ pub fn command_replay_mock_mining(argv: &[String]) { "block_height" => bh, "block" => ?block ); - replay_mock_mined_block(&db_path, block); + replay_mock_mined_block(&db_path, block, conf); } } /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { +fn replay_staging_block( + db_path: &str, + index_block_hash_hex: &str, + conf: Option<&StacksChainConfig>, +) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; + let conf = conf.unwrap_or(&default_conf); + + let mainnet = conf.chain_id == CHAIN_ID_MAINNET; let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); let mut sortdb = SortitionDB::connect( &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), + conf.first_block_height, + &conf.first_burn_header_hash, + conf.first_burn_header_timestamp, + &conf.epochs, + conf.pox_constants.clone(), None, true, ) @@ -277,22 +319,30 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { } /// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { +fn replay_mock_mined_block( + db_path: &str, + block: AssembledAnchorBlock, + conf: Option<&StacksChainConfig>, +) { let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; + let conf = conf.unwrap_or(&default_conf); + + let mainnet = conf.chain_id == CHAIN_ID_MAINNET; let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); let mut sortdb = SortitionDB::connect( &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), + conf.first_block_height, + &conf.first_burn_header_hash, + conf.first_burn_header_timestamp, + &conf.epochs, + conf.pox_constants.clone(), None, true, ) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 479bfaa9b74..e22e903e780 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1255,12 +1255,12 @@ simulating a miner. 
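As a rough illustration of the calling convention this patch adopts, binary entry points pass `None` and fall back to mainnet defaults, while integration tests inject a custom config. A minimal standalone sketch, in which `ChainConfig` and `run_replay` are hypothetical stand-ins for `StacksChainConfig` and the `cli` subcommands, not the stackslib API:

// Standalone sketch of the Option-config dispatch pattern; names are illustrative only.
struct ChainConfig {
    chain_id: u32,
}

impl ChainConfig {
    fn default_mainnet() -> Self {
        // 0x00000001 is the well-known Stacks mainnet chain id
        Self { chain_id: 0x00000001 }
    }
}

fn run_replay(argv: &[String], conf: Option<&ChainConfig>) {
    let default_conf = ChainConfig::default_mainnet();
    // `None` (the binary's case) falls back to mainnet defaults;
    // tests pass `Some(custom)` to target regtest chainstate.
    let conf = conf.unwrap_or(&default_conf);
    println!("running {:?} with chain_id {:#010x}", argv[0], conf.chain_id);
}

fn main() {
    let argv: Vec<String> = vec!["replay-mock-mining".into()];
    run_replay(&argv, None);
}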
} if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..]); + cli::command_replay_block(&argv[1..], None); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..]); + cli::command_replay_mock_mining(&argv[1..], None); process::exit(0); } diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 6dbc1fee22f..64b1ca70dac 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5,6 +5,7 @@ use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; +use clarity::consts::BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -38,7 +39,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli; +use stacks::cli::{self, StacksChainConfig}; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ @@ -12627,7 +12628,13 @@ fn mock_miner_replay() { .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); btc_regtest_controller.bootstrap_chain(201); @@ -12756,9 +12763,22 @@ fn mock_miner_replay() { let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); let db_path = format!("{}/neon", conf.node.working_dir); let args: Vec<String> = vec!["replay-mock-mining".into(), db_path, blocks_dir]; + let SortitionDB { + first_block_height, + first_burn_header_hash, + ..
+ } = *btc_regtest_controller.sortdb_mut(); + let replay_config = StacksChainConfig { + chain_id: conf.burnchain.chain_id, + first_block_height, + first_burn_header_hash, + first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), + pox_constants: burnchain_config.pox_constants, + epochs: conf.burnchain.epochs.expect("Missing `epochs` in config"), + }; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args); + cli::command_replay_mock_mining(&args, Some(&replay_config)); // ---------- Test finished, clean up ---------- From 1d836175730eff88eaec9440c608267eb6303494 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 5 Aug 2024 14:02:14 -0500 Subject: [PATCH 242/910] fix: consistent treatment of mod 0 blocks in nakamoto --- stacks-common/src/types/mod.rs | 1 + stackslib/src/burnchains/burnchain.rs | 74 ++++------ stackslib/src/burnchains/mod.rs | 50 ++++++- stackslib/src/chainstate/burn/db/sortdb.rs | 79 ++-------- stackslib/src/chainstate/coordinator/mod.rs | 13 +- stackslib/src/chainstate/coordinator/tests.rs | 2 +- .../chainstate/nakamoto/coordinator/mod.rs | 135 ++++++++++-------- .../chainstate/nakamoto/coordinator/tests.rs | 9 +- .../src/chainstate/nakamoto/tests/node.rs | 2 +- .../chainstate/stacks/boot/contract_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- stackslib/src/core/tests/mod.rs | 5 +- stackslib/src/net/api/getstackers.rs | 3 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 5 +- stackslib/src/net/tests/download/nakamoto.rs | 5 +- stackslib/src/net/tests/relay/nakamoto.rs | 15 +- stackslib/src/net/unsolicited.rs | 9 +- testnet/stacks-node/src/neon_node.rs | 6 +- testnet/stacks-node/src/tests/epoch_21.rs | 7 +- .../src/tests/nakamoto_integrations.rs | 2 +- 22 files changed, 209 insertions(+), 221 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index c9953594597..23f2b006db7 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -82,6 +82,7 @@ pub enum StacksEpochId { Epoch30 = 0x03000, } +#[derive(Debug)] pub enum MempoolCollectionBehavior { ByStacksHeight, ByReceiveTime, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 4002c253aed..a5ecaa04588 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -549,47 +549,43 @@ impl Burnchain { .expect("Overflowed u64 in calculating expected sunset_burn") } + /// Is this the first block to receive rewards in its cycle? + /// This is the mod 1 block. Note: in nakamoto, the signer set for cycle N signs + /// the mod 0 block. pub fn is_reward_cycle_start(&self, burn_height: u64) -> bool { self.pox_constants .is_reward_cycle_start(self.first_block_height, burn_height) } + /// Is this the first block to be signed by the signer set in cycle N? + /// This is the mod 0 block. + pub fn is_naka_signing_cycle_start(&self, burn_height: u64) -> bool { + self.pox_constants + .is_naka_signing_cycle_start(self.first_block_height, burn_height) + } + + /// return the first burn block which receives reward in `reward_cycle`. + /// this is the modulo 1 block pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 { self.pox_constants .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } - /// Compute the reward cycle ID of the PoX reward set which is active as of this burn_height. 
- /// The reward set is calculated at reward cycle index 1, so if this block height is at or after - /// reward cycle index 1, then this behaves like `block_height_to_reward_cycle()`. However, - /// if it's reward cycle index is 0, then it belongs to the previous reward cycle. - pub fn pox_reward_cycle(&self, block_height: u64) -> Option<u64> { - let cycle = self.block_height_to_reward_cycle(block_height)?; - let effective_height = block_height.checked_sub(self.first_block_height)?; - if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 { - Some(cycle.saturating_sub(1)) - } else { - Some(cycle) - } + /// the first burn block that must be *signed* by the signer set of `reward_cycle`. + /// this is the modulo 0 block + pub fn nakamoto_first_block_of_cycle(&self, reward_cycle: u64) -> u64 { + self.pox_constants + .nakamoto_first_block_of_cycle(self.first_block_height, reward_cycle) } + /// What is the reward cycle for this block height? + /// This considers the modulo 0 block to be in reward cycle `n`, even though + /// rewards for cycle `n` do not begin until modulo 1. pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option<u64> { self.pox_constants .block_height_to_reward_cycle(self.first_block_height, block_height) } - pub fn static_block_height_to_reward_cycle( - block_height: u64, - first_block_height: u64, - reward_cycle_length: u64, - ) -> Option<u64> { - PoxConstants::static_block_height_to_reward_cycle( - block_height, - first_block_height, - reward_cycle_length, - ) - } /// Is this block either the first block in a reward cycle or /// right before the reward phase starts? This is the mod 0 or mod 1 /// block. Reward cycle start events (like auto-unlocks) process *after* @@ -607,27 +603,19 @@ impl Burnchain { (effective_height % reward_cycle_length) <= 1 } - pub fn static_is_in_prepare_phase( - first_block_height: u64, - reward_cycle_length: u64, - prepare_length: u64, - block_height: u64, - ) -> bool { - PoxConstants::static_is_in_prepare_phase( - first_block_height, - reward_cycle_length, - prepare_length, - block_height, - ) + /// Does this block include reward slots? + /// This is either in the last prepare_phase_length blocks of the cycle + /// or the modulo 0 block + pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { + self.pox_constants + .is_in_prepare_phase(self.first_block_height, block_height) } - pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { - Self::static_is_in_prepare_phase( - self.first_block_height, - self.pox_constants.reward_cycle_length as u64, - self.pox_constants.prepare_length.into(), - block_height, - ) + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn is_in_naka_prepare_phase(&self, block_height: u64) -> bool { + self.pox_constants + .is_in_naka_prepare_phase(self.first_block_height, block_height) } pub fn regtest(working_dir: &str) -> Burnchain { diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 30cd9f81eef..07a2f73c101 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -517,7 +517,7 @@ impl PoxConstants { } } - /// What's the first block in the prepare phase
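For intuition, the mod-0 / mod-1 distinction these helpers encode can be checked with a small standalone sketch (illustrative code only, not the stackslib API; `first` and `len` are assumed example parameters):

// Standalone sketch of the mod-0 / mod-1 cycle arithmetic; illustrative only.
fn block_height_to_reward_cycle(first: u64, len: u64, height: u64) -> Option<u64> {
    // The mod-0 block counts as part of cycle n, even though rewards
    // for cycle n only begin at the mod-1 block.
    height.checked_sub(first).map(|eff| eff / len)
}

fn is_naka_signing_cycle_start(first: u64, len: u64, height: u64) -> bool {
    (height - first) % len == 0 // mod-0: first block signed by cycle n's signer set
}

fn is_reward_cycle_start(first: u64, len: u64, height: u64) -> bool {
    (height - first) % len == 1 // mod-1: first block to receive rewards
}

fn main() {
    let (first, len) = (0u64, 5u64);
    assert_eq!(block_height_to_reward_cycle(first, len, 10), Some(2));
    assert!(is_naka_signing_cycle_start(first, len, 10)); // height 10 is cycle 2's mod-0 block
    assert!(is_reward_cycle_start(first, len, 11)); // rewards for cycle 2 begin at height 11
}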
pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { let reward_cycle_start = self.reward_cycle_to_block_height(first_block_height, reward_cycle); let prepare_phase_start = reward_cycle_start + u64::from(self.reward_cycle_length) - u64::from(self.prepare_length); prepare_phase_start } @@ -526,18 +526,37 @@ + /// Is this the first block to receive rewards in its cycle? + /// This is the mod 1 block. Note: in nakamoto, the signer set for cycle N signs + /// the mod 0 block. pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { let effective_height = burn_height - first_block_height; // first block of the new reward cycle (effective_height % u64::from(self.reward_cycle_length)) == 1 } + /// Is this the first block to be signed by the signer set in cycle N? + /// This is the mod 0 block. + pub fn is_naka_signing_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { + let effective_height = burn_height - first_block_height; + // first block of the new reward cycle + (effective_height % u64::from(self.reward_cycle_length)) == 0 + } + + /// return the first burn block which receives reward in `reward_cycle`. + /// this is the modulo 1 block pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + 1 } + /// the first burn block that must be *signed* by the signer set of `reward_cycle`. + /// this is the modulo 0 block + pub fn nakamoto_first_block_of_cycle(&self, first_block_height: u64, reward_cycle: u64) -> u64 { + first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + } + pub fn reward_cycle_index(&self, first_block_height: u64, burn_height: u64) -> Option<u64> { let effective_height = burn_height.checked_sub(first_block_height)?; Some(effective_height % u64::from(self.reward_cycle_length)) @@ -609,6 +628,35 @@ impl PoxConstants { } } + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn is_in_naka_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { + Self::static_is_in_naka_prepare_phase( + first_block_height, + u64::from(self.reward_cycle_length), + u64::from(self.prepare_length), + block_height, + ) + } + + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn static_is_in_naka_prepare_phase( + first_block_height: u64, + reward_cycle_length: u64, + prepare_length: u64, + block_height: u64, + ) -> bool { + if block_height <= first_block_height { + // not a reward cycle start if we're the first block after genesis. + false + } else { + let effective_height = block_height - first_block_height; + let reward_index = effective_height % reward_cycle_length; + reward_index > u64::from(reward_cycle_length - prepare_length) + } + } + /// Returns the active reward cycle at the given burn block height /// * `first_block_ht` - the first burn block height that the Stacks network monitored /// * `reward_cycle_len` - the length of each reward cycle in the network. diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 15a3bf56416..909ea46b9f4 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3584,42 +3584,6 @@ impl SortitionDB { Ok(()) } - /// Get the prepare phase end sortition ID of a reward cycle.
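Concretely, with assumed example values first_block_height = 0, reward_cycle_length = 5, and prepare_length = 2, cycle 3's mod-0 (signing) block is height 15, its first rewarded block is height 16, and the prepare phase held during cycle 3 (which selects cycle 4's anchor) starts at height 19. A quick check-sketch:

// Check-sketch of the cycle-start arithmetic above, with assumed parameters.
fn main() {
    let (first, len, prep, cycle) = (0u64, 5u64, 2u64, 3u64);
    let signing_start = first + cycle * len; // nakamoto_first_block_of_cycle: mod-0 block
    let reward_start = first + cycle * len + 1; // reward_cycle_to_block_height: mod-1 block
    let prepare_start = reward_start + len - prep; // prepare phase held *during* cycle 3
    assert_eq!((signing_start, reward_start, prepare_start), (15, 16, 19));
}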
This is the last prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_ccyle() - pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( - &self, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result<SortitionId, db_error> { - self.index_conn() - .get_prepare_phase_end_sortition_id_for_reward_cycle( - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). - pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( - &self, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result<SortitionId, db_error> { - self.index_conn() - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - /// Figure out the reward cycle for `tip` and lookup the preprocessed /// reward set (if it exists) for the active reward cycle during `tip`. /// Returns the reward cycle info on success. @@ -3934,33 +3898,6 @@ impl<'a> SortitionDBConn<'a> { .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } - /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( - &self, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result<SortitionId, db_error> { - let prepare_phase_end = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(1); - - let last_sortition = - get_ancestor_sort_id(self, prepare_phase_end, tip)?.ok_or_else(|| { - error!( - "Could not find prepare phase end ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_end_height" => prepare_phase_end - ); - db_error::NotFoundError - })?; - Ok(last_sortition) - } - /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare /// phase sortition for the prepare phase that began this reward cycle (i.e.
the returned /// sortition will be in the preceding reward cycle) @@ -3971,9 +3908,11 @@ impl<'a> SortitionDBConn<'a> { tip: &SortitionId, reward_cycle_id: u64, ) -> Result<SortitionId, db_error> { - let prepare_phase_start = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(pox_constants.prepare_length.into()); + let reward_cycle_of_prepare_phase = reward_cycle_id.checked_sub(1).ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; + let prepare_phase_start = pox_constants.prepare_phase_start( + first_block_height, + reward_cycle_of_prepare_phase, + ); let first_sortition = get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { @@ -5945,10 +5884,10 @@ impl<'a> SortitionHandleTx<'a> { /// Get the expected number of PoX payouts per output fn get_num_pox_payouts(&self, burn_block_height: u64) -> usize { - let op_num_outputs = if Burnchain::static_is_in_prepare_phase( + let op_num_outputs = if PoxConstants::static_is_in_prepare_phase( self.context.first_block_height, - self.context.pox_constants.reward_cycle_length as u64, - self.context.pox_constants.prepare_length.into(), + u64::from(self.context.pox_constants.reward_cycle_length), + u64::from(self.context.pox_constants.prepare_length), burn_block_height, ) { 1 @@ -6173,7 +6112,7 @@ impl<'a> SortitionHandleTx<'a> { } // if there are qualifying auto-unlocks, record them if !reward_set.start_cycle_state.is_empty() { - let cycle_number = Burnchain::static_block_height_to_reward_cycle( + let cycle_number = PoxConstants::static_block_height_to_reward_cycle( snapshot.block_height, self.context.first_block_height, self.context.pox_constants.reward_cycle_length.into(), diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index d3b6fd5f3eb..60d86996869 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -297,8 +297,8 @@ pub trait RewardSetProvider { fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, @@ -372,20 +372,13 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + reward_cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result<RewardSet, Error> { - self.read_reward_set_nakamoto( - cycle_start_burn_height, - chainstate, - burnchain, - sortdb, - block_id, - false, - ) + self.read_reward_set_nakamoto(chainstate, reward_cycle, burnchain, sortdb, block_id, false) } } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 7bd06aaaeac..81167c64623 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -520,8 +520,8 @@ impl RewardSetProvider for StubbedRewardSetProvider { fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 15cc7f08526..31549d22b0a 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -29,7 +29,7 @@ use
stacks_common::types::chainstate::{ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; -use crate::burnchains::{Burnchain, BurnchainBlockHeader}; +use crate::burnchains::{self, burnchain, Burnchain, BurnchainBlockHeader}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleConn, }; @@ -88,16 +88,13 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { /// RPC endpoints to expose this without flooding loggers. pub fn read_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, debug_log: bool, ) -> Result<RewardSet, Error> { - let cycle = burnchain - .block_height_to_reward_cycle(cycle_start_burn_height) - .expect("FATAL: no reward cycle for burn height"); self.read_reward_set_nakamoto_of_cycle(cycle, chainstate, sortdb, block_id, debug_log) } @@ -280,9 +277,10 @@ fn find_prepare_phase_sortitions( } /// Try to get the reward cycle information for a Nakamoto reward cycle, identified by the -/// burn_height. The reward cycle info returned will be from the reward cycle that is active as of -/// `burn_height`. `sortition_tip` can be any sortition ID that's at a higher height than -/// `burn_height`. +/// `reward_cycle` number. +/// +/// `sortition_tip` can be any sortition ID that's at a higher height than +/// `reward_cycle`'s start height (the 0 block). /// /// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the /// _last_ tenure of _R - 1_'s reward phase (i.e. which takes place toward the end of reward cycle). @@ -297,14 +295,16 @@ fn find_prepare_phase_sortitions( /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( - burn_height: u64, sortition_tip: &SortitionId, + reward_cycle: u64, burnchain: &Burnchain, chain_state: &mut StacksChainState, stacks_tip: &StacksBlockId, sort_db: &mut SortitionDB, provider: &U, ) -> Result<Option<RewardCycleInfo>, Error> { + let burn_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)) .epoch_id; @@ -314,14 +314,8 @@ pub fn get_nakamoto_reward_cycle_info<U: RewardSetProvider>( "FATAL: called a nakamoto function outside of epoch 3" ); - // calculating the reward set for the current reward cycle - let reward_cycle = burnchain - .pox_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); debug!("Processing reward set for Nakamoto reward cycle"; "stacks_tip" => %stacks_tip, - "burn_height" => burn_height, "reward_cycle" => reward_cycle, "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); @@ -376,33 +370,23 @@ pub fn load_nakamoto_reward_set<U: RewardSetProvider>( - let prepare_end_height = burnchain - .reward_cycle_to_block_height(reward_cycle) - .saturating_sub(1); + let cycle_start_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); - let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), cycle_start_height)?
.unwrap_or_else(|| { panic!( "FATAL: no epoch defined for burn height {}", - prepare_end_height + cycle_start_height ) }); - let Some(prepare_end_sortition_id) = - get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? - else { - // reward cycle is too far in the future - warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, prepare_end_height, sortition_tip); - return Ok(None); - }; - // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). let first_epoch30_reward_cycle = burnchain - .pox_reward_cycle(epoch_at_height.start_height) + .block_height_to_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); if !epoch_at_height @@ -412,6 +396,14 @@ pub fn load_nakamoto_reward_set( // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. + let Some(prepare_end_sortition_id) = + get_ancestor_sort_id(&sort_db.index_conn(), cycle_start_height, sortition_tip)? + else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height, sortition_tip); + return Ok(None); + }; + if let Ok(persisted_reward_cycle_info) = sort_db.get_preprocessed_reward_set_of(&prepare_end_sortition_id) { @@ -475,8 +467,18 @@ pub fn load_nakamoto_reward_set( } // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) + let Some(prior_cycle_end) = get_ancestor_sort_id( + &sort_db.index_conn(), + cycle_start_height.saturating_sub(1), + sortition_tip, + )? 
+ else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height.saturating_sub(1), sortition_tip); + return Ok(None); + }; let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, &prepare_end_sortition_id)?; + find_prepare_phase_sortitions(sort_db, burnchain, &prior_cycle_end)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -505,7 +507,7 @@ Err(e) => return Some(Err(e)), Ok(None) => { // no header for this snapshot (possibly invalid) - debug!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + info!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); return None } } @@ -542,12 +544,12 @@ "block_hash" => %stacks_block_hash, "consensus_hash" => %anchor_block_sn.consensus_hash, "txid" => %txid, - "prepare_end_height" => %prepare_end_height, + "cycle_start_height" => %cycle_start_height, "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - prepare_end_height, chain_state, + reward_cycle, burnchain, sort_db, &block_id, @@ -581,26 +583,28 @@ pub fn get_nakamoto_next_recipients( stacks_tip: &StacksBlockId, burnchain: &Burnchain, ) -> Result<Option<RewardSetInfo>, Error> { - let reward_cycle_info = - if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { - let Some((reward_set, _)) = load_nakamoto_reward_set( - burnchain - .pox_reward_cycle(sortition_tip.block_height.saturating_add(1)) - .expect("Sortition block height has no reward cycle"), - &sortition_tip.sortition_id, - burnchain, - chain_state, - stacks_tip, - sort_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(None); - }; - Some(reward_set) - } else { - None + let next_burn_height = sortition_tip.block_height.saturating_add(1); + let Some(reward_cycle) = burnchain.block_height_to_reward_cycle(next_burn_height) else { + error!("CORRUPTION: evaluating burn block height before starting burn height"); + return Err(Error::BurnchainError(burnchains::Error::NoStacksEpoch)); + }; + let reward_cycle_info = if burnchain.is_reward_cycle_start(next_burn_height) { + let Some((reward_set, _)) = load_nakamoto_reward_set( + reward_cycle, + &sortition_tip.sortition_id, + burnchain, + chain_state, + stacks_tip, + sort_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(None); + }; + Some(reward_set) + } else { + None + }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) .map_err(Error::from) @@ -670,7 +674,7 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle. let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(canonical_sn.block_height) + .block_height_to_reward_cycle(canonical_sn.block_height) .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, @@ -906,7 +910,11 @@ impl< }); // are we in the prepare phase? - if !self.burnchain.is_in_prepare_phase(stacks_sn.block_height) { + // TODO: this should *not* include the 0 block!
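The mod-0 exclusion that this TODO and the check below are about can be seen by contrasting the two predicates directly. A standalone sketch mirroring static_is_in_prepare_phase and static_is_in_naka_prepare_phase; the parameters are assumed examples:

// Sketch: legacy vs. Nakamoto prepare-phase checks (illustrative parameters only).
fn is_in_prepare_phase(first: u64, len: u64, prep: u64, h: u64) -> bool {
    let idx = (h - first) % len;
    // Legacy: the mod-0 block counts as part of the prepare phase.
    idx == 0 || idx > len - prep
}

fn is_in_naka_prepare_phase(first: u64, len: u64, prep: u64, h: u64) -> bool {
    if h <= first {
        return false;
    }
    let idx = (h - first) % len;
    // Nakamoto: the mod-0 block is excluded; it belongs to the new cycle's signer set.
    idx > len - prep
}

fn main() {
    let (first, len, prep) = (0u64, 10u64, 3u64);
    assert!(is_in_prepare_phase(first, len, prep, 10)); // mod-0 block: legacy says yes
    assert!(!is_in_naka_prepare_phase(first, len, prep, 10)); // nakamoto says no
    assert!(is_in_naka_prepare_phase(first, len, prep, 9)); // idx 9 > 7: still prepare phase
}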
+ if !self + .burnchain + .is_in_naka_prepare_phase(stacks_sn.block_height) + { // next ready stacks block continue; } @@ -930,7 +938,7 @@ impl< // cycle data let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(canonical_sn.block_height) + .block_height_to_reward_cycle(canonical_sn.block_height) .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, @@ -966,8 +974,8 @@ impl< /// Given a burnchain header, find the PoX reward cycle info fn get_nakamoto_reward_cycle_info( &mut self, - block_height: u64, stacks_tip: &StacksBlockId, + reward_cycle: u64, ) -> Result<Option<RewardCycleInfo>, Error> { let sortition_tip_id = self .canonical_sortition_tip .expect("FATAL: Processing anchor block, but no known sortition tip"); get_nakamoto_reward_cycle_info( - block_height, sortition_tip_id, + sortition_tip_id, + reward_cycle, &self.burnchain, &mut self.chain_state_db, stacks_tip, @@ -1117,10 +1125,15 @@ return Ok(false); }; - let reward_cycle_info = self.get_nakamoto_reward_cycle_info( - header.block_height, - &local_best_nakamoto_tip, - )?; + let Some(reward_cycle) = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + else { + error!("CORRUPTION: Evaluating burn block before start burn height"; "burn_height" => header.block_height); + return Ok(false); + }; + let reward_cycle_info = + self.get_nakamoto_reward_cycle_info(&local_best_nakamoto_tip, reward_cycle)?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 569114aa124..1b971869bcb 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -598,7 +598,7 @@ impl<'a> TestPeer<'a> { info!( "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", pox_constants.is_in_prepare_phase(first_burn_height, burn_height), - pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + pox_constants.is_naka_signing_cycle_start(first_burn_height, burn_height) ); let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); @@ -761,6 +761,9 @@ fn pox_treatment() { peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); blocks.push(block); + // note: we use `is_reward_cycle_start` here rather than naka_reward_cycle_start + // because in this test, we're interested in getting to the reward blocks, + // not validating the signer set.
the reward blocks only begin at modulo 1 if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { break; } @@ -1571,7 +1574,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a if peer .config .burnchain - .is_reward_cycle_start(tip.block_height) + .is_naka_signing_cycle_start(tip.block_height) { rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); @@ -2316,7 +2319,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe if peer .config .burnchain - .is_reward_cycle_start(tip.block_height) + .is_naka_signing_cycle_start(tip.block_height) { rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d3f190de1fe..bd12072a01d 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -737,7 +737,7 @@ impl TestStacksNode { let reward_set = load_nakamoto_reward_set( miner .burnchain - .pox_reward_cycle(sort_tip_sn.block_height) + .block_height_to_reward_cycle(sort_tip_sn.block_height) .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 1a47613c89f..04b74ba2e90 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -490,7 +490,7 @@ impl BurnStateDB for TestSimBurnStateDB { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); - if Burnchain::static_is_in_prepare_phase( + if PoxConstants::static_is_in_prepare_phase( first_block.into(), rc_len.into(), prepare_len.into(), diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 63c22fafb68..ba95d77ead0 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4952,7 +4952,7 @@ impl StacksChainState { chain_tip_burn_header_height: u32, parent_sortition_id: &SortitionId, ) -> Result, Error> { - let pox_reward_cycle = Burnchain::static_block_height_to_reward_cycle( + let pox_reward_cycle = PoxConstants::static_block_height_to_reward_cycle( burn_tip_height, burn_dbconn.get_burn_start_height().into(), burn_dbconn.get_pox_reward_cycle_length().into(), diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 356b117b8bb..d28faed4f22 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2749,7 +2749,7 @@ pub mod test { } pub fn chainstate_path(test_name: &str) -> String { - format!("/tmp/blockstack-test-chainstate-{}", test_name) + format!("/tmp/stacks-node-tests/cs-{}", test_name) } #[test] diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 158feeeba59..72b29cc0979 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1392,8 +1392,9 @@ fn mempool_do_not_replace_tx() { #[case(MempoolCollectionBehavior::ByStacksHeight)] #[case(MempoolCollectionBehavior::ByReceiveTime)] fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let chainstate_path = 
chainstate_path(&path_name); + let path_name = format!("{}::{:?}", function_name!(), behavior); + let mut chainstate = instantiate_chainstate(false, 0x80000000, &path_name); + let chainstate_path = chainstate_path(&path_name); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let mut txs = codec_all_transactions( diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4fd42340708..0b494d19a0f 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -92,7 +92,6 @@ impl GetStackersResponse { cycle_number: u64, ) -> Result<Self, GetStackersErrors> { let cycle_start_height = burnchain.reward_cycle_to_block_height(cycle_number); - let pox_contract_name = burnchain .pox_constants .active_pox_contract(cycle_start_height); @@ -107,7 +106,7 @@ impl GetStackersResponse { let provider = OnChainRewardSetProvider::new(); let stacker_set = provider - .read_reward_set_nakamoto(cycle_start_height, chainstate, burnchain, sortdb, tip, true) + .read_reward_set_nakamoto(chainstate, cycle_number, burnchain, sortdb, tip, true) .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 861a6e6cfab..26605c7e843 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4292,7 +4292,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { - let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); + let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 32dc7d065ab..089f71fdaf3 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -599,7 +599,7 @@ impl Relayer { // is the block signed by the active reward set? let sn_rc = burnchain - .pox_reward_cycle(sn.block_height) + .block_height_to_reward_cycle(sn.block_height) .expect("FATAL: sortition has no reward cycle"); let reward_cycle_info = if let Some(rc_info) = loaded_reward_sets.get(&sn_rc) { rc_info @@ -840,6 +840,7 @@ impl Relayer { // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x // tenure, right after the last 2.x sortition + // TODO: is this true? let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height + 1)?
.expect("FATAL: no epoch defined") .epoch_id; @@ -885,7 +886,7 @@ impl Relayer { let reward_info = match load_nakamoto_reward_set( burnchain - .pox_reward_cycle(block_sn.block_height) + .block_height_to_reward_cycle(block_sn.block_height) .expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 9de9fb087bf..dd0e9e60c4a 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2101,7 +2101,10 @@ fn test_nakamoto_download_run_2_peers() { .get_nakamoto_tip_block_id() .unwrap() .unwrap(); - assert_eq!(tip.block_height, 81); + assert_eq!( + tip.block_height, + 41 + bitvecs.iter().map(|x| x.len() as u64).sum::() + ); // make a neighbor from this peer let boot_observer = TestEventObserver::new(); diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 4df31714741..a0aae1c0354 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -618,7 +618,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle(block_sn.block_height) + .block_height_to_reward_cycle(block_sn.block_height) .unwrap() ), true @@ -642,7 +642,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle( + .block_height_to_reward_cycle( follower.network.burnchain_tip.block_height ) .unwrap() @@ -670,7 +670,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle(ancestor_sn.block_height) + .block_height_to_reward_cycle(ancestor_sn.block_height) .unwrap() ), true @@ -816,9 +816,12 @@ fn test_buffer_nonready_nakamoto_blocks() { let mut all_blocks = vec![]; thread::scope(|s| { - s.spawn(|| { - SeedNode::main(peer, rc_len, seed_comms); - }); + thread::Builder::new() + .name("seed".into()) + .spawn_scoped(s, || { + SeedNode::main(peer, rc_len, seed_comms); + }) + .unwrap(); let mut seed_exited = false; let mut exited_peer = None; diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 5aeadc3dfd5..f9ab5de87ea 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -715,10 +715,11 @@ impl PeerNetwork { ) -> bool { let Some(rc_data) = self.current_reward_sets.get(&reward_cycle) else { info!( - "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set for cycle {}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, - &nakamoto_block.header.block_hash() + &nakamoto_block.header.block_hash(), + reward_cycle, ); return false; }; @@ -733,7 +734,7 @@ impl PeerNetwork { if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { info!( - "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e + "{:?}: signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e ); return false; } @@ -788,7 +789,7 @@ impl PeerNetwork { let reward_set_sn_rc = self .burnchain - .pox_reward_cycle(reward_set_sn.block_height) + .block_height_to_reward_cycle(reward_set_sn.block_height) .expect("FATAL: sortition has no reward cycle"); 
return (Some(reward_set_sn_rc), can_process); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 8c3c4ed1799..f63227441a1 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -818,7 +818,7 @@ impl MicroblockMinerThread { &mined_microblock.block_hash() ); - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { @@ -1773,7 +1773,7 @@ impl BlockMinerThread { /// /// In testing, we ignore the parent stacks block hash because we don't have an easy way to /// reproduce it in integration tests. - #[cfg(not(any(test, feature = "testing")))] + #[cfg(not(test))] fn make_microblock_private_key( &mut self, parent_stacks_hash: &StacksBlockId, @@ -1786,7 +1786,7 @@ impl BlockMinerThread { /// Get the microblock private key we'll be using for this tenure, should we win. /// Return the private key on success - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn make_microblock_private_key( &mut self, _parent_stacks_hash: &StacksBlockId, diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 66964679304..bb168b28b97 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1296,12 +1296,7 @@ fn transition_adds_get_pox_addr_recipients() { // NOTE: there's an even number of payouts here, so this works eprintln!("payout at {} = {}", burn_block_height, &payout); - if Burnchain::static_is_in_prepare_phase( - 0, - pox_constants.reward_cycle_length as u64, - pox_constants.prepare_length.into(), - burn_block_height, - ) { + if pox_constants.is_in_prepare_phase(0, burn_block_height) { // in prepare phase eprintln!("{} in prepare phase", burn_block_height); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f3cf76af04b..3c36565cc8c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -510,7 +510,7 @@ pub fn read_and_sign_block_proposal( let reward_set = load_nakamoto_reward_set( burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) + .block_height_to_reward_cycle(tip.block_height) .unwrap(), &tip.sortition_id, &burnchain, From 94a0039b9541eecd0df2cda6ef839fed3aff4ff8 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 13 Aug 2024 13:02:28 -0700 Subject: [PATCH 243/910] feat: update prepare_pox_4_test to optionally run nakamoto --- .../src/chainstate/stacks/boot/pox_4_tests.rs | 408 ++++++++++++++---- .../chainstate/stacks/boot/signers_tests.rs | 3 +- stackslib/src/net/tests/mod.rs | 27 +- 3 files changed, 352 insertions(+), 86 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index ac59772f320..db890316ec2 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -49,7 +49,7 @@ use wsts::curve::point::{Compressed, Point}; use super::test::*; use super::RawRewardSetEntry; use crate::burnchains::{Burnchain, PoxConstants}; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use 
crate::chainstate::coordinator::tests::pox_addr_from; @@ -81,6 +81,7 @@ use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; use crate::clarity_vm::database::HeadersDBConn; use crate::core::*; use crate::net::test::{TestEventObserver, TestEventObserverBlock, TestPeer, TestPeerConfig}; +use crate::net::tests::NakamotoBootPlan; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{DBConn, FromRow}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; @@ -138,7 +139,7 @@ fn make_simple_pox_4_lock( ) } -pub fn make_test_epochs_pox() -> (Vec<StacksEpoch>, PoxConstants) { +pub fn make_test_epochs_pox(use_nakamoto: bool) -> (Vec<StacksEpoch>, PoxConstants) { let EMPTY_SORTITIONS = 25; let EPOCH_2_1_HEIGHT = EMPTY_SORTITIONS + 11; // 36 let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 @@ -147,8 +148,9 @@ pub fn make_test_epochs_pox() -> (Vec<StacksEpoch>, PoxConstants) { // this means that cycle 11 should also be treated like a "burn" let EPOCH_2_4_HEIGHT = EPOCH_2_3_HEIGHT + 4; // 56 let EPOCH_2_5_HEIGHT = EPOCH_2_4_HEIGHT + 44; // 100 + let EPOCH_3_0_HEIGHT = EPOCH_2_5_HEIGHT + 23; // 123 - let epochs = vec![ + let mut epochs = vec![ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0,
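The cumulative epoch heights defined above are easier to audit as plain arithmetic; a check-sketch whose constants mirror the inline comments in make_test_epochs_pox:

// Check-sketch of the cumulative epoch heights in make_test_epochs_pox.
fn main() {
    let empty_sortitions = 25u64;
    let epoch_2_1 = empty_sortitions + 11; // 36
    let epoch_2_2 = epoch_2_1 + 14; // 50
    let epoch_2_3 = epoch_2_2 + 2; // 52
    let epoch_2_4 = epoch_2_3 + 4; // 56
    let epoch_2_5 = epoch_2_4 + 44; // 100
    let epoch_3_0 = epoch_2_5 + 23; // 123, only used when use_nakamoto = true
    assert_eq!(
        (epoch_2_1, epoch_2_2, epoch_2_3, epoch_2_4, epoch_2_5, epoch_3_0),
        (36, 50, 52, 56, 100, 123)
    );
}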
let EMPTY_SORTITIONS = 25; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -890,7 +908,7 @@ fn pox_lock_unlock() { // Config for this test // We are going to try locking for 2 reward cycles (10 blocks) let lock_period = 2; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1064,7 +1082,7 @@ fn pox_3_defunct() { // Config for this test // We are going to try locking for 2 reward cycles (10 blocks) let lock_period = 2; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1199,7 +1217,7 @@ fn pox_3_unlocks() { // Config for this test // We are going to try locking for 4 reward cycles (20 blocks) let lock_period = 4; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1350,7 +1368,7 @@ fn pox_3_unlocks() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1739,7 +1757,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2166,7 +2184,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2398,7 +2416,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() #[test] fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2519,7 +2537,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { #[test] fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2638,7 +2656,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { #[test] fn pox_4_delegate_stack_increase_events() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2744,7 +2762,7 @@ fn pox_4_delegate_stack_increase_events() { #[test] fn pox_4_revoke_delegate_stx_events() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = 
Burnchain::default_unittest( 0, @@ -2982,7 +3000,7 @@ fn verify_signer_key_sig( #[test] fn verify_signer_key_signatures() { - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -3278,8 +3296,8 @@ fn verify_signer_key_signatures() { fn stack_stx_verify_signer_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -3598,8 +3616,8 @@ fn stack_stx_verify_signer_sig() { fn stack_extend_verify_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -3851,8 +3869,8 @@ fn stack_extend_verify_sig() { fn stack_agg_commit_verify_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -4662,8 +4680,8 @@ fn stack_agg_increase() { fn stack_increase_verify_signer_key() { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -4950,8 +4968,8 @@ fn stack_increase_verify_signer_key() { fn stack_increase_different_signer_keys() { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5133,11 +5151,22 @@ fn balances_from_keys( .collect() } -#[test] -fn stack_stx_signer_key() { +#[rstest] +#[case(true)] +#[case(false)] +fn stack_stx_signer_key(#[case] use_nakamoto: bool) { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); + + info!("--- starting stack-stx test ---"); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5147,6 +5176,7 @@ fn stack_stx_signer_key() { let signer_key_val = Value::buff_from(signer_public_key.to_bytes_compressed()).unwrap(); let reward_cycle 
= get_current_reward_cycle(&peer, &burnchain); + info!("Reward cycle: {reward_cycle}"); // (define-public (stack-stx (amount-ustx uint) // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -5181,7 +5211,15 @@ fn stack_stx_signer_key() { ], )]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + // let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &txs, + &mut coinbase_nonce, + &mut test_signers, + use_nakamoto, + ); + // peer.make_nakamoto_tenure(tenure_change, coinbase, signers, block_builder) let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5209,13 +5247,20 @@ fn stack_stx_signer_key() { .block_height_to_reward_cycle(block_height) .unwrap(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| { + entry.reward_address == PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap() + }) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), &signer_public_key.to_bytes_compressed().as_slice(), @@ -5226,8 +5271,8 @@ fn stack_stx_signer_key() { /// Test `stack-stx` using signer key authorization fn stack_stx_signer_auth() { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5334,8 +5379,8 @@ fn stack_stx_signer_auth() { fn stack_agg_commit_signer_auth() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5455,8 +5500,8 @@ fn stack_agg_commit_signer_auth() { fn stack_extend_signer_auth() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5561,8 +5606,8 @@ fn stack_extend_signer_auth() { fn test_set_signer_key_auth() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let 
mut coinbase_nonce = coinbase_nonce; @@ -5768,8 +5813,8 @@ fn test_set_signer_key_auth() { #[test] fn stack_extend_signer_key() { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), None, false); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5880,8 +5925,8 @@ fn stack_extend_signer_key() { #[test] fn delegate_stack_stx_signer_key() { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), None, false); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5997,8 +6042,8 @@ fn delegate_stack_stx_signer_key() { #[test] fn delegate_stack_stx_extend_signer_key() { let lock_period: u128 = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), None, false); let alice_nonce = 0; let alice_stacker_key = &keys[0]; @@ -6189,8 +6234,8 @@ fn delegate_stack_stx_extend_signer_key() { fn stack_increase() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut alice_nonce = 0; let alice_stacking_private_key = &keys[0]; @@ -6332,8 +6377,8 @@ fn stack_increase() { fn delegate_stack_increase() { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8066,8 +8111,8 @@ fn test_scenario_four() { fn delegate_stack_increase_err() { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8358,6 +8403,7 @@ pub fn get_stacking_minimum(peer: &mut TestPeer, latest_block: &StacksBlockId) - pub fn prepare_pox4_test<'a>( test_name: &str, observer: Option<&'a TestEventObserver>, + use_nakamoto: bool, ) -> ( Burnchain, TestPeer<'a>, @@ -8365,8 +8411,9 @@ pub fn prepare_pox4_test<'a>( StacksBlockId, u64, usize, + TestSigners, ) { - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(use_nakamoto); let mut burnchain = Burnchain::default_unittest( 0, @@ -8377,33 +8424,228 @@ pub fn prepare_pox4_test<'a>( let (mut peer, keys) = 
instantiate_pox_peer_with_epoch(&burnchain, test_name, Some(epochs.clone()), observer); - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; + if use_nakamoto { + let test_key = keys[3].clone(); + let test_keys = vec![test_key.clone()]; + + let private_key = StacksPrivateKey::from_seed(&[2]); + let test_signers = TestSigners::new(test_keys.clone()); + let test_stackers = test_keys + .iter() + .map(|key| TestStacker { + signer_private_key: key.clone(), + stacker_private_key: key.clone(), + // amount: u64::MAX as u128 - 10000, + // amount: 2048000 * POX_THRESHOLD_STEPS_USTX * 2, + amount: 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr: Some(pox_addr_from(&key)), + max_amount: None, + }) + .collect::<Vec<_>>(); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 41; + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_test_stackers(test_stackers) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.add_default_balance = false; + let addrs: Vec<StacksAddress> = keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + + let balances: Vec<(PrincipalData, u64)> = addrs + .clone() + .into_iter() + .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) + .collect(); + boot_plan.initial_balances = balances; + boot_plan.pox_constants = pox_constants.clone(); + burnchain.pox_constants = pox_constants.clone(); + + info!("---- Booting into Nakamoto Peer ----"); + let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); + // let mut blocks = vec![]; + let sort_db = peer.sortdb.as_ref().unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + let coinbase_nonce = 0; - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - assert_latest_was_burn(&mut peer); - } - } + let block_height = get_tip(peer.sortdb.as_ref()).block_height; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + info!("Block height: {}", block_height); - info!("Block height: {}", block_height); + ( + burnchain, + peer, + keys, + latest_block, + block_height, + coinbase_nonce, + test_signers, + ) + } else { + // assert_eq!(burnchain.pox_constants.reward_slots(), 6); - ( - burnchain, - peer, - keys, - latest_block, - block_height, - coinbase_nonce, - ) + // Advance into pox4 + let target_height = burnchain.pox_constants.pox_4_activation_height; + let mut coinbase_nonce = 0; + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + let block_height = get_tip(peer.sortdb.as_ref()).block_height; + ( + burnchain, + peer, + keys, + latest_block, + block_height, + 
coinbase_nonce, + TestSigners::new(vec![]), + ) + // let target_epoch = if use_nakamoto { + // StacksEpochId::Epoch30 + // } else { + // StacksEpochId::Epoch25 + // }; + // let height_25 = epochs + // .iter() + // .find(|e| e.epoch_id == StacksEpochId::Epoch25) + // .unwrap() + // .start_height; + // // let height_25 = epochs.iter().find(|e| e.epoch_id == StacksEpochId::Epoch25).unwrap().start_height; + // let target_height = epochs + // .iter() + // .find(|e| e.epoch_id == target_epoch) + // .unwrap() + // .start_height; + // let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if use_nakamoto { + // // Go to 2.5, stack, then 3.0 + // while get_tip(peer.sortdb.as_ref()).block_height < height_25 { + // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // } + + // let tip = get_tip(peer.sortdb.as_ref()); + // let reward_cycle = peer.get_reward_cycle() as u128; + + // let min_ustx = with_sortdb(&mut peer, |chainstate, sortdb| { + // chainstate.get_stacking_minimum(sortdb, &latest_block) + // }) + // .unwrap(); + // info!("Building stacking txs"); + // // Make all the test Stackers stack + // let stack_txs: Vec<_> = test_stackers + // .clone() + // .iter() + // .map(|test_stacker| { + // info!( + // "Making PoX-4 lockup for {}; {}", + // test_stacker.amount, + // test_stacker.amount > min_ustx + // ); + // let pox_addr = test_stacker.pox_addr.clone().unwrap(); + // let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); + // let signature = make_signer_key_signature( + // &pox_addr, + // &test_stacker.signer_private_key, + // reward_cycle.into(), + // &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, + // 12_u128, + // max_amount, + // 1, + // ); + // make_pox_4_lockup( + // &test_stacker.stacker_private_key, + // 0, + // test_stacker.amount, + // &pox_addr, + // 12, + // &StacksPublicKey::from_private(&test_stacker.signer_private_key), + // tip.block_height, + // Some(signature), + // max_amount, + // 1, + // ) + // }) + // .collect(); + // latest_block = peer.tenure_with_txs(&stack_txs, &mut coinbase_nonce); + // } + // while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // // if we reach epoch 2.1, perform the check + // // if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + // // assert_latest_was_burn(&mut peer); + // // } + // } + } +} + +pub fn tenure_with_txs( + peer: &mut TestPeer, + txs: &[StacksTransaction], + coinbase_nonce: &mut usize, + test_signers: &mut TestSigners, + use_nakamoto: bool, +) -> StacksBlockId { + if use_nakamoto { + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + test_signers, + |_miner, _chainstate, _sort_dbconn, _blocks| { + info!("Building nakamoto block. 
Blocks len {}", _blocks.len()); + if _blocks.len() == 0 { + txs.to_vec() + } else { + vec![] + } + }, + ); + let blocks: Vec<_> = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + // let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + // .unwrap() + // .unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + // let tip = StacksBlockId:: + latest_block + } else { + peer.tenure_with_txs(txs, coinbase_nonce) + } } + pub fn get_last_block_sender_transactions( observer: &TestEventObserver, address: StacksAddress, @@ -8434,7 +8676,7 @@ fn missed_slots_no_unlock() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, mut pox_constants) = make_test_epochs_pox(); + let (epochs, mut pox_constants) = make_test_epochs_pox(false); pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( @@ -8685,7 +8927,7 @@ fn no_lockups_2_5() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, mut pox_constants) = make_test_epochs_pox(); + let (epochs, mut pox_constants) = make_test_epochs_pox(false); pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 37b2e016b75..bf3b5f312c6 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -171,7 +171,8 @@ fn make_signer_sanity_panic_1() { #[test] fn signers_get_config() { - let (burnchain, mut peer, keys, latest_block, ..) = prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, ..) 
= + prepare_pox4_test(function_name!(), None, false); assert_eq!( readonly_call( diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 05477bb08c0..462a9fcb97a 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -89,6 +89,8 @@ pub struct NakamotoBootPlan { pub test_signers: TestSigners, pub observer: Option<TestEventObserver>, pub num_peers: usize, + /// Whether to add an initial balance for `private_key`'s account + pub add_default_balance: bool, } impl NakamotoBootPlan { @@ -103,6 +105,7 @@ impl NakamotoBootPlan { test_signers, observer: Some(TestEventObserver::new()), num_peers: 0, + add_default_balance: true, } } @@ -347,8 +350,12 @@ impl NakamotoBootPlan { + 1) .into(), )); - peer_config.initial_balances = - vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.initial_balances = vec![]; + if self.add_default_balance { + peer_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } peer_config .initial_balances .append(&mut self.initial_balances.clone()); @@ -467,6 +474,17 @@ impl NakamotoBootPlan { .block_height_to_reward_cycle(sortition_height.into()) .unwrap(); + let sortdb = peer.sortdb(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip_index_block = tip.get_canonical_stacks_block_id(); + + let min_ustx = with_sortdb(peer, |chainstate, sortdb| { + chainstate.get_stacking_minimum(sortdb, &tip_index_block) + }) + .unwrap(); + + info!("Minimum USTX for stacking: {}", min_ustx); + // Make all the test Stackers stack let stack_txs: Vec<_> = peer .config .test_stackers .clone() .unwrap_or(vec![]) .iter() .map(|test_stacker| { + info!( + "Making PoX-4 lockup for {}; {}", + test_stacker.amount, + test_stacker.amount > min_ustx + ); let pox_addr = test_stacker .pox_addr .clone() From de2e88e38e15a4715ef41eae3b6a030a800756d5 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 13 Aug 2024 13:04:38 -0700 Subject: [PATCH 244/910] fix: remove comments & debugging log --- .../src/chainstate/stacks/boot/pox_4_tests.rs | 80 ------------------- stackslib/src/net/tests/mod.rs | 16 ---- 2 files changed, 96 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index db890316ec2..779da57e3c1 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -8435,8 +8435,6 @@ pub fn prepare_pox4_test<'a>( .map(|key| TestStacker { signer_private_key: key.clone(), stacker_private_key: key.clone(), - // amount: u64::MAX as u128 - 10000, - // amount: 2048000 * POX_THRESHOLD_STEPS_USTX * 2, amount: 1024 * POX_THRESHOLD_STEPS_USTX, pox_addr: Some(pox_addr_from(&key)), max_amount: None, @@ -8466,7 +8464,6 @@ pub fn prepare_pox4_test<'a>( info!("---- Booting into Nakamoto Peer ----"); let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); - // let mut blocks = vec![]; let sort_db = peer.sortdb.as_ref().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() .unwrap() .unwrap(); @@ -8489,8 +8486,6 @@ pub fn prepare_pox4_test<'a>( test_signers, ) } else { - // assert_eq!(burnchain.pox_constants.reward_slots(), 6); - // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; let mut coinbase_nonce = 0; @@ -8512,81 +8507,6 @@ pub fn prepare_pox4_test<'a>( coinbase_nonce, TestSigners::new(vec![]), ) - // let target_epoch = if use_nakamoto { - // StacksEpochId::Epoch30 - // } else { - // 
StacksEpochId::Epoch25 - // }; - // let height_25 = epochs - // .iter() - // .find(|e| e.epoch_id == StacksEpochId::Epoch25) - // .unwrap() - // .start_height; - // // let height_25 = epochs.iter().find(|e| e.epoch_id == StacksEpochId::Epoch25).unwrap().start_height; - // let target_height = epochs - // .iter() - // .find(|e| e.epoch_id == target_epoch) - // .unwrap() - // .start_height; - // let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if use_nakamoto { - // // Go to 2.5, stack, then 3.0 - // while get_tip(peer.sortdb.as_ref()).block_height < height_25 { - // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // } - - // let tip = get_tip(peer.sortdb.as_ref()); - // let reward_cycle = peer.get_reward_cycle() as u128; - - // let min_ustx = with_sortdb(&mut peer, |chainstate, sortdb| { - // chainstate.get_stacking_minimum(sortdb, &latest_block) - // }) - // .unwrap(); - // info!("Building stacking txs"); - // // Make all the test Stackers stack - // let stack_txs: Vec<_> = test_stackers - // .clone() - // .iter() - // .map(|test_stacker| { - // info!( - // "Making PoX-4 lockup for {}; {}", - // test_stacker.amount, - // test_stacker.amount > min_ustx - // ); - // let pox_addr = test_stacker.pox_addr.clone().unwrap(); - // let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - // let signature = make_signer_key_signature( - // &pox_addr, - // &test_stacker.signer_private_key, - // reward_cycle.into(), - // &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - // 12_u128, - // max_amount, - // 1, - // ); - // make_pox_4_lockup( - // &test_stacker.stacker_private_key, - // 0, - // test_stacker.amount, - // &pox_addr, - // 12, - // &StacksPublicKey::from_private(&test_stacker.signer_private_key), - // tip.block_height, - // Some(signature), - // max_amount, - // 1, - // ) - // }) - // .collect(); - // latest_block = peer.tenure_with_txs(&stack_txs, &mut coinbase_nonce); - // } - // while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // // if we reach epoch 2.1, perform the check - // // if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - // // assert_latest_was_burn(&mut peer); - // // } - // } } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 462a9fcb97a..de07c601405 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -474,17 +474,6 @@ impl NakamotoBootPlan { .block_height_to_reward_cycle(sortition_height.into()) .unwrap(); - let sortdb = peer.sortdb(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - let tip_index_block = tip.get_canonical_stacks_block_id(); - - let min_ustx = with_sortdb(peer, |chainstate, sortdb| { - chainstate.get_stacking_minimum(sortdb, &tip_index_block) - }) - .unwrap(); - - info!("Minimum USTX for stacking: {}", min_ustx); - // Make all the test Stackers stack let stack_txs: Vec<_> = peer .config @@ -493,11 +482,6 @@ impl NakamotoBootPlan { .unwrap_or(vec![]) .iter() .map(|test_stacker| { - info!( - "Making PoX-4 lockup for {}; {}", - test_stacker.amount, - test_stacker.amount > min_ustx - ); let pox_addr = test_stacker .pox_addr .clone() From 0fa2d24c1672fb4fb546817acd3a9043434ff83f Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 13 Aug 2024 16:43:39 -0700 Subject: [PATCH 245/910] feat: update majority of pox4 tests to use nakamoto case --- 
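Note on this patch: most PoX-4 unit tests become rstest cases that run once against an epoch 2.5 peer and once against a Nakamoto (epoch 3.0) peer. A minimal sketch of the template mechanism the diff relies on, assuming the `rstest` and `rstest_reuse` crates are available as dev-dependencies (the example test body is illustrative, not part of the patch):

```rust
use rstest::rstest;
use rstest_reuse::{apply, template};

// The template fixes the set of cases once...
#[template]
#[rstest]
#[case::epoch_30(true)]
#[case::epoch_25(false)]
fn nakamoto_cases(#[case] use_nakamoto: bool) {}

// ...and every test annotated with #[apply(nakamoto_cases)] expands into
// both cases, receiving `use_nakamoto` as an argument.
#[apply(nakamoto_cases)]
fn example_case(use_nakamoto: bool) {
    // Illustrative body: the real tests thread `use_nakamoto` into
    // `prepare_pox4_test` to choose between the two boot paths.
    let expected_reward_set_len = if use_nakamoto { 2 } else { 1 };
    assert!(expected_reward_set_len >= 1);
}
```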
.../src/chainstate/stacks/boot/pox_4_tests.rs | 395 ++++++++++++------ 1 file changed, 271 insertions(+), 124 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 779da57e3c1..362ac3670ad 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -97,6 +97,14 @@ pub fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() } +/// Helper rstest template for running tests in both 2.5 +/// and 3.0 epochs. +#[template] +#[rstest] +#[case::epoch_30(true)] +#[case::epoch_25(false)] +fn nakamoto_cases(#[case] use_nakamoto: bool) {} + fn make_simple_pox_4_lock( key: &StacksPrivateKey, peer: &mut TestPeer, @@ -4676,12 +4684,12 @@ fn stack_agg_increase() { assert_eq!(bob_aggregate_commit_reward_index, &Value::UInt(1)); } -#[test] -fn stack_increase_verify_signer_key() { +#[apply(nakamoto_cases)] +fn stack_increase_verify_signer_key(use_nakamoto: bool) { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -4919,7 +4927,8 @@ fn stack_increase_verify_signer_key() { 1, ); - let latest_block = peer.tenure_with_txs( + let latest_block = tenure_with_txs( + &mut peer, &[ stack_tx, invalid_cycle_tx, @@ -4933,6 +4942,7 @@ fn stack_increase_verify_signer_key() { stack_increase, ], &mut coinbase_nonce, + &mut test_signers, ); let txs = get_last_block_sender_transactions(&observer, stacker_addr); @@ -4961,15 +4971,15 @@ fn stack_increase_verify_signer_key() { .expect("Expected ok result from tx"); } -#[test] +#[apply(nakamoto_cases)] /// Verify that when calling `stack-increase`, the function /// fails if the signer key for each cycle being updated is not the same /// as the provided `signer-key` argument -fn stack_increase_different_signer_keys() { +fn stack_increase_different_signer_keys(use_nakamoto: bool) { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5054,8 +5064,12 @@ fn stack_increase_different_signer_keys() { 1, ); - let latest_block = - peer.tenure_with_txs(&[stack_tx, extend_tx, stack_increase], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[stack_tx, extend_tx, stack_increase], + &mut coinbase_nonce, + &mut test_signers, + ); let txs = get_last_block_sender_transactions(&observer, stacker_addr.clone()); @@ -5151,10 +5165,8 @@ fn balances_from_keys( .collect() } -#[rstest] -#[case(true)] -#[case(false)] -fn stack_stx_signer_key(#[case] use_nakamoto: bool) { +#[apply(nakamoto_cases)] +fn stack_stx_signer_key(use_nakamoto: bool) { let observer = TestEventObserver::new(); let ( burnchain, @@ -5212,13 +5224,7 @@ fn stack_stx_signer_key(#[case] use_nakamoto: bool) { )]; // let latest_block = 
peer.tenure_with_txs(&txs, &mut coinbase_nonce); - let latest_block = tenure_with_txs( - &mut peer, - &txs, - &mut coinbase_nonce, - &mut test_signers, - use_nakamoto, - ); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); // peer.make_nakamoto_tenure(tenure_change, coinbase, signers, block_builder) let stacking_state = get_stacking_state_pox_4( &mut peer, @@ -5267,12 +5273,19 @@ fn stack_stx_signer_key(#[case] use_nakamoto: bool) { ); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-stx` using signer key authorization -fn stack_stx_signer_auth() { +fn stack_stx_signer_auth(use_nakamoto: bool) { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5336,7 +5349,7 @@ fn stack_stx_signer_auth() { let txs = vec![failed_stack_tx, enable_auth_tx, valid_stack_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5374,13 +5387,13 @@ fn stack_stx_signer_auth() { assert_eq!(enable_tx_result, Value::okay_true()); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-aggregation-commit` using signer key authorization -fn stack_agg_commit_signer_auth() { +fn stack_agg_commit_signer_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5479,7 +5492,7 @@ fn stack_agg_commit_signer_auth() { valid_agg_tx, ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_txs = get_last_block_sender_transactions(&observer, delegate_addr); @@ -5494,14 +5507,14 @@ fn stack_agg_commit_signer_auth() { .expect("Expected ok result from stack-agg-commit tx"); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-extend` using signer key authorization /// instead of signatures -fn stack_extend_signer_auth() { +fn stack_extend_signer_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5585,7 +5598,7 @@ fn stack_extend_signer_auth() { let txs = vec![stack_tx, invalid_cycle_tx, enable_auth_tx, valid_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_txs = 
get_last_block_sender_transactions(&observer, stacker_addr); @@ -5601,13 +5614,13 @@ fn stack_extend_signer_auth() { .expect("Expected ok result from stack-extend tx"); } -#[test] +#[apply(nakamoto_cases)] /// Test `set-signer-key-authorization` function -fn test_set_signer_key_auth() { +fn test_set_signer_key_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5684,7 +5697,8 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs( + let latest_block = tenure_with_txs( + &mut peer, &[ invalid_enable_tx, invalid_tx_period, @@ -5692,6 +5706,7 @@ fn test_set_signer_key_auth() { disable_auth_tx, ], &mut coinbase_nonce, + &mut test_signers, ); let alice_txs = get_last_block_sender_transactions(&observer, alice_addr); @@ -5761,7 +5776,12 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs(&[enable_auth_tx], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[enable_auth_tx], + &mut coinbase_nonce, + &mut test_signers, + ); let signer_key_enabled = get_signer_key_authorization_pox_4( &mut peer, @@ -5793,7 +5813,12 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs(&[disable_auth_tx], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[disable_auth_tx], + &mut coinbase_nonce, + &mut test_signers, + ); let signer_key_enabled = get_signer_key_authorization_pox_4( &mut peer, @@ -5810,11 +5835,18 @@ fn test_set_signer_key_auth() { assert_eq!(signer_key_enabled.unwrap(), false); } -#[test] -fn stack_extend_signer_key() { +#[apply(nakamoto_cases)] +fn stack_extend_signer_key(use_nakamoto: bool) { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), None, false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5864,7 +5896,7 @@ fn stack_extend_signer_key() { stacker_nonce += 1; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let mut latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let signature = make_signer_key_signature( &pox_addr, @@ -5887,7 +5919,12 @@ fn stack_extend_signer_key() { 1, )]; - latest_block = peer.tenure_with_txs(&update_txs, &mut coinbase_nonce); + latest_block = tenure_with_txs( + &mut peer, + &update_txs, + &mut coinbase_nonce, + &mut test_signers, + ); let new_stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5900,21 +5937,35 @@ fn stack_extend_signer_key() { let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let extend_cycle_ht = burnchain.reward_cycle_to_block_height(extend_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), 
- reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), signer_bytes.as_slice(),); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address + &reward_entry.signer.unwrap(), + signer_extend_bytes.as_slice(), ); assert_eq!( &reward_entry.signer.unwrap(), @@ -5922,11 +5973,18 @@ fn stack_extend_signer_key() { ); } -#[test] -fn delegate_stack_stx_signer_key() { +#[apply(nakamoto_cases)] +fn delegate_stack_stx_signer_key(use_nakamoto: bool) { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), None, false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -6000,7 +6058,7 @@ fn delegate_stack_stx_signer_key() { ), ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegation_state = get_delegation_state_pox_4( &mut peer, @@ -6019,13 +6077,18 @@ fn delegate_stack_stx_signer_key() { .expect_tuple(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), signer_key.to_bytes_compressed().as_slice() @@ -6039,11 +6102,18 @@ fn delegate_stack_stx_signer_key() { // // This test asserts that the signing key in Alice's stacking state // is equal to Bob's 'new' signer key. 
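The same reward-set lookup recurs in the hunks above and below. As a standalone sketch (assuming `RawRewardSetEntry` is the entry type returned by `get_reward_set_entries_at`; the helper name is hypothetical):

```rust
// Under Nakamoto boot the boot-time test stacker contributes an extra
// reward-set entry, so tests can no longer assume a single entry and
// `pop()` it; they look the entry up by PoX address instead.
fn find_reward_entry<'a>(
    reward_set: &'a [RawRewardSetEntry],
    pox_addr: &PoxAddress,
    use_nakamoto: bool,
) -> &'a RawRewardSetEntry {
    // One entry from the test itself, plus one from the boot stacker.
    assert_eq!(reward_set.len(), if use_nakamoto { 2 } else { 1 });
    reward_set
        .iter()
        .find(|entry| &entry.reward_address == pox_addr)
        .expect("No reward entry found")
}
```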
-#[test] -fn delegate_stack_stx_extend_signer_key() { +#[apply(nakamoto_cases)] +fn delegate_stack_stx_extend_signer_key(use_nakamoto: bool) { let lock_period: u128 = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), None, false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let alice_nonce = 0; let alice_stacker_key = &keys[0]; @@ -6094,7 +6164,7 @@ fn delegate_stack_stx_extend_signer_key() { // Both are pox_4 helpers found in mod.rs let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegation_state = get_delegation_state_pox_4( &mut peer, @@ -6199,7 +6269,7 @@ fn delegate_stack_stx_extend_signer_key() { // Next tx arr calls a delegate_stack_extend pox_4 helper found in mod.rs let txs = vec![delegate_stack_extend, agg_tx_0, agg_tx_1]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let new_stacking_state = get_stacking_state_pox_4(&mut peer, &latest_block, &alice_principal) .unwrap() .expect_tuple(); @@ -6207,16 +6277,32 @@ fn delegate_stack_stx_extend_signer_key() { let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let extend_cycle_ht = burnchain.reward_cycle_to_block_height(extend_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), signer_bytes.as_slice(),); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), signer_extend_bytes.as_slice(), @@ -6230,12 +6316,19 @@ fn delegate_stack_stx_extend_signer_key() { // // This test asserts that Alice's total-locked is equal to // twice the stacking minimum after calling stack-increase. 
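The `stack_increase` hunk below also replaces hard-coded balance and unlock-height literals with values computed from the PoX constants, since the 2.5 and 3.0 cases now run with different reward cycle lengths. A worked example of the unlock-height arithmetic with illustrative numbers (reward cycle length 10 and first burn height 0, as in the Nakamoto test constants):

```rust
let cycle_len: u64 = 10; // illustrative reward_cycle_length
let block_height: u64 = 41; // illustrative burn height at lockup time
let lock_period: u64 = 2;
// With a first burn height of 0, block_height_to_reward_cycle(0, h)
// reduces to h / cycle_len:
let unlock_cycle = (block_height + (lock_period + 1) * cycle_len) / cycle_len;
// The lock expires at the first burn block of that cycle:
let expected_unlock_height = unlock_cycle * cycle_len;
assert_eq!(unlock_cycle, 7);
assert_eq!(expected_unlock_height, 70);
```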
-#[test] -fn stack_increase() { +#[apply(nakamoto_cases)] +fn stack_increase(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut alice_nonce = 0; let alice_stacking_private_key = &keys[0]; @@ -6243,6 +6336,7 @@ fn stack_increase() { let signing_sk = StacksPrivateKey::from_seed(&[1]); let signing_pk = StacksPublicKey::from_private(&signing_sk); let signing_bytes = signing_pk.to_bytes_compressed(); + let alice_balance = get_balance(&mut peer, &alice_address.into()); let min_ustx = get_stacking_minimum(&mut peer, &latest_block); let pox_addr = PoxAddress::from_legacy( @@ -6278,7 +6372,7 @@ fn stack_increase() { // Initial tx arr includes a stack_stx pox_4 helper found in mod.rs let txs = vec![stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -6310,7 +6404,7 @@ fn stack_increase() { ); // Next tx arr includes a stack_increase pox_4 helper found in mod.rs let txs = vec![stack_increase]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_transactions = get_last_block_sender_transactions(&observer, alice_address); let actual_result = stacker_transactions.first().cloned().unwrap().result; @@ -6342,12 +6436,29 @@ fn stack_increase() { ("auth-id", Value::UInt(1)), ]); + let alice_expected_balance = alice_balance - min_ustx; + + // Compute the expected unlock height because the 3.0 and 2.5 cases + // have different PoX constants + let cycle_len = burnchain.pox_constants.reward_cycle_length as u128; + let unlock_cycle = burnchain + .pox_constants + .block_height_to_reward_cycle( + 0, + ((block_height as u128) + ((lock_period + 1) * cycle_len)) + .try_into() + .unwrap(), + ) + .unwrap(); + let expected_unlock_height = + unlock_cycle * (burnchain.pox_constants.reward_cycle_length as u64); + let common_data = PoxPrintFields { op_name: "stack-increase".to_string(), stacker: Value::Principal(PrincipalData::from(alice_address.clone())), - balance: Value::UInt(10234866375000), - locked: Value::UInt(5133625000), - burnchain_unlock_height: Value::UInt(125), + balance: Value::UInt(alice_expected_balance), + locked: Value::UInt(min_ustx), + burnchain_unlock_height: Value::UInt(expected_unlock_height as u128), }; check_pox_print_event(&increase_event, common_data, increase_op_data); @@ -6360,10 +6471,18 @@ fn stack_increase() { .block_height_to_reward_cycle(block_height) .unwrap(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + 
.find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), &signing_bytes.as_slice()); } @@ -6373,12 +6492,19 @@ fn stack_increase() { // // This test asserts that Alice's total-locked is equal to // twice the stacking minimum after calling delegate-stack-increase. -#[test] -fn delegate_stack_increase() { +#[apply(nakamoto_cases)] +fn delegate_stack_increase(use_nakamoto: bool) { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let alice_nonce = 0; let alice_key = &keys[0]; @@ -6425,7 +6551,7 @@ fn delegate_stack_increase() { // Initial tx arr includes a delegate_stx & delegate_stack_stx pox_4 helper found in mod.rs let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); bob_nonce += 1; @@ -6464,7 +6590,7 @@ fn delegate_stack_increase() { // Next tx arr includes a delegate_increase pox_4 helper found in mod.rs let txs = vec![delegate_increase, agg_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_transactions = get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); @@ -6489,9 +6615,18 @@ fn delegate_stack_increase() { // test that the reward set contains the increased amount and the expected key let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(pox_addr, reward_entry.reward_address); assert_eq!(min_ustx * 2, reward_entry.amount_stacked); assert_eq!(&reward_entry.signer.unwrap(), signer_pk_bytes.as_slice()); @@ -8107,12 +8242,19 @@ fn test_scenario_four() { // In this test case, Alice delegates twice the stacking minimum to Bob. // Bob stacks Alice's funds, and then immediately tries to stacks-aggregation-increase. // This should return a clarity user error. 
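One detail in the hunk below: `stacks-aggregation-increase` takes the pool's slot index in the target cycle's reward set, and under Nakamoto boot the boot-time test stacker already occupies slot 0, so Bob's pool entry lands at slot 1:

```rust
// Reward-set slot addressed by the aggregation-increase call; slot 0 is
// taken by the boot-time test stacker in the Nakamoto case.
let slot_idx = if use_nakamoto { 1 } else { 0 };
```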
-#[test] -fn delegate_stack_increase_err() { +#[apply(nakamoto_cases)] +fn delegate_stack_increase_err(use_nakamoto: bool) { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8158,7 +8300,7 @@ fn delegate_stack_increase_err() { let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); bob_nonce += 1; @@ -8172,13 +8314,15 @@ fn delegate_stack_increase_err() { 1, ); + let slot_idx = if use_nakamoto { 1 } else { 0 }; + // Bob's Aggregate Increase let bobs_aggregate_increase = make_pox_4_aggregation_increase( &bob_delegate_key, bob_nonce, &pox_addr, next_reward_cycle.into(), - 0, + slot_idx, Some(signature), &signer_pk, u128::MAX, 1, ); let txs = vec![bobs_aggregate_increase]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_transactions = get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); @@ -8202,7 +8346,11 @@ fn delegate_stack_increase_err() { // test that the reward set is empty let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert!(reward_set.is_empty()); + if use_nakamoto { + assert_eq!(reward_set.len(), 1); + } else { + assert!(reward_set.is_empty()); + } } pub fn get_stacking_state_pox_4( @@ -8411,7 +8559,7 @@ pub fn prepare_pox4_test<'a>( StacksBlockId, u64, usize, - TestSigners, + Option<TestSigners>, ) { let (epochs, pox_constants) = make_test_epochs_pox(use_nakamoto); @@ -8483,7 +8631,7 @@ pub fn prepare_pox4_test<'a>( latest_block, block_height, coinbase_nonce, - test_signers, + Some(test_signers), ) } else { // Advance into pox4 @@ -8505,7 +8653,7 @@ pub fn prepare_pox4_test<'a>( latest_block, block_height, coinbase_nonce, - TestSigners::new(vec![]), + None, ) } } @@ -8514,10 +8662,9 @@ pub fn tenure_with_txs( peer: &mut TestPeer, txs: &[StacksTransaction], coinbase_nonce: &mut usize, - test_signers: &mut TestSigners, - use_nakamoto: bool, + test_signers: &mut Option<TestSigners>, ) -> StacksBlockId { - if use_nakamoto { + if let Some(test_signers) = test_signers { let (burn_ops, mut tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); From f746932617340c1c1b57a9a6e46864326329f128 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 14 Aug 2024 16:58:21 -0700 Subject: [PATCH 246/910] wip: updating pox4_scenario_tests --- stackslib/src/chainstate/nakamoto/mod.rs | 6 + .../src/chainstate/nakamoto/test_signers.rs | 6 + .../src/chainstate/stacks/boot/pox_4_tests.rs | 430 ++++++++++++++---- 3 files changed, 355 insertions(+), 87 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d059a96cb63..a9065a27fc7 100644 --- 
a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -835,6 +835,12 @@ impl NakamotoBlockHeader { public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]); let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| { + warn!( + "Found an invalid public key. Reward set has {} signers. Chain length {}. Signatures length {}", + signers.len(), + self.chain_length, + self.signer_signature.len(), + ); ChainstateError::InvalidStacksBlock(format!( "Public key {} not found in the reward set", public_key.to_hex() diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 13d7f2ff1ed..4ab76137510 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -324,6 +324,12 @@ impl TestSigners { .map(|s| s.signing_key.to_vec()) .collect::<Vec<_>>(); + info!( + "TestSigners: Signing Nakamoto block. TestSigners has {} signers. Reward set has {} signers.", + test_signers_by_pk.len(), + reward_set_keys.len(), + ); + let mut signatures = Vec::with_capacity(reward_set_keys.len()); let mut missing_keys = 0; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 362ac3670ad..f879479d20a 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -4244,12 +4244,24 @@ fn advance_to_block_height( txs: &[StacksTransaction], peer_nonce: &mut usize, target_height: u64, -) -> (StacksBlockId, TestEventObserverBlock) { + test_signers: &mut Option<TestSigners>, +) -> ( + StacksBlockId, + TestEventObserverBlock, + Vec<StacksTransactionReceipt>, +) { let mut tx_block = None; let mut latest_block = None; let mut passed_txs = txs; while peer.get_burn_block_height() < target_height { + info!( + "Advancing to block height: {} from {} with {} txs", + target_height, + peer.get_burn_block_height(), + passed_txs.len() + ); + // latest_block = Some(peer.tenure_with_txs(&passed_txs, peer_nonce)); + latest_block = Some(tenure_with_txs(peer, &passed_txs, peer_nonce, test_signers)); passed_txs = &[]; if tx_block.is_none() { tx_block = Some(observer.get_blocks().last().unwrap().clone()); } @@ -4257,7 +4269,13 @@ fn advance_to_block_height( } let latest_block = latest_block.expect("Failed to get tip"); let tx_block = tx_block.expect("Failed to get tx block"); - (latest_block, tx_block) + let tx_block_receipts = if test_signers.is_some() { + tx_block.receipts[1..].to_vec() // remove TenureChange + } else { + tx_block.receipts.clone() + }; + // let tx_block_receipts = tx_block.receipts[2..].to_vec(); + (latest_block, tx_block, tx_block_receipts) } #[test] @@ -4456,12 +4474,14 @@ fn stack_agg_increase() { // Advance to next block in order to collect aggregate commit reward index target_height += 1; - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, 
target_height.into(), + &mut None, + // Some(&mut test_signers), ); // Get Bob's aggregate commit reward index @@ -4602,12 +4622,14 @@ fn stack_agg_increase() { // Advance to next block in order to attempt aggregate increase target_height += 1; - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, target_height.into(), + // &mut test_signers, + &mut None, ); // Fetch the error aggregate increase result & check that the err is ERR_INVALID_SIGNER_KEY @@ -6636,6 +6658,7 @@ pub fn pox_4_scenario_test_setup<'a>( test_name: &str, observer: &'a TestEventObserver, initial_balances: Vec<(PrincipalData, u64)>, + use_nakamoto: bool, ) -> ( TestPeer<'a>, usize, u64, u128, u128, u128, - u128, TestPeerConfig, + Option<TestSigners>, ) { + if use_nakamoto { + return pox_4_scenario_test_setup_nakamoto(test_name, observer, initial_balances); + } // Setup code extracted from your original test let test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); @@ -6702,20 +6728,132 @@ pub fn pox_4_scenario_test_setup<'a>( peer, peer_nonce, burn_block_height, - target_height as u128, reward_cycle as u128, next_reward_cycle as u128, min_ustx as u128, peer_config.clone(), + None, + ) +} + +pub fn pox_4_scenario_test_setup_nakamoto<'a>( + test_name: &str, + observer: &'a TestEventObserver, + initial_balances: Vec<(PrincipalData, u64)>, +) -> ( + TestPeer<'a>, + usize, + u64, + u128, + u128, + u128, + TestPeerConfig, + Option<TestSigners>, +) { + let (epochs, pox_constants) = make_test_epochs_pox(true); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let (peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + test_name, + Some(epochs.clone()), + Some(observer), + ); + + let test_key = keys[3].clone(); + let test_keys = vec![test_key.clone()]; + let test_addr = key_to_stacks_addr(&test_key); + let test_signers = TestSigners::new(vec![test_key.clone()]); + let aggregate_public_key = test_signers.aggregate_public_key.clone(); + + let private_key = StacksPrivateKey::from_seed(&[2]); + let test_signers = TestSigners::new(test_keys.clone()); + let addrs: Vec<StacksAddress> = test_keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + let initial_stacker_balance = initial_balances + .get(0) + .expect("Expected at least 1 initial balance") + .1; + let test_stackers = vec![TestStacker { + signer_private_key: test_key.clone(), + stacker_private_key: test_key.clone(), + amount: initial_stacker_balance as u128, + pox_addr: Some(pox_addr_from(&test_key)), + max_amount: None, + }]; + let mut peer_config = TestPeerConfig::default(); + peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); + let mut pox_constants = peer_config.clone().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 41; + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_test_stackers(test_stackers) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.add_default_balance = false; + + // let balances: Vec<(PrincipalData, u64)> = addrs + // .clone() + // .into_iter() + // .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) + // .collect(); + boot_plan.initial_balances = initial_balances; + boot_plan.pox_constants = pox_constants.clone(); + burnchain.pox_constants = pox_constants.clone(); + + info!("---- Booting into Nakamoto Peer ----"); + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); + let sort_db = peer.sortdb.as_ref().unwrap(); + let latest_block = 
sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + let coinbase_nonce = 0; + + let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; + // let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap() as u128; + let min_ustx = get_stacking_minimum(&mut peer, &latest_block); + + info!("Block height: {}", burn_block_height); + + // ( + // burnchain, + // peer, + // keys, + // latest_block, + // block_height, + // coinbase_nonce, + // Some(test_signers), + // ) + ( + peer, + coinbase_nonce, + burn_block_height, + reward_cycle as u128, + reward_cycle.wrapping_add(1), + min_ustx as u128, + peer_config.clone(), + Some(test_signers), ) } +#[apply(nakamoto_cases)] // In this test two solo stacker-signers Alice & Bob sign & stack // for two reward cycles. Alice provides a signature, Bob uses // 'set-signer-key-authorizations' to authorize. Two cycles later, // when no longer stacked, they both try replaying their auths. -#[test] -fn test_scenario_one() { +fn test_scenario_one(use_nakamoto: bool) { // Alice solo stacker-signer setup let mut alice = StackerSignerInfo::new(); // Bob solo stacker-signer setup @@ -6731,12 +6869,24 @@ fn test_scenario_one() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_one", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_one", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add alice and bob to test_signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } // Alice Signatures let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -6863,8 +7013,14 @@ fn test_scenario_one() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Alice stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -6880,9 +7036,26 @@ fn test_scenario_one() { assert_eq!(first_reward_cycle, next_reward_cycle); assert_eq!(pox_address, bob.pox_address); + info!("Got {} receipts", receipts.clone().len()); + + for receipt in receipts.clone() { + info!("Receipt: {:?}", receipt); + } + + let signer_keys_len = test_signers + .clone() + .map(|t| t.signer_keys.len()) + .unwrap_or(0); + // let signer_keys_len = if let Some(ref test_signers) = test_signers { + // test_signers.signer_keys.len() + // } else { + // 0 + // }; + + info!("Test signers now has {} keys", signer_keys_len); + // 1. Check bob's low authorization transaction - let bob_tx_result_low = tx_block - .receipts + let bob_tx_result_low = receipts .get(1) .unwrap() .result @@ -6892,8 +7065,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_low, Value::Bool(true)); // 2. 
Check bob's expected authorization transaction - let bob_tx_result_ok = tx_block - .receipts + let bob_tx_result_ok = receipts .get(2) .unwrap() .result @@ -6903,8 +7075,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_ok, Value::Bool(true)); // 3. Check alice's low stack transaction - let alice_tx_result_err = tx_block - .receipts + let alice_tx_result_err = receipts .get(3) .unwrap() .result @@ -6914,8 +7085,7 @@ fn test_scenario_one() { assert_eq!(alice_tx_result_err, Value::Int(38)); // Get alice's expected stack transaction - let alice_tx_result_ok = tx_block - .receipts + let alice_tx_result_ok = receipts .get(4) .unwrap() .result @@ -6958,8 +7128,7 @@ fn test_scenario_one() { assert_eq!(unlock_height_expected, unlock_height_actual); // 5. Check bob's error stack transaction - let bob_tx_result_err = tx_block - .receipts + let bob_tx_result_err = receipts .get(5) .unwrap() .result @@ -6969,8 +7138,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_err, Value::Int(38)); // Get bob's expected stack transaction - let bob_tx_result_ok = tx_block - .receipts + let bob_tx_result_ok = receipts .get(6) .unwrap() .result @@ -7044,14 +7212,30 @@ fn test_scenario_one() { bob.nonce += 1; let txs = vec![alice_vote, bob_vote]; - let target_reward_cycle = 8; + let target_reward_cycle = next_reward_cycle; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + info!("Submitting block with vote transactions"); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height + 1, + &mut test_signers, + ); + info!("Submitting empty block."); + let (latest_block, _tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &vec![], + &mut peer_nonce, + target_height + 2, + &mut test_signers, + ); let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) .expect("No approved key found"); @@ -7089,12 +7273,17 @@ fn test_scenario_one() { // Commit replay txs & advance to the second burn block of reward cycle 8 (block 162) target_height += 1; - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice replay, expect (err 35) - ERR_INVALID_SIGNATURE_PUBKEY - let alice_replay_result = tx_block - .receipts + let alice_replay_result = receipts .get(1) .unwrap() .result @@ -7104,8 +7293,7 @@ fn test_scenario_one() { assert_eq!(alice_replay_result, Value::Int(35)); // Check Bob replay, expect (err 19) - ERR_SIGNER_AUTH_USED - let bob_tx_result = tx_block - .receipts + let bob_tx_result = receipts .get(2) .unwrap() .result @@ -7115,11 +7303,11 @@ fn test_scenario_one() { assert_eq!(bob_tx_result, Value::Int(19)); } +#[apply(nakamoto_cases)] // In this test two solo service signers, Alice & Bob, provide auth // for Carl & Dave, solo stackers. Alice provides a signature for Carl, // Bob uses 'set-signer-key...' for Dave. 
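The scenario tests above and below keep their positional `receipts.get(n)` assertions even though a Nakamoto tenure carries an extra transaction; that works because `advance_to_block_height` strips the leading TenureChange receipt in the Nakamoto case before returning, as this fragment from the helper earlier in this patch shows:

```rust
// Drop the TenureChange receipt so index-based assertions line up
// across the 2.5 and 3.0 cases.
let tx_block_receipts = if test_signers.is_some() {
    tx_block.receipts[1..].to_vec() // remove TenureChange
} else {
    tx_block.receipts.clone()
};
```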
-#[test] -fn test_scenario_two() { +fn test_scenario_two(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -7141,12 +7329,17 @@ fn test_scenario_two() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_two", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_two", + &observer, + initial_balances, + use_nakamoto, + ); // Alice Signature For Carl let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7250,8 +7443,14 @@ fn test_scenario_two() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Carl Stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -7266,8 +7465,7 @@ fn test_scenario_two() { assert_eq!(pox_address, dave.pox_address); // Check Carl's malformed signature stack transaction (err 35 - INVALID_SIGNATURE_PUBKEY) - let carl_tx_result_err = tx_block - .receipts + let carl_tx_result_err = receipts .get(2) .unwrap() .result @@ -7277,8 +7475,7 @@ fn test_scenario_two() { assert_eq!(carl_tx_result_err, Value::Int(35)); // Check Carl's expected stack transaction - let carl_tx_result_ok = tx_block - .receipts + let carl_tx_result_ok = receipts .get(3) .unwrap() .result @@ -7307,8 +7504,7 @@ fn test_scenario_two() { assert_eq!(signer_key_expected, signer_key_actual); // Check Dave's malformed pox stack transaction (err 19 - INVALID_SIGNER_AUTH) - let dave_tx_result_err = tx_block - .receipts + let dave_tx_result_err = receipts .get(4) .unwrap() .result @@ -7318,8 +7514,7 @@ fn test_scenario_two() { assert_eq!(dave_tx_result_err, Value::Int(19)); // Check Dave's expected stack transaction - let dave_tx_result_ok = tx_block - .receipts + let dave_tx_result_ok = receipts .get(5) .unwrap() .result @@ -7408,18 +7603,23 @@ fn test_scenario_two() { bob_vote_expected, ]; - let target_reward_cycle = 8; + let target_reward_cycle = next_reward_cycle; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's expected vote - let alice_expected_vote = tx_block - .receipts + let alice_expected_vote = receipts .get(1) .unwrap() .result @@ -7429,8 +7629,7 @@ fn test_scenario_two() { assert_eq!(alice_expected_vote, Value::Bool(true)); // Check Alice's duplicate vote (err 15 - DUPLICATE_ROUND) - let alice_duplicate_vote = tx_block - .receipts + let alice_duplicate_vote = receipts .get(2) .unwrap() .result @@ -7440,8 +7639,7 @@ fn test_scenario_two() { assert_eq!(alice_duplicate_vote, Value::UInt(15)); // Check Bob's round err vote (err 17 - INVALID_ROUND) - let bob_round_err_vote = tx_block - .receipts + let 
bob_round_err_vote = receipts .get(3) .unwrap() .result @@ -7451,8 +7649,7 @@ fn test_scenario_two() { assert_eq!(bob_round_err_vote, Value::UInt(17)); // Check Bob's expected vote - let bob_expected_vote = tx_block - .receipts + let bob_expected_vote = receipts .get(4) .unwrap() .result @@ -7462,10 +7659,10 @@ fn test_scenario_two() { assert_eq!(bob_expected_vote, Value::Bool(true)); } +#[apply(nakamoto_cases)] // In this scenario, two solo stacker-signers (Alice, Bob), one service signer (Carl), // one stacking pool operator (Dave), & three pool stackers (Eve, Frank, Grace). -#[test] -fn test_scenario_three() { +fn test_scenario_three(use_nakamoto: bool) { // Alice stacker signer setup let mut alice = StackerSignerInfo::new(); // Bob stacker signer setup @@ -7496,12 +7693,17 @@ fn test_scenario_three() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_three", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_three", + &observer, + initial_balances, + use_nakamoto, + ); let lock_period = 2; let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7743,8 +7945,14 @@ fn test_scenario_three() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Start of test checks // 1. Check that Alice can't stack with an lock_period different than signature @@ -7937,11 +8145,11 @@ fn test_scenario_three() { assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); } +#[apply(nakamoto_cases)] // In this test scenario two solo stacker-signers (Alice & Bob), // test out the updated stack-extend & stack-increase functions // across multiple cycles. 
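// For orientation, a minimal sketch (not part of the patch) of the
// `make_signer_key_signature` call the stack-extend scenario below leans on;
// `alice`, `next_reward_cycle`, and `lock_period` stand in for the test's own
// bindings, and the trailing arguments are max-amount and auth-id:
fn example_stack_extend_signature(
    alice: &StackerSignerInfo,
    next_reward_cycle: u128,
    lock_period: u128,
) {
    let _signature = make_signer_key_signature(
        &alice.pox_address,
        &alice.private_key,
        next_reward_cycle,
        &Pox4SignatureTopic::StackExtend,
        lock_period,
        u128::MAX,
        1,
    );
}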
-#[test] -fn test_scenario_four() { +fn test_scenario_four(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -7957,12 +8165,17 @@ fn test_scenario_four() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_four", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_four", + &observer, + initial_balances, + use_nakamoto, + ); // Initial Alice Signature let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -8044,8 +8257,14 @@ fn test_scenario_four() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Alice Stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -8115,8 +8334,14 @@ fn test_scenario_four() { .burnchain .reward_cycle_to_block_height(7 as u64) .wrapping_add(15); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) let alice_err_vote = tx_block @@ -8199,8 +8424,14 @@ fn test_scenario_four() { alice_vote_expected_err.clone(), ]; let target_height = target_height.wrapping_add(1); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY) let alice_err_extend = tx_block @@ -9092,8 +9323,8 @@ fn no_lockups_2_5() { // 5. Carl stx-stacks & self-signs for 3 reward cycle // 6. In Carl's second reward cycle, he calls stx-extend for 3 more reward cycles // 7. 
In Carl's third reward cycle, he calls stx-increase and should fail as he is straddling 2 keys -#[test] -fn test_scenario_five() { +#[apply(nakamoto_cases)] +fn test_scenario_five(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -9136,12 +9367,17 @@ fn test_scenario_five() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, mut peer_config, - ) = pox_4_scenario_test_setup("test_scenario_five", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_five", + &observer, + initial_balances, + use_nakamoto, + ); // Lock periods for each stacker let carl_lock_period = 3; @@ -9400,8 +9636,14 @@ fn test_scenario_five() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check that all of David's stackers have been added to the reward set for (stacker, stacker_lock_period) in davids_stackers { @@ -9481,12 +9723,13 @@ fn test_scenario_five() { .config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &vote_txs, &mut peer_nonce, target_height, + &mut test_signers, ); let mut observed_txs = HashSet::new(); @@ -9587,8 +9830,14 @@ fn test_scenario_five() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check that all of David's stackers are stacked for (stacker, stacker_lock_period) in davids_stackers { @@ -9673,12 +9922,13 @@ fn test_scenario_five() { .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &vote_txs, &mut peer_nonce, target_height, + &mut test_signers, ); let mut observed_txs = HashSet::new(); @@ -9787,8 +10037,14 @@ fn test_scenario_five() { (heidi.clone(), heidi_lock_period), ]; - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); for (stacker, _) in davids_stackers { let (pox_address, first_reward_cycle, _lock_period, _indices) = From fb4a2fa63eebbd3b5566612e4fc0907a80e724d7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 14 Aug 2024 18:20:03 -0700 Subject: [PATCH 247/910] feat: update (most) pox4 scenario tests --- stackslib/src/burnchains/mod.rs | 2 +- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +- stackslib/src/chainstate/coordinator/mod.rs | 4 +- 
stackslib/src/chainstate/coordinator/tests.rs | 1 - .../chainstate/nakamoto/coordinator/mod.rs | 10 +- .../src/chainstate/stacks/boot/pox_4_tests.rs | 166 +++++++++++------- stackslib/src/net/api/getstackers.rs | 2 +- 7 files changed, 117 insertions(+), 78 deletions(-) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 07a2f73c101..0bc68897cbd 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -517,7 +517,7 @@ impl PoxConstants { } } - /// The first block of the prepare phase during `reward_cycle`. This is the prepare phase _for the next cycle_. + /// The first block of the prepare phase during `reward_cycle`. This is the prepare phase _for the next cycle_. pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { let reward_cycle_start = self.reward_cycle_to_block_height(first_block_height, reward_cycle); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 909ea46b9f4..3cf13a8a55e 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3908,11 +3908,11 @@ impl<'a> SortitionDBConn<'a> { tip: &SortitionId, reward_cycle_id: u64, ) -> Result { - let reward_cycle_of_prepare_phase = reward_cycle_id.checked_sub(1).ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; - let prepare_phase_start = pox_constants.prepare_phase_start( - first_block_height, - reward_cycle_of_prepare_phase, - ); + let reward_cycle_of_prepare_phase = reward_cycle_id + .checked_sub(1) + .ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; + let prepare_phase_start = + pox_constants.prepare_phase_start(first_block_height, reward_cycle_of_prepare_phase); let first_sortition = get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 60d86996869..72e44f981c0 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -299,7 +299,6 @@ pub trait RewardSetProvider { &self, chainstate: &mut StacksChainState, cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result; @@ -374,11 +373,10 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider &self, chainstate: &mut StacksChainState, reward_cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { - self.read_reward_set_nakamoto(chainstate, reward_cycle, burnchain, sortdb, block_id, false) + self.read_reward_set_nakamoto(chainstate, reward_cycle, sortdb, block_id, false) } } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 81167c64623..50127af1760 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -522,7 +522,6 @@ impl RewardSetProvider for StubbedRewardSetProvider { &self, chainstate: &mut StacksChainState, cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 31549d22b0a..de884a8d9c9 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -90,7 +90,6 @@ impl<'a, T: BlockEventDispatcher> 
OnChainRewardSetProvider<'a, T> { &self, chainstate: &mut StacksChainState, cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, debug_log: bool, @@ -547,13 +546,8 @@ pub fn load_nakamoto_reward_set( "cycle_start_height" => %cycle_start_height, "burnchain_height" => %anchor_block_sn.block_height); - let reward_set = provider.get_reward_set_nakamoto( - chain_state, - reward_cycle, - burnchain, - sort_db, - &block_id, - )?; + let reward_set = + provider.get_reward_set_nakamoto(chain_state, reward_cycle, sort_db, &block_id)?; debug!( "Stacks anchor block (ch {}) {} cycle {} is processed", &anchor_block_header.consensus_hash, &block_id, reward_cycle; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index f879479d20a..f7f0f211161 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -7210,30 +7210,45 @@ fn test_scenario_one(use_nakamoto: bool) { next_reward_cycle, ); bob.nonce += 1; - let txs = vec![alice_vote, bob_vote]; + let mut txs = vec![alice_vote, bob_vote]; + + // Also vote for aggregate key with default test signer if in Nakamoto: + if let Some(test_signers) = test_signers.clone() { + let tester_key = test_signers.signer_keys[0]; + let tester_addr = key_to_stacks_addr(&tester_key); + let tester_index = get_signer_index( + &mut peer, + latest_block, + tester_addr.clone(), + next_reward_cycle, + ); + let tester_vote = make_signers_vote_for_aggregate_public_key( + &tester_key, + 1, // only tx is a stack-stx + tester_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + txs.push(tester_vote); + } - let target_reward_cycle = next_reward_cycle; + let target_reward_cycle = next_reward_cycle + 1; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - info!("Submitting block with vote transactions"); + info!( + "Submitting block with vote transactions and advancing to reward cycle {} at block {}", + target_reward_cycle, target_height + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, - target_height + 1, - &mut test_signers, - ); - info!("Submitting empty block."); - let (latest_block, _tx_block, _receipts) = advance_to_block_height( - &mut peer, - &observer, - &vec![], - &mut peer_nonce, - target_height + 2, + target_height, &mut test_signers, ); @@ -7250,7 +7265,7 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.pox_address, lock_period, &alice.public_key, - 161, + target_height, Some(alice_signature.clone()), u128::MAX, 1, @@ -7264,7 +7279,7 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.pox_address, lock_period, &bob.public_key, - 161, + target_height, None, u128::MAX, 3, @@ -7303,10 +7318,10 @@ fn test_scenario_one(use_nakamoto: bool) { assert_eq!(bob_tx_result, Value::Int(19)); } -#[apply(nakamoto_cases)] // In this test two solo service signers, Alice & Bob, provide auth // for Carl & Dave, solo stackers. Alice provides a signature for Carl, // Bob uses 'set-signer-key...' for Dave. 
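// For orientation, a minimal sketch of how the `nakamoto_cases` template
// applied below is assumed to be defined (rstest + rstest_reuse; the case
// names are illustrative, only `nakamoto_cases` and `use_nakamoto` appear in
// this diff):
use rstest::rstest;
use rstest_reuse::{apply, template};

#[template]
#[rstest]
#[case::epoch_25(false)]
#[case::nakamoto(true)]
fn nakamoto_cases(#[case] use_nakamoto: bool) {}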
+#[apply(nakamoto_cases)] fn test_scenario_two(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); @@ -7341,6 +7356,13 @@ fn test_scenario_two(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } + // Alice Signature For Carl let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; let lock_period = 1; @@ -7705,6 +7727,15 @@ fn test_scenario_three(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers.signer_keys.extend(vec![ + alice.private_key.clone(), + bob.private_key.clone(), + carl.private_key.clone(), + ]); + } + let lock_period = 2; let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; let alice_signature_for_alice_err = make_signer_key_signature( @@ -7945,7 +7976,7 @@ fn test_scenario_three(use_nakamoto: bool) { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -7956,8 +7987,7 @@ fn test_scenario_three(use_nakamoto: bool) { // Start of test checks // 1. Check that Alice can't stack with an lock_period different than signature - let alice_stack_tx_err = tx_block - .receipts + let alice_stack_tx_err = receipts .get(1) .unwrap() .result @@ -7967,8 +7997,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(alice_stack_tx_err, Value::Int(35)); // 2. Check that Alice can solo stack-sign - let alice_stack_tx_ok = tx_block - .receipts + let alice_stack_tx_ok = receipts .get(2) .unwrap() .result @@ -7997,8 +8026,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(signer_key_expected, signer_key_actual); // 3. Check that Bob can't stack with a signature that points to a reward cycle in the past - let bob_stack_tx_err = tx_block - .receipts + let bob_stack_tx_err = receipts .get(3) .unwrap() .result @@ -8008,8 +8036,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(bob_stack_tx_err, Value::Int(35)); // 4. Check that Bob can solo stack-sign - let bob_stack_tx_ok = tx_block - .receipts + let bob_stack_tx_ok = receipts .get(4) .unwrap() .result @@ -8030,8 +8057,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(signer_key_actual, signer_key_actual); // 5. Check that David can't delegate-stack-stx Eve if delegation expires during lock period - let eve_delegate_stx_to_david_err = tx_block - .receipts + let eve_delegate_stx_to_david_err = receipts .get(9) .unwrap() .result @@ -8041,8 +8067,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(eve_delegate_stx_to_david_err, Value::Int(21)); // 6. Check that Frank is correctly delegated to David - let frank_delegate_stx_to_david_tx = tx_block - .receipts + let frank_delegate_stx_to_david_tx = receipts .get(10) .unwrap() .result @@ -8071,8 +8096,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(stacker_expected, stacker_actual); // 7. 
Check that Grace is correctly delegated to David - let grace_delegate_stx_to_david_tx = tx_block - .receipts + let grace_delegate_stx_to_david_tx = receipts .get(11) .unwrap() .result @@ -8101,8 +8125,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(stacker_expected, stacker_actual); // 8. Check that Alice can't delegate-stack if already stacking - let alice_delegate_stx_to_david_err = tx_block - .receipts + let alice_delegate_stx_to_david_err = receipts .get(12) .unwrap() .result @@ -8112,8 +8135,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(alice_delegate_stx_to_david_err, Value::Int(3)); // 9. Check that David can't aggregate-commit-indexed if pointing to a reward cycle in the future - let david_aggregate_commit_indexed_err = tx_block - .receipts + let david_aggregate_commit_indexed_err = receipts .get(13) .unwrap() .result @@ -8123,8 +8145,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); // 10. Check that David can aggregate-commit-indexed if using the incorrect signature topic - let david_aggregate_commit_indexed_err = tx_block - .receipts + let david_aggregate_commit_indexed_err = receipts .get(14) .unwrap() .result @@ -8133,16 +8154,17 @@ fn test_scenario_three(use_nakamoto: bool) { .unwrap(); assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + let david_index = if use_nakamoto { 3 } else { 2 }; + // 11. Check that David can aggregate-commit-indexed successfully, checking stacking index = 2 - let david_aggregate_commit_indexed_ok = tx_block - .receipts + let david_aggregate_commit_indexed_ok = receipts .get(15) .unwrap() .result .clone() .expect_result_ok() .unwrap(); - assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); + assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(david_index)); } #[apply(nakamoto_cases)] @@ -8177,6 +8199,13 @@ fn test_scenario_four(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } + // Initial Alice Signature let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; let lock_period = 2; @@ -8193,17 +8222,21 @@ fn test_scenario_four(use_nakamoto: bool) { let alice_signature_extend_err = make_signer_key_signature( &bob.pox_address, &bob.private_key, - next_reward_cycle.wrapping_add(1), + next_reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, u128::MAX, 1, ); + info!( + "Generating stack-extend signature for cycle {}", + next_reward_cycle + ); // Extend Alice Signature Expected let alice_signature_extend = make_signer_key_signature( &alice.pox_address, &alice.private_key, - next_reward_cycle.wrapping_add(1), + next_reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, u128::MAX, @@ -8322,19 +8355,40 @@ fn test_scenario_four(use_nakamoto: bool) { next_reward_cycle, ); bob.nonce += 1; - let txs = vec![ + let mut txs = vec![ alice_vote_err.clone(), alice_vote_expected.clone(), bob_vote_expected.clone(), ]; + // Also vote for aggregate key with default test signer if in Nakamoto: + if let Some(test_signers) = test_signers.clone() { + let tester_key = test_signers.signer_keys[0]; + let tester_addr = key_to_stacks_addr(&tester_key); + let tester_index = get_signer_index( + &mut peer, + latest_block, + tester_addr.clone(), + next_reward_cycle, + ); + let tester_vote = make_signers_vote_for_aggregate_public_key( + &tester_key, + 1, // 
only tx is a stack-stx + tester_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + txs.push(tester_vote); + } + // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) let target_height = peer .config .burnchain - .reward_cycle_to_block_height(7 as u64) - .wrapping_add(15); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + .reward_cycle_to_block_height(next_reward_cycle as u64 + 1) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64); + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -8344,8 +8398,7 @@ fn test_scenario_four(use_nakamoto: bool) { ); // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) - let alice_err_vote = tx_block - .receipts + let alice_err_vote = receipts .get(1) .unwrap() .result @@ -8355,8 +8408,7 @@ fn test_scenario_four(use_nakamoto: bool) { assert_eq!(alice_err_vote, Value::UInt(10)); // Check Alice's expected vote - let alice_expected_vote = tx_block - .receipts + let alice_expected_vote = receipts .get(2) .unwrap() .result @@ -8366,8 +8418,7 @@ fn test_scenario_four(use_nakamoto: bool) { assert_eq!(alice_expected_vote, Value::Bool(true)); // Check Bob's expected vote - let bob_expected_vote = tx_block - .receipts + let bob_expected_vote = receipts .get(3) .unwrap() .result @@ -8424,7 +8475,7 @@ fn test_scenario_four(use_nakamoto: bool) { alice_vote_expected_err.clone(), ]; let target_height = target_height.wrapping_add(1); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -8434,8 +8485,7 @@ fn test_scenario_four(use_nakamoto: bool) { ); // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY) - let alice_err_extend = tx_block - .receipts + let alice_err_extend = receipts .get(1) .unwrap() .result @@ -8445,8 +8495,7 @@ fn test_scenario_four(use_nakamoto: bool) { assert_eq!(alice_err_extend, Value::Int(35)); // Check Alice's stack-extend tx - let alice_extend_receipt = tx_block - .receipts + let alice_extend_receipt = receipts .get(2) .unwrap() .result @@ -8455,8 +8504,7 @@ fn test_scenario_four(use_nakamoto: bool) { .unwrap(); // Check Alice's expected err vote (err 14 - DUPLICATE_AGGREGATE_PUBLIC_KEY) - let alice_expected_vote_err = tx_block - .receipts + let alice_expected_vote_err = receipts .get(3) .unwrap() .result diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 0b494d19a0f..1ee61e853a7 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -106,7 +106,7 @@ impl GetStackersResponse { let provider = OnChainRewardSetProvider::new(); let stacker_set = provider - .read_reward_set_nakamoto(chainstate, cycle_number, burnchain, sortdb, tip, true) + .read_reward_set_nakamoto(chainstate, cycle_number, sortdb, tip, true) .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) From 993d55b2d97044018f9e01771cf415860c1a7f85 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 09:54:32 -0400 Subject: [PATCH 248/910] WIP: missing stackerdb messages Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 4 ++- stackslib/src/net/stackerdb/mod.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 32 ++++++++++----------- testnet/stacks-node/src/tests/signer/v0.rs | 33 ++++++++++++---------- 4 files changed, 37 insertions(+), 34 deletions(-) diff --git 
a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 117a8c4912e..f51d7965de0 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,7 +88,9 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1 + BlockPushed = 1, + /// Mock Miner Message from the miner + MockMinerMessage = 2 }); impl MessageSlotIDTrait for MessageSlotID { diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 847363b2e31..bfbb6e0a105 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -152,7 +152,7 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 2; +pub const MINER_SLOT_COUNT: u32 = 3; /// Final result of synchronizing state with a remote set of DB replicas #[derive(Clone)] diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 43eb1144141..19d8bb966fa 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2288,11 +2288,8 @@ impl BlockMinerThread { let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - let slot_id = MinerSlotID::BlockProposal.to_u8().into(); - if let Ok(messages) = - miners_stackerdb.get_latest_chunks(&[slot_id, slot_id * MINER_SLOT_COUNT]) - { - debug!("Miner got messages: {:?}", messages.len()); + let miner_slot_ids: Vec<_> = (0..MINER_SLOT_COUNT * 2).collect(); + if let Ok(messages) = miners_stackerdb.get_latest_chunks(&miner_slot_ids) { for message in messages { if let Some(message) = message { if message.is_empty() { @@ -2303,7 +2300,7 @@ impl BlockMinerThread { else { continue; }; - if miner_message.peer_info.burn_block_height == self.burn_block.block_height { + if miner_message.tenure_burn_block_height == self.burn_block.block_height { debug!( "Already sent mock miner message for tenure burn block height {:?}", self.burn_block.block_height @@ -2368,15 +2365,6 @@ impl BlockMinerThread { server_version, }; - info!("Sending mock miner message in response to mock signatures for burn block {:?}", &self.burn_block.block_height; - "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?peer_info.stacks_tip.clone(), - "peer_burn_block_height" => peer_info.burn_block_height, - "pox_consensus" => ?peer_info.pox_consensus.clone(), - "server_version" => peer_info.server_version.clone(), - "chain_id" => self.config.burnchain.chain_id, - "num_mock_signatures" => mock_signatures.len(), - ); let message = MockMinerMessage { peer_info, chain_id: self.config.burnchain.chain_id, @@ -2384,13 +2372,23 @@ impl BlockMinerThread { tenure_burn_block_height: self.burn_block.block_height, }; + info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.tenure_burn_block_height; + "stacks_tip_consensus_hash" => ?message.peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?message.peer_info.stacks_tip.clone(), + "peer_burn_block_height" => message.peer_info.burn_block_height, + "pox_consensus" => ?message.peer_info.pox_consensus.clone(), + "server_version" => message.peer_info.server_version.clone(), + "chain_id" => message.chain_id, + "num_mock_signatures" => 
message.mock_signatures.len(), + ); + if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), &burn_db, &self.burn_block, &stackerdbs, - SignerMessage::MockMinerMessage(message), - MinerSlotID::BlockProposal, // We are sending a mock miner message NOT a block proposal, but since we do not propose blocks in epoch 2.5, it is fine + SignerMessage::MockMinerMessage(message.clone()), + MinerSlotID::MockMinerMessage, self.config.is_mainnet(), &mut miners_stackerdb, &self.burn_block.consensus_hash, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9034a8a5235..0f948982448 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; @@ -2408,6 +2408,7 @@ fn mock_miner_message_epoch_25() { .unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; let epoch_3_start_height = epoch_3.start_height; + debug!("Epoch 3.0 starts at height {}", epoch_3_start_height); signer_test.boot_to_epoch_25_reward_cycle(); @@ -2447,20 +2448,22 @@ fn mock_miner_message_epoch_25() { }) .flatten() { - if chunk.slot_id == MinerSlotID::BlockProposal.to_u8() as u32 { - if chunk.data.is_empty() { - continue; - } - let SignerMessage::MockMinerMessage(message) = - SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize MockMinerMessage") - else { - continue; - }; - if message.peer_info.burn_block_height == current_burn_block_height { - mock_miner_message = Some(message); - break; - } + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockMinerMessage(message) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if message.tenure_burn_block_height == current_burn_block_height { + mock_miner_message = Some(message); + break; + } else { + info!( + "Received MockMinerMessage for burn block height {} but expected {current_burn_block_height}", message.tenure_burn_block_height + ); } } assert!( From d93d07da0068e92da97226ae0a52d086be0d1ac8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 10:31:40 -0400 Subject: [PATCH 249/910] WIP: use latest election winner to send mock miner messages Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 19d8bb966fa..b88ec5ffe24 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2381,6 +2381,15 @@ impl BlockMinerThread { "chain_id" => message.chain_id, "num_mock_signatures" => message.mock_signatures.len(), ); + let (_, miners_info) = + NakamotoChainState::make_miners_stackerdb_config(&burn_db, &self.burn_block)?; + + // find out which slot we're in. 
If we are not the latest sortition winner, we should not be sending anymore messages anyway + let idx = miners_info.get_latest_winner_index(); + let sortitions = miners_info.get_sortitions(); + let election_sortition = *sortitions + .get(idx as usize) + .expect("FATAL: latest winner index out of bounds"); if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), @@ -2391,7 +2400,7 @@ impl BlockMinerThread { MinerSlotID::MockMinerMessage, self.config.is_mainnet(), &mut miners_stackerdb, - &self.burn_block.consensus_hash, + &election_sortition, ) { warn!("Failed to send mock miner message: {:?}", &e); } From b597a119cab6c014e59cf4abd80d707ad5eff269 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 11:42:07 -0400 Subject: [PATCH 250/910] WIP: stuck at 250 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 0f948982448..2fe0cf934c2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2422,10 +2422,6 @@ fn mock_miner_message_epoch_25() { .get_headers_height() < epoch_3_start_height { - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); let mock_poll_time = Instant::now(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -2433,6 +2429,10 @@ fn mock_miner_message_epoch_25() { || Ok(true), ) .unwrap(); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); while mock_miner_message.is_none() { From 37a2533fcae14f2d61655940925e1ae2f60f5c2b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 12:32:58 -0400 Subject: [PATCH 251/910] WIP: need to fix stacks tip consensus hash and stacks tip Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2fe0cf934c2..74790bd15d2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2407,8 +2407,7 @@ fn mock_miner_message_epoch_25() { .clone() .unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_start_height = epoch_3.start_height; - debug!("Epoch 3.0 starts at height {}", epoch_3_start_height); + let epoch_3_boundary = epoch_3.start_height - 1; signer_test.boot_to_epoch_25_reward_cycle(); @@ -2416,11 +2415,12 @@ fn mock_miner_message_epoch_25() { let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); let main_poll_time = Instant::now(); let mut mock_miner_message = None; + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
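// Illustrative arithmetic (not part of the patch): if epoch 3.0 activates at
// burn height H = epoch_3.start_height, the epoch 2.5 miner produces its last
// block at H - 1, so the loop below only advances to
// `epoch_3_boundary = epoch_3.start_height - 1` rather than to the start
// height itself.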
while signer_test .running_nodes .btc_regtest_controller .get_headers_height() - < epoch_3_start_height + < epoch_3_boundary { let mock_poll_time = Instant::now(); next_block_and( From be1f6ed8c2d1e5cc905d5e6fe76ec71db0794bc3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 13:44:59 -0400 Subject: [PATCH 252/910] Fix consensus hash and stacks tip in MockMinerMessage Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 6 ---- testnet/stacks-node/src/neon_node.rs | 36 +++++++++++++--------- testnet/stacks-node/src/tests/signer/v0.rs | 6 +--- 3 files changed, 23 insertions(+), 25 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f51d7965de0..40a679d0f85 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -459,8 +459,6 @@ impl StacksMessageCodec for MockSignature { pub struct MockMinerMessage { /// The view of the stacks node peer information at the time of the mock signature pub peer_info: PeerInfo, - /// The burn block height of the miner's tenure - pub tenure_burn_block_height: u64, /// The chain id for the mock signature pub chain_id: u32, /// The mock signatures that the miner received @@ -470,7 +468,6 @@ pub struct MockMinerMessage { impl StacksMessageCodec for MockMinerMessage { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.tenure_burn_block_height)?; write_next(fd, &self.chain_id)?; write_next(fd, &self.mock_signatures)?; Ok(()) @@ -478,12 +475,10 @@ impl StacksMessageCodec for MockMinerMessage { fn consensus_deserialize(fd: &mut R) -> Result { let peer_info = PeerInfo::consensus_deserialize(fd)?; - let tenure_burn_block_height = read_next::(fd)?; let chain_id = read_next::(fd)?; let mock_signatures = read_next::, _>(fd)?; Ok(Self { peer_info, - tenure_burn_block_height, chain_id, mock_signatures, }) @@ -1002,7 +997,6 @@ mod test { }; let mock_miner_message = MockMinerMessage { peer_info: random_peer_data(), - tenure_burn_block_height: thread_rng().next_u64(), chain_id: thread_rng().gen_range(0..=1), mock_signatures: vec![mock_signature_1, mock_signature_2], }; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b88ec5ffe24..c4880b3980c 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -194,6 +194,7 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs, MINER_S use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; +use stacks::types::StacksEpoch; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks::{monitoring, version_string}; use stacks_common::codec::StacksMessageCodec; @@ -2273,12 +2274,20 @@ impl BlockMinerThread { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let p2p_net = StacksNode::setup_peer_network( - &self.config, - &self.config.atlas, - self.burnchain.clone(), - ); - let epoch_id = p2p_net.get_current_epoch().epoch_id; + let epochs = SortitionDB::get_stacks_epochs(burn_db.conn()) + .expect("Error while loading stacks epochs"); + let epoch_index = StacksEpoch::find_epoch(&epochs, self.burn_block.block_height) + .unwrap_or_else(|| { + panic!( + "BUG: block {} is not in a known epoch", + self.burn_block.block_height + ) + }); + let epoch_id = epochs + 
.get(epoch_index) + .expect("BUG: no epoch at found index") + .epoch_id; + if epoch_id != StacksEpochId::Epoch25 { debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; "epoch_id" => epoch_id.to_string() @@ -2300,7 +2309,7 @@ impl BlockMinerThread { else { continue; }; - if miner_message.tenure_burn_block_height == self.burn_block.block_height { + if miner_message.peer_info.burn_block_height == self.burn_block.block_height { debug!( "Already sent mock miner message for tenure burn block height {:?}", self.burn_block.block_height @@ -2350,11 +2359,11 @@ impl BlockMinerThread { .or(option_env!("CARGO_PKG_VERSION")) .unwrap_or("0.0.0.0"), ); - let stacks_tip_height = p2p_net.stacks_tip.height; - let stacks_tip = p2p_net.stacks_tip.block_hash.clone(); - let stacks_tip_consensus_hash = p2p_net.stacks_tip.consensus_hash.clone(); - let pox_consensus = p2p_net.burnchain_tip.consensus_hash.clone(); - let burn_block_height = p2p_net.chain_view.burn_block_height; + let stacks_tip_height = self.burn_block.canonical_stacks_tip_height; + let stacks_tip = self.burn_block.canonical_stacks_tip_hash; + let stacks_tip_consensus_hash = self.burn_block.canonical_stacks_tip_consensus_hash; + let pox_consensus = self.burn_block.consensus_hash; + let burn_block_height = self.burn_block.block_height; let peer_info = PeerInfo { burn_block_height, @@ -2369,10 +2378,9 @@ impl BlockMinerThread { peer_info, chain_id: self.config.burnchain.chain_id, mock_signatures, - tenure_burn_block_height: self.burn_block.block_height, }; - info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.tenure_burn_block_height; + info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.peer_info.burn_block_height; "stacks_tip_consensus_hash" => ?message.peer_info.stacks_tip_consensus_hash.clone(), "stacks_tip" => ?message.peer_info.stacks_tip.clone(), "peer_burn_block_height" => message.peer_info.burn_block_height, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 74790bd15d2..631d92c83cd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2457,13 +2457,9 @@ fn mock_miner_message_epoch_25() { else { continue; }; - if message.tenure_burn_block_height == current_burn_block_height { + if message.peer_info.burn_block_height == current_burn_block_height { mock_miner_message = Some(message); break; - } else { - info!( - "Received MockMinerMessage for burn block height {} but expected {current_burn_block_height}", message.tenure_burn_block_height - ); } } assert!( From 666119458c246822a7dca276b87fd0ddbb87ac24 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 13:49:49 -0400 Subject: [PATCH 253/910] CRC: get sort db from SortitionDB Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 4 +--- stackslib/src/net/stackerdb/mod.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 18 +++--------------- 3 files changed, 5 insertions(+), 19 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 40a679d0f85..779497b196d 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,9 +88,7 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1, - /// Mock Miner Message from the miner - MockMinerMessage = 2 + BlockPushed = 1 }); impl MessageSlotIDTrait for MessageSlotID { diff --git 
a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index bfbb6e0a105..847363b2e31 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -152,7 +152,7 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 3; +pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas #[derive(Clone)] diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c4880b3980c..818a2cf00be 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -194,7 +194,6 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs, MINER_S use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; -use stacks::types::StacksEpoch; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks::{monitoring, version_string}; use stacks_common::codec::StacksMessageCodec; @@ -2274,20 +2273,9 @@ impl BlockMinerThread { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let epochs = SortitionDB::get_stacks_epochs(burn_db.conn()) - .expect("Error while loading stacks epochs"); - let epoch_index = StacksEpoch::find_epoch(&epochs, self.burn_block.block_height) - .unwrap_or_else(|| { - panic!( - "BUG: block {} is not in a known epoch", - self.burn_block.block_height - ) - }); - let epoch_id = epochs - .get(epoch_index) - .expect("BUG: no epoch at found index") + let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? 
+ .expect("FATAL: no epoch defined") .epoch_id; - if epoch_id != StacksEpochId::Epoch25 { debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; "epoch_id" => epoch_id.to_string() @@ -2405,7 +2393,7 @@ impl BlockMinerThread { &self.burn_block, &stackerdbs, SignerMessage::MockMinerMessage(message.clone()), - MinerSlotID::MockMinerMessage, + MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, From 22ea25684d50a45bda94a91c23b73f99edf88572 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 16:32:33 -0400 Subject: [PATCH 254/910] CRC: simulate block proposal, signatures, and appending a block in mock signing Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 315 ++++++++++-------- stacks-signer/src/v0/signer.rs | 61 ++-- .../src/nakamoto_node/sign_coordinator.rs | 10 +- testnet/stacks-node/src/neon_node.rs | 246 ++++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 220 ++---------- 5 files changed, 374 insertions(+), 478 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 779497b196d..b767431c60b 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -77,9 +77,7 @@ define_u8_enum!( /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { /// Block Response message from signers - BlockResponse = 1, - /// Mock Signature message from Epoch 2.5 signers - MockSignature = 2 + BlockResponse = 1 }); define_u8_enum!( @@ -115,10 +113,12 @@ SignerMessageTypePrefix { BlockResponse = 1, /// Block Pushed message from miners BlockPushed = 2, - /// Mock Signature message from Epoch 2.5 signers - MockSignature = 3, - /// Mock Pre-Nakamoto message from Epoch 2.5 miners - MockMinerMessage = 4 + /// Mock block proposal message from Epoch 2.5 miners + MockProposal = 3, + /// Mock block signature message from Epoch 2.5 signers + MockSignature = 4, + /// Mock block message from Epoch 2.5 miners + MockBlock = 5 }); #[cfg_attr(test, mutants::skip)] @@ -161,8 +161,9 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, + SignerMessage::MockProposal(_) => SignerMessageTypePrefix::MockProposal, SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, - SignerMessage::MockMinerMessage(_) => SignerMessageTypePrefix::MockMinerMessage, + SignerMessage::MockBlock(_) => SignerMessageTypePrefix::MockBlock, } } } @@ -179,7 +180,9 @@ pub enum SignerMessage { /// A mock signature from the epoch 2.5 signers MockSignature(MockSignature), /// A mock message from the epoch 2.5 miners - MockMinerMessage(MockMinerMessage), + MockProposal(MockProposal), + /// A mock block from the epoch 2.5 miners + MockBlock(MockBlock), } impl SignerMessage { @@ -189,9 +192,11 @@ impl SignerMessage { #[cfg_attr(test, mutants::skip)] pub fn msg_id(&self) -> Option { match self { - Self::BlockProposal(_) | Self::BlockPushed(_) | Self::MockMinerMessage(_) => None, - Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), - Self::MockSignature(_) => Some(MessageSlotID::MockSignature), + Self::BlockProposal(_) + | Self::BlockPushed(_) + | Self::MockProposal(_) + | Self::MockBlock(_) => None, + Self::BlockResponse(_) | Self::MockSignature(_) => 
Some(MessageSlotID::BlockResponse), // Mock signature uses the same slot as block response since its exclusively for epoch 2.5 testing } } } @@ -206,7 +211,8 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd), SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), - SignerMessage::MockMinerMessage(message) => message.consensus_serialize(fd), + SignerMessage::MockProposal(message) => message.consensus_serialize(fd), + SignerMessage::MockBlock(block) => block.consensus_serialize(fd), }?; Ok(()) } @@ -228,13 +234,17 @@ impl StacksMessageCodec for SignerMessage { let block = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::BlockPushed(block) } + SignerMessageTypePrefix::MockProposal => { + let message = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockProposal(message) + } SignerMessageTypePrefix::MockSignature => { let signature = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::MockSignature(signature) } - SignerMessageTypePrefix::MockMinerMessage => { - let message = StacksMessageCodec::consensus_deserialize(fd)?; - SignerMessage::MockMinerMessage(message) + SignerMessageTypePrefix::MockBlock => { + let block = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockBlock(block) } }; Ok(message) @@ -305,110 +315,75 @@ impl StacksMessageCodec for PeerInfo { } } -/// A snapshot of the signer view of the stacks node to be used for mock signing. +/// A mock block proposal for Epoch 2.5 mock signing #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockSignData { - /// The view of the stacks node peer information at the time of the mock signature +pub struct MockProposal { + /// The view of the stacks node peer information at the time of the mock proposal pub peer_info: PeerInfo, - /// The burn block height of the event that triggered the mock signature - pub event_burn_block_height: u64, - /// The chain id for the mock signature + /// The chain id for the mock proposal pub chain_id: u32, -} - -impl StacksMessageCodec for MockSignData { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.event_burn_block_height)?; - write_next(fd, &self.chain_id)?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let peer_info = PeerInfo::consensus_deserialize(fd)?; - let event_burn_block_height = read_next::(fd)?; - let chain_id = read_next::(fd)?; - Ok(Self { - peer_info, - event_burn_block_height, - chain_id, - }) - } -} - -/// A mock signature for the stacks node to be used for mock signing. -/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockSignature { - /// The signature of the mock signature + /// The miner's signature across the peer info signature: MessageSignature, - /// The data that was signed across - pub sign_data: MockSignData, } -impl MockSignature { - /// Create a new mock sign data struct from the provided event burn block height, peer info, chain id, and private key. - /// Note that peer burn block height and event burn block height may not be the same if the peer view is stale. 
- pub fn new( - event_burn_block_height: u64, - peer_info: PeerInfo, - chain_id: u32, - stacks_private_key: &StacksPrivateKey, - ) -> Self { +impl MockProposal { + /// Create a new mock proposal data struct from the provided peer info, chain id, and private key. + pub fn new(peer_info: PeerInfo, chain_id: u32, stacks_private_key: &StacksPrivateKey) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - sign_data: MockSignData { - peer_info, - event_burn_block_height, - chain_id, - }, + chain_id, + peer_info, }; sig.sign(stacks_private_key) - .expect("Failed to sign MockSignature"); + .expect("Failed to sign MockProposal"); sig } - /// The signature hash for the mock signature - pub fn signature_hash(&self) -> Sha256Sum { - let domain_tuple = - make_structured_data_domain("mock-signer", "1.0.0", self.sign_data.chain_id); + /// The signature hash for the mock proposal + pub fn miner_signature_hash(&self) -> Sha256Sum { + let domain_tuple = make_structured_data_domain("mock-miner", "1.0.0", self.chain_id); let data_tuple = Value::Tuple( TupleData::from_data(vec![ ( "stacks-tip-consensus-hash".into(), - Value::buff_from( - self.sign_data - .peer_info - .stacks_tip_consensus_hash - .as_bytes() - .into(), - ) - .unwrap(), + Value::buff_from(self.peer_info.stacks_tip_consensus_hash.as_bytes().into()) + .unwrap(), ), ( "stacks-tip".into(), - Value::buff_from(self.sign_data.peer_info.stacks_tip.as_bytes().into()) - .unwrap(), + Value::buff_from(self.peer_info.stacks_tip.as_bytes().into()).unwrap(), ), ( "stacks-tip-height".into(), - Value::UInt(self.sign_data.peer_info.stacks_tip_height.into()), + Value::UInt(self.peer_info.stacks_tip_height.into()), ), ( "server-version".into(), - Value::string_ascii_from_bytes( - self.sign_data.peer_info.server_version.clone().into(), - ) - .unwrap(), + Value::string_ascii_from_bytes(self.peer_info.server_version.clone().into()) + .unwrap(), + ), + ( + "pox-consensus".into(), + Value::buff_from(self.peer_info.pox_consensus.as_bytes().into()).unwrap(), ), + ]) + .expect("Error creating signature hash"), + ); + structured_data_message_hash(data_tuple, domain_tuple) + } + + /// The signature hash including the miner's signature. Used by signers. 
+ fn signer_signature_hash(&self) -> Sha256Sum { + let domain_tuple = make_structured_data_domain("mock-signer", "1.0.0", self.chain_id); + let data_tuple = Value::Tuple( + TupleData::from_data(vec![ ( - "event-burn-block-height".into(), - Value::UInt(self.sign_data.event_burn_block_height.into()), + "miner-signature-hash".into(), + Value::buff_from(self.miner_signature_hash().as_bytes().into()).unwrap(), ), ( - "pox-consensus".into(), - Value::buff_from(self.sign_data.peer_info.pox_consensus.as_bytes().into()) - .unwrap(), + "miner-signature".into(), + Value::buff_from(self.signature.as_bytes().into()).unwrap(), ), ]) .expect("Error creating signature hash"), @@ -416,18 +391,79 @@ impl MockSignature { structured_data_message_hash(data_tuple, domain_tuple) } + /// Sign the mock proposal and set the internal signature field + fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + let signature_hash = self.miner_signature_hash(); + self.signature = private_key.sign(signature_hash.as_bytes())?; + Ok(()) + } + /// Verify the mock proposal against the provided miner public key + pub fn verify(&self, public_key: &StacksPublicKey) -> Result { + if self.signature == MessageSignature::empty() { + return Ok(false); + } + let signature_hash = self.miner_signature_hash(); + public_key + .verify(&signature_hash.0, &self.signature) + .map_err(|e| e.to_string()) + } +} + +impl StacksMessageCodec for MockProposal { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + self.peer_info.consensus_serialize(fd)?; + write_next(fd, &self.chain_id)?; + write_next(fd, &self.signature)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let peer_info = PeerInfo::consensus_deserialize(fd)?; + let chain_id = read_next::(fd)?; + let signature = read_next::(fd)?; + Ok(Self { + peer_info, + chain_id, + signature, + }) + } +} + +/// A mock signature for the stacks node to be used for mock signing. +/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignature { + /// The signer's signature across the mock proposal + signature: MessageSignature, + /// The mock block proposal that was signed across + pub mock_proposal: MockProposal, +} + +impl MockSignature { + /// Create a new mock signature from the provided proposal and signer private key. 
+ pub fn new(mock_proposal: MockProposal, stacks_private_key: &StacksPrivateKey) -> Self { + let mut sig = Self { + signature: MessageSignature::empty(), + mock_proposal, + }; + sig.sign(stacks_private_key) + .expect("Failed to sign MockSignature"); + sig + } + /// Sign the mock signature and set the internal signature field fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { - let signature_hash = self.signature_hash(); + let signature_hash = self.mock_proposal.signer_signature_hash(); self.signature = private_key.sign(signature_hash.as_bytes())?; Ok(()) } - /// Verify the mock signature against the provided public key + + /// Verify the mock signature against the provided signer public key pub fn verify(&self, public_key: &StacksPublicKey) -> Result { if self.signature == MessageSignature::empty() { return Ok(false); } - let signature_hash = self.signature_hash(); + let signature_hash = self.mock_proposal.signer_signature_hash(); public_key .verify(&signature_hash.0, &self.signature) .map_err(|e| e.to_string()) @@ -437,47 +473,41 @@ impl MockSignature { impl StacksMessageCodec for MockSignature { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signature)?; - self.sign_data.consensus_serialize(fd)?; + self.mock_proposal.consensus_serialize(fd)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let signature = read_next::(fd)?; - let sign_data = read_next::(fd)?; + let mock_proposal = MockProposal::consensus_deserialize(fd)?; Ok(Self { signature, - sign_data, + mock_proposal, }) } } -/// A mock message for the stacks node to be used for mock mining messages -/// This is only used by Epoch 2.5 miners to simulate miners responding to mock signatures +/// The mock block data for epoch 2.5 miners to broadcast to simulate block signing #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockMinerMessage { - /// The view of the stacks node peer information at the time of the mock signature - pub peer_info: PeerInfo, - /// The chain id for the mock signature - pub chain_id: u32, +pub struct MockBlock { + /// The mock proposal that was signed across + pub mock_proposal: MockProposal, /// The mock signatures that the miner received pub mock_signatures: Vec, } -impl StacksMessageCodec for MockMinerMessage { +impl StacksMessageCodec for MockBlock { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.chain_id)?; + self.mock_proposal.consensus_serialize(fd)?; write_next(fd, &self.mock_signatures)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { - let peer_info = PeerInfo::consensus_deserialize(fd)?; - let chain_id = read_next::(fd)?; + let mock_proposal = MockProposal::consensus_deserialize(fd)?; let mock_signatures = read_next::, _>(fd)?; Ok(Self { - peer_info, - chain_id, + mock_proposal, mock_signatures, }) } @@ -781,6 +811,7 @@ mod test { use clarity::types::PrivateKey; use clarity::util::hash::MerkleTree; use clarity::util::secp256k1::MessageSignature; + use rand::rngs::mock; use rand::{thread_rng, Rng, RngCore}; use rand_core::OsRng; use stacks_common::bitvec::BitVec; @@ -910,7 +941,7 @@ mod test { pox_consensus: ConsensusHash([pox_consensus_byte; 20]), } } - fn random_mock_sign_data() -> MockSignData { + fn random_mock_proposal() -> MockProposal { let chain_byte: u8 = thread_rng().gen_range(0..=1); let chain_id = if chain_byte == 1 { CHAIN_ID_TESTNET @@ -918,25 +949,23 @@ mod test { CHAIN_ID_MAINNET }; let 
peer_info = random_peer_data(); - MockSignData { + MockProposal { peer_info, - event_burn_block_height: thread_rng().next_u64(), chain_id, + signature: MessageSignature::empty(), } } #[test] - fn verify_sign_mock_signature() { + fn verify_sign_mock_proposal() { let private_key = StacksPrivateKey::new(); let public_key = StacksPublicKey::from_private(&private_key); let bad_private_key = StacksPrivateKey::new(); let bad_public_key = StacksPublicKey::from_private(&bad_private_key); - let mut mock_signature = MockSignature { - signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), - }; + let mut mock_signature = random_mock_proposal(); + mock_signature.sign(&private_key).unwrap(); assert!(!mock_signature .verify(&public_key) .expect("Failed to verify MockSignature")); @@ -962,12 +991,25 @@ mod test { assert_eq!(peer_data, deserialized_data); } + #[test] + fn serde_mock_proposal() { + let mut mock_signature = random_mock_proposal(); + mock_signature.sign(&StacksPrivateKey::new()).unwrap(); + let serialized_signature = mock_signature.serialize_to_vec(); + let deserialized_signature = read_next::(&mut &serialized_signature[..]) + .expect("Failed to deserialize MockSignature"); + assert_eq!(mock_signature, deserialized_signature); + } + #[test] fn serde_mock_signature() { - let mock_signature = MockSignature { + let mut mock_signature = MockSignature { signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), + mock_proposal: random_mock_proposal(), }; + mock_signature + .sign(&StacksPrivateKey::new()) + .expect("Failed to sign MockSignature"); let serialized_signature = mock_signature.serialize_to_vec(); let deserialized_signature = read_next::(&mut &serialized_signature[..]) .expect("Failed to deserialize MockSignature"); @@ -975,32 +1017,17 @@ mod test { } #[test] - fn serde_sign_data() { - let sign_data = random_mock_sign_data(); - let serialized_data = sign_data.serialize_to_vec(); - let deserialized_data = read_next::(&mut &serialized_data[..]) - .expect("Failed to deserialize MockSignData"); - assert_eq!(sign_data, deserialized_data); - } - - #[test] - fn serde_mock_miner_message() { - let mock_signature_1 = MockSignature { - signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), - }; - let mock_signature_2 = MockSignature { - signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), - }; - let mock_miner_message = MockMinerMessage { - peer_info: random_peer_data(), - chain_id: thread_rng().gen_range(0..=1), + fn serde_mock_block() { + let mock_proposal = random_mock_proposal(); + let mock_signature_1 = MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::new()); + let mock_signature_2 = MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::new()); + let mock_block = MockBlock { + mock_proposal, mock_signatures: vec![mock_signature_1, mock_signature_2], }; - let serialized_data = mock_miner_message.serialize_to_vec(); - let deserialized_data = read_next::(&mut &serialized_data[..]) + let serialized_data = mock_block.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) .expect("Failed to deserialize MockSignData"); - assert_eq!(mock_miner_message, deserialized_data); + assert_eq!(mock_block, deserialized_data); } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index c32af06f3fd..64622646e3c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -16,13 +16,12 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; 
use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; -use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ - BlockResponse, MessageSlotID, MockSignature, RejectCode, SignerMessage, + BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; @@ -140,6 +139,25 @@ impl SignerTrait for Signer { "push_result" => ?block_push_result, ); } + SignerMessage::MockProposal(mock_proposal) => { + let epoch = match stacks_client.get_node_epoch() { + Ok(epoch) => epoch, + Err(e) => { + warn!("{self}: Failed to determine node epoch. Cannot mock sign: {e}"); + return; + } + }; + debug!("{self}: received a mock block proposal."; + "current_reward_cycle" => current_reward_cycle, + "epoch" => ?epoch + ); + if epoch == StacksEpochId::Epoch25 + && self.reward_cycle == current_reward_cycle + { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + self.mock_sign(mock_proposal.clone()); + } + } _ => {} } } @@ -165,22 +183,6 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - let epoch = match stacks_client.get_node_epoch() { - Ok(epoch) => epoch, - Err(e) => { - warn!("{self}: Failed to determine node epoch. Cannot mock sign: {e}"); - return; - } - }; - debug!("{self}: Epoch 2.5 signer received a new burn block event."; - "burn_height" => burn_height, - "current_reward_cycle" => current_reward_cycle, - "epoch" => ?epoch - ); - if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { - // We are in epoch 2.5, so we should mock mine to prove we are still alive. - self.mock_sign(*burn_height, stacks_client); - } } } } @@ -482,26 +484,9 @@ impl Signer { } /// Send a mock signature to stackerdb to prove we are still alive - fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { - let Ok(peer_info) = stacks_client.get_peer_info() else { - warn!("{self}: Failed to get peer info. Cannot mock sign."); - return; - }; - let chain_id = if self.mainnet { - CHAIN_ID_MAINNET - } else { - CHAIN_ID_TESTNET - }; - info!("Mock signing for burn block {burn_block_height:?}"; - "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?peer_info.stacks_tip.clone(), - "peer_burn_block_height" => peer_info.burn_block_height, - "pox_consensus" => ?peer_info.pox_consensus.clone(), - "server_version" => peer_info.server_version.clone(), - "chain_id" => chain_id - ); - let mock_signature = - MockSignature::new(burn_block_height, peer_info, chain_id, &self.private_key); + fn mock_sign(&mut self, mock_proposal: MockProposal) { + info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); + let mock_signature = MockSignature::new(mock_proposal, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b366d931320..b266d700d44 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -770,12 +770,10 @@ impl SignCoordinator { debug!("Received block pushed message. 
Ignoring."); continue; } - SignerMessageV0::MockSignature(_) => { - debug!("Received mock signature message. Ignoring."); - continue; - } - SignerMessageV0::MockMinerMessage(_) => { - debug!("Received mock miner message. Ignoring."); + SignerMessageV0::MockSignature(_) + | SignerMessageV0::MockProposal(_) + | SignerMessageV0::MockBlock(_) => { + debug!("Received mock message. Ignoring."); continue; } }; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 818a2cf00be..fc5e0d80553 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -144,7 +144,7 @@ use std::io::{Read, Write}; use std::net::SocketAddr; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{fs, mem, thread}; use clarity::boot_util::boot_code_id; @@ -152,7 +152,7 @@ use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ - MessageSlotID, MinerSlotID, MockMinerMessage, PeerInfo, SignerMessage, + MessageSlotID, MinerSlotID, MockBlock, MockProposal, MockSignature, PeerInfo, SignerMessage, }; use libsigner::{SignerSession, StackerDBSession}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; @@ -2262,26 +2262,79 @@ impl BlockMinerThread { return false; } - /// Read any mock signatures from stackerdb and respond to them - pub fn send_mock_miner_message(&mut self) -> Result<(), ChainstateError> { - let miner_config = self.config.get_miner_config(); - if !miner_config.pre_nakamoto_miner_messaging { - debug!("Pre-Nakamoto mock miner messaging is disabled"); - return Ok(()); + /// Only used in mock signing to generate a peer info view + fn generate_peer_info(&self) -> PeerInfo { + // Create a peer info view of the current state + let server_version = version_string( + "stacks-node", + option_env!("STACKS_NODE_VERSION") + .or(option_env!("CARGO_PKG_VERSION")) + .unwrap_or("0.0.0.0"), + ); + let stacks_tip_height = self.burn_block.canonical_stacks_tip_height; + let stacks_tip = self.burn_block.canonical_stacks_tip_hash; + let stacks_tip_consensus_hash = self.burn_block.canonical_stacks_tip_consensus_hash; + let pox_consensus = self.burn_block.consensus_hash; + let burn_block_height = self.burn_block.block_height; + + PeerInfo { + burn_block_height, + stacks_tip_consensus_hash, + stacks_tip, + stacks_tip_height, + pox_consensus, + server_version, } + } - let burn_db_path = self.config.get_burn_db_file_path(); - let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) - .expect("FATAL: could not open sortition DB"); - let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? 
- .expect("FATAL: no epoch defined") - .epoch_id; - if epoch_id != StacksEpochId::Epoch25 { - debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; - "epoch_id" => epoch_id.to_string() - ); - return Ok(()); + /// Only used in mock signing to retrieve the mock signatures for the given mock proposal + fn wait_for_mock_signatures( + &self, + mock_proposal: &MockProposal, + stackerdbs: &StackerDBs, + timeout: Duration, + ) -> Result, ChainstateError> { + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(self.burn_block.block_height) + .expect("BUG: block commit exists before first block height"); + let signers_contract_id = MessageSlotID::BlockResponse + .stacker_db_contract(self.config.is_mainnet(), reward_cycle); + let slot_ids: Vec<_> = stackerdbs + .get_signers(&signers_contract_id) + .expect("FATAL: could not get signers from stacker DB") + .into_iter() + .enumerate() + .map(|(slot_id, _)| { + u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range") + }) + .collect(); + let mock_poll_start = Instant::now(); + let mut mock_signatures = vec![]; + // Because we don't care really if all signers reach quorum and this is just for testing purposes, + // we don't need to wait for ALL signers to sign the mock proposal and should not slow down mining too much + // Just wait a min amount of time for the mock signatures to come in + while mock_signatures.len() < slot_ids.len() && mock_poll_start.elapsed() < timeout { + let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; + for chunk in chunks { + if let Some(chunk) = chunk { + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.mock_proposal == *mock_proposal + && !mock_signatures.contains(&mock_signature) + { + mock_signatures.push(mock_signature); + } + } + } + } } + Ok(mock_signatures) + } + + /// Only used in mock signing to determine if the peer info view was already signed across + fn mock_block_exists(&self, peer_info: &PeerInfo) -> bool { let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); @@ -2292,113 +2345,110 @@ impl BlockMinerThread { if message.is_empty() { continue; } - let Ok(SignerMessage::MockMinerMessage(miner_message)) = + let Ok(SignerMessage::MockBlock(mock_block)) = SignerMessage::consensus_deserialize(&mut message.as_slice()) else { continue; }; - if miner_message.peer_info.burn_block_height == self.burn_block.block_height { - debug!( - "Already sent mock miner message for tenure burn block height {:?}", - self.burn_block.block_height - ); - return Ok(()); + if mock_block.mock_proposal.peer_info == *peer_info { + return true; } } } } - // Retrieve any MockSignatures from stackerdb - let mut mock_signatures = Vec::new(); - let reward_cycle = self - .burnchain - .block_height_to_reward_cycle(self.burn_block.block_height) - .expect("BUG: block commit exists before first block height"); - let signers_contract_id = MessageSlotID::MockSignature - .stacker_db_contract(self.config.is_mainnet(), reward_cycle); - // Get the slots for every signer - let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; - let slot_ids: Vec<_> = stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB") - .into_iter() - .enumerate() - .map(|(slot_id, _)| { - 
u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range") - }) - .collect(); - let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; - for chunk in chunks { - if let Some(chunk) = chunk { - if let Ok(SignerMessage::MockSignature(mock_signature)) = - SignerMessage::consensus_deserialize(&mut chunk.as_slice()) - { - if mock_signature.sign_data.event_burn_block_height - == self.burn_block.block_height - { - mock_signatures.push(mock_signature); - } - } - } + false + } + + /// Read any mock signatures from stackerdb and respond to them + pub fn send_mock_miner_messages(&mut self) -> Result<(), ChainstateError> { + let miner_config = self.config.get_miner_config(); + if !miner_config.pre_nakamoto_miner_messaging { + debug!("Pre-Nakamoto mock miner messaging is disabled"); + return Ok(()); } - let server_version = version_string( - "stacks-node", - option_env!("STACKS_NODE_VERSION") - .or(option_env!("CARGO_PKG_VERSION")) - .unwrap_or("0.0.0.0"), - ); - let stacks_tip_height = self.burn_block.canonical_stacks_tip_height; - let stacks_tip = self.burn_block.canonical_stacks_tip_hash; - let stacks_tip_consensus_hash = self.burn_block.canonical_stacks_tip_consensus_hash; - let pox_consensus = self.burn_block.consensus_hash; - let burn_block_height = self.burn_block.block_height; + let burn_db_path = self.config.get_burn_db_file_path(); + let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? + .expect("FATAL: no epoch defined") + .epoch_id; + if epoch_id != StacksEpochId::Epoch25 { + debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; + "epoch_id" => epoch_id.to_string() + ); + return Ok(()); + } - let peer_info = PeerInfo { - burn_block_height, - stacks_tip_consensus_hash, - stacks_tip, - stacks_tip_height, - pox_consensus, - server_version, - }; + let mining_key = miner_config + .mining_key + .expect("Cannot mock sign without mining key"); - let message = MockMinerMessage { - peer_info, - chain_id: self.config.burnchain.chain_id, - mock_signatures, - }; + // Create a peer info view of the current state + let peer_info = self.generate_peer_info(); + if self.mock_block_exists(&peer_info) { + debug!( + "Already sent mock miner block proposal for current peer info view. Not sending another mock proposal." + ); + return Ok(()); + } - info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.peer_info.burn_block_height; - "stacks_tip_consensus_hash" => ?message.peer_info.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?message.peer_info.stacks_tip.clone(), - "peer_burn_block_height" => message.peer_info.burn_block_height, - "pox_consensus" => ?message.peer_info.pox_consensus.clone(), - "server_version" => message.peer_info.server_version.clone(), - "chain_id" => message.chain_id, - "num_mock_signatures" => message.mock_signatures.len(), - ); + // find out which slot we're in. If we are not the latest sortition winner, we should not be sending anymore messages anyway + let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; let (_, miners_info) = NakamotoChainState::make_miners_stackerdb_config(&burn_db, &self.burn_block)?; - - // find out which slot we're in. 
If we are not the latest sortition winner, we should not be sending anymore messages anyway let idx = miners_info.get_latest_winner_index(); let sortitions = miners_info.get_sortitions(); let election_sortition = *sortitions .get(idx as usize) .expect("FATAL: latest winner index out of bounds"); + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + + let mock_proposal = + MockProposal::new(peer_info, self.config.burnchain.chain_id, &mining_key); + + info!("Sending mock proposal to stackerdb: {mock_proposal:?}"); + + if let Err(e) = SignCoordinator::send_miners_message( + &mining_key, + &burn_db, + &self.burn_block, + &stackerdbs, + SignerMessage::MockProposal(mock_proposal.clone()), + MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages. We use BlockProposal for MockProposal as well. + self.config.is_mainnet(), + &mut miners_stackerdb, + &election_sortition, + ) { + warn!("Failed to send mock proposal to stackerdb: {:?}", &e); + return Ok(()); + } + + // Retrieve any MockSignatures from stackerdb + let mock_signatures = + self.wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10))?; + + let mock_block = MockBlock { + mock_proposal, + mock_signatures, + }; + + info!("Sending mock block to stackerdb: {mock_block:?}"); if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), &burn_db, &self.burn_block, &stackerdbs, - SignerMessage::MockMinerMessage(message.clone()), - MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages + SignerMessage::MockBlock(mock_block.clone()), + MinerSlotID::BlockPushed, // There is no specific slot for mock miner messages self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, ) { - warn!("Failed to send mock miner message: {:?}", &e); + warn!("Failed to send mock block to stackerdb: {:?}", &e); } Ok(()) } @@ -3744,8 +3794,8 @@ impl RelayerThread { .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { - if let Err(e) = miner_thread_state.send_mock_miner_message() { - warn!("Failed to send mock miner message: {}", e); + if let Err(e) = miner_thread_state.send_mock_miner_messages() { + warn!("Failed to send mock miner messages: {}", e); } miner_thread_state.run_tenure() }) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 631d92c83cd..79bed1739f8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2216,205 +2216,27 @@ fn mock_sign_epoch_25() { .clone() .unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_start_height = epoch_3.start_height; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary signer_test.boot_to_epoch_25_reward_cycle(); info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); // Mine until epoch 3.0 and ensure that no more mock signatures are received - let mut reward_cycle = signer_test.get_current_reward_cycle(); - let mut stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // 
We are just reading so again, don't care about index. - ); - let mut signer_slot_ids: Vec<_> = signer_test + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test .get_signer_indices(reward_cycle) .iter() .map(|id| id.0) .collect(); + let signer_keys = signer_test.get_signer_public_keys(reward_cycle); + let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); assert_eq!(signer_slot_ids.len(), num_signers); - // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition - let main_poll_time = Instant::now(); - while signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height() - < epoch_3_start_height - { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - if current_burn_block_height - % signer_test - .running_nodes - .conf - .get_burnchain() - .pox_constants - .reward_cycle_length as u64 - == 0 - { - reward_cycle += 1; - debug!("Rolling over reward cycle to {:?}", reward_cycle); - stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); - signer_slot_ids = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); - } - let mut mock_signatures = vec![]; - let mock_poll_time = Instant::now(); - debug!("Waiting for mock signatures for burn block height {current_burn_block_height}"); - while mock_signatures.len() != num_signers { - std::thread::sleep(Duration::from_millis(100)); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { - if let SignerMessage::MockSignature(mock_signature) = message { - if mock_signature.sign_data.event_burn_block_height == current_burn_block_height - { - if !mock_signatures.contains(&mock_signature) { - mock_signatures.push(mock_signature); - } - } - } - } - assert!( - mock_poll_time.elapsed() <= Duration::from_secs(15), - "Failed to find mock signatures within timeout" - ); - } - assert!( - main_poll_time.elapsed() <= Duration::from_secs(45), - "Timed out waiting to advance epoch 3.0" - ); - } - info!("------------------------- Test Processing Epoch 3.0 Tenure -------------------------"); - let old_messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - let old_signatures = old_messages - .iter() - .filter_map(|message| { - if let SignerMessage::MockSignature(mock_signature) = message { - Some(mock_signature) - } else { - None - } - }) - .collect::>(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - // Wait a bit to ensure no new mock signatures show up - std::thread::sleep(Duration::from_secs(5)); - let new_messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get 
BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - let new_signatures = new_messages - .iter() - .filter_map(|message| { - if let SignerMessage::MockSignature(mock_signature) = message { - Some(mock_signature) - } else { - None - } - }) - .collect::>(); - assert_eq!(old_signatures, new_signatures); -} - -#[test] -#[ignore] -/// This test checks that Epoch 2.5 miners will issue a MockMinerMessage per burn block they receive -/// including the mock signature from the signers. -fn mock_miner_message_epoch_25() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), - |_| {}, - |node_config| { - let epochs = node_config.burnchain.epochs.as_mut().unwrap(); - for epoch in epochs.iter_mut() { - if epoch.epoch_id == StacksEpochId::Epoch25 { - epoch.end_height = 251; - } - if epoch.epoch_id == StacksEpochId::Epoch30 { - epoch.start_height = 251; - } - } - }, - &[], - ); - - let epochs = signer_test - .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_boundary = epoch_3.start_height - 1; - - signer_test.boot_to_epoch_25_reward_cycle(); - - info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + + // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition let main_poll_time = Instant::now(); - let mut mock_miner_message = None; // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
while signer_test .running_nodes @@ -2422,6 +2244,7 @@ fn mock_miner_message_epoch_25() { .get_headers_height() < epoch_3_boundary { + let mut mock_block_message = None; let mock_poll_time = Instant::now(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -2434,8 +2257,7 @@ .btc_regtest_controller .get_headers_height(); debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); - - while mock_miner_message.is_none() { + while mock_block_message.is_none() { std::thread::sleep(Duration::from_millis(100)); let chunks = test_observer::get_stackerdb_chunks(); for chunk in chunks @@ -2451,14 +2273,29 @@ if chunk.data.is_empty() { continue; } - let SignerMessage::MockMinerMessage(message) = + let SignerMessage::MockBlock(mock_block) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage") else { continue; }; - if message.peer_info.burn_block_height == current_burn_block_height { - mock_miner_message = Some(message); + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + assert_eq!(mock_block.mock_signatures.len(), num_signers); + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_message = Some(mock_block); break; } } @@ -2467,10 +2304,9 @@ "Failed to find mock miner message within timeout" ); } - mock_miner_message = None; assert!( main_poll_time.elapsed() <= Duration::from_secs(45), - "Timed out waiting to advance epoch 3.0" + "Timed out waiting to advance epoch 3.0 boundary" ); } } From f12961e7b704705bcca56bad154f2326afa101bb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 16:36:40 -0400 Subject: [PATCH 255/910] Rename pre_nakamoto_miner_messaging to pre_nakamoto_mock_signing Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 10 +++++----- testnet/stacks-node/src/neon_node.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4528e072221..c6c0abfd254 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2332,8 +2332,8 @@ pub struct MinerConfig { pub max_reorg_depth: u64, /// Amount of time while mining in nakamoto to wait for signers to respond to a proposed block pub wait_on_signers: Duration, - /// Whether to send miner messages in Epoch 2.5 through the .miners contract. This is used for testing. - pub pre_nakamoto_miner_messaging: bool, + /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only.
+ pub pre_nakamoto_mock_signing: bool, } impl Default for MinerConfig { @@ -2364,7 +2364,7 @@ impl Default for MinerConfig { max_reorg_depth: 3, // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), - pre_nakamoto_miner_messaging: true, + pre_nakamoto_mock_signing: true, } } } @@ -2696,7 +2696,7 @@ pub struct MinerConfigFile { pub filter_origins: Option, pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, - pub pre_nakamoto_miner_messaging: Option, + pub pre_nakamoto_mock_signing: Option, } impl MinerConfigFile { @@ -2799,7 +2799,7 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), - pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true), + pre_nakamoto_mock_signing: self.pre_nakamoto_mock_signing.unwrap_or(true), }) } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fc5e0d80553..238a677e4da 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2362,8 +2362,8 @@ impl BlockMinerThread { /// Read any mock signatures from stackerdb and respond to them pub fn send_mock_miner_messages(&mut self) -> Result<(), ChainstateError> { let miner_config = self.config.get_miner_config(); - if !miner_config.pre_nakamoto_miner_messaging { - debug!("Pre-Nakamoto mock miner messaging is disabled"); + if !miner_config.pre_nakamoto_mock_signing { + debug!("Pre-Nakamoto mock signing is disabled"); return Ok(()); } From 82e390d19ba3d782d3a2a21d0bd85933afc604ad Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 16:41:07 -0400 Subject: [PATCH 256/910] Add a bit more logging Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 238a677e4da..5f0720d1a45 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2418,7 +2418,7 @@ impl BlockMinerThread { &self.burn_block, &stackerdbs, SignerMessage::MockProposal(mock_proposal.clone()), - MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages. We use BlockProposal for MockProposal as well. + MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages so we use BlockProposal for MockProposal as well. self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, @@ -2428,6 +2428,7 @@ impl BlockMinerThread { } // Retrieve any MockSignatures from stackerdb + info!("Waiting for mock signatures..."); let mock_signatures = self.wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10))?; @@ -2443,7 +2444,7 @@ impl BlockMinerThread { &self.burn_block, &stackerdbs, SignerMessage::MockBlock(mock_block.clone()), - MinerSlotID::BlockPushed, // There is no specific slot for mock miner messages + MinerSlotID::BlockPushed, // There is no specific slot for mock miner messages. Let's use BlockPushed for MockBlock since MockProposal uses BlockProposal. 
self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, From 4ada7e2c8dc75b4fb287acf22d652df6c70ebbc1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 15 Aug 2024 16:57:15 -0400 Subject: [PATCH 257/910] fix: shorten miner thread ID string, and add a bump-blocks-processed call when we process a sortition (just as we have in neon node) --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 148c80d030e..435305472a9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -453,6 +453,7 @@ impl RelayerThread { increment_stx_blocks_mined_counter(); } self.globals.set_last_sortition(sn.clone()); + self.globals.counters.bump_blocks_processed(); // there may be a buffered stacks block to process, so wake up the coordinator to check self.globals.coord_comms.announce_new_stacks_block(); @@ -812,10 +813,7 @@ impl RelayerThread { )?; let new_miner_handle = std::thread::Builder::new() - .name(format!( - "miner.{parent_tenure_start} (bound ({},{})", - &self.config.node.p2p_bind, &self.config.node.rpc_bind - )) + .name(format!("miner.{parent_tenure_start}",)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) .map_err(|e| { From 572c5476e3e2a0b529eef052414180084de59348 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 15 Aug 2024 16:58:06 -0400 Subject: [PATCH 258/910] fix: remove sleeps in favor of wait_for() --- .../src/tests/nakamoto_integrations.rs | 121 +++++++++++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 15 ++- 2 files changed, 112 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index df8c5127564..c370ca53f63 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3914,8 +3914,6 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } - sleep_ms(1000); - info!("Tenure B broadcasted but did not process a block. Issue the next bitcoin block and unstall block commits."); // the block will be stored, not processed, so load it out of staging @@ -3947,18 +3945,25 @@ // It should also build on block A, since the node has paused processing of block B.
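[Editor's note] The hunks below replace fixed sleep_ms calls with wait_for and an explicit condition, so each test advances as soon as the processed-block counter actually moves rather than hoping a fixed pause was long enough. Judging from the call sites (wait_for(10, || { ... Ok(done) }).unwrap()), the helper has roughly this shape; this is an inferred sketch, not the repo's exact definition:

use std::time::{Duration, Instant};

// Poll `check` until it returns Ok(true), propagating its errors and
// failing if `timeout_secs` elapses first.
fn wait_for(
    timeout_secs: u64,
    mut check: impl FnMut() -> Result<bool, String>,
) -> Result<(), String> {
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(100));
    }
    Err("Timed out waiting for condition".into())
}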
let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { test_skip_commit_op.0.lock().unwrap().replace(false); TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }) .unwrap(); - // allow blocks B and C to be processed - sleep_ms(1000); - info!("Tenure C produced a block!"); let block_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() @@ -3976,6 +3981,10 @@ fn forked_tenure_is_ignored() { // Now let's produce a second block for tenure C and ensure it builds off of block C. let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); let start_time = Instant::now(); // submit a tx so that the miner will mine an extra block @@ -3993,8 +4002,14 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } - // give C's second block a moment to process - sleep_ms(1000); + wait_for(10, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!("Tenure C produced a second block!"); @@ -4014,16 +4029,23 @@ fn forked_tenure_is_ignored() { // Submit a block commit op for tenure D and mine a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }) .unwrap(); - // give tenure D's block a moment to process - sleep_ms(1000); - let block_tenure_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); @@ -5703,6 +5725,11 @@ fn continue_tenure_extend() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine a regular nakamoto tenure @@ -5720,7 +5747,20 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let 
blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); info!("Pausing commit ops to trigger a tenure extend."); test_skip_commit_op.0.lock().unwrap().replace(true); @@ -5733,7 +5773,15 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); // Submit a TX let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); @@ -5759,6 +5807,11 @@ fn continue_tenure_extend() { ) .unwrap(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -5768,7 +5821,20 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -5778,7 +5844,15 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!("Resuming commit ops to mine regular tenures."); test_skip_commit_op.0.lock().unwrap().replace(false); @@ -5792,11 +5866,7 @@ fn continue_tenure_extend() { .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(commits_count > commits_before && blocks_processed > blocks_processed_before) + Ok(commits_count > commits_before) }) .unwrap(); @@ -5807,6 +5877,15 @@ fn continue_tenure_extend() { &signers, ); + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + sleep_ms(5_000); } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5e366ba488b..c2ce878e28f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -540,11 +540,19 @@ fn miner_gather_signatures() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); - signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); - // give the system a chance to mine a Nakamoto block - sleep_ms(30_000); + signer_test.boot_to_epoch_3(); + + // give the system a chance to reach the Nakamoto start tip + // mine a Nakamoto block + wait_for(30, || { + let blocks_mined = mined_blocks.load(Ordering::SeqCst); + Ok(blocks_mined > blocks_mined_before) + }) 
+ .unwrap(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -943,6 +951,7 @@ fn forked_tenure_testing( // In the next block, the miner should win the tenure and submit a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, From b1b60d14d8c32fb5266a205320156481260b3fed Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 15 Aug 2024 14:07:24 -0700 Subject: [PATCH 259/910] fix: update scenario_five_test --- stackslib/src/chainstate/stacks/boot/mod.rs | 1 + .../src/chainstate/stacks/boot/pox_4_tests.rs | 51 ++++++++++++++++--- 2 files changed, 44 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 0f45d7a6d02..88ecc8887e2 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1773,6 +1773,7 @@ pub mod test { let data = if let Some(d) = value_opt.expect_optional().unwrap() { d } else { + warn!("get_stacker_info: No PoX info for {}", addr); return None; }; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index f7f0f211161..70a175b87b8 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -6793,6 +6793,7 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; pox_constants.pox_4_activation_height = 41; + pox_constants.prepare_length = 5; let mut boot_plan = NakamotoBootPlan::new(test_name) .with_test_stackers(test_stackers) .with_test_signers(test_signers.clone()) @@ -6807,6 +6808,8 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( boot_plan.initial_balances = initial_balances; boot_plan.pox_constants = pox_constants.clone(); burnchain.pox_constants = pox_constants.clone(); + peer_config.burnchain = burnchain.clone(); + peer_config.test_signers = Some(test_signers.clone()); info!("---- Booting into Nakamoto Peer ----"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); @@ -8873,6 +8876,7 @@ pub fn prepare_pox4_test<'a>( pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; pox_constants.pox_4_activation_height = 41; + pox_constants.prepare_length = 5; let mut boot_plan = NakamotoBootPlan::new(test_name) .with_test_stackers(test_stackers) .with_test_signers(test_signers.clone()) @@ -9427,6 +9431,17 @@ fn test_scenario_five(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers.signer_keys.extend(vec![ + alice.private_key.clone(), + bob.private_key.clone(), + carl.private_key.clone(), + david.private_key.clone(), + eve.private_key.clone(), + ]); + } + // Lock periods for each stacker let carl_lock_period = 3; let frank_lock_period = 1; @@ -9684,6 +9699,12 @@ fn test_scenario_five(use_nakamoto: bool) { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); + info!( + "Scenario five: submitting stacking txs."; + "target_height" => target_height, + "next_reward_cycle" => next_reward_cycle, + "prepare_length" => 
peer_config.burnchain.pox_constants.prepare_length, + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, @@ -9765,12 +9786,15 @@ fn test_scenario_five(use_nakamoto: bool) { alice.nonce += 1; bob.nonce += 1; carl.nonce += 1; - // Mine vote txs & advance to the reward set calculation of the next reward cycle let target_height = peer .config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); + info!( + "Scenario five: submitting votes. Target height: {}", + target_height + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, @@ -9878,7 +9902,11 @@ fn test_scenario_five(use_nakamoto: bool) { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + info!( + "Scenario five: submitting extend and aggregate commit txs. Target height: {}", + target_height + ); + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -9888,17 +9916,19 @@ fn test_scenario_five(use_nakamoto: bool) { ); // Check that all of David's stackers are stacked - for (stacker, stacker_lock_period) in davids_stackers { + for (idx, (stacker, stacker_lock_period)) in davids_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = - get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + get_stacker_info_pox_4(&mut peer, &stacker.principal) + .expect(format!("Failed to find stacker {}", idx).as_str()); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, david.pox_address); assert_eq!(lock_period, *stacker_lock_period); } // Check that all of Eve's stackers are stacked - for (stacker, stacker_lock_period) in eves_stackers { + for (idx, (stacker, stacker_lock_period)) in eves_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = - get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + get_stacker_info_pox_4(&mut peer, &stacker.principal) + .expect(format!("Failed to find stacker {}", idx).as_str()); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, eve.pox_address); assert_eq!(lock_period, *stacker_lock_period); @@ -9970,6 +10000,10 @@ fn test_scenario_five(use_nakamoto: bool) { .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions + info!( + "Scenario five: submitting votes. 
Target height: {}", + target_height + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, @@ -10085,7 +10119,8 @@ fn test_scenario_five(use_nakamoto: bool) { (heidi.clone(), heidi_lock_period), ]; - let (latest_block, tx_block, _receipts) = advance_to_block_height( + info!("Scenario five: submitting increase and aggregate-commit txs"); + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -10116,6 +10151,6 @@ fn test_scenario_five(use_nakamoto: bool) { assert_eq!(pox_address, carl.pox_address); // Assert that carl's error is err(40) - let carl_increase_err = tx_block.receipts[1].clone().result; + let carl_increase_err = receipts[1].clone().result; assert_eq!(carl_increase_err, Value::error(Value::Int(40)).unwrap()); } From fe3d7dc2b7e866621ef81aa49eecc1bcb1307bc3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 15 Aug 2024 17:12:16 -0400 Subject: [PATCH 260/910] chore: add warn logs for block validate rejections --- stackslib/src/net/api/postblock_proposal.rs | 24 +++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6c1d5526b5d..853cf8fc620 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -215,6 +215,14 @@ impl NakamotoBlockProposal { let mainnet = self.chain_id == CHAIN_ID_MAINNET; if self.chain_id != chainstate.chain_id || mainnet != chainstate.mainnet { + warn!( + "Rejected block proposal"; + "reason" => "Wrong network/chain_id", + "expected_chain_id" => chainstate.chain_id, + "expected_mainnet" => chainstate.mainnet, + "received_chain_id" => self.chain_id, + "received_mainnet" => mainnet, + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::InvalidBlock, reason: "Wrong network/chain_id".into(), @@ -227,6 +235,10 @@ impl NakamotoBlockProposal { let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; if expected_burn_opt.is_none() { + warn!( + "Rejected block proposal"; + "reason" => "Failed to find parent expected burns", + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::UnknownParent, reason: "Failed to find parent expected burns".into(), @@ -259,6 +271,12 @@ impl NakamotoBlockProposal { &parent_stacks_header.anchored_header { if self.block.header.timestamp <= parent_nakamoto_header.timestamp { + warn!( + "Rejected block proposal"; + "reason" => "Block timestamp is not greater than parent block", + "block_timestamp" => self.block.header.timestamp, + "parent_block_timestamp" => parent_nakamoto_header.timestamp, + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::InvalidBlock, reason: "Block timestamp is not greater than parent block".into(), @@ -266,6 +284,12 @@ impl NakamotoBlockProposal { } } if self.block.header.timestamp > get_epoch_time_secs() + 15 { + warn!( + "Rejected block proposal"; + "reason" => "Block timestamp is too far into the future", + "block_timestamp" => self.block.header.timestamp, + "current_time" => get_epoch_time_secs(), + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::InvalidBlock, reason: "Block timestamp is too far into the future".into(), From f18a6b9d1861327b71329c08775de9c7e6448148 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 15 Aug 2024 15:26:53 -0700 Subject: [PATCH 261/910] fix: remove unneeded comments --- 
.../src/chainstate/stacks/boot/pox_4_tests.rs | 58 ++----------------- 1 file changed, 5 insertions(+), 53 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 70a175b87b8..0968cc4de3f 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -3300,12 +3300,12 @@ fn verify_signer_key_signatures() { assert_eq!(result, Value::okay_true()); } -#[test] -fn stack_stx_verify_signer_sig() { +#[apply(nakamoto_cases)] +fn stack_stx_verify_signer_sig(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -3564,7 +3564,7 @@ fn stack_stx_verify_signer_sig() { valid_tx, ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_txs = get_last_block_sender_transactions(&observer, stacker_addr); let expected_error = Value::error(Value::Int(35)).unwrap(); @@ -4260,7 +4260,6 @@ fn advance_to_block_height( peer.get_burn_block_height(), passed_txs.len() ); - // latest_block = Some(peer.tenure_with_txs(&passed_txs, peer_nonce)); latest_block = Some(tenure_with_txs(peer, &passed_txs, peer_nonce, test_signers)); passed_txs = &[]; if tx_block.is_none() { @@ -4274,7 +4273,6 @@ fn advance_to_block_height( } else { tx_block.receipts.clone() }; - // let tx_block_receipts = tx_block.receipts[2..].to_vec(); (latest_block, tx_block, tx_block_receipts) } @@ -4481,7 +4479,6 @@ fn stack_agg_increase() { &mut peer_nonce, target_height.into(), &mut None, - // Some(&mut test_signers), ); // Get Bob's aggregate commit reward index @@ -4628,7 +4625,6 @@ fn stack_agg_increase() { &txs, &mut peer_nonce, target_height.into(), - // &mut test_signers, &mut None, ); @@ -5200,8 +5196,6 @@ fn stack_stx_signer_key(use_nakamoto: bool) { mut test_signers, ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); - info!("--- starting stack-stx test ---"); - let stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); @@ -5210,7 +5204,6 @@ fn stack_stx_signer_key(use_nakamoto: bool) { let signer_key_val = Value::buff_from(signer_public_key.to_bytes_compressed()).unwrap(); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); - info!("Reward cycle: {reward_cycle}"); // (define-public (stack-stx (amount-ustx uint) // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -5245,9 +5238,7 @@ fn stack_stx_signer_key(use_nakamoto: bool) { ], )]; - // let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); - // peer.make_nakamoto_tenure(tenure_change, coinbase, signers, block_builder) let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -6800,11 +6791,6 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( .with_private_key(private_key); boot_plan.add_default_balance = false; - // let balances: Vec<(PrincipalData, u64)> = addrs - // .clone() - // .into_iter() - // .map(|addr| 
(addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) - // .collect(); boot_plan.initial_balances = initial_balances; boot_plan.pox_constants = pox_constants.clone(); burnchain.pox_constants = pox_constants.clone(); @@ -6822,23 +6808,11 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( let coinbase_nonce = 0; let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; - // let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); let reward_cycle = burnchain .block_height_to_reward_cycle(burn_block_height) .unwrap() as u128; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - info!("Block height: {}", burn_block_height); - - // ( - // burnchain, - // peer, - // keys, - // latest_block, - // block_height, - // coinbase_nonce, - // Some(test_signers), - // ) ( peer, coinbase_nonce, @@ -7039,24 +7013,6 @@ fn test_scenario_one(use_nakamoto: bool) { assert_eq!(first_reward_cycle, next_reward_cycle); assert_eq!(pox_address, bob.pox_address); - info!("Got {} receipts", receipts.clone().len()); - - for receipt in receipts.clone() { - info!("Receipt: {:?}", receipt); - } - - let signer_keys_len = test_signers - .clone() - .map(|t| t.signer_keys.len()) - .unwrap_or(0); - // let signer_keys_len = if let Some(ref test_signers) = test_signers { - // test_signers.signer_keys.len() - // } else { - // 0 - // }; - - info!("Test signers now has {} keys", signer_keys_len); - // 1. Check bob's low authorization transaction let bob_tx_result_low = receipts .get(1) @@ -8981,15 +8937,11 @@ pub fn tenure_with_txs( let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - // let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) - // .unwrap() - // .unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() .unwrap() .unwrap(); - // let tip = StacksBlockId:: latest_block } else { peer.tenure_with_txs(txs, coinbase_nonce) From 3e4b875ca8ac7f920810e4ed5de876efd66ac00b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 15 Aug 2024 19:59:58 -0400 Subject: [PATCH 262/910] docs: improve docs on new tests --- testnet/stacks-node/src/tests/signer/v0.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3cf7a148046..ce95049744a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2645,7 +2645,9 @@ fn signer_set_rollover() { #[test] #[ignore] -// This test involves two miners, each mining tenures with 6 blocks each. +/// This test involves two miners, each mining tenures with 6 blocks each. Half +/// of the signers are attached to each miner, so the test also verifies that +/// the signers' messages successfully make their way to the active miner. fn multiple_miners_with_nakamoto_blocks() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -2892,11 +2894,11 @@ fn multiple_miners_with_nakamoto_blocks() { #[test] #[ignore] -// This test involves two miners, 1 and 2. During miner 1's first tenure, miner -// 2 is forced to ignore one of the blocks in that tenure. The next time miner -// 2 mines a block, it should attempt to fork the chain at that point. The test -// verifies that the fork is not successful and that miner 1 is able to -// continue mining after this fork attempt. +/// This test involves two miners, 1 and 2. 
During miner 1's first tenure, miner +/// 2 is forced to ignore one of the blocks in that tenure. The next time miner +/// 2 mines a block, it should attempt to fork the chain at that point. The test +/// verifies that the fork is not successful and that miner 1 is able to +/// continue mining after this fork attempt. fn partial_tenure_fork() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From b4987f7ed7b2324db70196ed1e495cdf03c16f24 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 16 Aug 2024 08:57:40 -0400 Subject: [PATCH 263/910] CRC: improve logging Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 64622646e3c..9245220e941 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -147,14 +147,14 @@ impl SignerTrait for Signer { return; } }; - debug!("{self}: received a mock block proposal."; + info!("{self}: received a mock block proposal."; "current_reward_cycle" => current_reward_cycle, "epoch" => ?epoch ); if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { - // We are in epoch 2.5, so we should mock mine to prove we are still alive. + // We are in epoch 2.5, so we should mock sign to prove we are still alive. self.mock_sign(mock_proposal.clone()); } } From aae44aba19a9570a264bc07e94196b91efd11197 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 16 Aug 2024 10:28:29 -0400 Subject: [PATCH 264/910] chore: Address Brice's PR comments --- testnet/stacks-node/src/tests/neon_integrations.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 64b1ca70dac..03c8eb2df86 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12408,10 +12408,9 @@ fn next_block_and_wait_all( let finished = follower_blocks_processed .iter() .zip(followers_current.iter()) - .map(|(blocks_processed, start_count)| { + .all(|(blocks_processed, start_count)| { blocks_processed.load(Ordering::SeqCst) > *start_count - }) - .all(|b| b); + }); if finished { break; @@ -12702,7 +12701,7 @@ fn mock_miner_replay() { thread::sleep(block_gap); - // first block will hold our VRF registration + // second block will hold our VRF registration next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, From 314b4b88b4062ce5d01d0fa470f6f34f7e18747b Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 19:11:53 +0300 Subject: [PATCH 265/910] action membership run --- .github/workflows/pr-differences-mutants.yml | 50 +- stackslib/src/net/download/nakamoto/mod.rs | 6 + .../nakamoto/tenure_downloader_copy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_opy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_set_copy.rs | 660 +++++++++++++ .../nakamoto/tenure_downloader_set_opy.rs | 660 +++++++++++++ .../tenure_downloader_unconfirmed_copy.rs | 867 ++++++++++++++++++ .../tenure_downloader_unconfirmed_opy.rs | 867 ++++++++++++++++++ 8 files changed, 4487 insertions(+), 9 deletions(-) create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs create mode 100644 
stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a7256873..9c5cc34c7b5 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,16 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: + inputs: + ignore_timeout: + description: "Ignore mutants timeout limit" + required: false + type: choice + options: + - true + # - false + default: 'true' concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -16,9 +26,27 @@ concurrency: cancel-in-progress: true jobs: + check-right-permissions: + name: Check Right Permissions + runs-on: ubuntu-latest + + steps: + - name: Check Right Permissions To Trigger This + id: check_right_permissions + uses: stacks-network/actions/team-membership@feat/mutation-testing + with: + username: ${{ github.actor }} + team: 'Blockchain Team' + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + + - name: Fail if the user does not have the right permissions + if: ${{ inputs.ignore_timeout == true && steps.check_right_permissions.outputs.is_team_member != 'true' }} + run: exit 1 + # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards + needs: check-right-permissions runs-on: ubuntu-latest @@ -30,10 +58,13 @@ jobs: run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }} small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }} run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }} steps: - id: check_packages_and_shards - uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@feat/mutation-testing + with: + ignore_timeout: ${{ inputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: @@ -49,7 +80,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'small' @@ -72,7 +103,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'small' @@ -94,7 +125,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stackslib' @@ -120,7 +151,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: 
${{ matrix.shard }} package: 'stackslib' @@ -142,7 +173,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stacks-node' @@ -168,7 +199,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'stacks-node' @@ -186,7 +217,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stacks-signer' @@ -211,7 +242,7 @@ jobs: steps: - name: Output Mutants - uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@main + uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@feat/mutation-testing with: stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.run_stackslib }} shards_for_stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.stackslib_with_shards }} @@ -220,3 +251,4 @@ jobs: small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }} shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }} stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }} diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index dd440ac110f..7643c54ff7d 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,8 +161,14 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; +mod tenure_downloader_copy; +mod tenure_downloader_opy; mod tenure_downloader_set; +mod tenure_downloader_set_copy; +mod tenure_downloader_set_opy; mod tenure_downloader_unconfirmed; +mod tenure_downloader_unconfirmed_copy; +mod tenure_downloader_unconfirmed_opy; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs new file mode 100644 index 00000000000..f7fb970bb6f --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
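+ +// Overview (summarizing the doc comments below): a downloader walks the +// states GetTenureStartBlock -> WaitForTenureEndBlock (or GetTenureEndBlock, +// on timeout or direct-fetch) -> GetTenureBlocks -> Done.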
+ +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is its block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock.
+ /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. 
+ pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option<NakamotoBlock>, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option<NakamotoBlock>, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option<Vec<NakamotoBlock>>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid.
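+ /// + /// Illustrative sketch only (construction of `downloader` and the fetched `block` is assumed): + /// ```ignore + /// downloader.try_accept_tenure_start_block(block)?; + /// // on success, the state advances to WaitForTenureEndBlock or GetTenureBlocks + /// ```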
+ pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines to make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenure in this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation.
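+ /// + /// Hedged driver-side sketch (the `downloader` binding is assumed): + /// ```ignore + /// if downloader.is_waiting() { + /// downloader.transition_to_fetch_end_block()?; // now in GetTenureEndBlock + /// } + /// ```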
+ pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). 
+ let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-end block: failed to validate tenure-start"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + if !valid { + warn!("Invalid tenure-end block: not a well-formed tenure-start block"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + } + + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + warn!("Invalid tenure-end block: no tenure-change transaction"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + // tc_payload must point to the tenure-start block's header + if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { + warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; + "start_block_id" => %tenure_start_block.block_id(), + "end_block_id" => %tenure_end_block.block_id(), + "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, + "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + &self.tenure_id_consensus_hash, + &tenure_end_block.block_id(), + tc_payload.previous_tenure_blocks + ); + self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.state = NakamotoTenureDownloadState::GetTenureBlocks( + tenure_end_block.header.parent_block_id.clone(), + ); + Ok(()) + } + + /// Determine how many blocks must be in this tenure. + /// Returns None if we don't have the start and end blocks yet. + pub fn tenure_length(&self) -> Option<u64> { + self.tenure_end_header + .as_ref() + .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + } + + /// Add downloaded tenure blocks to this machine. + /// If we have collected all tenure blocks, then return them and transition to the Done state. + /// + /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in + /// ascending order by height, and will include the tenure-start block but exclude the + /// tenure-end block. + /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. + /// Returns Err(..) if the blocks were invalid.
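+ /// + /// Sketch (assumes `blocks` was decoded from a tenure response, highest block first): + /// ```ignore + /// if let Some(all_blocks) = downloader.try_accept_tenure_blocks(blocks)? { + /// // all_blocks is ascending by height, starting at the tenure-start block + /// } + /// ```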
+ pub fn try_accept_tenure_blocks( + &mut self, + mut tenure_blocks: Vec<NakamotoBlock>, + ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest + let mut expected_block_id = block_cursor; + let mut count = 0; + for block in tenure_blocks.iter() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + expected_block_id = &block.header.parent_block_id; + count += 1; + if self + .tenure_blocks + .as_ref() + .map(|blocks| blocks.len()) + .unwrap_or(0) + .saturating_add(count) + > self.tenure_length().unwrap_or(0) as usize + { + // there are more blocks downloaded than indicated by the end-block's tenure-change + // transaction. + warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); + "tenure_id" => %self.tenure_id_consensus_hash, + "count" => %count, + "tenure_length" => self.tenure_length().unwrap_or(0), + "num_blocks" => tenure_blocks.len()); + return Err(NetError::InvalidMessage); + } + } + + if let Some(blocks) = self.tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.tenure_blocks = Some(tenure_blocks); + } + + // did we reach the tenure start block? + let Some(blocks) = self.tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got None)"); + return Err(NetError::InvalidState); + }; + + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no tenure-start block (infallible)"); + return Err(NetError::InvalidState); + }; + + debug!( + "Accepted tenure blocks for tenure {} cursor={} ({})", + &self.tenure_id_consensus_hash, &block_cursor, count + ); + if earliest_block.block_id() != tenure_start_block.block_id() { + // still have more blocks to download + let next_block_id = earliest_block.header.parent_block_id.clone(); + debug!( + "Need more blocks for tenure {} (went from {} to {}, next is {})", + &self.tenure_id_consensus_hash, + &block_cursor, + &earliest_block.block_id(), + &next_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + return Ok(None); + } + + // finished! + self.state = NakamotoTenureDownloadState::Done; + Ok(self + .tenure_blocks + .take() + .map(|blocks| blocks.into_iter().rev().collect())) + } + + /// Produce the next HTTP request that, when successfully executed, will fetch the data needed + /// to advance this state machine. + /// Not all states require an HTTP request for advancement.
+ /// + /// Returns Ok(Some(request)) if a request is needed + /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's + /// state) + /// Returns Err(()) if we're done. + pub fn make_next_download_request( + &self, + peerhost: PeerHost, + ) -> Result<Option<StacksHttpRequest>, ()> { + let request = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { + debug!("Request tenure-start block {}", &start_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { + // we're waiting for some other downloader's block-fetch to complete + debug!( + "Waiting for tenure-end block {} until {:?}", + &_block_id, _deadline + ); + return Ok(None); + } + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { + debug!("Request tenure-end block {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) + } + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { + debug!("Downloading tenure ending at {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) + } + NakamotoTenureDownloadState::Done => { + // nothing more to do + return Err(()); + } + }; + Ok(Some(request)) + } + + /// Begin the next download request for this state machine. The request will be sent to the + /// data URL corresponding to self.naddr. + /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. + /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) + /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to + /// resolve its data URL to a socket address. + pub fn send_next_download_request( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result<bool, NetError> { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(true); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let request = match self.make_next_download_request(peerhost) { + Ok(Some(request)) => request, + Ok(None) => { + return Ok(true); + } + Err(_) => { + return Ok(false); + } + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + self.idle = false; + Ok(true) + } + + /// Handle a received StacksHttpResponse and advance the state machine. + /// If we get the full tenure's blocks, then return them. + /// Returns Ok(Some([blocks])) if we successfully complete the state machine. + /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done + /// yet. The caller should now call `send_next_download_request()` + /// Returns Err(..) on failure to process the response.
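+ /// + /// Minimal polling sketch (network plumbing elided; `downloader`, `network`, + /// `neighbor_rpc`, and `response` are assumed): + /// ```ignore + /// while !downloader.is_done() { + /// downloader.send_next_download_request(network, neighbor_rpc)?; + /// // ... wait for the peer's HTTP response, then: + /// if let Some(blocks) = downloader.handle_next_download_response(response)? { + /// // the full tenure has been downloaded + /// } + /// } + /// ```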
+ pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { + let handle_result = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + debug!( + "Got download response for tenure-start block {}", + &_block_id + ); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_start_block(block)?; + Ok(None) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + Err(NetError::InvalidState) + } + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { + debug!("Got download response to tenure-end block {}", &_block_id); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_end_block(&block)?; + Ok(None) + } + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + debug!( + "Got download response for tenure blocks ending at {}", + &_end_block_id + ); + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), + }; + self.idle = true; + handle_result + } + + pub fn is_done(&self) -> bool { + self.state == NakamotoTenureDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs new file mode 100644 index 00000000000..f7fb970bb6f --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is its block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock.
+ /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. 
+ pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option<NakamotoBlock>, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option<NakamotoBlock>, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option<Vec<NakamotoBlock>>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid.
+ pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines to make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenure in this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation.
+ pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). 
+        let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else {
+            warn!("Invalid tenure-end block: failed to validate tenure-start";
+                  "block_id" => %tenure_end_block.block_id());
+            return Err(NetError::InvalidMessage);
+        };
+
+        if !valid {
+            warn!("Invalid tenure-end block: not a well-formed tenure-start block";
+                  "block_id" => %tenure_end_block.block_id());
+            return Err(NetError::InvalidMessage);
+        }
+
+        let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else {
+            warn!("Invalid tenure-end block: no tenure-change transaction";
+                  "block_id" => %tenure_end_block.block_id());
+            return Err(NetError::InvalidMessage);
+        };
+
+        // tc_payload must point to the tenure-start block's header
+        if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash {
+            warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block";
+                  "start_block_id" => %tenure_start_block.block_id(),
+                  "end_block_id" => %tenure_end_block.block_id(),
+                  "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash,
+                  "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        debug!(
+            "Accepted tenure-end header for tenure {} block={}; expect {} blocks",
+            &self.tenure_id_consensus_hash,
+            &tenure_end_block.block_id(),
+            tc_payload.previous_tenure_blocks
+        );
+        self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone()));
+        self.state = NakamotoTenureDownloadState::GetTenureBlocks(
+            tenure_end_block.header.parent_block_id.clone(),
+        );
+        Ok(())
+    }
+
+    /// Determine how many blocks must be in this tenure.
+    /// Returns None if we don't have the start and end blocks yet.
+    pub fn tenure_length(&self) -> Option<u64> {
+        self.tenure_end_header
+            .as_ref()
+            .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks))
+    }
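The `previous_tenure_blocks` count recovered from the tenure-change payload is what lets `try_accept_tenure_blocks` (below) reject a peer that streams more blocks than the tenure can contain. A self-contained sketch of that bound check, with plain integers standing in for block collections (hypothetical helper, not the crate's API):

```rust
// Stand-alone model of the length guard applied while accumulating tenure
// blocks; `already_have` models self.tenure_blocks.len().
fn exceeds_tenure_length(already_have: usize, newly_received: usize, tenure_length: Option<u64>) -> bool {
    let limit = tenure_length.unwrap_or(0) as usize;
    already_have.saturating_add(newly_received) > limit
}

fn main() {
    // A tenure that declared 5 blocks: 3 stored + 2 incoming is fine,
    // but a 6th block overall would be rejected as InvalidMessage.
    assert!(!exceeds_tenure_length(3, 2, Some(5)));
    assert!(exceeds_tenure_length(3, 3, Some(5)));
}
```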
+    /// Add downloaded tenure blocks to this machine.
+    /// If we have collected all tenure blocks, then return them and transition to the Done state.
+    ///
+    /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in
+    /// ascending order by height, and will include the tenure-start block but exclude the
+    /// tenure-end block.
+    /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to
+    /// the next block to fetch (stored in self.state) will be updated.
+    /// Returns Err(..) if the blocks were invalid.
+    pub fn try_accept_tenure_blocks(
+        &mut self,
+        mut tenure_blocks: Vec<NakamotoBlock>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
+            warn!("Invalid state for this method";
+                  "state" => %self.state);
+            return Err(NetError::InvalidState);
+        };
+
+        if tenure_blocks.is_empty() {
+            // nothing to do
+            return Ok(None);
+        }
+
+        // blocks must be contiguous and in order from highest to lowest
+        let mut expected_block_id = block_cursor;
+        let mut count = 0;
+        for block in tenure_blocks.iter() {
+            if &block.header.block_id() != expected_block_id {
+                warn!("Unexpected Nakamoto block -- not part of tenure";
+                      "expected_block_id" => %expected_block_id,
+                      "block_id" => %block.header.block_id(),
+                      "state" => %self.state);
+                return Err(NetError::InvalidMessage);
+            }
+
+            if let Err(e) = block
+                .header
+                .verify_signer_signatures(&self.start_signer_keys)
+            {
+                warn!("Invalid block: bad signer signature";
+                      "tenure_id" => %self.tenure_id_consensus_hash,
+                      "block.header.block_id" => %block.header.block_id(),
+                      "state" => %self.state,
+                      "error" => %e);
+                return Err(NetError::InvalidMessage);
+            }
+
+            expected_block_id = &block.header.parent_block_id;
+            count += 1;
+            if self
+                .tenure_blocks
+                .as_ref()
+                .map(|blocks| blocks.len())
+                .unwrap_or(0)
+                .saturating_add(count)
+                > self.tenure_length().unwrap_or(0) as usize
+            {
+                // there are more blocks downloaded than indicated by the end-block's
+                // tenure-change transaction.
+                warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0);
+                      "tenure_id" => %self.tenure_id_consensus_hash,
+                      "count" => %count,
+                      "tenure_length" => self.tenure_length().unwrap_or(0),
+                      "num_blocks" => tenure_blocks.len());
+                return Err(NetError::InvalidMessage);
+            }
+        }
+
+        if let Some(blocks) = self.tenure_blocks.as_mut() {
+            blocks.append(&mut tenure_blocks);
+        } else {
+            self.tenure_blocks = Some(tenure_blocks);
+        }
+
+        // did we reach the tenure start block?
+        let Some(blocks) = self.tenure_blocks.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got None)");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(earliest_block) = blocks.last() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no tenure-start block (infallible)");
+            return Err(NetError::InvalidState);
+        };
+
+        debug!(
+            "Accepted tenure blocks for tenure {} cursor={} ({})",
+            &self.tenure_id_consensus_hash, &block_cursor, count
+        );
+        if earliest_block.block_id() != tenure_start_block.block_id() {
+            // still have more blocks to download
+            let next_block_id = earliest_block.header.parent_block_id.clone();
+            debug!(
+                "Need more blocks for tenure {} (went from {} to {}, next is {})",
+                &self.tenure_id_consensus_hash,
+                &block_cursor,
+                &earliest_block.block_id(),
+                &next_block_id
+            );
+            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
+            return Ok(None);
+        }
+
+        // finished!
+        self.state = NakamotoTenureDownloadState::Done;
+        Ok(self
+            .tenure_blocks
+            .take()
+            .map(|blocks| blocks.into_iter().rev().collect()))
+    }
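Because tenure blocks arrive newest-first and are linked by `parent_block_id`, contiguity reduces to a single cursor walk, as the loop above does. A stripped-down model of that walk (hash-free, integer IDs as stand-ins):

```rust
// Toy model of the contiguity check: each block names itself and its parent;
// the cursor must match each block in turn, newest to oldest.
struct ToyBlock {
    id: u32,
    parent_id: u32,
}

fn is_contiguous(cursor: u32, blocks: &[ToyBlock]) -> bool {
    let mut expected = cursor;
    for block in blocks {
        if block.id != expected {
            return false;
        }
        expected = block.parent_id;
    }
    true
}

fn main() {
    let blocks = vec![
        ToyBlock { id: 3, parent_id: 2 },
        ToyBlock { id: 2, parent_id: 1 },
    ];
    assert!(is_contiguous(3, &blocks));
    assert!(!is_contiguous(2, &blocks)); // wrong cursor: rejected
}
```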
+    /// Produce the next HTTP request that, when successfully executed, will fetch the data needed
+    /// to advance this state machine.
+    /// Not all states require an HTTP request for advancement.
+    ///
+    /// Returns Ok(Some(request)) if a request is needed
+    /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's
+    /// state)
+    /// Returns Err(()) if we're done.
+    pub fn make_next_download_request(
+        &self,
+        peerhost: PeerHost,
+    ) -> Result<Option<StacksHttpRequest>, ()> {
+        let request = match self.state {
+            NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => {
+                debug!("Request tenure-start block {}", &start_block_id);
+                StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
+            }
+            NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => {
+                // we're waiting for some other downloader's block-fetch to complete
+                debug!(
+                    "Waiting for tenure-end block {} until {:?}",
+                    &_block_id, _deadline
+                );
+                return Ok(None);
+            }
+            NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => {
+                debug!("Request tenure-end block {}", &end_block_id);
+                StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
+            }
+            NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => {
+                debug!("Downloading tenure ending at {}", &end_block_id);
+                StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
+            }
+            NakamotoTenureDownloadState::Done => {
+                // nothing more to do
+                return Err(());
+            }
+        };
+        Ok(Some(request))
+    }
+
+    /// Begin the next download request for this state machine. The request will be sent to the
+    /// data URL corresponding to self.naddr.
+    /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The
+    /// caller should try this again until it gets one of the other possible return values.
+    /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken)
+    /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable
+    /// to resolve its data URL to a socket address.
+    pub fn send_next_download_request(
+        &mut self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> Result<bool, NetError> {
+        if neighbor_rpc.has_inflight(&self.naddr) {
+            debug!("Peer {} has an inflight request", &self.naddr);
+            return Ok(true);
+        }
+        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+            return Err(NetError::PeerNotConnected);
+        }
+
+        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+            // no conversation open to this neighbor
+            neighbor_rpc.add_dead(network, &self.naddr);
+            return Err(NetError::PeerNotConnected);
+        };
+
+        let request = match self.make_next_download_request(peerhost) {
+            Ok(Some(request)) => request,
+            Ok(None) => {
+                return Ok(true);
+            }
+            Err(_) => {
+                return Ok(false);
+            }
+        };
+
+        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+        self.idle = false;
+        Ok(true)
+    }
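Taken together, `make_next_download_request` and `send_next_download_request` form the request half of a poll loop, and `handle_next_download_response` (below) is the response half. A hedged sketch of how a caller might drive one machine to completion, with `fetch` as a hypothetical stand-in for the HTTP layer (not part of the crate; peer management and retries are elided):

```rust
// Hypothetical driver loop: `fetch` turns a request into a response.
fn drive<F>(
    downloader: &mut NakamotoTenureDownloader,
    peerhost: PeerHost,
    mut fetch: F,
) -> Result<Option<Vec<NakamotoBlock>>, NetError>
where
    F: FnMut(StacksHttpRequest) -> StacksHttpResponse,
{
    loop {
        match downloader.make_next_download_request(peerhost.clone()) {
            Ok(Some(request)) => {
                // request half: in the real code this goes through NeighborRPC
                let response = fetch(request);
                // response half: advances downloader.state
                if let Some(blocks) = downloader.handle_next_download_response(response)? {
                    return Ok(Some(blocks));
                }
            }
            Ok(None) => return Ok(None), // blocked on another machine
            Err(()) => return Ok(None),  // already Done
        }
    }
}
```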
+    /// Handle a received StacksHttpResponse and advance the state machine.
+    /// If we get the full tenure's blocks, then return them.
+    /// Returns Ok(Some([blocks])) if we successfully complete the state machine.
+    /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not
+    /// done yet. The caller should now call `send_next_download_request()`
+    /// Returns Err(..) on failure to process the response.
+    pub fn handle_next_download_response(
+        &mut self,
+        response: StacksHttpResponse,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let handle_result = match self.state {
+            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
+                debug!(
+                    "Got download response for tenure-start block {}",
+                    &_block_id
+                );
+                let block = response.decode_nakamoto_block().map_err(|e| {
+                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+                    e
+                })?;
+                self.try_accept_tenure_start_block(block)?;
+                Ok(None)
+            }
+            NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => {
+                debug!("Invalid state -- got download response while in WaitForTenureEndBlock");
+                Err(NetError::InvalidState)
+            }
+            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
+                debug!("Got download response to tenure-end block {}", &_block_id);
+                let block = response.decode_nakamoto_block().map_err(|e| {
+                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+                    e
+                })?;
+                self.try_accept_tenure_end_block(&block)?;
+                Ok(None)
+            }
+            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
+                debug!(
+                    "Got download response for tenure blocks ending at {}",
+                    &_end_block_id
+                );
+                let blocks = response.decode_nakamoto_tenure().map_err(|e| {
+                    warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
+                    e
+                })?;
+                let blocks_opt = self.try_accept_tenure_blocks(blocks)?;
+                Ok(blocks_opt)
+            }
+            NakamotoTenureDownloadState::Done => Err(NetError::InvalidState),
+        };
+        self.idle = true;
+        handle_result
+    }
+
+    pub fn is_done(&self) -> bool {
+        self.state == NakamotoTenureDownloadState::Done
+    }
+}
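The `idle` flag set at the end of `handle_next_download_response`, together with `is_waiting()` and `is_done()`, is all the set-level scheduler in the next file needs in order to decide whether a machine wants a peer. A hypothetical one-line predicate capturing that rule (`needs_attention` is illustrative, not part of the crate):

```rust
// A machine deserves a peer when it has no in-flight request, is not blocked
// on another machine's tenure-start block, and is not finished.
fn needs_attention(dl: &NakamotoTenureDownloader) -> bool {
    dl.idle && !dl.is_waiting() && !dl.is_done()
}
```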
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
new file mode 100644
index 00000000000..28a40e7eb50
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
@@ -0,0 +1,660 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo};
+use crate::chainstate::nakamoto::{
+    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+    AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader,
+    NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// A set of confirmed downloader state machines assigned to one or more neighbors. The block
+/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure
+/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer
+/// connections to downloader state machines, such that each peer is assigned to at most one
+/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at
+/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine
+/// can make progress even if there is only one available peer (in which case, that peer will get
+/// scheduled across multiple machines to drive their progress in the right sequence such that
+/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer).
+pub struct NakamotoTenureDownloaderSet {
+    /// A list of instantiated downloaders that are in progress
+    pub(crate) downloaders: Vec<Option<NakamotoTenureDownloader>>,
+    /// An assignment of peers to downloader machines in the `downloaders` list.
+    pub(crate) peers: HashMap<NeighborAddress, usize>,
+    /// The set of tenures that have been successfully downloaded (but possibly not yet stored or
+    /// processed)
+    pub(crate) completed_tenures: HashSet<ConsensusHash>,
+}
+
+impl NakamotoTenureDownloaderSet {
+    pub fn new() -> Self {
+        Self {
+            downloaders: vec![],
+            peers: HashMap::new(),
+            completed_tenures: HashSet::new(),
+        }
+    }
+
+    /// Assign the given peer to the given downloader state machine. Allocate a slot for it if
+    /// needed.
+    fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) {
+        debug!(
+            "Add downloader for tenure {} driven by {}",
+            &downloader.tenure_id_consensus_hash, &naddr
+        );
+        if let Some(idx) = self.peers.get(&naddr) {
+            self.downloaders[*idx] = Some(downloader);
+        } else {
+            self.downloaders.push(Some(downloader));
+            self.peers.insert(naddr, self.downloaders.len() - 1);
+        }
+    }
+
+    /// Does the given neighbor have an assigned downloader state machine?
+    pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool {
+        let Some(idx) = self.peers.get(naddr) else {
+            return false;
+        };
+        let Some(downloader_opt) = self.downloaders.get(*idx) else {
+            return false;
+        };
+        downloader_opt.is_some()
+    }
+
+    /// Drop the downloader associated with the given neighbor, if any.
+    pub fn clear_downloader(&mut self, naddr: &NeighborAddress) {
+        let Some(index) = self.peers.remove(naddr) else {
+            return;
+        };
+        self.downloaders[index] = None;
+    }
+
+    /// How many downloaders are there?
+    pub fn num_downloaders(&self) -> usize {
+        self.downloaders
+            .iter()
+            .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc })
+    }
+
+    /// How many downloaders are currently scheduled to a peer?
+    pub fn num_scheduled_downloaders(&self) -> usize {
+        let mut cnt = 0;
+        for (_, idx) in self.peers.iter() {
+            if let Some(Some(_)) = self.downloaders.get(*idx) {
+                cnt += 1;
+            }
+        }
+        cnt
+    }
+
+    /// Add a sequence of (address, downloader) pairs to this downloader set.
+    pub(crate) fn add_downloaders(
+        &mut self,
+        iter: impl IntoIterator<Item = (NeighborAddress, NakamotoTenureDownloader)>,
+    ) {
+        for (naddr, downloader) in iter {
+            if self.has_downloader(&naddr) {
+                debug!("Already have downloader for {}", &naddr);
+                continue;
+            }
+            self.add_downloader(naddr, downloader);
+        }
+    }
+
+    /// Count up the number of in-flight messages, based on the states of each instantiated
+    /// downloader.
+    pub fn inflight(&self) -> usize {
+        let mut cnt = 0;
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.idle {
+                continue;
+            }
+            if downloader.is_waiting() {
+                continue;
+            }
+            if downloader.is_done() {
+                continue;
+            }
+            cnt += 1;
+        }
+        cnt
+    }
+
+    /// Determine whether or not there exists a downloader for the given tenure, identified by its
+    /// consensus hash.
+    pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool {
+        self.downloaders
+            .iter()
+            .any(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch))
+    }
+
+    /// Determine if this downloader set is empty -- i.e. there are no in-progress downloaders.
+    pub fn is_empty(&self) -> bool {
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.is_done() {
+                continue;
+            }
+            debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state);
+            return false;
+        }
+        true
+    }
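The `peers` map indexes into `downloaders` by slot number, so a peer can be re-pointed at a different machine without moving the machines themselves. A toy model of that slot-reuse behavior, mirroring `add_downloader` above (simplified types, not the crate's):

```rust
use std::collections::HashMap;

// Toy model: peers own slots; assigning a new job to a known peer reuses
// its slot, while a new peer appends a fresh slot.
struct Slots {
    jobs: Vec<Option<String>>,
    peers: HashMap<String, usize>,
}

impl Slots {
    fn assign(&mut self, peer: &str, job: String) {
        if let Some(idx) = self.peers.get(peer) {
            self.jobs[*idx] = Some(job);
        } else {
            self.jobs.push(Some(job));
            self.peers.insert(peer.to_string(), self.jobs.len() - 1);
        }
    }
}
```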
+    /// Try to resume processing a download state machine with a given peer. Since a peer is
+    /// detached from the machine after a single RPC call, this call is needed to re-attach it to
+    /// a (potentially different, unblocked) machine for the next RPC call to this peer.
+    ///
+    /// Returns true if the peer gets scheduled.
+    /// Returns false if not.
+    pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool {
+        debug!("Try resume {}", &naddr);
+        if let Some(idx) = self.peers.get(&naddr) {
+            let Some(Some(_downloader)) = self.downloaders.get(*idx) else {
+                return false;
+            };
+
+            debug!(
+                "Peer {} already bound to downloader for {}",
+                &naddr, &_downloader.tenure_id_consensus_hash
+            );
+            return true;
+        }
+        for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if !downloader.idle {
+                continue;
+            }
+            if downloader.is_waiting() {
+                continue;
+            }
+            if downloader.naddr != naddr {
+                continue;
+            }
+            debug!(
+                "Assign peer {} to work on downloader for {} in state {}",
+                &naddr, &downloader.tenure_id_consensus_hash, &downloader.state
+            );
+            self.peers.insert(naddr, i);
+            return true;
+        }
+        false
+    }
+
+    /// Deschedule peers that are bound to downloader slots that are either vacant or correspond
+    /// to blocked downloaders.
+    pub fn clear_available_peers(&mut self) {
+        let mut idled: Vec<NeighborAddress> = vec![];
+        for (naddr, i) in self.peers.iter() {
+            let Some(downloader_opt) = self.downloaders.get(*i) else {
+                // should be unreachable
+                idled.push(naddr.clone());
+                continue;
+            };
+            let Some(downloader) = downloader_opt else {
+                debug!("Remove peer {} for null download {}", &naddr, i);
+                idled.push(naddr.clone());
+                continue;
+            };
+            if downloader.idle || downloader.is_waiting() {
+                debug!(
+                    "Remove idled peer {} for tenure download {}",
+                    &naddr, &downloader.tenure_id_consensus_hash
+                );
+                idled.push(naddr.clone());
+            }
+        }
+        for naddr in idled.into_iter() {
+            self.peers.remove(&naddr);
+        }
+    }
+
+    /// Clear out downloaders (but not their peers) that have finished. The caller should follow
+    /// this up with a call to `clear_available_peers()`.
+    pub fn clear_finished_downloaders(&mut self) {
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.is_done() {
+                *downloader_opt = None;
+            }
+        }
+    }
+
+    /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These
+    /// will be fed into other downloaders which are blocked on needing their tenure-end blocks.
+    pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap<StacksBlockId, NakamotoBlock> {
+        let mut ret = HashMap::new();
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            let Some(block) = downloader.tenure_start_block.as_ref() else {
+                continue;
+            };
+            ret.insert(block.block_id(), block.clone());
+        }
+        ret
+    }
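`find_new_tenure_start_blocks` and `handle_tenure_end_blocks` (below) implement the handoff this design depends on: tenure N's start block doubles as tenure N-1's end block. A toy model of that exchange (integer IDs as stand-ins, not the crate's types):

```rust
use std::collections::HashMap;

// Machines waiting on an end-block id are unblocked by any newly downloaded
// start block with that same id.
fn unblock(waiting_end_ids: &[u32], new_start_blocks: &HashMap<u32, String>) -> Vec<u32> {
    waiting_end_ids
        .iter()
        .filter(|id| new_start_blocks.contains_key(id))
        .copied()
        .collect()
}

fn main() {
    let mut starts = HashMap::new();
    starts.insert(7, "start block of tenure N".to_string());
    // The machine downloading tenure N-1 was waiting on block 7 as its end block.
    assert_eq!(unblock(&[7, 9], &starts), vec![7]);
}
```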
+    /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their
+    /// tenure-end blocks.
+    /// Return a list of peers driving downloaders with failing `tenure_start_blocks`.
+    pub(crate) fn handle_tenure_end_blocks(
+        &mut self,
+        tenure_start_blocks: &HashMap<StacksBlockId, NakamotoBlock>,
+    ) -> Vec<NeighborAddress> {
+        debug!(
+            "handle tenure-end blocks: {:?}",
+            &tenure_start_blocks.keys().collect::<Vec<_>>()
+        );
+        let mut dead = vec![];
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) =
+                &downloader.state
+            else {
+                continue;
+            };
+            let Some(end_block) = tenure_start_blocks.get(end_block_id) else {
+                continue;
+            };
+            if let Err(e) = downloader.try_accept_tenure_end_block(end_block) {
+                warn!(
+                    "Failed to accept tenure end-block {} for tenure {}: {:?}",
+                    &end_block.block_id(),
+                    &downloader.tenure_id_consensus_hash,
+                    &e
+                );
+                dead.push(downloader.naddr.clone());
+            }
+        }
+        dead
+    }
+
+    /// Does there exist a downloader (possibly unscheduled) for the given tenure?
+    pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool {
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if &downloader.tenure_id_consensus_hash == tenure_id {
+                debug!(
+                    "Have downloader for tenure {} already (idle={}, waiting={}, state={})",
+                    tenure_id,
+                    downloader.idle,
+                    downloader.is_waiting(),
+                    &downloader.state
+                );
+                return true;
+            }
+        }
+        false
+    }
+
+    /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor
+    /// block, we need to go and directly fetch its end block instead of waiting for another
+    /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method
+    /// just unconditionally sets the highest available tenure downloader to fetch its tenure
+    /// end block.
+    pub(crate) fn try_transition_fetch_tenure_end_blocks(
+        &mut self,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+    ) {
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt.as_mut() else {
+                continue;
+            };
+            downloader.transition_to_fetch_end_block_on_timeout();
+        }
+
+        // find tenures in which we need to fetch the tenure-end block directly.
+        let mut last_available_tenures: HashSet<StacksBlockId> = HashSet::new();
+        for (_, all_available) in tenure_block_ids.iter() {
+            for (_, available) in all_available.iter() {
+                if available.fetch_end_block {
+                    last_available_tenures.insert(available.end_block_id.clone());
+                }
+            }
+        }
+
+        // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to
+        // fetching
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt.as_mut() else {
+                continue;
+            };
+            if !downloader.idle {
+                continue;
+            }
+            if !downloader.is_waiting() {
+                continue;
+            }
+            if !last_available_tenures.contains(&downloader.tenure_end_block_id) {
+                continue;
+            }
+            debug!(
+                "Transition downloader for {} from waiting to fetching",
+                &downloader.tenure_id_consensus_hash
+            );
+            if let Err(e) = downloader.transition_to_fetch_end_block() {
+                warn!(
+                    "Downloader for {} failed to transition to fetch end block: {:?}",
+                    &downloader.tenure_id_consensus_hash, &e
+                );
+            }
+        }
+    }
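The waiting state is deliberately bounded: `transition_to_fetch_end_block_on_timeout` (in the tenure-downloader file) flips a machine to `GetTenureEndBlock` once its deadline passes, and `try_transition_fetch_tenure_end_blocks` above does the same eagerly for tenures flagged `fetch_end_block`. A minimal sketch of the deadline rule, under the assumption that the timeout is expressed in whole seconds:

```rust
use std::time::{Duration, Instant};

// A machine waits for another downloader's start block until its deadline,
// then fetches the end block itself.
fn should_fetch_directly(wait_started: Instant, timeout_secs: u64) -> bool {
    let deadline = wait_started + Duration::new(timeout_secs, 0);
    deadline < Instant::now()
}
```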
+    /// Create a given number of downloads from a schedule and availability set.
+    /// Removes items from the schedule, and neighbors from the availability set.
+    /// A neighbor will be issued at most one request.
+    pub(crate) fn make_tenure_downloaders(
+        &mut self,
+        schedule: &mut VecDeque<ConsensusHash>,
+        available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+        count: usize,
+        current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>,
+    ) {
+        debug!("make_tenure_downloaders";
+               "schedule" => ?schedule,
+               "available" => ?available,
+               "tenure_block_ids" => ?tenure_block_ids,
+               "inflight" => %self.inflight(),
+               "count" => count,
+               "running" => self.num_downloaders(),
+               "scheduled" => self.num_scheduled_downloaders());
+
+        self.clear_finished_downloaders();
+        self.clear_available_peers();
+        self.try_transition_fetch_tenure_end_blocks(tenure_block_ids);
+        while self.inflight() < count {
+            let Some(ch) = schedule.front() else {
+                break;
+            };
+            if self.completed_tenures.contains(ch) {
+                debug!("Already successfully downloaded tenure {}", &ch);
+                schedule.pop_front();
+                continue;
+            }
+            let Some(neighbors) = available.get_mut(ch) else {
+                // not found on any neighbors, so stop trying this tenure
+                debug!("No neighbors have tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            };
+            if neighbors.is_empty() {
+                // no more neighbors to try
+                debug!("No more neighbors can serve tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            }
+            let Some(naddr) = neighbors.pop() else {
+                debug!("No more neighbors can serve tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            };
+            if self.try_resume_peer(naddr.clone()) {
+                continue;
+            };
+            if self.has_downloader_for_tenure(ch) {
+                schedule.pop_front();
+                continue;
+            }
+
+            let Some(available_tenures) = tenure_block_ids.get(&naddr) else {
+                // this peer doesn't have any known tenures, so try the others
+                debug!("No tenures available from {}", &naddr);
+                continue;
+            };
+            let Some(tenure_info) = available_tenures.get(ch) else {
+                // this peer does not have a tenure start/end block for this tenure, so try the
+                // others.
+                debug!("Neighbor {} does not serve tenure {}", &naddr, ch);
+                continue;
+            };
+            let Some(Some(start_reward_set)) = current_reward_cycles
+                .get(&tenure_info.start_reward_cycle)
+                .map(|cycle_info| cycle_info.reward_set())
+            else {
+                debug!(
+                    "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}",
+                    tenure_info.start_reward_cycle,
+                    &tenure_info
+                );
+                schedule.pop_front();
+                continue;
+            };
+            let Some(Some(end_reward_set)) = current_reward_cycles
+                .get(&tenure_info.end_reward_cycle)
+                .map(|cycle_info| cycle_info.reward_set())
+            else {
+                debug!(
+                    "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}",
+                    tenure_info.end_reward_cycle,
+                    &tenure_info
+                );
+                schedule.pop_front();
+                continue;
+            };
+
+            debug!(
+                "Download tenure {} (start={}, end={}) (rc {},{})",
+                &ch,
+                &tenure_info.start_block_id,
+                &tenure_info.end_block_id,
+                tenure_info.start_reward_cycle,
+                tenure_info.end_reward_cycle
+            );
+            let tenure_download = NakamotoTenureDownloader::new(
+                ch.clone(),
+                tenure_info.start_block_id.clone(),
+                tenure_info.end_block_id.clone(),
+                naddr.clone(),
+                start_reward_set.clone(),
+                end_reward_set.clone(),
+            );
+
+            debug!("Request tenure {} from neighbor {}", ch, &naddr);
+            self.add_downloader(naddr, tenure_download);
+            schedule.pop_front();
+        }
+    }
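The while-loop above drains the schedule front-to-back, pairing each tenure with one of its available neighbors until the in-flight budget is spent. A toy version of that bookkeeping (plain integers and strings stand in for consensus hashes and neighbor addresses):

```rust
use std::collections::{HashMap, VecDeque};

// Pop tenures off the schedule and pair each with an available neighbor
// until the budget `count` is used up; tenures with no neighbors are dropped.
fn schedule_downloads(
    schedule: &mut VecDeque<u32>,
    available: &mut HashMap<u32, Vec<&'static str>>,
    count: usize,
) -> Vec<(u32, &'static str)> {
    let mut assigned = vec![];
    while assigned.len() < count {
        let Some(tenure) = schedule.front().copied() else {
            break;
        };
        let Some(peers) = available.get_mut(&tenure) else {
            schedule.pop_front();
            continue;
        };
        let Some(peer) = peers.pop() else {
            schedule.pop_front();
            continue;
        };
        assigned.push((tenure, peer));
        schedule.pop_front();
    }
    assigned
}
```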
+    /// Run all confirmed downloaders.
+    /// * Identify neighbors for which we do not have an inflight request
+    /// * Get each such neighbor's downloader, and generate its next HTTP request. Send that
+    ///   request to the neighbor and begin driving the underlying socket I/O.
+    /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance
+    ///   its state.
+    /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken.
+    ///
+    /// Returns the set of downloaded blocks obtained for completed downloaders. These will be
+    /// full confirmed tenures.
+    pub fn run(
+        &mut self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> {
+        let addrs: Vec<_> = self.peers.keys().cloned().collect();
+        let mut finished = vec![];
+        let mut finished_tenures = vec![];
+        let mut new_blocks = HashMap::new();
+
+        // send requests
+        for (naddr, index) in self.peers.iter() {
+            if neighbor_rpc.has_inflight(&naddr) {
+                debug!("Peer {} has an inflight request", &naddr);
+                continue;
+            }
+            let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+                debug!("No downloader for {}", &naddr);
+                continue;
+            };
+            if downloader.is_done() {
+                debug!("Downloader for {} is done", &naddr);
+                finished.push(naddr.clone());
+                finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+                continue;
+            }
+            debug!(
+                "Send request to {} for tenure {} (state {})",
+                &naddr, &downloader.tenure_id_consensus_hash, &downloader.state
+            );
+            let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else {
+                debug!("Downloader for {} failed; this peer is dead", &naddr);
+                neighbor_rpc.add_dead(network, naddr);
+                continue;
+            };
+            if !sent {
+                // this downloader is dead or broken
+                finished.push(naddr.clone());
+                continue;
+            }
+        }
+
+        // clear dead, broken, and done
+        for naddr in addrs.iter() {
+            if neighbor_rpc.is_dead_or_broken(network, naddr) {
+                debug!("Remove dead/broken downloader for {}", &naddr);
+                self.clear_downloader(&naddr);
+            }
+        }
+        for done_naddr in finished.drain(..) {
+            debug!("Remove finished downloader for {}", &done_naddr);
+            self.clear_downloader(&done_naddr);
+        }
+        for done_tenure in finished_tenures.drain(..) {
+            self.completed_tenures.insert(done_tenure);
+        }
+
+        // handle responses
+        for (naddr, response) in neighbor_rpc.collect_replies(network) {
+            let Some(index) = self.peers.get(&naddr) else {
+                debug!("No downloader for {}", &naddr);
+                continue;
+            };
+            let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+                debug!("No downloader for {}", &naddr);
+                continue;
+            };
+            debug!("Got response from {}", &naddr);
+
+            let Ok(blocks_opt) = downloader
+                .handle_next_download_response(response)
+                .map_err(|e| {
+                    debug!("Failed to handle response from {}: {:?}", &naddr, &e);
+                    e
+                })
+            else {
+                debug!("Failed to handle download response from {}", &naddr);
+                neighbor_rpc.add_dead(network, &naddr);
+                continue;
+            };
+
+            let Some(blocks) = blocks_opt else {
+                continue;
+            };
+
+            debug!(
+                "Got {} blocks for tenure {}",
+                blocks.len(),
+                &downloader.tenure_id_consensus_hash
+            );
+            new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks);
+            if downloader.is_done() {
+                finished.push(naddr.clone());
+                finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+                continue;
+            }
+        }
+
+        // clear dead, broken, and done
+        for naddr in addrs.iter() {
+            if neighbor_rpc.is_dead_or_broken(network, naddr) {
+                debug!("Remove dead/broken downloader for {}", &naddr);
+                self.clear_downloader(naddr);
+            }
+        }
+        for done_naddr in finished.drain(..) {
+            debug!("Remove finished downloader for {}", &done_naddr);
+            self.clear_downloader(&done_naddr);
+        }
+        for done_tenure in finished_tenures.drain(..) {
+            self.completed_tenures.insert(done_tenure);
+        }
+
+        new_blocks
+    }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs
new file mode 100644
index 00000000000..c96f718d2b9
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs
@@ -0,0 +1,867 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::RewardCycleInfo;
+use crate::chainstate::nakamoto::{
+    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+    AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
+    WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for the unconfirmed tenures. These include the ongoing tenure, as well as the
+/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but
+/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure).
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoUnconfirmedDownloadState {
+    /// Getting the tenure tip information
+    GetTenureInfo,
+    /// Get the tenure start block for the ongoing tenure.
+    /// The inner value is the tenure-start block ID of the ongoing tenure.
+    GetTenureStartBlock(StacksBlockId),
+    /// Receiving unconfirmed tenure blocks.
+    /// The inner value is the block ID of the next block to fetch.
+    GetUnconfirmedTenureBlocks(StacksBlockId),
+    /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block
+    /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
+    Done,
+}
+
+impl fmt::Display for NakamotoUnconfirmedDownloadState {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+/// Download state machine for the unconfirmed tenures. It operates in the following steps:
+///
+/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip
+/// 2. Get the tenure-start block for the unconfirmed chain tip
+/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the
+/// immediate child of the one obtained in (2)
+///
+/// Once this state-machine finishes execution, the tenure-start block is used to construct a
+/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure.
+///
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoUnconfirmedTenureDownloader {
+    /// state of this machine
+    pub state: NakamotoUnconfirmedDownloadState,
+    /// Address of who we're asking
+    pub naddr: NeighborAddress,
+    /// reward set of the highest confirmed tenure
+    pub confirmed_signer_keys: Option<RewardSet>,
+    /// reward set of the unconfirmed (ongoing) tenure
+    pub unconfirmed_signer_keys: Option<RewardSet>,
+    /// Block ID of this node's highest-processed block.
+    /// We will not download any blocks lower than this, if it's set.
+    pub highest_processed_block_id: Option<StacksBlockId>,
+    /// Highest processed block height (which may not need to be loaded)
+    pub highest_processed_block_height: Option<u64>,
+
+    /// Tenure tip info we obtained for this peer
+    pub tenure_tip: Option<RPCGetTenureInfo>,
+    /// Tenure start block for the ongoing tip.
+    /// This is also the tenure-end block for the highest-complete tip.
+    pub unconfirmed_tenure_start_block: Option<NakamotoBlock>,
+    /// Unconfirmed tenure blocks obtained
+    pub unconfirmed_tenure_blocks: Option<Vec<NakamotoBlock>>,
+}
+
+impl NakamotoUnconfirmedTenureDownloader {
+    /// Make a new downloader which will download blocks from the tip back down to the optional
+    /// `highest_processed_block_id` (so we don't re-download the same blocks over and over).
+    pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option<StacksBlockId>) -> Self {
+        Self {
+            state: NakamotoUnconfirmedDownloadState::GetTenureInfo,
+            naddr,
+            confirmed_signer_keys: None,
+            unconfirmed_signer_keys: None,
+            highest_processed_block_id,
+            highest_processed_block_height: None,
+            tenure_tip: None,
+            unconfirmed_tenure_start_block: None,
+            unconfirmed_tenure_blocks: None,
+        }
+    }
+
+    /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenures/info, which is
+    /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote
+    /// node).
+    pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> {
+        self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash)
+    }
+
+    /// Set the highest-processed block.
+    /// This can be performed by the downloader itself in order to inform ongoing requests for
+    /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node
+    /// has already handled.
+    pub fn set_highest_processed_block(
+        &mut self,
+        highest_processed_block_id: StacksBlockId,
+        highest_processed_block_height: u64,
+    ) {
+        self.highest_processed_block_id = Some(highest_processed_block_id);
+        self.highest_processed_block_height = Some(highest_processed_block_height);
+    }
+
+    /// Try and accept the tenure info. It will be validated against the sortition DB and its tip.
+    ///
+    /// * tenure_tip.consensus_hash
+    ///    This is the consensus hash of the remote node's ongoing tenure. It may not be the
+    ///    sortition tip, e.g. if the tenure spans multiple sortitions.
+    /// * tenure_tip.tenure_start_block_id
+    ///    This is the first block ID of the ongoing unconfirmed tenure.
+    /// * tenure_tip.parent_consensus_hash
+    ///    This is the consensus hash of the parent of the ongoing tenure. It's the node's highest
+    ///    complete tenure, for which we know the start and end block IDs.
+    /// * tenure_tip.parent_tenure_start_block_id
+    ///    This is the tenure start block for the highest complete tenure. It should be equal to
+    ///    the winning Stacks block hash of the snapshot for the ongoing tenure.
+    ///
+    /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go
+    /// fetch it again; just get the new unconfirmed blocks.
+    pub fn try_accept_tenure_info(
+        &mut self,
+        sortdb: &SortitionDB,
+        local_sort_tip: &BlockSnapshot,
+        chainstate: &StacksChainState,
+        remote_tenure_tip: RPCGetTenureInfo,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+    ) -> Result<(), NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo {
+            return Err(NetError::InvalidState);
+        }
+        if self.tenure_tip.is_some() {
+            return Err(NetError::InvalidState);
+        }
+
+        debug!("Got tenure info {:?}", remote_tenure_tip);
+        debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash);
+
+        // authenticate consensus hashes against canonical chain history
+        let local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &remote_tenure_tip.consensus_hash,
+        )?
+        .ok_or_else(|| {
+            debug!(
+                "No snapshot for tenure {}",
+                &remote_tenure_tip.consensus_hash
+            );
+            NetError::DBError(DBError::NotFoundError)
+        })?;
+        let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &remote_tenure_tip.parent_consensus_hash,
+        )?
+        .ok_or_else(|| {
+            debug!(
+                "No snapshot for parent tenure {}",
+                &remote_tenure_tip.parent_consensus_hash
+            );
+            NetError::DBError(DBError::NotFoundError)
+        })?;
+
+        let ih = sortdb.index_handle(&local_sort_tip.sortition_id);
+        let ancestor_local_tenure_sn = ih
+            .get_block_snapshot_by_height(local_tenure_sn.block_height)?
+            .ok_or_else(|| {
+                debug!(
+                    "No tenure snapshot at burn block height {} off of sortition {} ({})",
+                    local_tenure_sn.block_height,
+                    &local_tenure_sn.sortition_id,
+                    &local_tenure_sn.consensus_hash
+                );
+                NetError::DBError(DBError::NotFoundError)
+            })?;
+
+        if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id {
+            // .consensus_hash is not on the canonical fork
+            warn!("Unconfirmed tenure consensus hash is not canonical";
+                  "peer" => %self.naddr,
+                  "consensus_hash" => %remote_tenure_tip.consensus_hash);
+            return Err(DBError::NotFoundError.into());
+        }
+        let ancestor_parent_local_tenure_sn = ih
+            .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)?
+            .ok_or_else(|| {
+                debug!(
+                    "No parent tenure snapshot at burn block height {} off of sortition {} ({})",
+                    parent_local_tenure_sn.block_height,
+                    &parent_local_tenure_sn.sortition_id,
+                    &parent_local_tenure_sn.consensus_hash
+                );
+                NetError::DBError(DBError::NotFoundError)
+            })?;
+
+        if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id {
+            // .parent_consensus_hash is not on the canonical fork
+            warn!("Parent unconfirmed tenure consensus hash is not canonical";
+                  "peer" => %self.naddr,
+                  "consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+            return Err(DBError::NotFoundError.into());
+        }
+
+        // parent tenure sortition must precede the ongoing tenure sortition
+        if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height {
+            warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot";
+                  "peer" => %self.naddr,
+                  "consensus_hash" => %remote_tenure_tip.consensus_hash,
+                  "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // parent tenure start block ID must be the winning block hash for the ongoing tenure's
+        // snapshot
+        if local_tenure_sn.winning_stacks_block_hash.0
+            != remote_tenure_tip.parent_tenure_start_block_id.0
+        {
+            debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr;
+                   "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id,
+                   "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash);
+            return Err(NetError::StaleView);
+        }
+
+        if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+            // we've synchronized this tenure before, so don't get any more blocks before it.
+            let highest_processed_block = chainstate
+                .nakamoto_blocks_db()
+                .get_nakamoto_block(highest_processed_block_id)?
+                .ok_or_else(|| {
+                    debug!("No such Nakamoto block {}", &highest_processed_block_id);
+                    NetError::DBError(DBError::NotFoundError)
+                })?
+                .0;
+
+            let highest_processed_block_height = highest_processed_block.header.chain_length;
+            self.highest_processed_block_height = Some(highest_processed_block_height);
+
+            if &remote_tenure_tip.tip_block_id == highest_processed_block_id
+                || highest_processed_block_height > remote_tenure_tip.tip_height
+            {
+                // nothing to do -- we're at or ahead of the remote peer, so finish up.
+                // If we don't have the tenure-start block for the confirmed tenure that the remote
+                // peer claims to have, then the remote peer has sent us invalid data and we should
+                // treat it as such.
+                let unconfirmed_tenure_start_block = chainstate
+                    .nakamoto_blocks_db()
+                    .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+                    .ok_or(NetError::InvalidMessage)?
+                    .0;
+                self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+                self.state = NakamotoUnconfirmedDownloadState::Done;
+            }
+        }
+
+        if self.state == NakamotoUnconfirmedDownloadState::Done {
+            // only need to remember the tenure tip
+            self.tenure_tip = Some(remote_tenure_tip);
+            return Ok(());
+        }
+
+        // we're not finished
+        let tenure_rc = sortdb
+            .pox_constants
+            .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height)
+            .expect("FATAL: sortition from before system start");
+        let parent_tenure_rc = sortdb
+            .pox_constants
+            .block_height_to_reward_cycle(
+                sortdb.first_block_height,
+                parent_local_tenure_sn.block_height,
+            )
+            .expect("FATAL: sortition from before system start");
+
+        // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions
+        let Some(Some(confirmed_reward_set)) = current_reward_sets
+            .get(&parent_tenure_rc)
+            .map(|cycle_info| cycle_info.reward_set())
+        else {
+            warn!(
+                "No signer public keys for confirmed tenure {} (rc {})",
+                &parent_local_tenure_sn.consensus_hash, parent_tenure_rc
+            );
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(Some(unconfirmed_reward_set)) = current_reward_sets
+            .get(&tenure_rc)
+            .map(|cycle_info| cycle_info.reward_set())
+        else {
+            warn!(
+                "No signer public keys for unconfirmed tenure {} (rc {})",
+                &local_tenure_sn.consensus_hash, tenure_rc
+            );
+            return Err(NetError::InvalidState);
+        };
+
+        if chainstate
+            .nakamoto_blocks_db()
+            .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id)?
+        {
+            // proceed to get unconfirmed blocks. We already have the tenure-start block.
+            let unconfirmed_tenure_start_block = chainstate
+                .nakamoto_blocks_db()
+                .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+                .ok_or_else(|| {
+                    debug!(
+                        "No such tenure-start Nakamoto block {}",
+                        &remote_tenure_tip.tenure_start_block_id
+                    );
+                    NetError::DBError(DBError::NotFoundError)
+                })?
+                .0;
+            self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+            self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+                remote_tenure_tip.tip_block_id.clone(),
+            );
+        } else {
+            // get the tenure-start block first
+            self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock(
+                remote_tenure_tip.tenure_start_block_id.clone(),
+            );
+        }
+
+        debug!(
+            "Will validate unconfirmed blocks with reward sets in ({},{})",
+            parent_tenure_rc, tenure_rc
+        );
+        self.confirmed_signer_keys = Some(confirmed_reward_set.clone());
+        self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone());
+        self.tenure_tip = Some(remote_tenure_tip);
+
+        Ok(())
+    }
+
+    /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state.
+    /// Returns Ok(()) if the unconfirmed tenure start block was valid
+    /// Returns Err(..) if it was not valid, or if this function was called out of sequence.
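+    ///
+    /// A minimal usage sketch (illustrative only; `downloader` and `block` are assumed
+    /// bindings, with `downloader` in the `GetTenureStartBlock` state):
+    /// ```ignore
+    /// match downloader.try_accept_unconfirmed_tenure_start_block(block) {
+    ///     Ok(()) => { /* state advanced to GetUnconfirmedTenureBlocks */ }
+    ///     Err(e) => { /* bad signature, wrong block, or out-of-sequence call */ }
+    /// }
+    /// ```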
+    pub fn try_accept_unconfirmed_tenure_start_block(
+        &mut self,
+        unconfirmed_tenure_start_block: NakamotoBlock,
+    ) -> Result<(), NetError> {
+        let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) =
+            &self.state
+        else {
+            warn!("Invalid state for this method";
+                  "state" => %self.state);
+            return Err(NetError::InvalidState);
+        };
+        let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+            warn!("tenure_tip is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            warn!("unconfirmed_signer_keys is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        // stacker signature has to match the current reward set
+        if let Err(e) = unconfirmed_tenure_start_block
+            .header
+            .verify_signer_signatures(unconfirmed_signer_keys)
+        {
+            warn!("Invalid tenure-start block: bad signer signature";
+                  "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                  "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(),
+                  "state" => %self.state,
+                  "error" => %e);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // block has to match the expected hash
+        if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() {
+            warn!("Invalid tenure-start block";
+                  "tenure_id_start_block" => %tenure_start_block_id,
+                  "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                  "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(),
+                  "state" => %self.state);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // furthermore, the block has to match the expected tenure ID
+        if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash {
+            warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch";
+                  "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                  "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+            tenure_tip.tip_block_id.clone(),
+        );
+        Ok(())
+    }
+
+    /// Add downloaded unconfirmed tenure blocks.
+    /// If we have collected all tenure blocks, then return them.
+    /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the
+    /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come
+    /// after the highest-processed block (if set).
+    /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call
+    /// `send_next_download_request()`
+    /// Returns Err(..) on invalid state or invalid block.
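+    ///
+    /// A hedged sketch of the caller's loop (illustrative only; `downloader` and `blocks`
+    /// are assumed bindings):
+    /// ```ignore
+    /// match downloader.try_accept_unconfirmed_tenure_blocks(blocks)? {
+    ///     Some(tenure) => { /* done; height-ordered blocks newer than the highest-processed block */ }
+    ///     None => { /* more to fetch; call send_next_download_request() again */ }
+    /// }
+    /// ```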
+    pub fn try_accept_unconfirmed_tenure_blocks(
+        &mut self,
+        mut tenure_blocks: Vec<NakamotoBlock>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) =
+            &self.state
+        else {
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+            warn!("tenure_tip is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            warn!("unconfirmed_signer_keys is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        if tenure_blocks.is_empty() {
+            // nothing to do
+            debug!("No tenure blocks obtained");
+            return Ok(None);
+        }
+
+        // blocks must be contiguous and in order from highest to lowest.
+        // If there's a tenure-start block, it must be last.
+        let mut expected_block_id = last_block_id;
+        let mut finished_download = false;
+        let mut last_block_index = None;
+        for (cnt, block) in tenure_blocks.iter().enumerate() {
+            if &block.header.block_id() != expected_block_id {
+                warn!("Unexpected Nakamoto block -- not part of tenure";
+                      "expected_block_id" => %expected_block_id,
+                      "block_id" => %block.header.block_id());
+                return Err(NetError::InvalidMessage);
+            }
+            if let Err(e) = block
+                .header
+                .verify_signer_signatures(unconfirmed_signer_keys)
+            {
+                warn!("Invalid block: bad signer signature";
+                      "tenure_id" => %tenure_tip.consensus_hash,
+                      "block.header.block_id" => %block.header.block_id(),
+                      "state" => %self.state,
+                      "error" => %e);
+                return Err(NetError::InvalidMessage);
+            }
+
+            // we may or may not need the tenure-start block for the unconfirmed tenure. But if we
+            // do, make sure it's valid, and it's the last block we receive.
+            let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else {
+                warn!("Invalid tenure-start block";
+                      "tenure_id" => %tenure_tip.consensus_hash,
+                      "block.header.block_id" => %block.header.block_id(),
+                      "state" => %self.state);
+                return Err(NetError::InvalidMessage);
+            };
+            if is_tenure_start {
+                // this is the tenure-start block, so make sure it matches our /v3/tenures/info
+                if block.header.block_id() != tenure_tip.tenure_start_block_id {
+                    warn!("Unexpected tenure-start block";
+                          "tenure_id" => %tenure_tip.consensus_hash,
+                          "block.header.block_id" => %block.header.block_id(),
+                          "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id);
+                    return Err(NetError::InvalidMessage);
+                }
+
+                if cnt.saturating_add(1) != tenure_blocks.len() {
+                    warn!("Invalid tenure stream -- got tenure-start before end of tenure";
+                          "tenure_id" => %tenure_tip.consensus_hash,
+                          "block.header.block_id" => %block.header.block_id(),
+                          "cnt" => cnt,
+                          "len" => tenure_blocks.len(),
+                          "state" => %self.state);
+                    return Err(NetError::InvalidMessage);
+                }
+
+                finished_download = true;
+                last_block_index = Some(cnt);
+                break;
+            }
+
+            debug!("Got unconfirmed tenure block {}", &block.header.block_id());
+
+            // NOTE: this field can get updated by the downloader while this state-machine is in
+            // this state.
+            if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+                if expected_block_id == highest_processed_block_id {
+                    // got all the blocks we asked for
+                    debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id);
+                    finished_download = true;
+                    last_block_index = Some(cnt);
+                    break;
+                }
+            }
+
+            // NOTE: this field can get updated by the downloader while this state-machine is in
+            // this state.
+            if let Some(highest_processed_block_height) =
+                self.highest_processed_block_height.as_ref()
+            {
+                if &block.header.chain_length <= highest_processed_block_height {
+                    // no need to continue this download
+                    debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height);
+                    finished_download = true;
+                    last_block_index = Some(cnt);
+                    break;
+                }
+            }
+
+            expected_block_id = &block.header.parent_block_id;
+            last_block_index = Some(cnt);
+        }
+
+        // blocks after the last_block_index were not processed, so should be dropped
+        if let Some(last_block_index) = last_block_index {
+            tenure_blocks.truncate(last_block_index + 1);
+        }
+
+        if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() {
+            blocks.append(&mut tenure_blocks);
+        } else {
+            self.unconfirmed_tenure_blocks = Some(tenure_blocks);
+        }
+
+        if finished_download {
+            // we have all of the unconfirmed tenure blocks that were requested.
+            // only return those newer than the highest block.
+            self.state = NakamotoUnconfirmedDownloadState::Done;
+            let highest_processed_block_height =
+                *self.highest_processed_block_height.as_ref().unwrap_or(&0);
+
+            debug!("Finished receiving unconfirmed tenure");
+            return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| {
+                blocks
+                    .into_iter()
+                    .filter(|block| block.header.chain_length > highest_processed_block_height)
+                    .rev()
+                    .collect()
+            }));
+        }
+
+        let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+
+        // still have more to get
+        let Some(earliest_block) = blocks.last() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+        let next_block_id = earliest_block.header.parent_block_id.clone();
+
+        debug!(
+            "Will resume fetching unconfirmed tenure blocks starting at {}",
+            &next_block_id
+        );
+        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id);
+        Ok(None)
+    }
+
+    /// Once this machine runs to completion, examine its state to see if we still need to fetch
+    /// the highest complete tenure. We may not need to, especially if we're just polling for new
+    /// unconfirmed blocks.
+    ///
+    /// Return Ok(true) if we need it still
+    /// Return Ok(false) if we already have it
+    /// Return Err(..) if we encounter a DB error or if this function was called out of sequence.
+    pub fn need_highest_complete_tenure(
+        &self,
+        chainstate: &StacksChainState,
+    ) -> Result<bool, NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::Done {
+            return Err(NetError::InvalidState);
+        }
+        let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
+        else {
+            return Err(NetError::InvalidState);
+        };
+
+        // if we've processed the unconfirmed tenure-start block already, then we've necessarily
+        // downloaded and processed the highest-complete tenure already.
+        Ok(!NakamotoChainState::has_block_header(
+            chainstate.db(),
+            &unconfirmed_tenure_start_block.header.block_id(),
+            false,
+        )?)
+    }
+
+    /// Determine if we can produce a highest-complete tenure request.
+    /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure
+    pub fn can_make_highest_complete_tenure_downloader(
+        &self,
+        sortdb: &SortitionDB,
+    ) -> Result<bool, NetError> {
+        let Some(tenure_tip) = &self.tenure_tip else {
+            return Ok(false);
+        };
+
+        let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &tenure_tip.parent_consensus_hash,
+        )?
+        else {
+            return Ok(false);
+        };
+
+        let Some(tip_sn) =
+            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)?
+        else {
+            return Ok(false);
+        };
+
+        let Some(parent_tenure) =
+            SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)?
+        else {
+            return Ok(false);
+        };
+
+        let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)?
+        else {
+            return Ok(false);
+        };
+
+        if parent_tenure.epoch_id < StacksEpochId::Epoch30
+            || tip_tenure.epoch_id < StacksEpochId::Epoch30
+        {
+            debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block";
+                   "start_tenure" => %tenure_tip.parent_consensus_hash,
+                   "end_tenure" => %tenure_tip.consensus_hash,
+                   "start_tenure_epoch" => %parent_tenure.epoch_id,
+                   "end_tenure_epoch" => %tip_tenure.epoch_id
+            );
+            return Ok(false);
+        }
+
+        Ok(true)
+    }
+
+    /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
+    /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
+    /// its tenure-start block.
+    ///
+    /// Returns Ok(downloader) on success
+    /// Returns Err(..) if we call this function out of sequence.
+    pub fn make_highest_complete_tenure_downloader(
+        &self,
+    ) -> Result<NakamotoTenureDownloader, NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::Done {
+            return Err(NetError::InvalidState);
+        }
+        let Some(tenure_tip) = &self.tenure_tip else {
+            return Err(NetError::InvalidState);
+        };
+        let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else {
+            return Err(NetError::InvalidState);
+        };
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            return Err(NetError::InvalidState);
+        };
+
+        debug!(
+            "Create downloader for highest complete tenure {} known by {}",
+            &tenure_tip.parent_consensus_hash, &self.naddr,
+        );
+        let ntd = NakamotoTenureDownloader::new(
+            tenure_tip.parent_consensus_hash.clone(),
+            tenure_tip.parent_tenure_start_block_id.clone(),
+            tenure_tip.tenure_start_block_id.clone(),
+            self.naddr.clone(),
+            confirmed_signer_keys.clone(),
+            unconfirmed_signer_keys.clone(),
+        );
+
+        Ok(ntd)
+    }
+
+    /// Produce the next HTTP request that, when successfully executed, will advance this state
+    /// machine.
+    ///
+    /// Returns Some(request) if a request must be sent.
+    /// Returns None if we're done
+    pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> {
+        match &self.state {
+            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+                // need to get the tenure tip
+                return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
+            }
+            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
+                return Some(StacksHttpRequest::new_get_nakamoto_block(
+                    peerhost,
+                    block_id.clone(),
+                ));
+            }
+            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
+                return Some(StacksHttpRequest::new_get_nakamoto_tenure(
+                    peerhost,
+                    tip_block_id.clone(),
+                    self.highest_processed_block_id.clone(),
+                ));
+            }
+            NakamotoUnconfirmedDownloadState::Done => {
+                // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed
+                // tenure downloader using the earliest unconfirmed tenure block.
+                return None;
+            }
+        }
+    }
+
+    /// Begin the next download request for this state machine.
+    /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The
+    /// caller should try this again until it gets one of the other possible return values. It's
+    /// up to the caller to determine when it's appropriate to convert this state machine into a
+    /// `NakamotoTenureDownloader`.
+    /// Returns Err(..) if the neighbor is dead or broken.
+    pub fn send_next_download_request(
+        &self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> Result<(), NetError> {
+        if neighbor_rpc.has_inflight(&self.naddr) {
+            debug!("Peer {} has an inflight request", &self.naddr);
+            return Ok(());
+        }
+        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+            return Err(NetError::PeerNotConnected);
+        }
+
+        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+            // no conversation open to this neighbor
+            neighbor_rpc.add_dead(network, &self.naddr);
+            return Err(NetError::PeerNotConnected);
+        };
+
+        let Some(request) = self.make_next_download_request(peerhost) else {
+            // treat this downloader as still in-flight since the overall state machine will need
+            // to keep it around long enough to convert it into a tenure downloader for the highest
+            // complete tenure.
+            return Ok(());
+        };
+
+        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+        Ok(())
+    }
+
+    /// Handle a received StacksHttpResponse and advance this machine's state
+    /// If we get the full tenure, return it.
+    ///
+    /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
+    /// Returns Ok(None) if we're still working, in which case the caller should call
+    /// `send_next_download_request()`
+    /// Returns Err(..) on unrecoverable failure to advance state
+    pub fn handle_next_download_response(
+        &mut self,
+        response: StacksHttpResponse,
+        sortdb: &SortitionDB,
+        local_sort_tip: &BlockSnapshot,
+        chainstate: &StacksChainState,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        match &self.state {
+            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+                debug!("Got tenure-info response");
+                let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
+                debug!("Got tenure-info response: {:?}", &remote_tenure_info);
+                self.try_accept_tenure_info(
+                    sortdb,
+                    local_sort_tip,
+                    chainstate,
+                    remote_tenure_info,
+                    current_reward_sets,
+                )?;
+                Ok(None)
+            }
+            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
+                debug!("Got tenure start-block response");
+                let block = response.decode_nakamoto_block()?;
+                self.try_accept_unconfirmed_tenure_start_block(block)?;
+                Ok(None)
+            }
+            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
+                debug!("Got unconfirmed tenure blocks response");
+                let blocks = response.decode_nakamoto_tenure()?;
+                let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?;
+                debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some());
+                Ok(accepted_opt)
+            }
+            NakamotoUnconfirmedDownloadState::Done => {
+                return Err(NetError::InvalidState);
+            }
+        }
+    }
+
+    /// Is this machine finished?
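+    /// A completed machine can be checked and converted like this (illustrative only;
+    /// `downloader` and `chainstate` are assumed bindings):
+    /// ```ignore
+    /// if downloader.is_done() && downloader.need_highest_complete_tenure(chainstate)? {
+    ///     let ntd = downloader.make_highest_complete_tenure_downloader()?;
+    ///     // drive `ntd` to fetch the highest complete tenure
+    /// }
+    /// ```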
+    pub fn is_done(&self) -> bool {
+        self.state == NakamotoUnconfirmedDownloadState::Done
+    }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs
new file mode 100644
index 00000000000..c96f718d2b9
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs
@@ -0,0 +1,867 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::RewardCycleInfo;
+use crate::chainstate::nakamoto::{
+    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+    AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
+    WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for the unconfirmed tenures. These include the ongoing tenure, as well as the
+/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but
+/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure).
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoUnconfirmedDownloadState {
+    /// Getting the tenure tip information
+    GetTenureInfo,
+    /// Get the tenure start block for the ongoing tenure.
+    /// The inner value is the tenure-start block ID of the ongoing tenure.
+    GetTenureStartBlock(StacksBlockId),
+    /// Receiving unconfirmed tenure blocks.
+    /// The inner value is the block ID of the next block to fetch.
+    GetUnconfirmedTenureBlocks(StacksBlockId),
+    /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block
+    /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
+    Done,
+}
+
+impl fmt::Display for NakamotoUnconfirmedDownloadState {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+/// Download state machine for the unconfirmed tenures. It operates in the following steps:
+///
+/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip
+/// 2. Get the tenure-start block for the unconfirmed chain tip
+/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the
+/// immediate child of the one obtained in (2)
+///
+/// Once this state-machine finishes execution, the tenure-start block is used to construct a
+/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure.
+///
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoUnconfirmedTenureDownloader {
+    /// state of this machine
+    pub state: NakamotoUnconfirmedDownloadState,
+    /// Address of who we're asking
+    pub naddr: NeighborAddress,
+    /// reward set of the highest confirmed tenure
+    pub confirmed_signer_keys: Option<RewardSet>,
+    /// reward set of the unconfirmed (ongoing) tenure
+    pub unconfirmed_signer_keys: Option<RewardSet>,
+    /// Block ID of this node's highest-processed block.
+    /// We will not download any blocks lower than this, if it's set.
+    pub highest_processed_block_id: Option<StacksBlockId>,
+    /// Highest processed block height (which may not need to be loaded)
+    pub highest_processed_block_height: Option<u64>,
+
+    /// Tenure tip info we obtained for this peer
+    pub tenure_tip: Option<RPCGetTenureInfo>,
+    /// Tenure start block for the ongoing tip.
+    /// This is also the tenure-end block for the highest-complete tip.
+    pub unconfirmed_tenure_start_block: Option<NakamotoBlock>,
+    /// Unconfirmed tenure blocks obtained
+    pub unconfirmed_tenure_blocks: Option<Vec<NakamotoBlock>>,
+}
+
+impl NakamotoUnconfirmedTenureDownloader {
+    /// Make a new downloader which will download blocks from the tip back down to the optional
+    /// `highest_processed_block_id` (so we don't re-download the same blocks over and over).
+    pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option<StacksBlockId>) -> Self {
+        Self {
+            state: NakamotoUnconfirmedDownloadState::GetTenureInfo,
+            naddr,
+            confirmed_signer_keys: None,
+            unconfirmed_signer_keys: None,
+            highest_processed_block_id,
+            highest_processed_block_height: None,
+            tenure_tip: None,
+            unconfirmed_tenure_start_block: None,
+            unconfirmed_tenure_blocks: None,
+        }
+    }
+
+    /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenures/info, which is
+    /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote
+    /// node).
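+    ///
+    /// For example, a caller might detect a tip change like this (sketch; `downloader`
+    /// and `last_ch` are assumed bindings):
+    /// ```ignore
+    /// if downloader.unconfirmed_tenure_id() != Some(&last_ch) {
+    ///     // the remote node now reports a different ongoing tenure
+    /// }
+    /// ```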
+    pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> {
+        self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash)
+    }
+
+    /// Set the highest-processed block.
+    /// This can be performed by the downloader itself in order to inform ongoing requests for
+    /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node
+    /// has already handled.
+    pub fn set_highest_processed_block(
+        &mut self,
+        highest_processed_block_id: StacksBlockId,
+        highest_processed_block_height: u64,
+    ) {
+        self.highest_processed_block_id = Some(highest_processed_block_id);
+        self.highest_processed_block_height = Some(highest_processed_block_height);
+    }
+
+    /// Try and accept the tenure info. It will be validated against the sortition DB and its tip.
+    ///
+    /// * tenure_tip.consensus_hash
+    ///    This is the consensus hash of the remote node's ongoing tenure. It may not be the
+    ///    sortition tip, e.g. if the tenure spans multiple sortitions.
+    /// * tenure_tip.tenure_start_block_id
+    ///    This is the first block ID of the ongoing unconfirmed tenure.
+    /// * tenure_tip.parent_consensus_hash
+    ///    This is the consensus hash of the parent of the ongoing tenure. It's the node's highest
+    ///    complete tenure, for which we know the start and end block IDs.
+    /// * tenure_tip.parent_tenure_start_block_id
+    ///    This is the tenure start block for the highest complete tenure. It should be equal to
+    ///    the winning Stacks block hash of the snapshot for the ongoing tenure.
+    ///
+    /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go
+    /// fetch it again; just get the new unconfirmed blocks.
+    pub fn try_accept_tenure_info(
+        &mut self,
+        sortdb: &SortitionDB,
+        local_sort_tip: &BlockSnapshot,
+        chainstate: &StacksChainState,
+        remote_tenure_tip: RPCGetTenureInfo,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+    ) -> Result<(), NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo {
+            return Err(NetError::InvalidState);
+        }
+        if self.tenure_tip.is_some() {
+            return Err(NetError::InvalidState);
+        }
+
+        debug!("Got tenure info {:?}", remote_tenure_tip);
+        debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash);
+
+        // authenticate consensus hashes against canonical chain history
+        let local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &remote_tenure_tip.consensus_hash,
+        )?
+        .ok_or_else(|| {
+            debug!(
+                "No snapshot for tenure {}",
+                &remote_tenure_tip.consensus_hash
+            );
+            NetError::DBError(DBError::NotFoundError)
+        })?;
+        let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &remote_tenure_tip.parent_consensus_hash,
+        )?
+        .ok_or_else(|| {
+            debug!(
+                "No snapshot for parent tenure {}",
+                &remote_tenure_tip.parent_consensus_hash
+            );
+            NetError::DBError(DBError::NotFoundError)
+        })?;
+
+        let ih = sortdb.index_handle(&local_sort_tip.sortition_id);
+        let ancestor_local_tenure_sn = ih
+            .get_block_snapshot_by_height(local_tenure_sn.block_height)?
+            .ok_or_else(|| {
+                debug!(
+                    "No tenure snapshot at burn block height {} off of sortition {} ({})",
+                    local_tenure_sn.block_height,
+                    &local_tenure_sn.sortition_id,
+                    &local_tenure_sn.consensus_hash
+                );
+                NetError::DBError(DBError::NotFoundError)
+            })?;
+
+        if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id {
+            // .consensus_hash is not on the canonical fork
+            warn!("Unconfirmed tenure consensus hash is not canonical";
+                  "peer" => %self.naddr,
+                  "consensus_hash" => %remote_tenure_tip.consensus_hash);
+            return Err(DBError::NotFoundError.into());
+        }
+        let ancestor_parent_local_tenure_sn = ih
+            .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)?
+            .ok_or_else(|| {
+                debug!(
+                    "No parent tenure snapshot at burn block height {} off of sortition {} ({})",
+                    parent_local_tenure_sn.block_height,
+                    &parent_local_tenure_sn.sortition_id,
+                    &parent_local_tenure_sn.consensus_hash
+                );
+                NetError::DBError(DBError::NotFoundError)
+            })?;
+
+        if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id {
+            // .parent_consensus_hash is not on the canonical fork
+            warn!("Parent unconfirmed tenure consensus hash is not canonical";
+                  "peer" => %self.naddr,
+                  "consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+            return Err(DBError::NotFoundError.into());
+        }
+
+        // parent tenure sortition must precede the ongoing tenure sortition
+        if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height {
+            warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot";
+                  "peer" => %self.naddr,
+                  "consensus_hash" => %remote_tenure_tip.consensus_hash,
+                  "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // parent tenure start block ID must be the winning block hash for the ongoing tenure's
+        // snapshot
+        if local_tenure_sn.winning_stacks_block_hash.0
+            != remote_tenure_tip.parent_tenure_start_block_id.0
+        {
+            debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr;
+                   "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id,
+                   "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash);
+            return Err(NetError::StaleView);
+        }
+
+        if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+            // we've synchronized this tenure before, so don't get any more blocks before it.
+            let highest_processed_block = chainstate
+                .nakamoto_blocks_db()
+                .get_nakamoto_block(highest_processed_block_id)?
+                .ok_or_else(|| {
+                    debug!("No such Nakamoto block {}", &highest_processed_block_id);
+                    NetError::DBError(DBError::NotFoundError)
+                })?
+                .0;
+
+            let highest_processed_block_height = highest_processed_block.header.chain_length;
+            self.highest_processed_block_height = Some(highest_processed_block_height);
+
+            if &remote_tenure_tip.tip_block_id == highest_processed_block_id
+                || highest_processed_block_height > remote_tenure_tip.tip_height
+            {
+                // nothing to do -- we're at or ahead of the remote peer, so finish up.
+                // If we don't have the tenure-start block for the confirmed tenure that the remote
+                // peer claims to have, then the remote peer has sent us invalid data and we should
+                // treat it as such.
+                let unconfirmed_tenure_start_block = chainstate
+                    .nakamoto_blocks_db()
+                    .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+                    .ok_or(NetError::InvalidMessage)?
+                    .0;
+                self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+                self.state = NakamotoUnconfirmedDownloadState::Done;
+            }
+        }
+
+        if self.state == NakamotoUnconfirmedDownloadState::Done {
+            // only need to remember the tenure tip
+            self.tenure_tip = Some(remote_tenure_tip);
+            return Ok(());
+        }
+
+        // we're not finished
+        let tenure_rc = sortdb
+            .pox_constants
+            .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height)
+            .expect("FATAL: sortition from before system start");
+        let parent_tenure_rc = sortdb
+            .pox_constants
+            .block_height_to_reward_cycle(
+                sortdb.first_block_height,
+                parent_local_tenure_sn.block_height,
+            )
+            .expect("FATAL: sortition from before system start");
+
+        // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions
+        let Some(Some(confirmed_reward_set)) = current_reward_sets
+            .get(&parent_tenure_rc)
+            .map(|cycle_info| cycle_info.reward_set())
+        else {
+            warn!(
+                "No signer public keys for confirmed tenure {} (rc {})",
+                &parent_local_tenure_sn.consensus_hash, parent_tenure_rc
+            );
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(Some(unconfirmed_reward_set)) = current_reward_sets
+            .get(&tenure_rc)
+            .map(|cycle_info| cycle_info.reward_set())
+        else {
+            warn!(
+                "No signer public keys for unconfirmed tenure {} (rc {})",
+                &local_tenure_sn.consensus_hash, tenure_rc
+            );
+            return Err(NetError::InvalidState);
+        };
+
+        if chainstate
+            .nakamoto_blocks_db()
+            .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id)?
+        {
+            // proceed to get unconfirmed blocks. We already have the tenure-start block.
+            let unconfirmed_tenure_start_block = chainstate
+                .nakamoto_blocks_db()
+                .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+                .ok_or_else(|| {
+                    debug!(
+                        "No such tenure-start Nakamoto block {}",
+                        &remote_tenure_tip.tenure_start_block_id
+                    );
+                    NetError::DBError(DBError::NotFoundError)
+                })?
+                .0;
+            self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+            self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+                remote_tenure_tip.tip_block_id.clone(),
+            );
+        } else {
+            // get the tenure-start block first
+            self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock(
+                remote_tenure_tip.tenure_start_block_id.clone(),
+            );
+        }
+
+        debug!(
+            "Will validate unconfirmed blocks with reward sets in ({},{})",
+            parent_tenure_rc, tenure_rc
+        );
+        self.confirmed_signer_keys = Some(confirmed_reward_set.clone());
+        self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone());
+        self.tenure_tip = Some(remote_tenure_tip);
+
+        Ok(())
+    }
+
+    /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state.
+    /// Returns Ok(()) if the unconfirmed tenure start block was valid
+    /// Returns Err(..) if it was not valid, or if this function was called out of sequence.
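+    ///
+    /// A minimal usage sketch (illustrative only; `downloader` and `block` are assumed
+    /// bindings, with `downloader` in the `GetTenureStartBlock` state):
+    /// ```ignore
+    /// match downloader.try_accept_unconfirmed_tenure_start_block(block) {
+    ///     Ok(()) => { /* state advanced to GetUnconfirmedTenureBlocks */ }
+    ///     Err(e) => { /* bad signature, wrong block, or out-of-sequence call */ }
+    /// }
+    /// ```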
+    pub fn try_accept_unconfirmed_tenure_start_block(
+        &mut self,
+        unconfirmed_tenure_start_block: NakamotoBlock,
+    ) -> Result<(), NetError> {
+        let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) =
+            &self.state
+        else {
+            warn!("Invalid state for this method";
+                  "state" => %self.state);
+            return Err(NetError::InvalidState);
+        };
+        let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+            warn!("tenure_tip is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            warn!("unconfirmed_signer_keys is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        // stacker signature has to match the current reward set
+        if let Err(e) = unconfirmed_tenure_start_block
+            .header
+            .verify_signer_signatures(unconfirmed_signer_keys)
+        {
+            warn!("Invalid tenure-start block: bad signer signature";
+                  "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                  "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(),
+                  "state" => %self.state,
+                  "error" => %e);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // block has to match the expected hash
+        if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() {
+            warn!("Invalid tenure-start block";
+                  "tenure_id_start_block" => %tenure_start_block_id,
+                  "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                  "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(),
+                  "state" => %self.state);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // furthermore, the block has to match the expected tenure ID
+        if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash {
+            warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch";
+                  "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                  "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+            tenure_tip.tip_block_id.clone(),
+        );
+        Ok(())
+    }
+
+    /// Add downloaded unconfirmed tenure blocks.
+    /// If we have collected all tenure blocks, then return them.
+    /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the
+    /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come
+    /// after the highest-processed block (if set).
+    /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call
+    /// `send_next_download_request()`
+    /// Returns Err(..) on invalid state or invalid block.
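+    ///
+    /// A hedged sketch of the caller's loop (illustrative only; `downloader` and `blocks`
+    /// are assumed bindings):
+    /// ```ignore
+    /// match downloader.try_accept_unconfirmed_tenure_blocks(blocks)? {
+    ///     Some(tenure) => { /* done; height-ordered blocks newer than the highest-processed block */ }
+    ///     None => { /* more to fetch; call send_next_download_request() again */ }
+    /// }
+    /// ```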
+    pub fn try_accept_unconfirmed_tenure_blocks(
+        &mut self,
+        mut tenure_blocks: Vec<NakamotoBlock>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) =
+            &self.state
+        else {
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+            warn!("tenure_tip is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            warn!("unconfirmed_signer_keys is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        if tenure_blocks.is_empty() {
+            // nothing to do
+            debug!("No tenure blocks obtained");
+            return Ok(None);
+        }
+
+        // blocks must be contiguous and in order from highest to lowest.
+        // If there's a tenure-start block, it must be last.
+        let mut expected_block_id = last_block_id;
+        let mut finished_download = false;
+        let mut last_block_index = None;
+        for (cnt, block) in tenure_blocks.iter().enumerate() {
+            if &block.header.block_id() != expected_block_id {
+                warn!("Unexpected Nakamoto block -- not part of tenure";
+                      "expected_block_id" => %expected_block_id,
+                      "block_id" => %block.header.block_id());
+                return Err(NetError::InvalidMessage);
+            }
+            if let Err(e) = block
+                .header
+                .verify_signer_signatures(unconfirmed_signer_keys)
+            {
+                warn!("Invalid block: bad signer signature";
+                      "tenure_id" => %tenure_tip.consensus_hash,
+                      "block.header.block_id" => %block.header.block_id(),
+                      "state" => %self.state,
+                      "error" => %e);
+                return Err(NetError::InvalidMessage);
+            }
+
+            // we may or may not need the tenure-start block for the unconfirmed tenure. But if we
+            // do, make sure it's valid, and it's the last block we receive.
+            let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else {
+                warn!("Invalid tenure-start block";
+                      "tenure_id" => %tenure_tip.consensus_hash,
+                      "block.header.block_id" => %block.header.block_id(),
+                      "state" => %self.state);
+                return Err(NetError::InvalidMessage);
+            };
+            if is_tenure_start {
+                // this is the tenure-start block, so make sure it matches our /v3/tenures/info
+                if block.header.block_id() != tenure_tip.tenure_start_block_id {
+                    warn!("Unexpected tenure-start block";
+                          "tenure_id" => %tenure_tip.consensus_hash,
+                          "block.header.block_id" => %block.header.block_id(),
+                          "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id);
+                    return Err(NetError::InvalidMessage);
+                }
+
+                if cnt.saturating_add(1) != tenure_blocks.len() {
+                    warn!("Invalid tenure stream -- got tenure-start before end of tenure";
+                          "tenure_id" => %tenure_tip.consensus_hash,
+                          "block.header.block_id" => %block.header.block_id(),
+                          "cnt" => cnt,
+                          "len" => tenure_blocks.len(),
+                          "state" => %self.state);
+                    return Err(NetError::InvalidMessage);
+                }
+
+                finished_download = true;
+                last_block_index = Some(cnt);
+                break;
+            }
+
+            debug!("Got unconfirmed tenure block {}", &block.header.block_id());
+
+            // NOTE: this field can get updated by the downloader while this state-machine is in
+            // this state.
+            if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+                if expected_block_id == highest_processed_block_id {
+                    // got all the blocks we asked for
+                    debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id);
+                    finished_download = true;
+                    last_block_index = Some(cnt);
+                    break;
+                }
+            }
+
+            // NOTE: this field can get updated by the downloader while this state-machine is in
+            // this state.
+            if let Some(highest_processed_block_height) =
+                self.highest_processed_block_height.as_ref()
+            {
+                if &block.header.chain_length <= highest_processed_block_height {
+                    // no need to continue this download
+                    debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height);
+                    finished_download = true;
+                    last_block_index = Some(cnt);
+                    break;
+                }
+            }
+
+            expected_block_id = &block.header.parent_block_id;
+            last_block_index = Some(cnt);
+        }
+
+        // blocks after the last_block_index were not processed, so should be dropped
+        if let Some(last_block_index) = last_block_index {
+            tenure_blocks.truncate(last_block_index + 1);
+        }
+
+        if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() {
+            blocks.append(&mut tenure_blocks);
+        } else {
+            self.unconfirmed_tenure_blocks = Some(tenure_blocks);
+        }
+
+        if finished_download {
+            // we have all of the unconfirmed tenure blocks that were requested.
+            // only return those newer than the highest block.
+            self.state = NakamotoUnconfirmedDownloadState::Done;
+            let highest_processed_block_height =
+                *self.highest_processed_block_height.as_ref().unwrap_or(&0);
+
+            debug!("Finished receiving unconfirmed tenure");
+            return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| {
+                blocks
+                    .into_iter()
+                    .filter(|block| block.header.chain_length > highest_processed_block_height)
+                    .rev()
+                    .collect()
+            }));
+        }
+
+        let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+
+        // still have more to get
+        let Some(earliest_block) = blocks.last() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+        let next_block_id = earliest_block.header.parent_block_id.clone();
+
+        debug!(
+            "Will resume fetching unconfirmed tenure blocks starting at {}",
+            &next_block_id
+        );
+        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id);
+        Ok(None)
+    }
+
+    /// Once this machine runs to completion, examine its state to see if we still need to fetch
+    /// the highest complete tenure. We may not need to, especially if we're just polling for new
+    /// unconfirmed blocks.
+    ///
+    /// Return Ok(true) if we need it still
+    /// Return Ok(false) if we already have it
+    /// Return Err(..) if we encounter a DB error or if this function was called out of sequence.
+    pub fn need_highest_complete_tenure(
+        &self,
+        chainstate: &StacksChainState,
+    ) -> Result<bool, NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::Done {
+            return Err(NetError::InvalidState);
+        }
+        let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
+        else {
+            return Err(NetError::InvalidState);
+        };
+
+        // if we've processed the unconfirmed tenure-start block already, then we've necessarily
+        // downloaded and processed the highest-complete tenure already.
+        Ok(!NakamotoChainState::has_block_header(
+            chainstate.db(),
+            &unconfirmed_tenure_start_block.header.block_id(),
+            false,
+        )?)
+    }
+
+    /// Determine if we can produce a highest-complete tenure request.
+    /// Determine if we can produce a highest-complete tenure request.
+    /// This can be false if the tenure tip isn't present, or if it doesn't point to a Nakamoto
+    /// tenure.
+    pub fn can_make_highest_complete_tenure_downloader(
+        &self,
+        sortdb: &SortitionDB,
+    ) -> Result<bool, NetError> {
+        let Some(tenure_tip) = &self.tenure_tip else {
+            return Ok(false);
+        };
+
+        let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &tenure_tip.parent_consensus_hash,
+        )?
+        else {
+            return Ok(false);
+        };
+
+        let Some(tip_sn) =
+            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)?
+        else {
+            return Ok(false);
+        };
+
+        let Some(parent_tenure) =
+            SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)?
+        else {
+            return Ok(false);
+        };
+
+        let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)?
+        else {
+            return Ok(false);
+        };
+
+        if parent_tenure.epoch_id < StacksEpochId::Epoch30
+            || tip_tenure.epoch_id < StacksEpochId::Epoch30
+        {
+            debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block";
+                   "start_tenure" => %tenure_tip.parent_consensus_hash,
+                   "end_tenure" => %tenure_tip.consensus_hash,
+                   "start_tenure_epoch" => %parent_tenure.epoch_id,
+                   "end_tenure_epoch" => %tip_tenure.epoch_id
+            );
+            return Ok(false);
+        }
+
+        Ok(true)
+    }
+
+    /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
+    /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
+    /// its tenure-start block.
+    ///
+    /// Returns Ok(downloader) on success
+    /// Returns Err(..) if we call this function out of sequence.
+    pub fn make_highest_complete_tenure_downloader(
+        &self,
+    ) -> Result<NakamotoTenureDownloader, NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::Done {
+            return Err(NetError::InvalidState);
+        }
+        let Some(tenure_tip) = &self.tenure_tip else {
+            return Err(NetError::InvalidState);
+        };
+        let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else {
+            return Err(NetError::InvalidState);
+        };
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            return Err(NetError::InvalidState);
+        };
+
+        debug!(
+            "Create downloader for highest complete tenure {} known by {}",
+            &tenure_tip.parent_consensus_hash, &self.naddr,
+        );
+        let ntd = NakamotoTenureDownloader::new(
+            tenure_tip.parent_consensus_hash.clone(),
+            tenure_tip.parent_tenure_start_block_id.clone(),
+            tenure_tip.tenure_start_block_id.clone(),
+            self.naddr.clone(),
+            confirmed_signer_keys.clone(),
+            unconfirmed_signer_keys.clone(),
+        );
+
+        Ok(ntd)
+    }
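+    // A rough caller-side sketch (hypothetical names; the real call site lives in
+    // the downloader-set logic, not in this file) of how a finished machine gets
+    // converted, per the docs above:
+    //
+    //   if u.is_done()
+    //       && u.need_highest_complete_tenure(chainstate)?
+    //       && u.can_make_highest_complete_tenure_downloader(sortdb)?
+    //   {
+    //       let ntd = u.make_highest_complete_tenure_downloader()?
+    //           .with_tenure_end_block(unconfirmed_tenure_start_block);
+    //       // ... then drive `ntd` like any other NakamotoTenureDownloader ...
+    //   }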
+    /// Produce the next HTTP request that, when successfully executed, will advance this state
+    /// machine.
+    ///
+    /// Returns Some(request) if a request must be sent.
+    /// Returns None if we're done.
+    pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> {
+        match &self.state {
+            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+                // need to get the tenure tip
+                return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
+            }
+            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
+                return Some(StacksHttpRequest::new_get_nakamoto_block(
+                    peerhost,
+                    block_id.clone(),
+                ));
+            }
+            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
+                return Some(StacksHttpRequest::new_get_nakamoto_tenure(
+                    peerhost,
+                    tip_block_id.clone(),
+                    self.highest_processed_block_id.clone(),
+                ));
+            }
+            NakamotoUnconfirmedDownloadState::Done => {
+                // got all unconfirmed blocks! Next step is to turn this downloader into a
+                // confirmed tenure downloader using the earliest unconfirmed tenure block.
+                return None;
+            }
+        }
+    }
+
+    /// Begin the next download request for this state machine.
+    /// Returns Ok(()) if we sent the request, or if there's already an in-flight request. The
+    /// caller should try this again until it gets one of the other possible return values. It's
+    /// up to the caller to determine when it's appropriate to convert this state machine into a
+    /// `NakamotoTenureDownloader`.
+    /// Returns Err(..) if the neighbor is dead or broken.
+    pub fn send_next_download_request(
+        &self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> Result<(), NetError> {
+        if neighbor_rpc.has_inflight(&self.naddr) {
+            debug!("Peer {} has an inflight request", &self.naddr);
+            return Ok(());
+        }
+        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+            return Err(NetError::PeerNotConnected);
+        }
+
+        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+            // no conversation open to this neighbor
+            neighbor_rpc.add_dead(network, &self.naddr);
+            return Err(NetError::PeerNotConnected);
+        };
+
+        let Some(request) = self.make_next_download_request(peerhost) else {
+            // treat this downloader as still in-flight since the overall state machine will need
+            // to keep it around long enough to convert it into a tenure downloader for the highest
+            // complete tenure.
+            return Ok(());
+        };
+
+        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+        Ok(())
+    }
+
+    /// Handle a received StacksHttpResponse and advance this machine's state.
+    /// If we get the full tenure, return it.
+    ///
+    /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
+    /// Returns Ok(None) if we're still working, in which case the caller should call
+    /// `send_next_download_request()`
+    /// Returns Err(..) on unrecoverable failure to advance state
+    pub fn handle_next_download_response(
+        &mut self,
+        response: StacksHttpResponse,
+        sortdb: &SortitionDB,
+        local_sort_tip: &BlockSnapshot,
+        chainstate: &StacksChainState,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        match &self.state {
+            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+                debug!("Got tenure-info response");
+                let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
+                debug!("Got tenure-info response: {:?}", &remote_tenure_info);
+                self.try_accept_tenure_info(
+                    sortdb,
+                    local_sort_tip,
+                    chainstate,
+                    remote_tenure_info,
+                    current_reward_sets,
+                )?;
+                Ok(None)
+            }
+            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
+                debug!("Got tenure start-block response");
+                let block = response.decode_nakamoto_block()?;
+                self.try_accept_unconfirmed_tenure_start_block(block)?;
+                Ok(None)
+            }
+            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
+                debug!("Got unconfirmed tenure blocks response");
+                let blocks = response.decode_nakamoto_tenure()?;
+                let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?;
+                debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some());
+                Ok(accepted_opt)
+            }
+            NakamotoUnconfirmedDownloadState::Done => {
+                return Err(NetError::InvalidState);
+            }
+        }
+    }
+
+    /// Is this machine finished?
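+    /// A caller typically alternates `send_next_download_request()` and
+    /// `handle_next_download_response()` until this returns true, and only then
+    /// (if needed) converts the machine via `make_highest_complete_tenure_downloader()`.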
+ pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} From d7385b1c1c61ac100bcc60427191fa65fdc22587 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 19:17:59 +0300 Subject: [PATCH 266/910] add workfow dispatch mutants --- .github/workflows/pr-differences-mutants.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a7256873..85b2a49ea66 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,16 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: + inputs: + ignore_timeout: + description: "Ignore mutants timeout limit" + required: false + type: choice + options: + - true + - false + default: 'true' concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} From 8107bf97dbdc26fb822ba57175ac373df600dd64 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 19:50:52 +0300 Subject: [PATCH 267/910] update team --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 9c5cc34c7b5..51456c91721 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -36,7 +36,7 @@ jobs: uses: stacks-network/actions/team-membership@feat/mutation-testing with: username: ${{ github.actor }} - team: 'Blockchain Team' + team: 'blockchain-team' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions From febf3cef5dd8acf95b3f692c1d62ec36bad2a75c Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:47:43 +0300 Subject: [PATCH 268/910] update to actions' repo main branch --- .github/workflows/pr-differences-mutants.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 51456c91721..12d891297b6 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Check Right Permissions To Trigger This id: check_right_permissions - uses: stacks-network/actions/team-membership@feat/mutation-testing + uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} team: 'blockchain-team' @@ -62,7 +62,7 @@ jobs: steps: - id: check_packages_and_shards - uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main with: ignore_timeout: ${{ inputs.ignore_timeout }} @@ -80,7 +80,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'small' @@ -103,7 +103,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: shard: ${{ matrix.shard }} package: 'small' @@ -125,7 +125,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: 
stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'stackslib' @@ -151,7 +151,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: shard: ${{ matrix.shard }} package: 'stackslib' @@ -173,7 +173,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'stacks-node' @@ -199,7 +199,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: shard: ${{ matrix.shard }} package: 'stacks-node' @@ -217,7 +217,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'stacks-signer' @@ -242,7 +242,7 @@ jobs: steps: - name: Output Mutants - uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@main with: stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.run_stackslib }} shards_for_stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.stackslib_with_shards }} From c3183204830c061da1681e48a287ad3f391cb644 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:49:16 +0300 Subject: [PATCH 269/910] test with team i am not part of --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 12d891297b6..920f0f0701e 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -36,7 +36,7 @@ jobs: uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} - team: 'blockchain-team' + team: 'devops' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions From abaf606585a65a8fb5ee91418254a30fddbc8168 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:51:27 +0300 Subject: [PATCH 270/910] fix true type on equal --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 920f0f0701e..3c560cdbbc6 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -40,7 +40,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions - if: ${{ inputs.ignore_timeout == true && steps.check_right_permissions.outputs.is_team_member != 'true' }} + if: ${{ inputs.ignore_timeout == 'true' && steps.check_right_permissions.outputs.is_team_member != 'true' }} run: exit 1 # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards From 
bca227e7c0ef2b88ffbee124de0932f51ebe313c Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:53:57 +0300 Subject: [PATCH 271/910] update the mutants team to blockchain-team --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 3c560cdbbc6..edafb42bf1f 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -36,7 +36,7 @@ jobs: uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} - team: 'devops' + team: 'blockchain-team' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions From a52464e88c19f89bc2f79301aa7261e2a04d11e4 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:56:23 +0300 Subject: [PATCH 272/910] removed extra files for testing the mutants membership dispatch --- stackslib/src/net/download/nakamoto/mod.rs | 6 - .../nakamoto/tenure_downloader_copy.rs | 693 -------------- .../nakamoto/tenure_downloader_opy.rs | 693 -------------- .../nakamoto/tenure_downloader_set_copy.rs | 660 ------------- .../nakamoto/tenure_downloader_set_opy.rs | 660 ------------- .../tenure_downloader_unconfirmed_copy.rs | 867 ------------------ .../tenure_downloader_unconfirmed_opy.rs | 867 ------------------ 7 files changed, 4446 deletions(-) delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 7643c54ff7d..dd440ac110f 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,14 +161,8 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; -mod tenure_downloader_copy; -mod tenure_downloader_opy; mod tenure_downloader_set; -mod tenure_downloader_set_copy; -mod tenure_downloader_set_opy; mod tenure_downloader_unconfirmed; -mod tenure_downloader_unconfirmed_copy; -mod tenure_downloader_unconfirmed_opy; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs deleted file mode 100644 index f7fb970bb6f..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. 
- /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). 
Learned from the inventory state machine and sortition DB. - pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid. 
- pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. 
- pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). 
- let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid. 
- pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-blocks tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, &block_cursor, count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine. - /// Not all states require an HTTP request for advanceement. 
- /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response. 
- pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result>, NetError> { - let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e - })?; - let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - Ok(blocks_opt) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - }; - self.idle = true; - handle_result - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs deleted file mode 100644 index f7fb970bb6f..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. 
- /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. 
- pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid. 
- pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. 
- pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). 
- let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid. 
-    pub fn try_accept_tenure_blocks(
-        &mut self,
-        mut tenure_blocks: Vec<NakamotoBlock>,
-    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
-            warn!("Invalid state for this method";
-                "state" => %self.state);
-            return Err(NetError::InvalidState);
-        };
-
-        if tenure_blocks.is_empty() {
-            // nothing to do
-            return Ok(None);
-        }
-
-        // blocks must be contiguous and in order from highest to lowest
-        let mut expected_block_id = block_cursor;
-        let mut count = 0;
-        for block in tenure_blocks.iter() {
-            if &block.header.block_id() != expected_block_id {
-                warn!("Unexpected Nakamoto block -- not part of tenure";
-                    "expected_block_id" => %expected_block_id,
-                    "block_id" => %block.header.block_id(),
-                    "state" => %self.state);
-                return Err(NetError::InvalidMessage);
-            }
-
-            if let Err(e) = block
-                .header
-                .verify_signer_signatures(&self.start_signer_keys)
-            {
-                warn!("Invalid block: bad signer signature";
-                    "tenure_id" => %self.tenure_id_consensus_hash,
-                    "block.header.block_id" => %block.header.block_id(),
-                    "state" => %self.state,
-                    "error" => %e);
-                return Err(NetError::InvalidMessage);
-            }
-
-            expected_block_id = &block.header.parent_block_id;
-            count += 1;
-            if self
-                .tenure_blocks
-                .as_ref()
-                .map(|blocks| blocks.len())
-                .unwrap_or(0)
-                .saturating_add(count)
-                > self.tenure_length().unwrap_or(0) as usize
-            {
-                // there are more blocks downloaded than indicated by the end-blocks tenure-change
-                // transaction.
-                warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0);
-                    "tenure_id" => %self.tenure_id_consensus_hash,
-                    "count" => %count,
-                    "tenure_length" => self.tenure_length().unwrap_or(0),
-                    "num_blocks" => tenure_blocks.len());
-                return Err(NetError::InvalidMessage);
-            }
-        }
-
-        if let Some(blocks) = self.tenure_blocks.as_mut() {
-            blocks.append(&mut tenure_blocks);
-        } else {
-            self.tenure_blocks = Some(tenure_blocks);
-        }
-
-        // did we reach the tenure start block?
-        let Some(blocks) = self.tenure_blocks.as_ref() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no blocks (infallible -- got None)");
-            return Err(NetError::InvalidState);
-        };
-
-        let Some(earliest_block) = blocks.last() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no blocks (infallible -- got empty vec)");
-            return Err(NetError::InvalidState);
-        };
-
-        let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no tenure-start block (infallible)");
-            return Err(NetError::InvalidState);
-        };
-
-        debug!(
-            "Accepted tenure blocks for tenure {} cursor={} ({})",
-            &self.tenure_id_consensus_hash, &block_cursor, count
-        );
-        if earliest_block.block_id() != tenure_start_block.block_id() {
-            // still have more blocks to download
-            let next_block_id = earliest_block.header.parent_block_id.clone();
-            debug!(
-                "Need more blocks for tenure {} (went from {} to {}, next is {})",
-                &self.tenure_id_consensus_hash,
-                &block_cursor,
-                &earliest_block.block_id(),
-                &next_block_id
-            );
-            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
-            return Ok(None);
-        }
-
-        // finished!
-        self.state = NakamotoTenureDownloadState::Done;
-        Ok(self
-            .tenure_blocks
-            .take()
-            .map(|blocks| blocks.into_iter().rev().collect()))
-    }
-
-    /// Produce the next HTTP request that, when successfully executed, will fetch the data needed
-    /// to advance this state machine.
-    /// Not all states require an HTTP request for advancement.
- /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response. 
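Taken together, `make_next_download_request`, `send_next_download_request`, and the response handler below form the downloader's request/response cycle. As a rough orientation, a caller might drive one pass of that cycle as in the following sketch; this is illustrative only (not part of the patch), and it assumes this downloader's peer is the only in-flight conversation.

```rust
// Hypothetical single pass over one NakamotoTenureDownloader.
// All names other than `drive_once` come from the module being deleted above.
fn drive_once(
    dl: &mut NakamotoTenureDownloader,
    network: &mut PeerNetwork,
    neighbor_rpc: &mut NeighborRPC,
) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
    // Issue the next request for the current state, unless the machine is
    // finished or blocked waiting on a sibling downloader.
    if !dl.is_done() && !dl.is_waiting() {
        if !dl.send_next_download_request(network, neighbor_rpc)? {
            // No usable peer right now; the caller should reschedule.
            return Ok(None);
        }
    }
    // Feed completed replies back into the state machine.
    for (_naddr, response) in neighbor_rpc.collect_replies(network) {
        if let Some(blocks) = dl.handle_next_download_response(response)? {
            // A full tenure, in ascending order by height.
            return Ok(Some(blocks));
        }
    }
    Ok(None)
}
```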
-    pub fn handle_next_download_response(
-        &mut self,
-        response: StacksHttpResponse,
-    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        let handle_result = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
-                debug!(
-                    "Got download response for tenure-start block {}",
-                    &_block_id
-                );
-                let block = response.decode_nakamoto_block().map_err(|e| {
-                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
-                    e
-                })?;
-                self.try_accept_tenure_start_block(block)?;
-                Ok(None)
-            }
-            NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => {
-                debug!("Invalid state -- Got download response for WaitForTenureEndBlock");
-                Err(NetError::InvalidState)
-            }
-            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
-                debug!("Got download response to tenure-end block {}", &_block_id);
-                let block = response.decode_nakamoto_block().map_err(|e| {
-                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
-                    e
-                })?;
-                self.try_accept_tenure_end_block(&block)?;
-                Ok(None)
-            }
-            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
-                debug!(
-                    "Got download response for tenure blocks ending at {}",
-                    &_end_block_id
-                );
-                let blocks = response.decode_nakamoto_tenure().map_err(|e| {
-                    warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
-                    e
-                })?;
-                let blocks_opt = self.try_accept_tenure_blocks(blocks)?;
-                Ok(blocks_opt)
-            }
-            NakamotoTenureDownloadState::Done => Err(NetError::InvalidState),
-        };
-        self.idle = true;
-        handle_result
-    }
-
-    pub fn is_done(&self) -> bool {
-        self.state == NakamotoTenureDownloadState::Done
-    }
-}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
deleted file mode 100644
index 28a40e7eb50..00000000000
--- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
+++ /dev/null
@@ -1,660 +0,0 @@
-// Copyright (C) 2020-2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. 
- pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. 
Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) 
= - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. 
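The scheduling function below consumes these inputs; as a usage sketch of it together with `run()` (illustrative only, not part of the patch; `download_pass` is a hypothetical name, the argument values are assumed to be maintained by the surrounding Nakamoto download state machine, and the concurrency cap of 6 is arbitrary):

```rust
// Hypothetical scheduling-plus-drive pass over the downloader set.
// The parameter types mirror the signatures in the module being deleted.
fn download_pass(
    downloaders: &mut NakamotoTenureDownloaderSet,
    schedule: &mut VecDeque<ConsensusHash>,
    available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>,
    tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
    current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>,
    network: &mut PeerNetwork,
    neighbor_rpc: &mut NeighborRPC,
) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> {
    // Top up to six concurrent tenure downloads (an arbitrary cap)...
    downloaders.make_tenure_downloaders(
        schedule,
        available,
        tenure_block_ids,
        6,
        current_reward_cycles,
    );
    // ...then drive every scheduled downloader and collect any tenures that
    // completed this pass, keyed by consensus hash.
    downloaders.run(network, neighbor_rpc)
}
```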
-    pub(crate) fn make_tenure_downloaders(
-        &mut self,
-        schedule: &mut VecDeque<ConsensusHash>,
-        available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>,
-        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
-        count: usize,
-        current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>,
-    ) {
-        debug!("make_tenure_downloaders";
-            "schedule" => ?schedule,
-            "available" => ?available,
-            "tenure_block_ids" => ?tenure_block_ids,
-            "inflight" => %self.inflight(),
-            "count" => count,
-            "running" => self.num_downloaders(),
-            "scheduled" => self.num_scheduled_downloaders());
-
-        self.clear_finished_downloaders();
-        self.clear_available_peers();
-        self.try_transition_fetch_tenure_end_blocks(tenure_block_ids);
-        while self.inflight() < count {
-            let Some(ch) = schedule.front() else {
-                break;
-            };
-            if self.completed_tenures.contains(&ch) {
-                debug!("Already successfully downloaded tenure {}", &ch);
-                schedule.pop_front();
-                continue;
-            }
-            let Some(neighbors) = available.get_mut(ch) else {
-                // not found on any neighbors, so stop trying this tenure
-                debug!("No neighbors have tenure {}", ch);
-                schedule.pop_front();
-                continue;
-            };
-            if neighbors.is_empty() {
-                // no more neighbors to try
-                debug!("No more neighbors can serve tenure {}", ch);
-                schedule.pop_front();
-                continue;
-            }
-            let Some(naddr) = neighbors.pop() else {
-                debug!("No more neighbors can serve tenure {}", ch);
-                schedule.pop_front();
-                continue;
-            };
-            if self.try_resume_peer(naddr.clone()) {
-                continue;
-            };
-            if self.has_downloader_for_tenure(&ch) {
-                schedule.pop_front();
-                continue;
-            }
-
-            let Some(available_tenures) = tenure_block_ids.get(&naddr) else {
-                // this peer doesn't have any known tenures, so try the others
-                debug!("No tenures available from {}", &naddr);
-                continue;
-            };
-            let Some(tenure_info) = available_tenures.get(ch) else {
-                // this peer does not have a tenure start/end block for this tenure, so try the
-                // others.
-                debug!("Neighbor {} does not serve tenure {}", &naddr, ch);
-                continue;
-            };
-            let Some(Some(start_reward_set)) = current_reward_cycles
-                .get(&tenure_info.start_reward_cycle)
-                .map(|cycle_info| cycle_info.reward_set())
-            else {
-                debug!(
-                    "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}",
-                    tenure_info.start_reward_cycle,
-                    &tenure_info
-                );
-                schedule.pop_front();
-                continue;
-            };
-            let Some(Some(end_reward_set)) = current_reward_cycles
-                .get(&tenure_info.end_reward_cycle)
-                .map(|cycle_info| cycle_info.reward_set())
-            else {
-                debug!(
-                    "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}",
-                    tenure_info.end_reward_cycle,
-                    &tenure_info
-                );
-                schedule.pop_front();
-                continue;
-            };
-
-            debug!(
-                "Download tenure {} (start={}, end={}) (rc {},{})",
-                &ch,
-                &tenure_info.start_block_id,
-                &tenure_info.end_block_id,
-                tenure_info.start_reward_cycle,
-                tenure_info.end_reward_cycle
-            );
-            let tenure_download = NakamotoTenureDownloader::new(
-                ch.clone(),
-                tenure_info.start_block_id.clone(),
-                tenure_info.end_block_id.clone(),
-                naddr.clone(),
-                start_reward_set.clone(),
-                end_reward_set.clone(),
-            );
-
-            debug!("Request tenure {} from neighbor {}", ch, &naddr);
-            self.add_downloader(naddr, tenure_download);
-            schedule.pop_front();
-        }
-    }
-
-    /// Run all confirmed downloaders.
-    /// * Identify neighbors for which we do not have an inflight request
-    /// * Get each such neighbor's downloader, and generate its next HTTP request.  Send that
-    ///   request to the neighbor and begin driving the underlying socket I/O.
-    /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance
-    ///   its state.
- /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) 
{ - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs deleted file mode 100644 index 28a40e7eb50..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. 
The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. - pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. 
- pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. 
- pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? 
If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. - pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque, - available: &mut HashMap>, - tenure_block_ids: &HashMap, - count: usize, - current_reward_cycles: &BTreeMap, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. 
- debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. - /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) 
{ - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs deleted file mode 100644 index c96f718d2b9..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
-
-use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
-use std::convert::TryFrom;
-use std::fmt;
-use std::hash::{Hash, Hasher};
-use std::io::{Read, Write};
-use std::net::{IpAddr, SocketAddr};
-use std::time::{Duration, Instant};
-
-use rand::seq::SliceRandom;
-use rand::{thread_rng, RngCore};
-use stacks_common::types::chainstate::{
-    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
-};
-use stacks_common::types::net::{PeerAddress, PeerHost};
-use stacks_common::types::StacksEpochId;
-use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
-use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
-use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
-
-use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
-use crate::chainstate::burn::db::sortdb::{
-    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
-};
-use crate::chainstate::burn::BlockSnapshot;
-use crate::chainstate::coordinator::RewardCycleInfo;
-use crate::chainstate::nakamoto::{
-    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
-};
-use crate::chainstate::stacks::boot::RewardSet;
-use crate::chainstate::stacks::db::StacksChainState;
-use crate::chainstate::stacks::{
-    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
-};
-use crate::core::{
-    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
-};
-use crate::net::api::gettenureinfo::RPCGetTenureInfo;
-use crate::net::chat::ConversationP2P;
-use crate::net::db::{LocalPeer, PeerDB};
-use crate::net::download::nakamoto::{
-    AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
-    WantedTenure,
-};
-use crate::net::http::HttpRequestContents;
-use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
-use crate::net::inv::epoch2x::InvState;
-use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
-use crate::net::neighbors::rpc::NeighborRPC;
-use crate::net::neighbors::NeighborComms;
-use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
-use crate::net::server::HttpPeer;
-use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
-use crate::util_lib::db::{DBConn, Error as DBError};
-
-/// Download states for unconfirmed tenures. These include the ongoing tenure, as well as the
-/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but
-/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure).
-#[derive(Debug, Clone, PartialEq)]
-pub enum NakamotoUnconfirmedDownloadState {
-    /// Getting the tenure tip information
-    GetTenureInfo,
-    /// Get the tenure start block for the ongoing tenure.
-    /// The inner value is the tenure-start block ID of the ongoing tenure.
-    GetTenureStartBlock(StacksBlockId),
-    /// Receiving unconfirmed tenure blocks.
-    /// The inner value is the block ID of the next block to fetch.
-    GetUnconfirmedTenureBlocks(StacksBlockId),
-    /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block
-    /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
-    Done,
-}
-
-impl fmt::Display for NakamotoUnconfirmedDownloadState {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{:?}", self)
-    }
-}
-
-/// Download state machine for unconfirmed tenures. It operates in the following steps:
-///
-/// 1.
Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. 
It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? 
- .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? 
- .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. 
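The validator below performs three checks, in order: the signer signature must verify against the unconfirmed tenure's reward set, the block must hash to the ID that was requested, and the block's consensus hash must match the tenure being tracked. A compact sketch of that check order, with toy types standing in for the real headers and reward sets (none of these names are the stacks-core API):

```rust
/// Hypothetical, simplified header: stand-ins for the fields the validator checks.
#[derive(Debug, PartialEq)]
struct Header {
    block_id: u64,
    consensus_hash: u64,
    signature_valid_for: u64, // toy stand-in for an aggregate signer signature
}

#[derive(Debug, PartialEq)]
enum ValidationError { BadSignature, WrongBlockId, WrongTenure }

/// The three checks, in the same order as the deleted implementation below.
fn accept_tenure_start_block(
    header: &Header,
    expected_block_id: u64,
    tenure_consensus_hash: u64,
    reward_set_id: u64,
) -> Result<(), ValidationError> {
    // 1. stacker signature has to match the current reward set
    if header.signature_valid_for != reward_set_id {
        return Err(ValidationError::BadSignature);
    }
    // 2. block has to match the expected hash
    if header.block_id != expected_block_id {
        return Err(ValidationError::WrongBlockId);
    }
    // 3. block has to match the expected tenure ID
    if header.consensus_hash != tenure_consensus_hash {
        return Err(ValidationError::WrongTenure);
    }
    Ok(())
}

fn main() {
    let h = Header { block_id: 7, consensus_hash: 42, signature_valid_for: 1 };
    assert!(accept_tenure_start_block(&h, 7, 42, 1).is_ok());
    assert_eq!(
        accept_tenure_start_block(&h, 8, 42, 1),
        Err(ValidationError::WrongBlockId)
    );
    println!("tenure-start validation checks behave as expected");
}
```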
- pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. 
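The batch acceptor below walks the received blocks from highest to lowest, requiring each block to be the parent-link predecessor of the one before it, and truncating the batch once it reaches a height the node has already processed. A small sketch of that contiguity walk under hypothetical types (ids and heights are plain `u64`s here, not Stacks block IDs):

```rust
/// Toy block for illustrating the contiguity walk; not the stacks-core type.
struct Block {
    block_id: u64,
    parent_block_id: u64,
    height: u64,
}

/// Walk a highest-to-lowest batch, checking each block is the expected
/// parent-link successor, and stopping once we hit already-processed height.
/// Returns the accepted prefix.
fn accept_batch(
    blocks: Vec<Block>,
    mut expected_id: u64,
    highest_processed_height: u64,
) -> Result<Vec<Block>, String> {
    let mut accepted = Vec::new();
    for block in blocks {
        if block.block_id != expected_id {
            return Err(format!(
                "block {} is not the expected child-of-{} link",
                block.block_id, expected_id
            ));
        }
        if block.height <= highest_processed_height {
            break; // already have everything at and below this height
        }
        expected_id = block.parent_block_id;
        accepted.push(block);
    }
    Ok(accepted)
}

fn main() {
    // ids equal heights for simplicity; batch is ordered tip-first
    let batch = vec![
        Block { block_id: 12, parent_block_id: 11, height: 12 },
        Block { block_id: 11, parent_block_id: 10, height: 11 },
        Block { block_id: 10, parent_block_id: 9, height: 10 },
    ];
    let accepted = accept_batch(batch, 12, 10).unwrap();
    assert_eq!(accepted.len(), 2); // the height-10 block was already processed
    println!("accepted {} new blocks", accepted.len());
}
```

The deleted code layers two more rules on top of this walk: a well-formed tenure-start block may only appear as the final element, and signer signatures are verified on every block.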
- pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. 
- if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. 
- /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks! 
Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? 
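Taken together, a caller drives this machine one phase at a time until it reports `Done` via `is_done()` (defined just below), then decides whether to convert it into a confirmed-tenure downloader for the highest complete tenure. A toy driver showing that flow; the names here are hypothetical and only mirror the shape of the deleted API:

```rust
/// Hypothetical phases mirroring the deleted machine's states.
#[derive(Debug, Clone, PartialEq)]
enum Phase { GetTenureInfo, GetTenureStartBlock, GetUnconfirmedTenureBlocks, Done }

struct UnconfirmedDownloader { phase: Phase }

impl UnconfirmedDownloader {
    /// In the real machine each successful HTTP response advances one phase;
    /// here we just step directly.
    fn step(&mut self) {
        self.phase = match self.phase {
            Phase::GetTenureInfo => Phase::GetTenureStartBlock,
            Phase::GetTenureStartBlock => Phase::GetUnconfirmedTenureBlocks,
            Phase::GetUnconfirmedTenureBlocks | Phase::Done => Phase::Done,
        };
    }

    fn is_done(&self) -> bool {
        self.phase == Phase::Done
    }
}

fn main() {
    let mut dl = UnconfirmedDownloader { phase: Phase::GetTenureInfo };
    while !dl.is_done() {
        dl.step(); // real machine: send request, await response, advance
    }
    // Once Done, the caller checks whether the highest complete tenure is
    // still needed and, if so, converts this machine into a confirmed-tenure
    // downloader seeded with the tenure-start block it just fetched.
    println!("unconfirmed download finished; maybe spawn highest-complete downloader");
}
```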
-    pub fn is_done(&self) -> bool {
-        self.state == NakamotoUnconfirmedDownloadState::Done
-    }
-}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs
deleted file mode 100644
index c96f718d2b9..00000000000
--- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs
+++ /dev/null
@@ -1,867 +0,0 @@
-// Copyright (C) 2020-2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
-use std::convert::TryFrom;
-use std::fmt;
-use std::hash::{Hash, Hasher};
-use std::io::{Read, Write};
-use std::net::{IpAddr, SocketAddr};
-use std::time::{Duration, Instant};
-
-use rand::seq::SliceRandom;
-use rand::{thread_rng, RngCore};
-use stacks_common::types::chainstate::{
-    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
-};
-use stacks_common::types::net::{PeerAddress, PeerHost};
-use stacks_common::types::StacksEpochId;
-use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
-use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
-use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
-
-use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
-use crate::chainstate::burn::db::sortdb::{
-    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
-};
-use crate::chainstate::burn::BlockSnapshot;
-use crate::chainstate::coordinator::RewardCycleInfo;
-use crate::chainstate::nakamoto::{
-    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
-};
-use crate::chainstate::stacks::boot::RewardSet;
-use crate::chainstate::stacks::db::StacksChainState;
-use crate::chainstate::stacks::{
-    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
-};
-use crate::core::{
-    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
-};
-use crate::net::api::gettenureinfo::RPCGetTenureInfo;
-use crate::net::chat::ConversationP2P;
-use crate::net::db::{LocalPeer, PeerDB};
-use crate::net::download::nakamoto::{
-    AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
-    WantedTenure,
-};
-use crate::net::http::HttpRequestContents;
-use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
-use crate::net::inv::epoch2x::InvState;
-use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
-use crate::net::neighbors::rpc::NeighborRPC;
-use crate::net::neighbors::NeighborComms;
-use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
-use crate::net::server::HttpPeer;
-use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
-use crate::util_lib::db::{DBConn, Error as DBError};
-
-/// Download states for unconfirmed tenures.
These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). 
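As the comment above notes, tenure info from a remote peer is never trusted blindly: the deleted `try_accept_tenure_info` further below re-derives each claimed consensus hash from the local sortition DB and confirms the snapshot it finds sits on the canonical fork. A minimal sketch of that canonicity check, with toy snapshots in place of the sortition DB (all names here are hypothetical):

```rust
use std::collections::HashMap;

/// Toy sortition snapshot: a stand-in for the rows the real check consults.
#[derive(Clone, PartialEq, Debug)]
struct Snapshot { sortition_id: u64, consensus_hash: u64, height: u64 }

/// "Trust but verify": look the claimed consensus hash up locally, then confirm
/// the canonical fork at that height is the *same* sortition. A peer on a
/// different fork fails the second check even if the hash exists locally.
fn is_canonical(
    by_consensus_hash: &HashMap<u64, Snapshot>,
    canonical_chain: &[Snapshot], // indexed by height along the canonical tip
    claimed_hash: u64,
) -> bool {
    let Some(local) = by_consensus_hash.get(&claimed_hash) else {
        return false; // we have never seen this tenure at all
    };
    canonical_chain
        .get(local.height as usize)
        .map(|ancestor| ancestor.sortition_id == local.sortition_id)
        .unwrap_or(false)
}

fn main() {
    let canon = vec![
        Snapshot { sortition_id: 100, consensus_hash: 10, height: 0 },
        Snapshot { sortition_id: 101, consensus_hash: 11, height: 1 },
    ];
    let fork = Snapshot { sortition_id: 999, consensus_hash: 99, height: 1 };
    let mut by_hash = HashMap::new();
    for s in &canon {
        by_hash.insert(s.consensus_hash, s.clone());
    }
    by_hash.insert(fork.consensus_hash, fork);
    assert!(is_canonical(&by_hash, &canon, 11));
    assert!(!is_canonical(&by_hash, &canon, 99)); // known locally, not canonical
    println!("canonicity checks pass");
}
```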
- pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? 
- .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? 
- .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. 
- pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. 
- pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. 
- if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. 
-    /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure
-    pub fn can_make_highest_complete_tenure_downloader(
-        &self,
-        sortdb: &SortitionDB,
-    ) -> Result<bool, NetError> {
-        let Some(tenure_tip) = &self.tenure_tip else {
-            return Ok(false);
-        };
-
-        let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus(
-            sortdb.conn(),
-            &tenure_tip.parent_consensus_hash,
-        )?
-        else {
-            return Ok(false);
-        };
-
-        let Some(tip_sn) =
-            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)?
-        else {
-            return Ok(false);
-        };
-
-        let Some(parent_tenure) =
-            SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)?
-        else {
-            return Ok(false);
-        };
-
-        let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)?
-        else {
-            return Ok(false);
-        };
-
-        if parent_tenure.epoch_id < StacksEpochId::Epoch30
-            || tip_tenure.epoch_id < StacksEpochId::Epoch30
-        {
-            debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block";
-                   "start_tenure" => %tenure_tip.parent_consensus_hash,
-                   "end_tenure" => %tenure_tip.consensus_hash,
-                   "start_tenure_epoch" => %parent_tenure.epoch_id,
-                   "end_tenure_epoch" => %tip_tenure.epoch_id
-            );
-            return Ok(false);
-        }
-
-        Ok(true)
-    }
-
-    /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
-    /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
-    /// its tenure-start block.
-    ///
-    /// Returns Ok(downloader) on success
-    /// Returns Err(..) if we call this function out of sequence.
-    pub fn make_highest_complete_tenure_downloader(
-        &self,
-    ) -> Result<NakamotoTenureDownloader, NetError> {
-        if self.state != NakamotoUnconfirmedDownloadState::Done {
-            return Err(NetError::InvalidState);
-        }
-        let Some(tenure_tip) = &self.tenure_tip else {
-            return Err(NetError::InvalidState);
-        };
-        let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else {
-            return Err(NetError::InvalidState);
-        };
-        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
-            return Err(NetError::InvalidState);
-        };
-
-        debug!(
-            "Create downloader for highest complete tenure {} known by {}",
-            &tenure_tip.parent_consensus_hash, &self.naddr,
-        );
-        let ntd = NakamotoTenureDownloader::new(
-            tenure_tip.parent_consensus_hash.clone(),
-            tenure_tip.parent_tenure_start_block_id.clone(),
-            tenure_tip.tenure_start_block_id.clone(),
-            self.naddr.clone(),
-            confirmed_signer_keys.clone(),
-            unconfirmed_signer_keys.clone(),
-        );
-
-        Ok(ntd)
-    }
-
-    /// Produce the next HTTP request that, when successfully executed, will advance this state
-    /// machine.
-    ///
-    /// Returns Some(request) if a request must be sent.
-    /// Returns None if we're done
-    pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> {
-        match &self.state {
-            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
-                // need to get the tenure tip
-                return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
-            }
-            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
-                return Some(StacksHttpRequest::new_get_nakamoto_block(
-                    peerhost,
-                    block_id.clone(),
-                ));
-            }
-            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
-                return Some(StacksHttpRequest::new_get_nakamoto_tenure(
-                    peerhost,
-                    tip_block_id.clone(),
-                    self.highest_processed_block_id.clone(),
-                ));
-            }
-            NakamotoUnconfirmedDownloadState::Done => {
-                // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed
-                // tenure downloader using the earliest unconfirmed tenure block.
-                return None;
-            }
-        }
-    }
-
-    /// Begin the next download request for this state machine.
-    /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The
-    /// caller should try this again until it gets one of the other possible return values. It's
-    /// up to the caller to determine when it's appropriate to convert this state machine into a
-    /// `NakamotoTenureDownloader`.
-    /// Returns Err(..) if the neighbor is dead or broken.
-    pub fn send_next_download_request(
-        &self,
-        network: &mut PeerNetwork,
-        neighbor_rpc: &mut NeighborRPC,
-    ) -> Result<(), NetError> {
-        if neighbor_rpc.has_inflight(&self.naddr) {
-            debug!("Peer {} has an inflight request", &self.naddr);
-            return Ok(());
-        }
-        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
-            return Err(NetError::PeerNotConnected);
-        }
-
-        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
-            // no conversation open to this neighbor
-            neighbor_rpc.add_dead(network, &self.naddr);
-            return Err(NetError::PeerNotConnected);
-        };
-
-        let Some(request) = self.make_next_download_request(peerhost) else {
-            // treat this downloader as still in-flight since the overall state machine will need
-            // to keep it around long enough to convert it into a tenure downloader for the highest
-            // complete tenure.
-            return Ok(());
-        };
-
-        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
-        Ok(())
-    }
-
-    /// Handle a received StacksHttpResponse and advance this machine's state
-    /// If we get the full tenure, return it.
-    ///
-    /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
-    /// Returns Ok(None) if we're still working, in which case the caller should call
-    /// `send_next_download_request()`
-    /// Returns Err(..) on unrecoverable failure to advance state
-    pub fn handle_next_download_response(
-        &mut self,
-        response: StacksHttpResponse,
-        sortdb: &SortitionDB,
-        local_sort_tip: &BlockSnapshot,
-        chainstate: &StacksChainState,
-        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
-    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        match &self.state {
-            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
-                debug!("Got tenure-info response");
-                let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
-                debug!("Got tenure-info response: {:?}", &remote_tenure_info);
-                self.try_accept_tenure_info(
-                    sortdb,
-                    local_sort_tip,
-                    chainstate,
-                    remote_tenure_info,
-                    current_reward_sets,
-                )?;
-                Ok(None)
-            }
-            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
-                debug!("Got tenure start-block response");
-                let block = response.decode_nakamoto_block()?;
-                self.try_accept_unconfirmed_tenure_start_block(block)?;
-                Ok(None)
-            }
-            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
-                debug!("Got unconfirmed tenure blocks response");
-                let blocks = response.decode_nakamoto_tenure()?;
-                let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?;
-                debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some());
-                Ok(accepted_opt)
-            }
-            NakamotoUnconfirmedDownloadState::Done => {
-                return Err(NetError::InvalidState);
-            }
-        }
-    }
-
-    /// Is this machine finished?
-    pub fn is_done(&self) -> bool {
-        self.state == NakamotoUnconfirmedDownloadState::Done
-    }
-}
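
For orientation, the machine deleted above advances through four states and issues at most one HTTP request per state. Below is a minimal, self-contained sketch of that flow, using only the state names and the request mapping visible in make_next_download_request() above; the String payloads are stand-ins for the real StacksBlockId/StacksHttpRequest types, so this is an illustration, not stacks-core code:

    // Stand-in model of NakamotoUnconfirmedDownloadState; `String` replaces
    // StacksBlockId purely for illustration.
    #[derive(Debug)]
    enum DownloadState {
        GetTenureInfo,
        GetTenureStartBlock(String),
        GetUnconfirmedTenureBlocks(String),
        Done,
    }

    // Mirrors make_next_download_request(): every state except Done maps to a
    // request; Done signals the caller to convert this machine into a
    // highest-complete-tenure downloader instead of sending anything.
    fn next_request(state: &DownloadState) -> Option<String> {
        match state {
            DownloadState::GetTenureInfo => Some("/v3/tenure/info".into()),
            DownloadState::GetTenureStartBlock(block_id) => Some(format!("block {block_id}")),
            DownloadState::GetUnconfirmedTenureBlocks(tip_id) => Some(format!("tenure at {tip_id}")),
            DownloadState::Done => None,
        }
    }

    fn main() {
        assert!(next_request(&DownloadState::GetTenureInfo).is_some());
        assert!(next_request(&DownloadState::Done).is_none());
    }

The Done arm returning None is what lets the caller keep the machine alive long enough to convert it into a highest-complete-tenure downloader, as the comment in send_next_download_request() above notes.
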
From 62f5a6a7813ab94999063ba4aecdcc9612fe5cea Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Fri, 16 Aug 2024 16:42:36 -0400
Subject: [PATCH 273/910] Do not enable pre nakamoto mock signing unless the miner key is set

Signed-off-by: Jacinta Ferrant
---
 testnet/stacks-node/src/config.rs | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index 20b5d07355a..30a59903197 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -1168,6 +1168,10 @@ impl Config {
             .validate()
             .map_err(|e| format!("Atlas config error: {e}"))?;
 
+        if miner.mining_key.is_none() && miner.pre_nakamoto_mock_signing {
+            return Err("Cannot use pre_nakamoto_mock_signing without a mining_key".to_string());
+        }
+
         Ok(Config {
             config_path: config_file.__path,
             node,
@@ -2384,7 +2388,7 @@ impl Default for MinerConfig {
             max_reorg_depth: 3,
             // TODO: update to a sane value based on stackerdb benchmarking
             wait_on_signers: Duration::from_secs(200),
-            pre_nakamoto_mock_signing: true,
+            pre_nakamoto_mock_signing: false, // Should only default true if mining key is set
         }
     }
 }
@@ -2739,6 +2743,12 @@ pub struct MinerConfigFile {
 
 impl MinerConfigFile {
     fn into_config_default(self, miner_default_config: MinerConfig) -> Result<MinerConfig, String> {
+        let mining_key = self
+            .mining_key
+            .as_ref()
+            .map(|x| Secp256k1PrivateKey::from_hex(x))
+            .transpose()?;
+        let pre_nakamoto_mock_signing = mining_key.is_some();
         Ok(MinerConfig {
             first_attempt_time_ms: self
                 .first_attempt_time_ms
@@ -2837,7 +2847,9 @@ impl MinerConfigFile {
                 .wait_on_signers_ms
                 .map(Duration::from_millis)
                 .unwrap_or(miner_default_config.wait_on_signers),
-            pre_nakamoto_mock_signing: self.pre_nakamoto_mock_signing.unwrap_or(true),
+            pre_nakamoto_mock_signing: self
+                .pre_nakamoto_mock_signing
+                .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set
         })
     }
 }
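
The rule this patch encodes: pre_nakamoto_mock_signing defaults to whether a mining_key is configured, an explicit config value overrides that default, and an explicit true without a key fails validation. A self-contained sketch of that rule (illustrative only; in the diff above the real logic is split between Config validation and MinerConfigFile::into_config_default()):

    // Sketch of the defaulting/validation rule from PATCH 273. Standalone
    // illustration; the node works on full key material, not a bool.
    fn effective_pre_nakamoto_mock_signing(
        mining_key_set: bool,
        explicit_setting: Option<bool>,
    ) -> Result<bool, String> {
        // Default follows the presence of a mining key; an explicit value wins.
        let enabled = explicit_setting.unwrap_or(mining_key_set);
        // Explicit `true` without a mining key is a configuration error.
        if enabled && !mining_key_set {
            return Err("Cannot use pre_nakamoto_mock_signing without a mining_key".into());
        }
        Ok(enabled)
    }

    fn main() {
        assert_eq!(effective_pre_nakamoto_mock_signing(true, None), Ok(true));
        assert_eq!(effective_pre_nakamoto_mock_signing(false, None), Ok(false));
        assert!(effective_pre_nakamoto_mock_signing(false, Some(true)).is_err());
    }
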
Got {message:?}"); + return None; }; block_proposal.block }; From 8a160dc5d5cfc0ead5da5a9f3c50e97562f7f0ac Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 16 Aug 2024 16:47:11 -0400 Subject: [PATCH 275/910] test: add assertion about node 2's chainstate --- testnet/stacks-node/src/tests/signer/v0.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ce95049744a..e03b0d392b4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3139,5 +3139,24 @@ fn partial_tenure_fork() { u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); + let sortdb = SortitionDB::open( + &conf_node_2.get_burn_db_file_path(), + false, + conf_node_2.get_burnchain().pox_constants, + ) + .unwrap(); + + let (chainstate, _) = StacksChainState::open( + false, + conf_node_2.burnchain.chain_id, + &conf_node_2.get_chainstate_path_str(), + None, + ) + .unwrap(); + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(tip.stacks_block_height, ignore_block - 1); + signer_test.shutdown(); } From 1610ce3698c0ab4a5fe12c75a3b72ec05674c8f9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 16 Aug 2024 17:24:44 -0400 Subject: [PATCH 276/910] Set pre nakamoto mock signing to true for mock sign test Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 19 +++++++++---------- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index b767431c60b..5f7b82a937e 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -964,22 +964,21 @@ mod test { let bad_private_key = StacksPrivateKey::new(); let bad_public_key = StacksPublicKey::from_private(&bad_private_key); - let mut mock_signature = random_mock_proposal(); - mock_signature.sign(&private_key).unwrap(); - assert!(!mock_signature + let mut mock_proposal = random_mock_proposal(); + assert!(!mock_proposal .verify(&public_key) - .expect("Failed to verify MockSignature")); + .expect("Failed to verify MockProposal")); - mock_signature + mock_proposal .sign(&private_key) - .expect("Failed to sign MockSignature"); + .expect("Failed to sign MockProposal"); - assert!(mock_signature + assert!(mock_proposal .verify(&public_key) - .expect("Failed to verify MockSignature")); - assert!(!mock_signature + .expect("Failed to verify MockProposal")); + assert!(!mock_proposal .verify(&bad_public_key) - .expect("Failed to verify MockSignature")); + .expect("Failed to verify MockProposal")); } #[test] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1d06c429e9a..115fce4c833 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2440,6 +2440,7 @@ fn mock_sign_epoch_25() { Some(Duration::from_secs(5)), |_| {}, |node_config| { + node_config.miner.pre_nakamoto_mock_signing = true; let epochs = node_config.burnchain.epochs.as_mut().unwrap(); for epoch in epochs.iter_mut() { if epoch.epoch_id == StacksEpochId::Epoch25 { From 32e2875558ba6f90d5cf43e719f5a0b3b36aa52c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 17 Aug 2024 10:59:30 -0400 Subject: [PATCH 277/910] Fix block proposal rejection by enforcing fetch view to be up to date Signed-off-by: Jacinta Ferrant --- 
testnet/stacks-node/src/tests/signer/mod.rs | 55 +++++++---- testnet/stacks-node/src/tests/signer/v0.rs | 101 ++++++++++---------- testnet/stacks-node/src/tests/signer/v1.rs | 12 ++- 3 files changed, 96 insertions(+), 72 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 91371578cb9..57613728038 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -43,7 +43,9 @@ use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; -use stacks::net::api::postblock_proposal::BlockValidateResponse; +use stacks::net::api::postblock_proposal::{ + BlockValidateOk, BlockValidateReject, BlockValidateResponse, +}; use stacks::types::chainstate::StacksAddress; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; @@ -99,6 +101,7 @@ pub struct SignerTest { // The spawned signers and their threads pub spawned_signers: Vec, // The spawned signers and their threads + #[allow(dead_code)] pub signer_configs: Vec, // the private keys of the signers pub signer_stacks_private_keys: Vec, @@ -481,35 +484,47 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockValidateResponse { + fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> BlockValidateOk { // Wait for the block to show up in the test observer let t_start = Instant::now(); - while test_observer::get_proposal_responses().is_empty() { + loop { + let responses = test_observer::get_proposal_responses(); + for response in responses { + let BlockValidateResponse::Ok(validation) = response else { + continue; + }; + return validation; + } assert!( t_start.elapsed() < timeout, - "Timed out while waiting for block proposal response event" + "Timed out while waiting for block proposal ok event" ); thread::sleep(Duration::from_secs(1)); } - test_observer::get_proposal_responses() - .pop() - .expect("No block proposal") - } - - fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { - let validate_response = self.wait_for_block_validate_response(timeout); - match validate_response { - BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, - _ => panic!("Unexpected response"), - } } - fn wait_for_validate_reject_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + fn wait_for_validate_reject_response( + &mut self, + timeout: Duration, + signer_signature_hash: Sha512Trunc256Sum, + ) -> BlockValidateReject { // Wait for the block to show up in the test observer - let validate_response = self.wait_for_block_validate_response(timeout); - match validate_response { - BlockValidateResponse::Reject(block_rejection) => block_rejection.signer_signature_hash, - _ => panic!("Unexpected response"), + let t_start = Instant::now(); + loop { + let responses = test_observer::get_proposal_responses(); + for response in responses { + let BlockValidateResponse::Reject(rejection) = response else { + continue; + }; + if rejection.signer_signature_hash == signer_signature_hash { + return rejection; + } + } + assert!( + t_start.elapsed() < timeout, + "Timed out while waiting for block proposal reject event" + ); + thread::sleep(Duration::from_secs(1)); } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs 
b/testnet/stacks-node/src/tests/signer/v0.rs index c2ce878e28f..a4c4b935ac3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; @@ -36,7 +36,7 @@ use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, S use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; -use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; +use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; @@ -51,7 +51,6 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -64,9 +63,8 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, - next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -298,7 +296,9 @@ impl SignerTest { self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = self.wait_for_validate_ok_response(timeout); + let proposed_signer_signature_hash = self + .wait_for_validate_ok_response(timeout) + .signer_signature_hash; let message = proposed_signer_signature_hash.0; info!("------------------------- Test Block Signed -------------------------"); @@ -368,7 +368,7 @@ impl SignerTest { } /// Propose an invalid block to the signers - fn propose_block(&mut self, slot_id: u32, version: u32, block: NakamotoBlock) { + fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = StackerDBSession::new(&self.running_nodes.conf.node.rpc_bind, miners_contract_id); @@ -388,17 +388,26 @@ impl SignerTest { .miner .mining_key .expect("No mining key"); - // Submit the block proposal to the miner's slot - let mut chunk = StackerDBChunkData::new(slot_id, version, message.serialize_to_vec()); - chunk.sign(&miner_sk).expect("Failed to sign message chunk"); - debug!("Produced a signature: {:?}", chunk.sig); - let result = session.put_chunk(&chunk).expect("Failed to put chunk"); - 
debug!("Test Put Chunk ACK: {result:?}"); - assert!( - result.accepted, - "Failed to submit block proposal to signers" - ); + let mut accepted = false; + let mut version = 0; + let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; + let start = Instant::now(); + debug!("Proposing invalid block to signers"); + while !accepted { + let mut chunk = + StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); + chunk.sign(&miner_sk).expect("Failed to sign message chunk"); + debug!("Produced a signature: {:?}", chunk.sig); + let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + accepted = result.accepted; + version += 1; + debug!("Test Put Chunk ACK: {result:?}"); + assert!( + start.elapsed() < timeout, + "Timed out waiting for block proposal to be accepted" + ); + } } } @@ -434,12 +443,10 @@ fn block_proposal_rejection() { let short_timeout = Duration::from_secs(30); info!("------------------------- Send Block Proposal To Signers -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle(); let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; - let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], @@ -448,48 +455,44 @@ fn block_proposal_rejection() { // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE // the block is submitted to the node for validation. let block_signer_signature_hash_1 = block.header.signer_signature_hash(); - signer_test.propose_block(0, 1, block.clone()); + signer_test.propose_block(block.clone(), short_timeout); + + // Wait for the first block to be mined successfully so we have the most up to date sortition view + signer_test.wait_for_validate_ok_response(short_timeout); // Propose a block to the signers that passes initial checks but will be rejected by the stacks node + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); block.header.pox_treatment = BitVec::ones(1).unwrap(); block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = 35; // We have mined 35 blocks so far. let block_signer_signature_hash_2 = block.header.signer_signature_hash(); - signer_test.propose_block(0, 2, block); + signer_test.propose_block(block, short_timeout); info!("------------------------- Test Block Proposal Rejected -------------------------"); // Verify the signers rejected the second block via the endpoint - let rejected_block_hash = signer_test.wait_for_validate_reject_response(short_timeout); - assert_eq!(rejected_block_hash, block_signer_signature_hash_2); - - let mut stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. 
- ); - - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); + let reject = + signer_test.wait_for_validate_reject_response(short_timeout, block_signer_signature_hash_2); + assert!(matches!( + reject.reason_code, + ValidateRejectCode::UnknownParent + )); let start_polling = Instant::now(); let mut found_signer_signature_hash_1 = false; let mut found_signer_signature_hash_2 = false; while !found_signer_signature_hash_1 && !found_signer_signature_hash_2 { std::thread::sleep(Duration::from_secs(1)); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .map(|chunk| chunk.modified_slots) + .flatten() + { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason: _reason, reason_code, @@ -503,10 +506,10 @@ fn block_proposal_rejection() { found_signer_signature_hash_2 = true; assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); } else { - panic!("Unexpected signer signature hash"); + continue; } } else { - panic!("Unexpected message type"); + continue; } } assert!( diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 44bbc572282..6e9ed71f365 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -893,7 +893,9 @@ fn block_proposal() { info!("------------------------- Test Block Proposal -------------------------"); // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); + let proposed_signer_signature_hash = signer_test + .wait_for_validate_ok_response(short_timeout) + .signer_signature_hash; info!("------------------------- Test Block Signed -------------------------"); // Verify that the signers signed the proposed block @@ -1115,7 +1117,9 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Mine Block -------------------------"); signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); + let proposed_signer_signature_hash = signer_test + .wait_for_validate_ok_response(short_timeout) + .signer_signature_hash; let signature = signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); @@ -1136,7 +1140,9 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Mine Block after restart -------------------------"); let last_block = signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); + let proposed_signer_signature_hash = signer_test + .wait_for_validate_ok_response(short_timeout) + .signer_signature_hash; let frost_signature = signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); From eef4b0e81bb50cfb4b389ab1bdc399983c4d20f9 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Sun, 18 Aug 2024 18:30:56 +0300 Subject: [PATCH 278/910] keep 
workflow dispatch only true as it runs as false from PRs --- .github/workflows/pr-differences-mutants.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index edafb42bf1f..c109b69cfec 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -17,7 +17,6 @@ on: type: choice options: - true - # - false default: 'true' concurrency: From 42bfde05a347e9646911c722cadd5ab83f50899e Mon Sep 17 00:00:00 2001 From: shangchengbabaiban Date: Mon, 19 Aug 2024 00:47:07 +0800 Subject: [PATCH 279/910] chore: fix some comments Signed-off-by: shangchengbabaiban --- clarity/src/vm/ast/definition_sorter/mod.rs | 2 +- contrib/boot-contracts-unit-tests/tests/misc.test.ts | 2 +- stacks-signer/src/client/stacks_client.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs index eee66253104..a5a551298cf 100644 --- a/clarity/src/vm/ast/definition_sorter/mod.rs +++ b/clarity/src/vm/ast/definition_sorter/mod.rs @@ -173,7 +173,7 @@ impl DefinitionSorter { return Ok(()); } DefineFunctions::Map => { - // Args: [name, key, value]: with key value being potentialy tuples + // Args: [name, key, value]: with key value being potentially tuples if function_args.len() == 3 { self.probe_for_dependencies( function_args[1], diff --git a/contrib/boot-contracts-unit-tests/tests/misc.test.ts b/contrib/boot-contracts-unit-tests/tests/misc.test.ts index d50f2ef6d38..09a6fe11743 100644 --- a/contrib/boot-contracts-unit-tests/tests/misc.test.ts +++ b/contrib/boot-contracts-unit-tests/tests/misc.test.ts @@ -575,7 +575,7 @@ describe("test `get-total-ustx-stacked`", () => { expect(response.result).toBeUint(amount * 3n); }); - it("expires stacking after the stacking duration has finsihed", () => { + it("expires stacking after the stacking duration has finished", () => { const amount = getStackingMinimum() * 2n; stackers.forEach((stacker, i) => { diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 0aeb30bb6e0..cd65f7914bd 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -556,7 +556,7 @@ impl StacksClient { Ok(stackers_response.stacker_set.signers) } - /// Retreive the current pox data from the stacks node + /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { debug!("Getting pox data..."); #[cfg(feature = "monitoring_prom")] From ee7929100c83070651e34790817aa49890779e9b Mon Sep 17 00:00:00 2001 From: Will Corcoran <94402722+will-corcoran@users.noreply.github.com> Date: Mon, 19 Aug 2024 11:57:06 -0400 Subject: [PATCH 280/910] Create signer_bug Adding an issue template for Signers to submit bugs and issues with the Signer --- .github/ISSUE_TEMPLATE/signer_bug | 49 +++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/signer_bug diff --git a/.github/ISSUE_TEMPLATE/signer_bug b/.github/ISSUE_TEMPLATE/signer_bug new file mode 100644 index 00000000000..9ca69928780 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/signer_bug @@ -0,0 +1,49 @@ + + +## Expected Behavior + + +## Current Behavior + + +## Steps to Reproduce + + +1. +2. +3. +4. + +## Context (Environment) + + +- +- +- +- +- +- + +## Expected Behavior + + +## Current Behavior + + +## Steps to Reproduce + + +1. +2. +3. +4. 
+ +## Context (Environment) + + +- +- +- +- +- +- - - -- From f65476ef08fb6f4cec4afae6b02ac170e6b0bcc5 Mon Sep 17 00:00:00 2001 From: Will Corcoran <94402722+will-corcoran@users.noreply.github.com> Date: Mon, 19 Aug 2024 15:25:32 -0400 Subject: [PATCH 284/910] Rename signer_bug to signer_bug.md added file extension --- .github/ISSUE_TEMPLATE/{signer_bug => signer_bug.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{signer_bug => signer_bug.md} (100%) diff --git a/.github/ISSUE_TEMPLATE/signer_bug b/.github/ISSUE_TEMPLATE/signer_bug.md similarity index 100% rename from .github/ISSUE_TEMPLATE/signer_bug rename to .github/ISSUE_TEMPLATE/signer_bug.md From 79f3a2f2d54fd4c3a333a9bfde259a4413536643 Mon Sep 17 00:00:00 2001 From: Will Corcoran <94402722+will-corcoran@users.noreply.github.com> Date: Mon, 19 Aug 2024 15:48:30 -0400 Subject: [PATCH 285/910] Update .github/ISSUE_TEMPLATE/signer_bug.md LGTM Co-authored-by: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/signer_bug.md | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/signer_bug.md b/.github/ISSUE_TEMPLATE/signer_bug.md index 54a43573a8b..45f1695a0f9 100644 --- a/.github/ISSUE_TEMPLATE/signer_bug.md +++ b/.github/ISSUE_TEMPLATE/signer_bug.md @@ -23,27 +23,3 @@ - - - - -## Expected Behavior - - -## Current Behavior - - -## Steps to Reproduce - - -1. -2. -3. -4. - -## Context (Environment) - - -- -- -- -- -- -- From 147559707584fa77a6beb6a7f7e153da209d7b5b Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 19 Aug 2024 13:37:42 -0700 Subject: [PATCH 286/910] reformat signer issue template --- .github/ISSUE_TEMPLATE/signer_bug.md | 57 +++++++++++++++------------ .github/ISSUE_TEMPLATE/testnet-bug.md | 2 +- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/signer_bug.md b/.github/ISSUE_TEMPLATE/signer_bug.md index 45f1695a0f9..f7ef8e7e106 100644 --- a/.github/ISSUE_TEMPLATE/signer_bug.md +++ b/.github/ISSUE_TEMPLATE/signer_bug.md @@ -1,25 +1,32 @@ - - -## Expected Behavior - - -## Current Behavior - - -## Steps to Reproduce - - -1. -2. -3. -4. - -## Context (Environment) - - -- -- -- -- -- -- +--- +name: Signer Bug +about: Create a report to help us improve the signer +title: "[SIGNER BUG]" +labels: signer +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**Steps To Reproduce** +Please provide detailed instructions (e.g. command line invocation with parameters) to reproduce the behavior. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Environment (please complete the following information):** + - OS: [e.g. Ubuntu / Debian] + - Rust version + - Version of the appropriate binary / software packages + - Signer public key + - Relevant log messages + - Tx ID of any transaction you were trying to execute + - Tx ID of `aggregation-commit-indexed` call in registered cycle (if applicable) + + +**Additional context** +Please include any relevant stack traces, error messages and logs. 
+ + diff --git a/.github/ISSUE_TEMPLATE/testnet-bug.md b/.github/ISSUE_TEMPLATE/testnet-bug.md index 314e8ab237b..405deac2512 100644 --- a/.github/ISSUE_TEMPLATE/testnet-bug.md +++ b/.github/ISSUE_TEMPLATE/testnet-bug.md @@ -3,7 +3,7 @@ name: Testnet Bug about: Use this template to submit Stacks 2.0 testnet bugs title: "[TESTNET BUG]" labels: bug, testnet -assignees: 'timstackblock' +assignees: '' --- From 45eda2cbe42ad55e98cbeb06333e1d80ef16e817 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:42:49 -0400 Subject: [PATCH 287/910] feat: upgrade CONSENSUS to INFO --- stackslib/src/chainstate/burn/db/processing.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 70f170a60c9..82318bfe372 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -211,7 +211,7 @@ impl<'a> SortitionHandleTx<'a> { "SORTITION-HASH({}): {}", this_block_height, &snapshot.sortition_hash ); - debug!( + info!( "CONSENSUS({}): {}", this_block_height, &snapshot.consensus_hash ); From 33ffeb90f35a43362b1f8f93cbca2a78bd6fadec Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:43:05 -0400 Subject: [PATCH 288/910] chore: log advance to new tip _after_ the tx commits in order to get a better understanding of when it becomes readable by other threads --- stackslib/src/chainstate/nakamoto/mod.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 657315e993e..80aef0a74ad 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2036,6 +2036,12 @@ impl NakamotoChainState { panic!() }); + info!( + "Advanced to new tip! {}/{}", + &receipt.header.consensus_hash, + &receipt.header.anchored_header.block_hash() + ); + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2994,7 +3000,6 @@ impl NakamotoChainState { ); let parent_hash = new_tip.parent_block_id.clone(); - let new_block_hash = new_tip.block_hash(); let index_block_hash = new_tip.block_id(); let mut marf_keys = vec![]; @@ -3186,10 +3191,6 @@ impl NakamotoChainState { headers_tx.deref_mut().execute(sql, args)?; } - debug!( - "Advanced to new tip! {}/{}", - &new_tip.consensus_hash, new_block_hash, - ); Ok(new_tip_info) } From dea8b57fc7b03ad4ddd5807b7efb1118b92dcb86 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:43:35 -0400 Subject: [PATCH 289/910] chore: advance to new tip is now INFO --- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 356b117b8bb..ca8acd2dce1 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2675,7 +2675,7 @@ impl StacksChainState { headers_tx.deref_mut().execute(sql, args)?; } - debug!( + info!( "Advanced to new tip! 
{}/{}", new_consensus_hash, new_tip.block_hash() From 424d7bf2dc3850e7c3df311f8ab1a4c27855c346 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:43:50 -0400 Subject: [PATCH 290/910] feat: unconfirmed downloader interval and nakamoto inventory sync burst config --- stackslib/src/net/connection.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 36b1fc18ff0..3737f70c0f7 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -398,6 +398,12 @@ pub struct ConnectionOptions { /// maximum number of confirmations for a nakamoto block's sortition for which it will be /// pushed pub max_nakamoto_block_relay_age: u64, + /// minimum amount of time between requests to push nakamoto blocks (millis) + pub nakamoto_push_interval_ms: u128, + /// minimum amount of time between requests to push nakamoto blocks (millis) + pub nakamoto_inv_sync_burst_interval_ms: u128, + /// time between unconfirmed downloader runs + pub nakamoto_unconfirmed_downloader_interval_ms: u128, /// The authorization token to enable privileged RPC endpoints pub auth_token: Option, @@ -521,6 +527,9 @@ impl std::default::Default for ConnectionOptions { socket_send_buffer_size: 16384, // Linux default private_neighbors: true, max_nakamoto_block_relay_age: 6, + nakamoto_push_interval_ms: 30_000, // re-send a block no more than once every 30 seconds + nakamoto_inv_sync_burst_interval_ms: 1_000, // wait 1 second after a sortition before running inventory sync + nakamoto_unconfirmed_downloader_interval_ms: 5_000, // run unconfirmed downloader once every 5 seconds auth_token: None, // no faults on by default From bd3e7c5abf1314f8b0e54596ecf5ce715a77f54b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:44:25 -0400 Subject: [PATCH 291/910] fix: use a consistent reward-cycle-start index in the downloader, fix wanted tenure and availability calculations at reward cycle boundaries, and fix the transition logic for confirmed/unconfirmed states --- .../nakamoto/download_state_machine.rs | 450 +++++++----------- 1 file changed, 178 insertions(+), 272 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 6e298470e01..2e7be7f9772 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -52,10 +52,11 @@ use crate::core::{ }; use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::chat::ConversationP2P; +use crate::net::connection::ConnectionOptions; use crate::net::db::{LocalPeer, PeerDB}; use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, + downloader_block_height_to_reward_cycle, AvailableTenures, NakamotoTenureDownloader, + NakamotoTenureDownloaderSet, NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, }; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; @@ -120,6 +121,8 @@ pub struct NakamotoDownloadStateMachine { pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip nakamoto_tip: StacksBlockId, + /// last time an unconfirmed downloader was run + last_unconfirmed_download_run_ms: u128, } impl NakamotoDownloadStateMachine { @@ -140,6 +143,7 @@ impl NakamotoDownloadStateMachine { 
             tenure_start_blocks: HashMap::new(),
             neighbor_rpc: NeighborRPC::new(),
             nakamoto_tip,
+            last_unconfirmed_download_run_ms: 0,
         }
     }
 
@@ -194,8 +198,6 @@ impl NakamotoDownloadStateMachine {
     ) -> Result<(), NetError> {
         let highest_tenure_height = wanted_tenures.last().map(|wt| wt.burn_height).unwrap_or(0);
 
-        // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at
-        // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len
         let first_block_height = sortdb
             .pox_constants
             .reward_cycle_to_block_height(sortdb.first_block_height, cur_rc)
@@ -243,18 +245,22 @@ impl NakamotoDownloadStateMachine {
         sortdb: &SortitionDB,
         loaded_so_far: &[WantedTenure],
     ) -> Result<Vec<WantedTenure>, NetError> {
-        let tip_rc = sortdb
-            .pox_constants
-            .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height)
-            .unwrap_or(0);
+        let tip_rc = downloader_block_height_to_reward_cycle(
+            &sortdb.pox_constants,
+            sortdb.first_block_height,
+            tip.block_height,
+        )
+        .expect("FATAL: tip.block_height before system start");
 
+        // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at
+        // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len
         let first_block_height = if let Some(highest_wanted_tenure) = loaded_so_far.last() {
             highest_wanted_tenure.burn_height.saturating_add(1)
         } else if let Some(last_tip) = last_tip.as_ref() {
             last_tip.block_height.saturating_add(1)
         } else {
             // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at
-            // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len.
+            // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len
             sortdb
                 .pox_constants
                 .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc)
@@ -403,50 +409,13 @@ impl NakamotoDownloadStateMachine {
         )
     }
 
-    /// Update `self.wanted_tenures` and `self.prev_wanted_tenures` with newly-discovered sortition
-    /// data. These lists are extended in three possible ways, depending on the sortition tip:
-    ///
-    /// * If the sortition tip is in the same reward cycle that the block downloader is tracking,
-    /// then any newly-available sortitions are loaded via `load_wanted_tenures_at_tip()` and appended
-    /// to `self.wanted_tenures`. This is what happens most of the time in steady-state.
-    ///
-    /// * Otherwise, if the sortition tip is different (i.e. ahead) of the block downloader's
-    /// tracked reward cycle, _and_ if it's safe to do so (discussed below), then the next reward
-    /// cycle's sortitions are loaded. `self.prev_wanted_tenures` is populated with all of the
-    /// wanted tenures from the prior reward cycle, and `self.wanted_tenures` is populated with all
-    /// of the wanted tenures from the current reward cycle.
-    ///
-    /// Due to the way the chains coordinator works, the sortition DB will never be more than one
-    /// reward cycle ahead of the block downloader. This is because sortitions cannot be processed
-    /// (and will not be processed) until their corresponding PoX anchor block has been processed.
-    /// As such, the second case above only occurs at a reward cycle boundary -- specifically, the
-    /// sortition DB is in the process of being updated by the chains coordinator with the next
-    /// reward cycle's sortitions.
- /// - /// Naturally, processing a new reward cycle is disruptive to the download state machine, which - /// can be in the process of finishing up downloading the prepare phase for a reward cycle at - /// the same time as the sortition DB processing the next reward cycle. To ensure that the - /// downloader doesn't miss anything, this code checks (via `have_unprocessed_tenures()`) that - /// all wanted tenures for which we have inventory data have been downloaded before advancing - /// `self.wanted_tenures` and `self.prev_wanted_tenures.` + /// Update `self.wanted_tenures` with newly-discovered sortition data. fn extend_wanted_tenures( &mut self, network: &PeerNetwork, sortdb: &SortitionDB, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return Err(NetError::PeerNotConnected); - }; - - let last_sort_height_opt = self.last_sort_tip.as_ref().map(|sn| sn.block_height); - let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height) - .expect("FATAL: burnchain tip is before system start"); let mut new_wanted_tenures = Self::load_wanted_tenures_at_tip( self.last_sort_tip.as_ref(), @@ -455,76 +424,13 @@ impl NakamotoDownloadStateMachine { &self.wanted_tenures, )?; - let can_advance_wanted_tenures = - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() { - !Self::have_unprocessed_tenures( - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - self.nakamoto_start_height, - ) - .expect("FATAL: first nakamoto block from before system start"), - &self.tenure_downloads.completed_tenures, - prev_wanted_tenures, - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - } else { - debug!("No prev_wanted_tenures yet"); - true - }; - - if can_advance_wanted_tenures && self.reward_cycle != sort_rc { - let mut prev_wanted_tenures = vec![]; - let mut cur_wanted_tenures = vec![]; - let prev_wts = self.prev_wanted_tenures.take().unwrap_or(vec![]); - let cur_wts = std::mem::replace(&mut self.wanted_tenures, vec![]); - - for wt in new_wanted_tenures - .into_iter() - .chain(prev_wts.into_iter()) - .chain(cur_wts.into_iter()) - { - debug!("Consider wanted tenure: {:?}", &wt); - let wt_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, wt.burn_height) - .expect("FATAL: height before system start"); - if wt_rc + 1 == sort_rc { - prev_wanted_tenures.push(wt); - } else if wt_rc == sort_rc { - cur_wanted_tenures.push(wt); - } else { - debug!("Drop wanted tenure: {:?}", &wt); - } - } - - prev_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - cur_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - - debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); - debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); - debug!("set self.reward_cycle = {}", sort_rc); - - self.prev_wanted_tenures = if prev_wanted_tenures.is_empty() { - None - } else { - Some(prev_wanted_tenures) - }; - self.wanted_tenures = cur_wanted_tenures; - self.reward_cycle = sort_rc; - } else { - debug!( - "Append {} wanted tenures: {:?}", - new_wanted_tenures.len(), - &new_wanted_tenures - ); - self.wanted_tenures.append(&mut new_wanted_tenures); - debug!("wanted_tenures is now {:?}", &self.wanted_tenures); - } + 
debug!( + "Append {} wanted tenures: {:?}", + new_wanted_tenures.len(), + &new_wanted_tenures + ); + self.wanted_tenures.append(&mut new_wanted_tenures); + debug!("extended wanted_tenures is now {:?}", &self.wanted_tenures); Ok(()) } @@ -556,15 +462,17 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: usize cannot support reward cycle length") { // this is the first-ever pass, so load up the last full reward cycle - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start") - .saturating_sub(1); + let prev_sort_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + sort_tip.block_height, + ) + .expect("FATAL: burnchain tip is before system start") + .saturating_sub(1); let mut prev_wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( - sort_rc, + prev_sort_rc, sort_tip, sortdb, &mut prev_wanted_tenures, @@ -572,16 +480,18 @@ impl NakamotoDownloadStateMachine { debug!( "initial prev_wanted_tenures (rc {}): {:?}", - sort_rc, &prev_wanted_tenures + prev_sort_rc, &prev_wanted_tenures ); self.prev_wanted_tenures = Some(prev_wanted_tenures); } if self.wanted_tenures.is_empty() { // this is the first-ever pass, so load up the current reward cycle - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start"); + let sort_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + sort_tip.block_height, + ) + .expect("FATAL: burnchain tip is before system start"); let mut wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( @@ -625,12 +535,12 @@ impl NakamotoDownloadStateMachine { let prev_wanted_rc = prev_wanted_tenures .last() .map(|wt| { - pox_constants - .block_height_to_reward_cycle( - first_burn_height, - wt.burn_height.saturating_sub(1), - ) - .expect("FATAL: wanted tenure before system start") + downloader_block_height_to_reward_cycle( + pox_constants, + first_burn_height, + wt.burn_height, + ) + .expect("FATAL: wanted tenure before system start") }) .unwrap_or(u64::MAX); @@ -763,49 +673,16 @@ impl NakamotoDownloadStateMachine { chainstate: &mut StacksChainState, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return Err(NetError::PeerNotConnected); - }; self.initialize_wanted_tenures(sort_tip, sortdb)?; let last_sort_height_opt = self.last_sort_tip.as_ref().map(|sn| sn.block_height); let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - last_sort_height.saturating_add(1), - ) - .expect("FATAL: burnchain tip is before system start"); - - let next_sort_rc = if last_sort_height == sort_tip.block_height { - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - sort_tip.block_height.saturating_add(2), - ) - .expect("FATAL: burnchain tip is before system start") - } else { - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - sort_tip.block_height.saturating_add(1), - ) - .expect("FATAL: burnchain tip is before system start") - }; - - debug!( - "last_sort_height = {}, sort_rc = {}, next_sort_rc = {}, 
self.reward_cycle = {}, sort_tip.block_height = {}", + let sort_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, last_sort_height, - sort_rc, - next_sort_rc, - self.reward_cycle, - sort_tip.block_height, - ); + ) + .expect("FATAL: burnchain tip is before system start"); if self.reward_cycle == sort_rc { // not at a reward cycle boundary, so just extend self.wanted_tenures @@ -815,35 +692,10 @@ impl NakamotoDownloadStateMachine { return Ok(()); } - let can_advance_wanted_tenures = - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() { - !Self::have_unprocessed_tenures( - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - self.nakamoto_start_height, - ) - .expect("FATAL: nakamoto starts before system start"), - &self.tenure_downloads.completed_tenures, - prev_wanted_tenures, - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - } else { - debug!("No prev_wanted_tenures yet"); - true - }; - if !can_advance_wanted_tenures { - return Ok(()); - } - // crossed reward cycle boundary let mut new_wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( - sort_rc + 1, + sort_rc, sort_tip, sortdb, &mut new_wanted_tenures, @@ -851,15 +703,20 @@ impl NakamotoDownloadStateMachine { let mut new_prev_wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( - sort_rc, + sort_rc.saturating_sub(1), sort_tip, sortdb, &mut new_prev_wanted_tenures, )?; - debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); debug!( - "new_prev_wanted_tenures is now {:?}", + "new_wanted_tenures is now {} {:?}", + new_wanted_tenures.len(), + &new_wanted_tenures + ); + debug!( + "new_prev_wanted_tenures is now {} {:?}", + new_prev_wanted_tenures.len(), &new_prev_wanted_tenures ); @@ -1200,6 +1057,43 @@ impl NakamotoDownloadStateMachine { ) } + /// Find the two highest tenure IDs that are available for download. + /// These are the ones that must be fetched via the unconfirmed tenure downloader. + /// They are returned in block order -- .0 has a lower block height than .1 + pub(crate) fn find_unconfirmed_tenure_ids( + wanted_tenures: &[WantedTenure], + prev_wanted_tenures: &[WantedTenure], + available: &HashMap>, + ) -> (Option, Option) { + // map each tenure ID to its block height + let tenure_block_heights: BTreeMap<_, _> = wanted_tenures + .iter() + .chain(prev_wanted_tenures.iter()) + .map(|wt| (wt.burn_height, &wt.tenure_id_consensus_hash)) + .collect(); + + debug!("Check availability {:?}", available); + let mut highest_available = Vec::with_capacity(2); + for (_, ch) in tenure_block_heights.iter().rev() { + let available_count = available + .get(ch) + .map(|neighbors| neighbors.len()) + .unwrap_or(0); + + debug!("Check is {} available: {}", ch, available_count); + if available_count == 0 { + continue; + } + highest_available.push((*ch).clone()); + if highest_available.len() == 2 { + break; + } + } + + highest_available.reverse(); + (highest_available.pop(), highest_available.pop()) + } + /// Determine whether or not we can start downloading the highest complete tenure and the /// unconfirmed tenure. Only do this if (1) the sortition DB is at the burnchain tip and (2) /// all of our wanted tenures are marked as either downloaded or complete. @@ -1209,17 +1103,15 @@ impl NakamotoDownloadStateMachine { /// /// This method is static to facilitate testing. 
     pub(crate) fn need_unconfirmed_tenures<'a>(
-        nakamoto_start_block: u64,
         burnchain_height: u64,
         sort_tip: &BlockSnapshot,
-        completed_tenures: &HashSet<ConsensusHash>,
         wanted_tenures: &[WantedTenure],
         prev_wanted_tenures: &[WantedTenure],
         tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
-        pox_constants: &PoxConstants,
-        first_burn_height: u64,
-        inventory_iter: impl Iterator<Item = &'a NakamotoTenureInv>,
+        available_tenures: &HashMap<ConsensusHash, Vec<NeighborAddress>>,
     ) -> bool {
+        debug!("Check if we need unconfirmed tenures");
+
         if sort_tip.block_height < burnchain_height {
             debug!(
                 "sort_tip {} < burn tip {}",
                 sort_tip.block_height, burnchain_height
            );
             return false;
         }
 
@@ -1238,34 +1130,53 @@ impl NakamotoDownloadStateMachine {
             return false;
         }
 
-        // there are still confirmed tenures we have to go and get
-        if Self::have_unprocessed_tenures(
-            pox_constants
-                .block_height_to_reward_cycle(first_burn_height, nakamoto_start_block)
-                .expect("FATAL: nakamoto starts before system start"),
-            completed_tenures,
-            prev_wanted_tenures,
-            tenure_block_ids,
-            pox_constants,
-            first_burn_height,
-            inventory_iter,
-        ) {
-            debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures");
+        if tenure_block_ids.is_empty() {
+            debug!("No tenure availability known");
             return false;
         }
 
+        let (unconfirmed_tenure_opt, confirmed_tenure_opt) = Self::find_unconfirmed_tenure_ids(
+            wanted_tenures,
+            prev_wanted_tenures,
+            available_tenures,
+        );
+        debug!(
+            "Check unconfirmed tenures: highest two available tenures are {:?}, {:?}",
+            &unconfirmed_tenure_opt, &confirmed_tenure_opt
+        );
+
         // see if we need any tenures still
-        for wt in wanted_tenures.iter() {
-            if completed_tenures.contains(&wt.tenure_id_consensus_hash) {
-                continue;
-            }
-            let is_available = tenure_block_ids
-                .iter()
-                .any(|(_, available)| available.contains_key(&wt.tenure_id_consensus_hash));
+        for wt in wanted_tenures.iter().chain(prev_wanted_tenures.iter()) {
+            debug!("Check unconfirmed tenures: check {:?}", &wt);
+            let is_available_and_processed = tenure_block_ids.iter().any(|(_, available)| {
+                if let Some(tenure_start_end) = available.get(&wt.tenure_id_consensus_hash) {
+                    tenure_start_end.processed
+                } else {
+                    true
+                }
+            });
+
+            if !is_available_and_processed {
+                let is_unconfirmed = unconfirmed_tenure_opt
+                    .as_ref()
+                    .map(|ch| *ch == wt.tenure_id_consensus_hash)
+                    .unwrap_or(false)
+                    || confirmed_tenure_opt
+                        .as_ref()
+                        .map(|ch| *ch == wt.tenure_id_consensus_hash)
+                        .unwrap_or(false);
+
+                if is_unconfirmed {
+                    debug!(
+                        "Tenure {} is only available via the unconfirmed tenure downloader",
+                        &wt.tenure_id_consensus_hash
+                    );
+                    continue;
+                }
 
-            if is_available && !wt.processed {
                 // a tenure is available but not yet processed, so we can't yet transition to
                 // fetching unconfirmed tenures (we'd have no way to validate them).
+ // TODO: also check that this cannot be fetched by confirmed downloader debug!( "Tenure {} is available but not yet processed", &wt.tenure_id_consensus_hash @@ -1345,15 +1256,30 @@ impl NakamotoDownloadStateMachine { /// Update our unconfirmed tenure download state machines fn update_unconfirmed_tenure_downloaders( &mut self, + connection_opts: &ConnectionOptions, count: usize, highest_processed_block_id: Option<StacksBlockId>, ) { + if self + .last_unconfirmed_download_run_ms + .saturating_add(connection_opts.nakamoto_unconfirmed_downloader_interval_ms) + > get_epoch_time_ms() + { + debug!( + "Throttle starting new unconfirmed downloaders until {}", + self.last_unconfirmed_download_run_ms + .saturating_add(connection_opts.nakamoto_unconfirmed_downloader_interval_ms) + / 1000 + ); + return; + } Self::make_unconfirmed_tenure_downloaders( &mut self.unconfirmed_tenure_download_schedule, count, &mut self.unconfirmed_tenure_downloads, highest_processed_block_id, ); + self.last_unconfirmed_download_run_ms = get_epoch_time_ms(); } /// Run unconfirmed tenure download state machines. @@ -1579,15 +1505,18 @@ impl NakamotoDownloadStateMachine { /// Run and process all unconfirmed tenure downloads, and highest complete tenure downloads. /// Do the needful bookkeeping to remove dead peers. + /// Returns map of tenure IDs to blocks we fetched, plus whether or not we returned because we + /// were throttled fn download_unconfirmed_tenures( &mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, chainstate: &StacksChainState, highest_processed_block_id: Option<StacksBlockId>, - ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> { + ) -> (HashMap<ConsensusHash, Vec<NakamotoBlock>>, bool) { // queue up more downloaders self.update_unconfirmed_tenure_downloaders( + network.get_connection_opts(), usize::try_from(network.get_connection_opts().max_inflight_blocks) .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), highest_processed_block_id, @@ -1650,7 +1579,7 @@ impl NakamotoDownloadStateMachine { } } - coalesced_blocks + let tenure_blocks = coalesced_blocks .into_iter() .map(|(consensus_hash, block_map)| { let mut block_list: Vec<_> = block_map.into_values().collect(); block_list.sort_unstable_by_key(|blk| blk.header.chain_length); (consensus_hash, block_list) }) - .collect() + .collect(); + + (tenure_blocks, false) } /// Top-level download state machine execution. @@ -1697,6 +1628,16 @@ impl NakamotoDownloadStateMachine { ibd, ); + // check this now, since we mutate self.available + let need_unconfirmed_tenures = Self::need_unconfirmed_tenures( + burnchain_height, + &network.burnchain_tip, + &self.wanted_tenures, + self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), + &self.tenure_block_ids, + &self.available_tenures, + ); + match self.state { NakamotoDownloadState::Confirmed => { let new_blocks = self.download_confirmed_tenures( network, usize::try_from(network.get_connection_opts().max_inflight_blocks) .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), ); - // keep borrow-checker happy by instantiang this ref again, now that `network` is - no longer mutably borrowed.
- let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return HashMap::new(); - }; - - if self.tenure_downloads.is_empty() - && Self::need_unconfirmed_tenures( - self.nakamoto_start_height, - burnchain_height, - &network.burnchain_tip, - &self.tenure_downloads.completed_tenures, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - { + if self.tenure_downloads.is_empty() && need_unconfirmed_tenures { debug!( "Transition from {} to {}", &self.state, @@ -1749,7 +1669,7 @@ impl NakamotoDownloadStateMachine { &network.stacks_tip.block_hash, ); - let new_blocks = self.download_unconfirmed_tenures( + let (new_blocks, throttled) = self.download_unconfirmed_tenures( network, sortdb, chainstate, @@ -1760,13 +1680,10 @@ impl NakamotoDownloadStateMachine { }, ); - // keep borrow-checker happy by instantiang this ref again, now that `network` is - no longer mutably borrowed. - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return HashMap::new(); - }; + if throttled { + // stay in this state + return new_blocks; + } if !self.tenure_downloads.is_empty() { // need to go get this scheduled tenure @@ -1779,18 +1696,7 @@ impl NakamotoDownloadStateMachine { } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { - if Self::need_unconfirmed_tenures( - self.nakamoto_start_height, - burnchain_height, - &network.burnchain_tip, - &self.tenure_downloads.completed_tenures, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) { + if need_unconfirmed_tenures { // do this again self.unconfirmed_tenure_download_schedule = Self::make_unconfirmed_tenure_download_schedule( From 826e57106b91cebdb256ebe688e53006277a0657 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:45:32 -0400 Subject: [PATCH 292/910] fix: consistent start block height of a reward cycle --- stackslib/src/net/download/nakamoto/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index dd440ac110f..5f03c3811aa 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -176,6 +176,14 @@ pub use crate::net::download::nakamoto::tenure_downloader_unconfirmed::{ NakamotoUnconfirmedDownloadState, NakamotoUnconfirmedTenureDownloader, }; +pub fn downloader_block_height_to_reward_cycle( + pox_constants: &PoxConstants, + first_block_height: u64, + block_height: u64, +) -> Option<u64> { + pox_constants.block_height_to_reward_cycle(first_block_height, block_height.saturating_sub(1)) +} + impl PeerNetwork { /// Set up the Nakamoto block downloader pub fn init_nakamoto_block_downloader(&mut self) {
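The subtraction by one is the whole point of this helper: the first burn block of a reward cycle is deliberately filed under the previous cycle, which is what makes start heights "consistent" across the downloader. A rough standalone sketch of that convention, using made-up numbers (first burn block 100, 5-block cycles) rather than the real PoxConstants math:

    // Toy version of the height-to-cycle math with the one-block shift.
    fn downloader_reward_cycle(first_height: u64, cycle_len: u64, height: u64) -> Option<u64> {
        let shifted = height.saturating_sub(1);
        if shifted < first_height {
            return None; // before the system start
        }
        Some((shifted - first_height) / cycle_len)
    }

    fn main() {
        // height 105 is the first block of cycle 1, but the downloader
        // still files it under cycle 0...
        assert_eq!(downloader_reward_cycle(100, 5, 105), Some(0));
        // ...and only height 106 counts as cycle 1.
        assert_eq!(downloader_reward_cycle(100, 5, 106), Some(1));
    }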
From d1611024434347a4701c6256b81c81604132a727 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:45:48 -0400 Subject: [PATCH 293/910] fix: consistent block height to reward cycle --- stackslib/src/net/download/nakamoto/tenure.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index a2a3b3eddd3..4fb050e5919 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -51,6 +51,7 @@ use crate::core::{ use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::chat::ConversationP2P; use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::downloader_block_height_to_reward_cycle; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::inv::epoch2x::InvState; @@ -325,9 +326,12 @@ impl TenureStartEnd { wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, - pox_constants - .block_height_to_reward_cycle(first_burn_height, wt_start.burn_height) - .expect("FATAL: tenure from before system start"), + downloader_block_height_to_reward_cycle( + pox_constants, + first_burn_height, + wt_start.burn_height, + ) + .expect("FATAL: tenure from before system start"), wt.processed, ); tenure_start_end.fetch_end_block = true; From dd702947cd16956ad266512652c49ef86c821abc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:46:17 -0400 Subject: [PATCH 294/910] feat: return the tenure-end block when downloading a tenure so the chainstate can mark the tenure as complete --- .../download/nakamoto/tenure_downloader.rs | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index f7fb970bb6f..95d97f67d51 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -161,13 +161,11 @@ pub struct NakamotoTenureDownloader { pub state: NakamotoTenureDownloadState, /// Tenure-start block pub tenure_start_block: Option<NakamotoBlock>, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// Pre-stored tenure end block. /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once /// the start-block for the current tenure is downloaded. This is that start-block, which is /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. pub tenure_end_block: Option<NakamotoBlock>, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, /// Tenure blocks pub tenure_blocks: Option<Vec<NakamotoBlock>>, } @@ -195,7 +193,6 @@ impl NakamotoTenureDownloader { idle: false, state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), tenure_start_block: None, - tenure_end_header: None, tenure_end_block: None, tenure_blocks: None, } @@ -262,10 +259,7 @@ impl NakamotoTenureDownloader { ); self.tenure_start_block = Some(tenure_start_block); - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + if let Some(tenure_end_block) = self.tenure_end_block.take() { // we already have the tenure-end block, so immediately proceed to accept it. debug!( "Preemptively process tenure-end block {} for tenure {}", @@ -280,7 +274,7 @@ impl NakamotoTenureDownloader { ); self.try_accept_tenure_end_block(&tenure_end_block)?; } else { - // need to get tenure_end_header. By default, assume that another + // need to get tenure_end_block.
By default, assume that another // NakamotoTenureDownloader will provide this block, and allow the // NakamotoTenureDownloaderSet instance that manages a collection of these // state-machines make the call to require this one to fetch the block directly. @@ -411,12 +405,12 @@ impl NakamotoTenureDownloader { } debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + "Accepted tenure-end block for tenure {} block={}; expect {} blocks", &self.tenure_id_consensus_hash, &tenure_end_block.block_id(), tc_payload.previous_tenure_blocks ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.tenure_end_block = Some(tenure_end_block.clone()); self.state = NakamotoTenureDownloadState::GetTenureBlocks( tenure_end_block.header.parent_block_id.clone(), ); @@ -426,17 +420,27 @@ impl NakamotoTenureDownloader { /// Determine how many blocks must be in this tenure. /// Returns None if we don't have the start and end blocks yet. pub fn tenure_length(&self) -> Option<u64> { - self.tenure_end_header + self.tenure_end_block .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + .map(|tenure_end_block| { + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + return None; + }; + + Some(u64::from(tc_payload.previous_tenure_blocks)) + }) + .flatten() } /// Add downloaded tenure blocks to this machine. /// If we have collected all tenure blocks, then return them and transition to the Done state. /// /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. + /// ascending order by height, and will include both the tenure-start block and the tenure-end + /// block. Including the tenure-end block is necessary because processing it will mark this + /// tenure as "complete" in the chainstate, which will allow the downloader to deduce when all + /// confirmed tenures have been completely downloaded. + /// /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. /// Returns Err(..) if the blocks were invalid. @@ -487,7 +491,8 @@ impl NakamotoTenureDownloader { .map(|blocks| blocks.len()) .unwrap_or(0) .saturating_add(count) > self.tenure_length().unwrap_or(0).saturating_add(1) as usize + // + 1 due to the inclusion of the tenure-end block { // there are more blocks downloaded than indicated by the end-blocks tenure-change // transaction.
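A note on the accounting here: because the tenure-end block is now included in the result set, the expected total is previous_tenure_blocks plus one, which is exactly what the saturating_add(1) in the length check above encodes. A tiny illustrative sketch (toy function, not patch code):

    // Toy bound check: the downloaded count may not exceed the tenure length
    // plus the tenure-end block that is now included in the result set.
    fn too_many_blocks(downloaded: usize, previous_tenure_blocks: u64) -> bool {
        downloaded > previous_tenure_blocks.saturating_add(1) as usize
    }

    fn main() {
        // a tenure of 11 blocks plus its tenure-end block is fine...
        assert!(!too_many_blocks(12, 11));
        // ...but a 13th block indicates an invalid reply.
        assert!(too_many_blocks(13, 11));
    }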
@@ -503,6 +508,10 @@ impl NakamotoTenureDownloader { if let Some(blocks) = self.tenure_blocks.as_mut() { blocks.append(&mut tenure_blocks); } else { + // include tenure-end block + if let Some(tenure_end_block) = self.tenure_end_block.as_ref() { + tenure_blocks.insert(0, tenure_end_block.clone()); + } self.tenure_blocks = Some(tenure_blocks); } From 7d742f8a93aae6ad8e0eac8c1f5f3bebe31c7696 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:46:44 -0400 Subject: [PATCH 295/910] fix: consistent block height to reward cycle --- .../nakamoto/tenure_downloader_unconfirmed.rs | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index c96f718d2b9..ddfd35fa979 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -53,8 +53,8 @@ use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::chat::ConversationP2P; use crate::net::db::{LocalPeer, PeerDB}; use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, + downloader_block_height_to_reward_cycle, AvailableTenures, NakamotoTenureDownloader, + NakamotoTenureDownloaderSet, TenureStartEnd, WantedTenure, }; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; @@ -319,17 +319,18 @@ impl NakamotoUnconfirmedTenureDownloader { } // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); + let tenure_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions let Some(Some(confirmed_reward_set)) = current_reward_sets From 0eac1f89b207bb63c1e7c9fe893168571b0ee3e9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:46:58 -0400 Subject: [PATCH 296/910] feat: do a burst of inv syncs at around the start of a new tenure -- i.e. 
right after a sortition --- stackslib/src/net/inv/nakamoto.rs | 42 ++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index d01e8625a1a..f24ad1a87ce 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, HashMap}; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; @@ -515,8 +515,6 @@ impl NakamotoTenureInv { /// Get the burnchain tip reward cycle for purposes of inv sync fn get_current_reward_cycle(tip: &BlockSnapshot, sortdb: &SortitionDB) -> u64 { - // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but - // .block_height_to_reward_cycle does not account for this. sortdb .pox_constants .block_height_to_reward_cycle( @@ -537,6 +535,10 @@ pub struct NakamotoInvStateMachine<NC: NeighborComms> { reward_cycle_consensus_hashes: BTreeMap<u64, ConsensusHash>, /// last observed sortition tip last_sort_tip: Option<BlockSnapshot>, + /// deadline to stop inv sync burst + burst_deadline_ms: u128, + /// time we did our last burst + last_burst_ms: u128, } impl<NC: NeighborComms> NakamotoInvStateMachine<NC> { @@ -546,6 +548,8 @@ impl<NC: NeighborComms> NakamotoInvStateMachine<NC> { inventories: HashMap::new(), reward_cycle_consensus_hashes: BTreeMap::new(), last_sort_tip: None, + burst_deadline_ms: get_epoch_time_ms(), + last_burst_ms: get_epoch_time_ms(), } } @@ -805,20 +809,40 @@ impl NakamotoInvStateMachine { Ok((num_msgs, learned)) } + /// Do we need to do an inv sync burst? + /// This happens after `burst_interval` milliseconds have passed since we noticed the sortition + /// changed. + fn need_inv_burst(&self) -> bool { + self.burst_deadline_ms < get_epoch_time_ms() && self.last_burst_ms < self.burst_deadline_ms + } + /// Top-level state machine execution pub fn run(&mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, ibd: bool) -> bool { // if the burnchain tip has changed, then force all communications to reset for the current // reward cycle in order to hasten block download if let Some(last_sort_tip) = self.last_sort_tip.as_ref() { if last_sort_tip.consensus_hash != network.burnchain_tip.consensus_hash { - debug!("Forcibly restarting all Nakamoto inventory comms due to burnchain tip change ({} != {})", &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash); - let tip_rc = - NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); - for inv_state in self.inventories.values_mut() { - inv_state.reset_comms(tip_rc.saturating_sub(1)); - } + debug!( + "Sortition tip changed: {} != {}.
Configuring inventory burst", + &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash + ); + self.burst_deadline_ms = get_epoch_time_ms() + .saturating_add(network.connection_opts.nakamoto_inv_sync_burst_interval_ms); } } + if self.need_inv_burst() { + debug!("Forcibly restarting all Nakamoto inventory comms due to inventory burst"); + + let tip_rc = + NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); + for inv_state in self.inventories.values_mut() { + inv_state.reset_comms(tip_rc.saturating_sub(1)); + } + + self.last_burst_ms = get_epoch_time_ms() + .saturating_add(network.connection_opts.nakamoto_inv_sync_burst_interval_ms) + .max(self.burst_deadline_ms); + } if let Err(e) = self.process_getnakamotoinv_begins(network, sortdb, ibd) { warn!( From 1945b58a2dc73d583fe3172e068743c704e9751d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:47:24 -0400 Subject: [PATCH 297/910] fix: explicitly return pushed stackerdb chunks --- stackslib/src/net/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3ba4292f1c2..5cedc4e0680 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1484,6 +1484,8 @@ pub struct NetworkResult { pub uploaded_microblocks: Vec, /// chunks we received from the HTTP server pub uploaded_stackerdb_chunks: Vec, + /// chunks we received from p2p push + pub pushed_stackerdb_chunks: Vec, /// Atlas attachments we obtained pub attachments: Vec<(AttachmentInstance, Attachment)>, /// transactions we downloaded via a mempool sync @@ -1533,6 +1535,7 @@ impl NetworkResult { uploaded_blocks: vec![], uploaded_microblocks: vec![], uploaded_stackerdb_chunks: vec![], + pushed_stackerdb_chunks: vec![], attachments: vec![], synced_transactions: vec![], stacker_db_sync_results: vec![], @@ -1576,6 +1579,7 @@ impl NetworkResult { .fold(0, |acc, x| acc + x.chunks_to_store.len()) > 0 || self.uploaded_stackerdb_chunks.len() > 0 + || self.pushed_stackerdb_chunks.len() > 0 } pub fn transactions(&self) -> Vec { @@ -1639,6 +1643,9 @@ impl NetworkResult { .insert(neighbor_key.clone(), vec![(message.relayers, block_data)]); } } + StacksMessageType::StackerDBPushChunk(chunk_data) => { + self.pushed_stackerdb_chunks.push(chunk_data) + } _ => { // forward along if let Some(messages) = self.unhandled_messages.get_mut(&neighbor_key) { From f86b55f4bd71188c9419c942a27782a60e920ae2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:48:46 -0400 Subject: [PATCH 298/910] fix: don't connect to ourselves and cause the walk to fail; also, don't abort a walk because we can't find any always-allowed peers (use the seed peers in that case) --- stackslib/src/net/neighbors/db.rs | 17 +++++++++++++---- stackslib/src/net/neighbors/mod.rs | 2 +- stackslib/src/net/neighbors/walk.rs | 22 ++++++++++++++++++---- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index 5a40ac96776..c0e65a6f854 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -176,7 +176,8 @@ pub trait NeighborWalkDB { /// Get the number of peers in a given AS fn get_asn_count(&self, network: &PeerNetwork, asn: u32) -> u64; - /// Pick neighbors with a minimum age for a walk + /// Pick neighbors with a minimum age for a walk. + /// If there are none, then fall back to seed nodes. 
From f86b55f4bd71188c9419c942a27782a60e920ae2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:48:46 -0400 Subject: [PATCH 298/910] fix: don't connect to ourselves and cause the walk to fail; also, don't abort a walk because we can't find any always-allowed peers (use the seed peers in that case) --- stackslib/src/net/neighbors/db.rs | 17 +++++++++++++---- stackslib/src/net/neighbors/mod.rs | 2 +- stackslib/src/net/neighbors/walk.rs | 22 ++++++++++++++++++---- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index 5a40ac96776..c0e65a6f854 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -176,7 +176,8 @@ pub trait NeighborWalkDB { /// Get the number of peers in a given AS fn get_asn_count(&self, network: &PeerNetwork, asn: u32) -> u64; - /// Pick neighbors with a minimum age for a walk + /// Pick neighbors with a minimum age for a walk. + /// If there are none, then fall back to seed nodes. fn pick_walk_neighbors( network: &PeerNetwork, num_neighbors: u64, @@ -196,10 +197,18 @@ pub trait NeighborWalkDB { if neighbors.len() == 0 { debug!( - "{:?}: No neighbors available in the peer DB!", - network.get_local_peer() + "{:?}: No neighbors available in the peer DB newer than {}!", + network.get_local_peer(), + min_age ); - return Err(net_error::NoSuchNeighbor); + let seed_nodes = PeerDB::get_bootstrap_peers( + &network.peerdb_conn(), + network.get_local_peer().network_id, + )?; + if seed_nodes.len() == 0 { + return Err(net_error::NoSuchNeighbor); + } + return Ok(seed_nodes); } Ok(neighbors) } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 7e01a0c448f..6447a6ec00b 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -215,7 +215,7 @@ impl PeerNetwork { .count_connected_always_allowed_peers() .unwrap_or((0, 0)); - // always ensure we're connected to always-allowed outbound peers + // always ensure we're connected to always-allowed outbound peers other than ourselves let walk_res = if ibd || (num_always_connected == 0 && total_always_connected > 0) { // always connect to bootstrap peers if in IBD, or if we're not connected to an // always-allowed peer already diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 478f5c0e3df..e1207941e06 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -298,11 +298,25 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { network: &PeerNetwork, ibd: bool, ) -> Result<NeighborWalk<DB, NC>, net_error> { - let mut allowed_peers = db.get_initial_walk_neighbors(network, ibd)?; - let allowed_peer = if let Some(peer) = allowed_peers.pop() { + let allowed_peers = db.get_initial_walk_neighbors(network, ibd)?; + let allowed_peer_opt = allowed_peers.into_iter().find_map(|peer| { + if peer.public_key + == Secp256k1PublicKey::from_private(&network.get_local_peer().private_key) + { + None + } else { + Some(peer) + } + }); + + let allowed_peer = if let Some(peer) = allowed_peer_opt { peer } else { - // no allowed peers in DB. Try a different strategy + // no allowed peers in DB that aren't us. Try a different strategy + debug!( + "{:?}: No allowed peers in the DB that aren't us", + network.get_local_peer() + ); return Err(net_error::NotFoundError); }; @@ -401,7 +415,7 @@ impl<DB: NeighborWalkDB, NC: NeighborComms> NeighborWalk<DB, NC> { /// Instantiate a neighbor walk, but go straight to the pingback logic (i.e. we don't have any /// immediate neighbors). That is, try to connect and step to a node that connected to us. - /// The returned neighbor walk will be in the PingabckHandshakesBegin state. + /// The returned neighbor walk will be in the PingbackHandshakesBegin state. /// /// Returns the new walk, if we have any pingbacks to connect to. /// Returns NoSuchNeighbor if there are no pingbacks to choose from From c755092753ce5ed6aa268734c2eea830fbaa317c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:49:23 -0400 Subject: [PATCH 299/910] fix: allow the relayer to push stackerdb chunks --- stackslib/src/net/p2p.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4a52945521f..b467e80f463 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1420,6 +1420,9 @@ impl PeerNetwork { } Ok(all_neighbors.into_iter().collect()) } + StacksMessageType::StackerDBPushChunk(ref data) => { + Ok(self.sample_broadcast_peers(&relay_hints, data)?)
+ } StacksMessageType::Transaction(ref data) => { self.sample_broadcast_peers(&relay_hints, data) } @@ -4289,6 +4292,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { + debug!("Refresh reward cycle info for cycle {}", rc); let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? @@ -4313,6 +4317,7 @@ impl PeerNetwork { } } + debug!("Load reward cycle info for cycle {}", rc); let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, &tip_sn.sortition_id, From 565c806e02ddcc619b93e786708cd1f368f443f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:49:40 -0400 Subject: [PATCH 300/910] fix: push stackerdb chunks that we stored but did not have, and push nakamoto blocks if told to (don't look at the chainstate, since the caller stores the block before relaying it). This latter change fixes nakamoto block push --- stackslib/src/net/relay.rs | 162 ++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 56 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 04969099732..5f200f3f9de 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -29,8 +29,8 @@ use stacks_common::address::public_keys_to_address_hash; use stacks_common::codec::MAX_PAYLOAD_LEN; use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, SortitionId, StacksBlockId}; use stacks_common::types::{MempoolCollectionBehavior, StacksEpochId}; -use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::{Burnchain, BurnchainView}; use crate::chainstate::burn::db::sortdb::{ @@ -77,6 +77,10 @@ pub struct Relayer { connection_opts: ConnectionOptions, /// StackerDB connection stacker_dbs: StackerDBs, + /// Recently-sent Nakamoto blocks, so we don't keep re-sending them. + /// Maps to tenure ID and timestamp, so we can garbage-collect. 
+ /// Timestamp is in milliseconds + recently_sent_nakamoto_blocks: HashMap<StacksBlockId, (ConsensusHash, u128)>, } #[derive(Debug)] @@ -199,6 +203,20 @@ impl RelayPayload for StacksTransaction { } } +impl RelayPayload for StackerDBPushChunkData { + fn get_digest(&self) -> Sha512Trunc256Sum { + self.chunk_data.data_hash() + } + fn get_id(&self) -> String { + format!( + "StackerDBPushChunk(id={},ver={},data_hash={})", + &self.chunk_data.slot_id, + self.chunk_data.slot_version, + &self.chunk_data.data_hash() + ) + } +} + impl RelayerStats { pub fn new() -> RelayerStats { RelayerStats { @@ -509,6 +527,7 @@ impl Relayer { p2p: handle, connection_opts, stacker_dbs, + recently_sent_nakamoto_blocks: HashMap::new(), } } @@ -717,7 +736,7 @@ impl Relayer { block: &StacksBlock, download_time: u64, ) -> Result<bool, chainstate_error> { - debug!( + info!( "Handle incoming block {}/{}", consensus_hash, &block.block_hash() ); @@ -838,7 +857,7 @@ impl Relayer { obtained_method: NakamotoBlockObtainMethod, force_broadcast: bool, ) -> Result<bool, chainstate_error> { - debug!( + info!( "Handle incoming Nakamoto block {}/{} obtained via {}", &block.header.consensus_hash, &block.header.block_hash(), @@ -1010,7 +1029,7 @@ impl Relayer { let mut sort_handle = sortdb.index_handle(&tip.sortition_id); for block in blocks { let block_id = block.block_id(); - if let Err(e) = Self::process_new_nakamoto_block( + let accept = match Self::process_new_nakamoto_block( burnchain, sortdb, &mut sort_handle, @@ -1020,8 +1039,13 @@ impl Relayer { coord_comms, NakamotoBlockObtainMethod::Downloaded, ) { - warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); - } else { + Ok(x) => x, + Err(e) => { + warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); + continue; + } + }; + if accept { accepted.push(block); } } @@ -2314,8 +2338,11 @@ impl Relayer { } /// Process HTTP-uploaded stackerdb chunks. - /// They're already stored by the RPC handler, so just forward events for them. + /// They're already stored by the RPC handler, so all we have to do + /// is forward events for them and rebroadcast them (i.e. the fact that we stored them and got + /// this far at all means that they were novel, and thus potentially novel to our neighbors). pub fn process_uploaded_stackerdb_chunks( + &mut self, uploaded_chunks: Vec<StackerDBPushChunkData>, event_observer: Option<&dyn StackerDBEventDispatcher>, ) { @@ -2325,9 +2352,13 @@ for chunk in uploaded_chunks.into_iter() { debug!("Got uploaded StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), "slot_id" => chunk.chunk_data.slot_id, "slot_version" => chunk.chunk_data.slot_version); if let Some(events) = all_events.get_mut(&chunk.contract_id) { - events.push(chunk.chunk_data); + events.push(chunk.chunk_data.clone()); } else { - all_events.insert(chunk.contract_id.clone(), vec![chunk.chunk_data]); + all_events.insert(chunk.contract_id.clone(), vec![chunk.chunk_data.clone()]); + } + let msg = StacksMessageType::StackerDBPushChunk(chunk); + if let Err(e) = self.p2p.broadcast_message(vec![], msg) { + warn!("Failed to broadcast StackerDB chunk: {:?}", &e); } } for (contract_id, new_chunks) in all_events.into_iter() { @@ -2337,8 +2368,11 @@ } /// Process newly-arrived chunks obtained from a peer stackerdb replica.
+ /// Chunks that we store will be broadcast, since successful storage implies that they were new + /// to us (and thus might be new to our neighbors) pub fn process_stacker_db_chunks( - stackerdbs: &mut StackerDBs, + &mut self, + rc_consensus_hash: &ConsensusHash, stackerdb_configs: &HashMap<QualifiedContractIdentifier, StackerDBConfig>, sync_results: Vec<StackerDBSyncResult>, event_observer: Option<&dyn StackerDBEventDispatcher>, ) -> Result<(), Error> { let mut sync_results_map: HashMap<QualifiedContractIdentifier, Vec<StackerDBSyncResult>> = HashMap::new(); for sync_result in sync_results.into_iter() { - let sc = sync_result.contract_id.clone(); - if let Some(result_list) = sync_results_map.get_mut(&sc) { + if let Some(result_list) = sync_results_map.get_mut(&sync_result.contract_id) { result_list.push(sync_result); } else { - sync_results_map.insert(sc, vec![sync_result]); + sync_results_map.insert(sync_result.contract_id.clone(), vec![sync_result]); } } @@ -2360,7 +2393,7 @@ for (sc, sync_results) in sync_results_map.into_iter() { if let Some(config) = stackerdb_configs.get(&sc) { - let tx = stackerdbs.tx_begin(config.clone())?; + let tx = self.stacker_dbs.tx_begin(config.clone())?; for sync_result in sync_results.into_iter() { for chunk in sync_result.chunks_to_store.into_iter() { let md = chunk.get_slot_metadata(); @@ -2373,14 +2406,23 @@ "num_bytes" => chunk.data.len(), "error" => %e ); + continue; } else { debug!("Stored chunk"; "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), "slot_id" => md.slot_id, "slot_version" => md.slot_version); } if let Some(event_list) = all_events.get_mut(&sync_result.contract_id) { - event_list.push(chunk); + event_list.push(chunk.clone()); } else { - all_events.insert(sync_result.contract_id.clone(), vec![chunk]); + all_events.insert(sync_result.contract_id.clone(), vec![chunk.clone()]); + } + let msg = StacksMessageType::StackerDBPushChunk(StackerDBPushChunkData { + contract_id: sc.clone(), + rc_consensus_hash: rc_consensus_hash.clone(), + chunk_data: chunk, + }); + if let Err(e) = self.p2p.broadcast_message(vec![], msg) { + warn!("Failed to broadcast StackerDB chunk: {:?}", &e); } } }
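The control flow above is worth spelling out: a chunk is rebroadcast only if it stored successfully, because successful storage is what proves the chunk was novel (stale or duplicate chunks hit the `continue` and never reach the broadcast). A stripped-down sketch of that store-then-rebroadcast loop, with stand-in types:

    // Toy store-then-rebroadcast loop: only chunks that the local replica
    // accepts (i.e. novel ones) are forwarded to peers.
    fn store(seen: &mut Vec<String>, chunk: &str) -> bool {
        if seen.iter().any(|c| c == chunk) {
            return false; // stale or duplicate: do not rebroadcast
        }
        seen.push(chunk.to_string());
        true
    }

    fn main() {
        let mut seen = Vec::new();
        let mut broadcast = Vec::new();
        for chunk in ["a", "b", "a"] {
            if !store(&mut seen, chunk) {
                continue;
            }
            broadcast.push(chunk);
        }
        assert_eq!(broadcast, vec!["a", "b"]);
    }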
@@ -2401,27 +2443,24 @@ /// Process StackerDB chunks pushed to us. /// extract all StackerDBPushChunk messages from `unhandled_messages` pub fn process_pushed_stacker_db_chunks( - stackerdbs: &mut StackerDBs, + &mut self, + rc_consensus_hash: &ConsensusHash, stackerdb_configs: &HashMap<QualifiedContractIdentifier, StackerDBConfig>, - unhandled_messages: &mut HashMap<NeighborKey, Vec<StacksMessage>>, + stackerdb_chunks: Vec<StackerDBPushChunkData>, event_observer: Option<&dyn StackerDBEventDispatcher>, ) -> Result<(), Error> { // synthesize StackerDBSyncResults from each chunk - let mut sync_results = vec![]; - for (_nk, msgs) in unhandled_messages.iter_mut() { - msgs.retain(|msg| { - if let StacksMessageType::StackerDBPushChunk(data) = &msg.payload { - let sync_result = StackerDBSyncResult::from_pushed_chunk(data.clone()); - sync_results.push(sync_result); - false - } else { - true - } - }); - } + let sync_results = stackerdb_chunks + .into_iter() + .map(|chunk_data| { + debug!("Received pushed StackerDB chunk {:?}", &chunk_data); + let sync_result = StackerDBSyncResult::from_pushed_chunk(chunk_data); + sync_result + }) + .collect(); - Relayer::process_stacker_db_chunks( - stackerdbs, + self.process_stacker_db_chunks( + rc_consensus_hash, stackerdb_configs, sync_results, event_observer, @@ -2581,11 +2620,8 @@ &mut self, _local_peer: &LocalPeer, sortdb: &SortitionDB, - chainstate: &StacksChainState, accepted_blocks: Vec<AcceptedNakamotoBlocks>, - force_send: bool, ) { - // TODO: we don't relay HTTP-uploaded blocks :( debug!( "{:?}: relay {} sets of Nakamoto blocks", _local_peer, @@ -2612,8 +2648,11 @@ for blocks_and_relayers in accepted_blocks.into_iter() { let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; + if blocks.len() == 0 { + continue; + } - let relay_blocks: Vec<_> = blocks + let relay_blocks_set: HashMap<_, _> = blocks .into_iter() .filter(|blk| { // don't relay blocks for non-recent tenures if !relay_tenures.contains(&blk.header.consensus_hash) { test_debug!( "Do not relay {} -- {} is not recent", &blk.header.block_id(), &blk.header.consensus_hash ); return false; } - // don't relay blocks we already have. - // If we have a DB error in figuring this out, then don't relay by - // default (lest a faulty DB cause the node to spam the network).
- if !force_send - && chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&blk.block_id()) - .unwrap_or(true) + // don't relay blocks we've recently sent + if let Some((_ch, ts)) = self.recently_sent_nakamoto_blocks.get(&blk.block_id()) { - return false; + if ts + self.connection_opts.nakamoto_push_interval_ms + >= get_epoch_time_ms() + { + // too soon + test_debug!("Sent {} too recently; will not relay", &blk.block_id()); + return false; + } } true }) + .map(|blk| (blk.block_id(), blk)) .collect(); + let relay_blocks: Vec<_> = relay_blocks_set.into_values().collect(); + debug!( "{:?}: Forward {} Nakamoto blocks from {:?}", _local_peer, @@ -2651,12 +2693,16 @@ impl Relayer { continue; } - for _block in relay_blocks.iter() { - test_debug!( + for block in relay_blocks.iter() { + debug!( "{:?}: Forward Nakamoto block {}/{}", _local_peer, - &_block.header.consensus_hash, - &_block.header.block_hash() + &block.header.consensus_hash, + &block.header.block_hash() + ); + self.recently_sent_nakamoto_blocks.insert( + block.block_id(), + (block.header.consensus_hash.clone(), get_epoch_time_ms()), ); } @@ -2667,6 +2713,10 @@ impl Relayer { warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); } } + + // garbage-collect + self.recently_sent_nakamoto_blocks + .retain(|_blk_id, (ch, _ts)| relay_tenures.contains(ch)); } #[cfg_attr(test, mutants::skip)] @@ -2713,7 +2763,7 @@ impl Relayer { // relay if not IBD if !ibd && accepted_blocks.len() > 0 { - self.relay_epoch3_blocks(local_peer, sortdb, chainstate, accepted_blocks, false); + self.relay_epoch3_blocks(local_peer, sortdb, accepted_blocks); } num_new_nakamoto_blocks } @@ -2836,24 +2886,24 @@ impl Relayer { }; // push events for HTTP-uploaded stacker DB chunks - Relayer::process_uploaded_stackerdb_chunks( + self.process_uploaded_stackerdb_chunks( mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), ); // store downloaded stacker DB chunks - Relayer::process_stacker_db_chunks( - &mut self.stacker_dbs, + self.process_stacker_db_chunks( + &network_result.rc_consensus_hash, &network_result.stacker_db_configs, mem::replace(&mut network_result.stacker_db_sync_results, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; // store pushed stacker DB chunks - Relayer::process_pushed_stacker_db_chunks( - &mut self.stacker_dbs, + self.process_pushed_stacker_db_chunks( + &network_result.rc_consensus_hash, &network_result.stacker_db_configs, - &mut network_result.unhandled_messages, + mem::replace(&mut network_result.pushed_stackerdb_chunks, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; From d5e5f8c4055558db12f169f8a4e8b32f4531c953 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:50:23 -0400 Subject: [PATCH 301/910] chore: API sync --- stackslib/src/net/tests/download/nakamoto.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 57bd557186e..4110cae7ff0 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -278,13 +278,7 @@ fn test_nakamoto_tenure_downloader() { next_tenure_start_block.header.parent_block_id.clone() ) ); - assert_eq!( - td.tenure_end_header, - Some(( - next_tenure_start_block.header.clone(), - next_tenure_change_payload.clone() - )) - ); + assert_eq!(td.tenure_end_block, 
Some(next_tenure_start_block.clone())); assert_eq!(td.tenure_length(), Some(11)); let mut td_one_shot = td.clone(); @@ -314,14 +308,17 @@ fn test_nakamoto_tenure_downloader() { let res = td.try_accept_tenure_blocks(vec![tenure_start_block.clone()]); assert!(res.is_ok()); let res_blocks = res.unwrap().unwrap(); - assert_eq!(res_blocks.len(), blocks.len()); - assert_eq!(res_blocks, blocks); + assert_eq!(res_blocks.len(), blocks.len() + 1); // includes tenure-end block + + let mut all_blocks = blocks.clone(); + all_blocks.push(next_tenure_start_block.clone()); + assert_eq!(res_blocks, all_blocks); assert_eq!(td.state, NakamotoTenureDownloadState::Done); // also works if we give blocks in one shot let res = td_one_shot.try_accept_tenure_blocks(blocks.clone().into_iter().rev().collect()); assert!(res.is_ok()); - assert_eq!(res.unwrap().unwrap(), blocks); + assert_eq!(res.unwrap().unwrap(), all_blocks); assert_eq!(td_one_shot.state, NakamotoTenureDownloadState::Done); // TODO: From 5a9cd84ef04af356858e87a98dfbd0bd3e4b2b9e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:50:32 -0400 Subject: [PATCH 302/910] chore: API sync --- stackslib/src/net/tests/relay/nakamoto.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 4df31714741..3ab91c14c2a 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -302,12 +302,10 @@ impl SeedNode { peer.relayer.relay_epoch3_blocks( &local_peer, &sortdb, - &stacks_node.chainstate, vec![AcceptedNakamotoBlocks { relayers: vec![], blocks: blocks.clone(), }], - true, ); peer.sortdb = Some(sortdb); From a6b350442ccfef6f0be4b83698ef1e9b565453cc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:50:41 -0400 Subject: [PATCH 303/910] fix: honor poll_time_secs in config.burnchain --- testnet/stacks-node/src/run_loop/nakamoto.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 44a6c0fba90..a43854cc507 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -33,6 +33,7 @@ use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; use stacks_common::types::PublicKey; use stacks_common::util::hash::Hash160; +use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; use crate::burnchains::make_bitcoin_indexer; @@ -521,6 +522,7 @@ impl RunLoop { ); let mut last_tenure_sortition_height = 0; + let mut poll_deadline = 0; loop { if !globals.keep_running() { @@ -580,6 +582,12 @@ impl RunLoop { break; } + if poll_deadline > get_epoch_time_secs() { + sleep_ms(1_000); + continue; + } + poll_deadline = get_epoch_time_secs() + self.config().burnchain.poll_time_secs; + let (next_burnchain_tip, tip_burnchain_height) = match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, From 59fd0fd9ad718883aeff34b38219a63e0cde62f8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 20 Aug 2024 10:21:48 -0400 Subject: [PATCH 304/910] fix: Revert change to `BurnchainHeaderHash` serialization --- stacks-common/src/types/chainstate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index c5208d02f99..47d6c3c499b 100644 --- a/stacks-common/src/types/chainstate.rs +++ 
b/stacks-common/src/types/chainstate.rs @@ -30,11 +30,11 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; +#[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); impl_array_hexstring_fmt!(BurnchainHeaderHash); impl_byte_array_newtype!(BurnchainHeaderHash, u8, 32); -impl_byte_array_serde!(BurnchainHeaderHash); pub struct BlockHeaderHash(pub [u8; 32]); impl_array_newtype!(BlockHeaderHash, u8, 32); From 11de2a31ed901f75ba7979d1aa9bcb17cc5a5600 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 11:39:31 -0400 Subject: [PATCH 305/910] Fix signer_set_rollover. Use the old signer set when at a reward cycle boundary Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9c81c4dd4..fcf88f8a687 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -293,6 +293,8 @@ impl SignerTest { fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); + let old_reward_cycle = self.get_current_reward_cycle(); + self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response @@ -311,7 +313,16 @@ impl SignerTest { // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); - let reward_cycle = self.get_current_reward_cycle(); + let new_reward_cycle = self.get_current_reward_cycle(); + let reward_cycle = if new_reward_cycle != old_reward_cycle { + old_reward_cycle + } else { + new_reward_cycle + }; + info!( + "Verifying signatures against signers for reward cycle {:?}", + reward_cycle + ); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block @@ -2784,8 +2795,7 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle) - .saturating_add(1); + .reward_cycle_to_block_height(next_reward_cycle); info!("---- Mining to next reward set calculation -----"); signer_test.run_until_burnchain_height_nakamoto( From 51e46faef150987cc298b730fc836aa8e67e018c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 12:01:20 -0400 Subject: [PATCH 306/910] Simplify logic to ensure at reward cycle boundaries, the old reward cycle is used Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fcf88f8a687..fbea6e8f1e4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -293,7 +293,7 @@ impl SignerTest { fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); - let old_reward_cycle = self.get_current_reward_cycle(); + let reward_cycle = self.get_current_reward_cycle(); self.mine_nakamoto_block(timeout); @@ -312,13 +312,6 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // 
whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); info!( "Verifying signatures against signers for reward cycle {:?}", reward_cycle From 24c425f5922b0c144d4a5cf38aaf166384151026 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 13:34:31 -0400 Subject: [PATCH 307/910] Add a config option and wait a minimum number of seconds between mining blocks to prevent signers rejecting a block with same timestamp as its parent Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 12 ++++++++ .../stacks-node/src/nakamoto_node/miner.rs | 28 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 30a59903197..2e0d8e963c6 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,6 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; +const DEFAULT_MINIMUM_GAP_SECS: u64 = 1; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2358,6 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, + /// The minimum gap to wait between blocks in seconds. The value must be greater than or equal to 1 second because if a block is mined + /// within the same second as its parent, it will be rejected by the signers. + pub min_block_time_gap_secs: u64, } impl Default for MinerConfig { @@ -2389,6 +2393,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set + min_block_time_gap_secs: DEFAULT_MINIMUM_GAP_SECS, } } } @@ -2739,6 +2744,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option<u64>, pub wait_on_signers_ms: Option<u64>, pub pre_nakamoto_mock_signing: Option<bool>, + pub min_block_time_gap_secs: Option<u64>, } impl MinerConfigFile { @@ -2850,6 +2856,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set + min_block_time_gap_secs: self.min_block_time_gap_secs.map(|secs| if secs < DEFAULT_MINIMUM_GAP_SECS { + warn!("miner.min_block_time_gap_secs is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_SECS} secs.
Using the default value instead."); + DEFAULT_MINIMUM_GAP_SECS + } else { + secs + }).unwrap_or(miner_default_config.min_block_time_gap_secs), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 8036389d536..775aa46672a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,6 +45,7 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -317,6 +318,8 @@ impl BlockMinerThread { } } } + self.wait_min_time_between_blocks()?; + match self.mine_block(&stackerdbs) { Ok(x) => break Some(x), Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { @@ -1037,6 +1040,31 @@ impl BlockMinerThread { Some(vrf_proof) } + /// Wait the minimum time between blocks before mining a new block (if necessary) + /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. + fn wait_min_time_between_blocks(&self) -> Result<(), NakamotoNodeError> { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = + SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; + let time_since_parent_secs = get_epoch_time_secs() + .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp); + if time_since_parent_secs < self.config.miner.min_block_time_gap_secs { + let wait_secs = self + .config + .miner + .min_block_time_gap_secs + .saturating_sub(time_since_parent_secs); + info!("Waiting {wait_secs} seconds before mining a new block."); + std::thread::sleep(Duration::from_secs(wait_secs)); + } + Ok(()) + } + // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a From 71deeab452986eb80edf4d9a344d207725febe03 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 14:11:40 -0400 Subject: [PATCH 308/910] CRC: use ms precision for min_block_time config option Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 20 +++++++++---------- .../stacks-node/src/nakamoto_node/miner.rs | 19 +++++++++--------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 2e0d8e963c6..3f4e804f268 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MINIMUM_GAP_SECS: u64 = 1; +const DEFAULT_MINIMUM_GAP_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2359,9 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and 
From 71deeab452986eb80edf4d9a344d207725febe03 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 14:11:40 -0400 Subject: [PATCH 308/910] CRC: use ms precision for min_block_time config option Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 20 +++++++++---------- .../stacks-node/src/nakamoto_node/miner.rs | 19 +++++++++--------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 2e0d8e963c6..3f4e804f268 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MINIMUM_GAP_SECS: u64 = 1; +const DEFAULT_MINIMUM_GAP_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2359,9 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, - /// The minimum gap to wait between blocks in seconds. The value must be greater than or equal to 1 second because if a block is mined + /// The minimum gap to wait between blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. - pub min_block_time_gap_secs: u64, + pub min_block_time_gap_ms: u64, } impl Default for MinerConfig { @@ -2393,7 +2393,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set - min_block_time_gap_secs: DEFAULT_MINIMUM_GAP_SECS, + min_block_time_gap_ms: DEFAULT_MINIMUM_GAP_MS, } } } @@ -2744,7 +2744,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option<u64>, pub wait_on_signers_ms: Option<u64>, pub pre_nakamoto_mock_signing: Option<bool>, - pub min_block_time_gap_secs: Option<u64>, + pub min_block_time_gap_ms: Option<u64>, } impl MinerConfigFile { @@ -2856,12 +2856,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set - min_block_time_gap_secs: self.min_block_time_gap_secs.map(|secs| if secs < DEFAULT_MINIMUM_GAP_SECS { - warn!("miner.min_block_time_gap_secs is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_SECS} secs. Using the default value instead."); - DEFAULT_MINIMUM_GAP_SECS + min_block_time_gap_ms: self.min_block_time_gap_ms.map(|ms| if ms < DEFAULT_MINIMUM_GAP_MS { + warn!("miner.min_block_time_gap_ms is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_MS} ms.
Using the default value instead."); + DEFAULT_MINIMUM_GAP_MS } else { - secs - }).unwrap_or(miner_default_config.min_block_time_gap_secs), + ms + }).unwrap_or(miner_default_config.min_block_time_gap_ms), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 775aa46672a..9ed3412a1a4 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,8 +45,8 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; -use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; +use stacks::util::{get_epoch_time_secs, sleep_ms}; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -1051,16 +1051,17 @@ impl BlockMinerThread { let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; - let time_since_parent_secs = get_epoch_time_secs() - .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp); - if time_since_parent_secs < self.config.miner.min_block_time_gap_secs { - let wait_secs = self + let time_since_parent_ms = get_epoch_time_secs() + .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp) + / 1000; + if time_since_parent_ms < self.config.miner.min_block_time_gap_ms { + let wait_ms = self .config .miner - .min_block_time_gap_secs - .saturating_sub(time_since_parent_secs); - info!("Waiting {wait_secs} seconds before mining a new block."); - std::thread::sleep(Duration::from_secs(wait_secs)); + .min_block_time_gap_ms + .saturating_sub(time_since_parent_ms); + info!("Waiting {wait_ms} ms before mining a new block."); + sleep_ms(wait_ms); } Ok(()) } From 4d3236fe08ec35d93b5c4184d47931f6c3a6bd52 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 16:58:15 -0400 Subject: [PATCH 309/910] Add a test to check if min_time_between_blocks_ms config option works Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/config.rs | 18 +-- .../stacks-node/src/nakamoto_node/miner.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 117 +++++++++++++++++- 4 files changed, 133 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 901b9fc0400..27e76a646d6 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -98,6 +98,7 @@ jobs: - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks + - tests::signer::v0::min_gap_between_blocks - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3f4e804f268..d1b115d9cf5 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const 
From 4d3236fe08ec35d93b5c4184d47931f6c3a6bd52 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 16:58:15 -0400 Subject: [PATCH 309/910] Add a test to check if min_time_between_blocks_ms config option works Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/config.rs | 18 +-- .../stacks-node/src/nakamoto_node/miner.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 117 +++++++++++++++++- 4 files changed, 133 insertions(+), 13 deletions(-)
diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 901b9fc0400..27e76a646d6 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -98,6 +98,7 @@ jobs: - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks + - tests::signer::v0::min_gap_between_blocks - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state
diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3f4e804f268..d1b115d9cf5 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MINIMUM_GAP_MS: u64 = 1000; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2359,9 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, - /// The minimum gap to wait between blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined + /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. - pub min_block_time_gap_ms: u64, + pub min_time_between_blocks_ms: u64, } impl Default for MinerConfig { @@ -2393,7 +2393,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set - min_block_time_gap_ms: DEFAULT_MINIMUM_GAP_MS, + min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, } } } @@ -2744,7 +2744,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option<u64>, pub wait_on_signers_ms: Option<u64>, pub pre_nakamoto_mock_signing: Option<bool>, - pub min_block_time_gap_ms: Option<u64>, + pub min_time_between_blocks_ms: Option<u64>, } impl MinerConfigFile { @@ -2856,12 +2856,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set - min_block_time_gap_ms: self.min_block_time_gap_ms.map(|ms| if ms < DEFAULT_MINIMUM_GAP_MS { - warn!("miner.min_block_time_gap_ms is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_MS} ms. Using the default value instead."); - DEFAULT_MINIMUM_GAP_MS + min_time_between_blocks_ms: self.min_time_between_blocks_ms.map(|ms| if ms < DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS { + warn!("miner.min_time_between_blocks_ms is less than the minimum allowed value of {DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS} ms. Using the default value instead."); + DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS } else { ms - }).unwrap_or(miner_default_config.min_block_time_gap_ms), + }).unwrap_or(miner_default_config.min_time_between_blocks_ms), }) } }
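The `map`/`unwrap_or` chain above is a clamp-to-default rule: an unset option falls back to the default, and a below-minimum value is raised to it. A standalone sketch of the same logic (the free function is hypothetical; the patch implements this inside the `MinerConfigFile` conversion):

```rust
const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000;

/// Resolve the effective minimum gap from an optional config value.
fn effective_min_gap_ms(configured: Option<u64>) -> u64 {
    match configured {
        // Below the floor: clamp up to the default (the real code also warns).
        Some(ms) if ms < DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS => DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS,
        Some(ms) => ms,
        None => DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS,
    }
}

fn main() {
    assert_eq!(effective_min_gap_ms(None), 1000); // unset -> default
    assert_eq!(effective_min_gap_ms(Some(10)), 1000); // too small -> clamped
    assert_eq!(effective_min_gap_ms(Some(5000)), 5000); // accepted as-is
}
```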
diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 9ed3412a1a4..dc57ca16de3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1054,13 +1054,17 @@ impl BlockMinerThread { let time_since_parent_ms = get_epoch_time_secs() .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp) .saturating_mul(1000); - if time_since_parent_ms < self.config.miner.min_block_time_gap_ms { + if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { let wait_ms = self .config .miner - .min_block_time_gap_ms + .min_time_between_blocks_ms .saturating_sub(time_since_parent_ms); - info!("Waiting {wait_ms} ms before mining a new block."); + info!("Parent block mined {} ms ago, waiting {} ms before mining a new block", time_since_parent_ms, wait_ms; + "parent_block_id" => %parent_block_info.stacks_parent_header.index_block_hash(), + "parent_block_height" => parent_block_info.stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => parent_block_info.stacks_parent_header.burn_header_timestamp, + ); sleep_ms(wait_ms); } Ok(())
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fbea6e8f1e4..049dcad379b 100644 @@ -2788,7 +2788,8 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle); + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_add(1); info!("---- Mining to next reward set calculation -----"); signer_test.run_until_burnchain_height_nakamoto( @@ -2847,3 +2848,117 @@ fn signer_set_rollover() { assert!(signer.stop().is_none()); } } + +#[test] +#[ignore] +/// This test checks that the miner waits at least `min_time_between_blocks_ms` after its parent block before proposing the next block.
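+/// Rough expected timeline, assuming the 10_000 ms gap configured below (values are
+/// illustrative): the parent block lands at t = 0; for t < ~9 s no proposal should be
+/// observed; shortly after t = 10 s a proposal, and then a mined block, should appear.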
+fn min_gap_between_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let time_between_blocks_ms = 10_000; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + |config| { + config.miner.min_time_between_blocks_ms = time_between_blocks_ms; + }, + &[], + ); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + + // submit a tx so that the miner will mine a block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal. Ensure it does not arrive before the gap is exceeded"); + let start_time = Instant::now(); + while start_time.elapsed().as_millis() < (time_between_blocks_ms - 1000).into() { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + assert_eq!( + blocks_proposed, proposals_before, + "Block proposed before gap was exceeded" + ); + std::thread::sleep(Duration::from_millis(100)); + } + + let start_time = Instant::now(); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + break; + } + assert!( + start_time.elapsed().as_secs() < 30, + "Block not proposed after gap was exceeded within timeout" + ); + std::thread::sleep(Duration::from_millis(100)); + } + + debug!("Ensure that the block is mined after the gap is exceeded"); + + let start = Instant::now(); + let duration = 30; + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let info = get_chain_info(&signer_test.running_nodes.conf); + if blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height { + break; + } + + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info_before.stacks_tip_height, info.stacks_tip_height + ); + + std::thread::sleep(Duration::from_millis(100)); + assert!( + start.elapsed() < Duration::from_secs(duration), + "Block not mined within timeout" + ); + } + + signer_test.shutdown(); +} From 79005738bd018dc191116dc24c13e62c554b4798 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 20 Aug 2024 17:45:24 -0400 Subject: [PATCH 310/910] ci: Fix `mock_miner_replay()` --- testnet/stacks-node/src/tests/neon_integrations.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index b876257bf2d..0905fb1f60a 100644 --- 
a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12784,7 +12784,6 @@ fn mock_miner_replay() { // ---------- Test finished, clean up ---------- - btcd_controller.stop_bitcoind().unwrap(); miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); }
From 869b2e020cb6d697cb392de83dc1508064d5e7f5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 23:43:55 -0400 Subject: [PATCH 311/910] fix: when building a Bitcoin transaction that consumes multiple UTXOs, the partially-signed transaction needs to have all of the inputs present before we can begin signing them (since the sighash commits to the number of inputs). Fix this and add a unit test. --- .../burnchains/bitcoin_regtest_controller.rs | 197 +++++++++++++++++- testnet/stacks-node/src/keychain.rs | 1 - 2 files changed, 191 insertions(+), 7 deletions(-)
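The invariant named in this commit message can be shown with a toy model: a SIGHASH_ALL-style sighash commits to the entire input list, so a digest computed while inputs are still being appended is a digest over the wrong transaction. The types and hash below are illustrative stand-ins, not the controller's real API:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy transaction: just an input list; stands in for the real TxIn vector.
#[derive(Hash)]
struct ToyTx {
    inputs: Vec<u32>,
}

// Toy sighash: like SIGHASH_ALL, it commits to every input present.
fn toy_sighash(tx: &ToyTx) -> u64 {
    let mut h = DefaultHasher::new();
    tx.hash(&mut h);
    h.finish()
}

fn main() {
    // Buggy order: digest input 0's signature material before input 1 is appended.
    let mut tx = ToyTx { inputs: vec![0] };
    let premature_digest = toy_sighash(&tx);
    tx.inputs.push(1);

    // Fixed order: append every input first, then compute each digest.
    let complete_digest = toy_sighash(&tx);

    // The premature digest covers a different transaction, so a signature
    // derived from it would not verify against the finished one.
    assert_ne!(premature_digest, complete_digest);
}
```

This is why the diff below splits the single build-and-sign loop into one pass that pushes every `TxIn` and a second pass that signs them.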
diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 39ef40490b4..8977cd2923e 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1373,14 +1373,36 @@ impl BitcoinRegtestController { previous_fees: Option<LeaderBlockCommitFees>, previous_txids: &Vec<Txid>, ) -> Option<Transaction> { - let mut estimated_fees = match previous_fees { + let _ = self.sortdb_mut(); + let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + let estimated_fees = match previous_fees { Some(fees) => fees.fees_from_previous_tx(&payload, &self.config), None => LeaderBlockCommitFees::estimated_fees_from_payload(&payload, &self.config), }; - let _ = self.sortdb_mut(); - let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + self.send_block_commit_operation_at_burnchain_height( + epoch_id, + payload, + signer, + utxos_to_include, + utxos_to_exclude, + estimated_fees, + previous_txids, + burn_chain_tip.block_height, + ) + } + fn send_block_commit_operation_at_burnchain_height( + &mut self, + epoch_id: StacksEpochId, + payload: LeaderBlockCommitOp, + signer: &mut BurnchainOpSigner, + utxos_to_include: Option<UTXOSet>, + utxos_to_exclude: Option<UTXOSet>, + mut estimated_fees: LeaderBlockCommitFees, + previous_txids: &Vec<Txid>, + burnchain_block_height: u64, + ) -> Option<Transaction> { let public_key = signer.get_public_key(); let (mut tx, mut utxos) = self.prepare_tx( epoch_id, @@ -1388,7 +1410,7 @@ estimated_fees.estimated_amount_required(), utxos_to_include, utxos_to_exclude, - burn_chain_tip.block_height, + burnchain_block_height, )?; // Serialize the payload @@ -1817,7 +1839,7 @@ debug!("Not enough change to clear dust limit. Not adding change address."); } - for (i, utxo) in utxos_set.utxos.iter().enumerate() { + for (_i, utxo) in utxos_set.utxos.iter().enumerate() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -1828,7 +1850,8 @@ witness: vec![], }; tx.input.push(input); - + } + for (i, utxo) in utxos_set.utxos.iter().enumerate() { let script_pub_key = utxo.script_pub_key.clone(); let sig_hash_all = 0x01; @@ -2805,6 +2828,12 @@ mod tests { use std::fs::File; use std::io::Write; + use stacks::burnchains::BurnchainSigner; + use stacks_common::deps_common::bitcoin::blockdata::script::Builder; + use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; + use stacks_common::util::hash::to_hex; + use stacks_common::util::secp256k1::Secp256k1PrivateKey; + use super::*; use crate::config::DEFAULT_SATS_PER_VB; @@ -2825,4 +2854,160 @@ assert_eq!(get_satoshis_per_byte(&config), 51); } + + /// Verify that we can build a valid Bitcoin transaction with multiple UTXOs. + /// Taken from production data. + /// Tests `serialize_tx()` and `send_block_commit_operation_at_burnchain_height()` + #[test] + fn test_multiple_inputs() { + let spend_utxos = vec![ + UTXO { + txid: Sha256dHash::from_hex( + "d3eafb3aba3cec925473550ed2e4d00bcb0d00744bb3212e4a8e72878909daee", + ) + .unwrap(), + vout: 3, + script_pub_key: Builder::from( + hex_bytes("76a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac").unwrap(), + ) + .into_script(), + amount: 42051, + confirmations: 1421, + }, + UTXO { + txid: Sha256dHash::from_hex( + "01132f2d4a98cc715624e033214c8d841098a1ee15b30188ab89589a320b3b24", + ) + .unwrap(), + vout: 0, + script_pub_key: Builder::from( + hex_bytes("76a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac").unwrap(), + ) + .into_script(), + amount: 326456, + confirmations: 1421, + }, + ]; + + // test serialize_tx() + let mut config = Config::default(); + config.burnchain.magic_bytes = "T3".as_bytes().into(); + + let mut btc_controller = BitcoinRegtestController::new(config, None); + let mut utxo_set = UTXOSet { + bhh: BurnchainHeaderHash([0x01; 32]), + utxos: spend_utxos.clone(), + }; + let mut transaction = Transaction { + input: vec![], + output: vec![ + TxOut { + value: 0, + script_pubkey: Builder::from(hex_bytes("6a4c5054335be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a").unwrap()).into_script(), + }, + TxOut { + value: 10000, + script_pubkey: Builder::from(hex_bytes("76a914000000000000000000000000000000000000000088ac").unwrap()).into_script(), + }, + TxOut { + value: 10000, + script_pubkey: Builder::from(hex_bytes("76a914000000000000000000000000000000000000000088ac").unwrap()).into_script(), + }, + ], + version: 1, + lock_time: 0, + }; + + let mut signer = BurnchainOpSigner::new( + Secp256k1PrivateKey::from_hex( + "9e446f6b0c6a96cf2190e54bcd5a8569c3e386f091605499464389b8d4e0bfc201", + ) + .unwrap(), + false, + ); + assert!(btc_controller.serialize_tx( + StacksEpochId::Epoch25, + &mut transaction, + 44950, + &mut utxo_set, + &mut signer, + true + )); + assert_eq!(transaction.output[3].value, 323557); + + // test send_block_commit_operation_at_burn_height() + let utxo_set = UTXOSet { + bhh: BurnchainHeaderHash([0x01; 32]), + utxos: spend_utxos.clone(), + }; + + let commit_op = LeaderBlockCommitOp { + block_header_hash: BlockHeaderHash::from_hex( + "e88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32af", + ) + .unwrap(), + new_seed:
VRFSeed::from_hex( + "d5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375", + ) + .unwrap(), + parent_block_ptr: 2211, // 0x000008a3 + parent_vtxindex: 1, // 0x0001 + key_block_ptr: 1432, // 0x00000598 + key_vtxindex: 1, // 0x0001 + memo: vec![11], // 0x5a >> 3 + + burn_fee: 0, + input: (Txid([0x00; 32]), 0), + burn_parent_modulus: 2, // 0x5a & 0b111 + + apparent_sender: BurnchainSigner("mgbpit8FvkVJ9kuXY8QSM5P7eibnhcEMBk".to_string()), + commit_outs: vec![ + PoxAddress::Standard(StacksAddress::burn_address(false), None), + PoxAddress::Standard(StacksAddress::burn_address(false), None), + ], + + treatment: vec![], + sunset_burn: 0, + + txid: Txid([0x00; 32]), + vtxindex: 0, + block_height: 2212, + burn_header_hash: BurnchainHeaderHash([0x01; 32]), + }; + + assert_eq!(to_hex(&commit_op.serialize_to_vec()), "5be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a".to_string()); + + let leader_fees = LeaderBlockCommitFees { + sunset_fee: 0, + fee_rate: 50, + sortition_fee: 20000, + outputs_len: 2, + default_tx_size: 380, + spent_in_attempts: 0, + is_rbf_enabled: false, + final_size: 498, + }; + + assert_eq!(leader_fees.amount_per_output(), 10000); + assert_eq!(leader_fees.total_spent(), 44900); + + let block_commit = btc_controller + .send_block_commit_operation_at_burnchain_height( + StacksEpochId::Epoch30, + commit_op, + &mut signer, + Some(utxo_set), + None, + leader_fees, + &vec![], + 2212, + ) + .unwrap(); + + debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("{}", &SerializedTx::new(block_commit.clone()).to_hex()); + assert_eq!(block_commit.output[3].value, 323507); + + assert_eq!(&SerializedTx::new(block_commit.clone()).to_hex(), "0100000002eeda098987728e4a2e21b34b74000dcb0bd0e4d20e55735492ec3cba3afbead3030000006a4730440220558286e20e10ce31537f0625dae5cc62fac7961b9d2cf272c990de96323d7e2502202255adbea3d2e0509b80c5d8a3a4fe6397a87bcf18da1852740d5267d89a0cb20121035379aa40c02890d253cfa577964116eb5295570ae9f7287cbae5f2585f5b2c7cfdffffff243b0b329a5889ab8801b315eea19810848d4c2133e0245671cc984a2d2f1301000000006a47304402206d9f8de107f9e1eb15aafac66c2bb34331a7523260b30e18779257e367048d34022013c7dabb32a5c281aa00d405e2ccbd00f34f03a65b2336553a4acd6c52c251ef0121035379aa40c02890d253cfa577964116eb5295570ae9f7287cbae5f2585f5b2c7cfdffffff040000000000000000536a4c5054335be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a10270000000000001976a914000000000000000000000000000000000000000088ac10270000000000001976a914000000000000000000000000000000000000000088acb3ef0400000000001976a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac00000000"); + } } diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index c9ed722a9e6..b6df8549c41 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -206,7 +206,6 @@ impl Keychain { } /// Create a BurnchainOpSigner representation of this keychain - /// (this is going to be removed in 2.1) pub fn generate_op_signer(&self) -> BurnchainOpSigner { BurnchainOpSigner::new(self.get_secret_key(), false) } From 6ada69da3e6e5c983526856c8512235823b7f496 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 13:32:31 +0300 Subject: [PATCH 312/910] remove input mutants dispatch and automatically proceed from the action's context --- .github/workflows/pr-differences-mutants.yml | 15 
+++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index c109b69cfec..ca2bac5081a 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -10,14 +10,6 @@ on: paths: - '**.rs' workflow_dispatch: - inputs: - ignore_timeout: - description: "Ignore mutants timeout limit" - required: false - type: choice - options: - - true - default: 'true' concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -38,9 +30,8 @@ jobs: team: 'blockchain-team' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - - name: Fail if the user does not have the right permissions - if: ${{ inputs.ignore_timeout == 'true' && steps.check_right_permissions.outputs.is_team_member != 'true' }} - run: exit 1 + outputs: + ignore_timeout: ${{ steps.check_right_permissions.outputs.is_team_member == 'true' }} # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: @@ -63,7 +54,7 @@ jobs: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main with: - ignore_timeout: ${{ inputs.ignore_timeout }} + ignore_timeout: ${{ needs.check-right-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: From 18ed8d0116ce1422f314a8cfb09a48665ca78a47 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 13:35:54 +0300 Subject: [PATCH 313/910] add files for testing --- stackslib/src/net/download/nakamoto/mod.rs | 6 + .../nakamoto/tenure_downloader_copy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_opy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_set_copy.rs | 660 +++++++++++++ .../nakamoto/tenure_downloader_set_opy.rs | 660 +++++++++++++ .../tenure_downloader_unconfirmed_copy.rs | 867 ++++++++++++++++++ .../tenure_downloader_unconfirmed_opy.rs | 867 ++++++++++++++++++ 7 files changed, 4446 insertions(+) create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index dd440ac110f..7643c54ff7d 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,8 +161,14 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; +mod tenure_downloader_copy; +mod tenure_downloader_opy; mod tenure_downloader_set; +mod tenure_downloader_set_copy; +mod tenure_downloader_set_opy; mod tenure_downloader_unconfirmed; +mod tenure_downloader_unconfirmed_copy; +mod tenure_downloader_unconfirmed_opy; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs 
b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs new file mode 100644 index 00000000000..f7fb970bb6f --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is its block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotoTenureDownloader in the same reward cycle that can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock. + /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block.
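+///
+/// Informal sketch of the transitions described above:
+///
+///   GetTenureStartBlock -> WaitForTenureEndBlock -> GetTenureBlocks -> Done
+///   GetTenureStartBlock -> GetTenureBlocks          (end header supplied up front)
+///   WaitForTenureEndBlock -> GetTenureEndBlock      (timeout, or anchor-block case)
+///   GetTenureEndBlock -> GetTenureBlocks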
+/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. + pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. 
+ /// Returns Err(..) if it is not valid. + pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines to make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenure in this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloaderSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation.
+ pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). 
+ let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-end block: failed to validate tenure-start"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + if !valid { + warn!("Invalid tenure-end block: not a well-formed tenure-start block"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + } + + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + warn!("Invalid tenure-end block: no tenure-change transaction"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + // tc_payload must point to the tenure-start block's header + if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { + warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; + "start_block_id" => %tenure_start_block.block_id(), + "end_block_id" => %tenure_end_block.block_id(), + "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, + "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + &self.tenure_id_consensus_hash, + &tenure_end_block.block_id(), + tc_payload.previous_tenure_blocks + ); + self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.state = NakamotoTenureDownloadState::GetTenureBlocks( + tenure_end_block.header.parent_block_id.clone(), + ); + Ok(()) + } + + /// Determine how many blocks must be in this tenure. + /// Returns None if we don't have the start and end blocks yet. + pub fn tenure_length(&self) -> Option { + self.tenure_end_header + .as_ref() + .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + } + + /// Add downloaded tenure blocks to this machine. + /// If we have collected all tenure blocks, then return them and transition to the Done state. + /// + /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in + /// ascending order by height, and will include the tenure-start block but exclude the + /// tenure-end block. + /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. + /// Returns Err(..) if the blocks were invalid. 
+ pub fn try_accept_tenure_blocks( + &mut self, + mut tenure_blocks: Vec<NakamotoBlock>, + ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest + let mut expected_block_id = block_cursor; + let mut count = 0; + for block in tenure_blocks.iter() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + expected_block_id = &block.header.parent_block_id; + count += 1; + if self + .tenure_blocks + .as_ref() + .map(|blocks| blocks.len()) + .unwrap_or(0) + .saturating_add(count) + > self.tenure_length().unwrap_or(0) as usize + { + // there are more blocks downloaded than indicated by the end-blocks tenure-change + // transaction. + warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); + "tenure_id" => %self.tenure_id_consensus_hash, + "count" => %count, + "tenure_length" => self.tenure_length().unwrap_or(0), + "num_blocks" => tenure_blocks.len()); + return Err(NetError::InvalidMessage); + } + } + + if let Some(blocks) = self.tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.tenure_blocks = Some(tenure_blocks); + } + + // did we reach the tenure start block? + let Some(blocks) = self.tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got None)"); + return Err(NetError::InvalidState); + }; + + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no tenure-start block (infallible)"); + return Err(NetError::InvalidState); + }; + + debug!( + "Accepted tenure blocks for tenure {} cursor={} ({})", + &self.tenure_id_consensus_hash, &block_cursor, count + ); + if earliest_block.block_id() != tenure_start_block.block_id() { + // still have more blocks to download + let next_block_id = earliest_block.header.parent_block_id.clone(); + debug!( + "Need more blocks for tenure {} (went from {} to {}, next is {})", + &self.tenure_id_consensus_hash, + &block_cursor, + &earliest_block.block_id(), + &next_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + return Ok(None); + } + + // finished! + self.state = NakamotoTenureDownloadState::Done; + Ok(self + .tenure_blocks + .take() + .map(|blocks| blocks.into_iter().rev().collect())) + } + + /// Produce the next HTTP request that, when successfully executed, will fetch the data needed + /// to advance this state machine. + /// Not all states require an HTTP request for advancement.
+ /// + /// Returns Ok(Some(request)) if a request is needed + /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's + /// state) + /// Returns Err(()) if we're done. + pub fn make_next_download_request( + &self, + peerhost: PeerHost, + ) -> Result, ()> { + let request = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { + debug!("Request tenure-start block {}", &start_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { + // we're waiting for some other downloader's block-fetch to complete + debug!( + "Waiting for tenure-end block {} until {:?}", + &_block_id, _deadline + ); + return Ok(None); + } + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { + debug!("Request tenure-end block {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) + } + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { + debug!("Downloading tenure ending at {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) + } + NakamotoTenureDownloadState::Done => { + // nothing more to do + return Err(()); + } + }; + Ok(Some(request)) + } + + /// Begin the next download request for this state machine. The request will be sent to the + /// data URL corresponding to self.naddr. + /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. + /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) + /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to + /// resolve its data URL to a socket address. + pub fn send_next_download_request( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(true); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let request = match self.make_next_download_request(peerhost) { + Ok(Some(request)) => request, + Ok(None) => { + return Ok(true); + } + Err(_) => { + return Ok(false); + } + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + self.idle = false; + Ok(true) + } + + /// Handle a received StacksHttpResponse and advance the state machine. + /// If we get the full tenure's blocks, then return them. + /// Returns Ok(Some([blocks])) if we successfully complete the state machine. + /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done + /// yet. The caller should now call `send_next_download_request()` + /// Returns Err(..) on failure to process the response. 
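+ ///
+ /// Informal driver sketch (not code in this file): poll `send_next_download_request()`
+ /// until a response arrives for `self.naddr`, pass that response here, and repeat
+ /// until `is_done()` reports completion.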
+ pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + ) -> Result>, NetError> { + let handle_result = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + debug!( + "Got download response for tenure-start block {}", + &_block_id + ); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_start_block(block)?; + Ok(None) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + Err(NetError::InvalidState) + } + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { + debug!("Got download response to tenure-end block {}", &_block_id); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_end_block(&block)?; + Ok(None) + } + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + debug!( + "Got download response for tenure blocks ending at {}", + &_end_block_id + ); + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), + }; + self.idle = true; + handle_result + } + + pub fn is_done(&self) -> bool { + self.state == NakamotoTenureDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs new file mode 100644 index 00000000000..f7fb970bb6f --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is its block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotoTenureDownloader in the same reward cycle that can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock.
+ /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. 
+ pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid. 
+    pub fn try_accept_tenure_start_block(
+        &mut self,
+        tenure_start_block: NakamotoBlock,
+    ) -> Result<(), NetError> {
+        let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else {
+            // not the right state for this
+            warn!("Invalid state for this method";
+                  "state" => %self.state);
+            return Err(NetError::InvalidState);
+        };
+
+        if self.tenure_start_block_id != tenure_start_block.header.block_id() {
+            // not the block we were expecting
+            warn!("Invalid tenure-start block: unexpected";
+                  "tenure_id" => %self.tenure_id_consensus_hash,
+                  "tenure_id_start_block" => %self.tenure_start_block_id,
+                  "tenure_start_block ID" => %tenure_start_block.header.block_id(),
+                  "state" => %self.state);
+            return Err(NetError::InvalidMessage);
+        }
+
+        if let Err(e) = tenure_start_block
+            .header
+            .verify_signer_signatures(&self.start_signer_keys)
+        {
+            // signature verification failed
+            warn!("Invalid tenure-start block: bad signer signature";
+                  "tenure_id" => %self.tenure_id_consensus_hash,
+                  "block.header.block_id" => %tenure_start_block.header.block_id(),
+                  "state" => %self.state,
+                  "error" => %e);
+            return Err(NetError::InvalidMessage);
+        }
+
+        debug!(
+            "Accepted tenure-start block for tenure {} block={}",
+            &self.tenure_id_consensus_hash,
+            &tenure_start_block.block_id()
+        );
+        self.tenure_start_block = Some(tenure_start_block);
+
+        if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() {
+            // tenure_end_header supplied externally
+            self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone());
+        } else if let Some(tenure_end_block) = self.tenure_end_block.take() {
+            // we already have the tenure-end block, so immediately proceed to accept it.
+            debug!(
+                "Preemptively process tenure-end block {} for tenure {}",
+                tenure_end_block.block_id(),
+                &self.tenure_id_consensus_hash
+            );
+            self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
+                tenure_end_block.block_id(),
+                Instant::now()
+                    .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
+                    .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
+            );
+            self.try_accept_tenure_end_block(&tenure_end_block)?;
+        } else {
+            // need to get tenure_end_header. By default, assume that another
+            // NakamotoTenureDownloader will provide this block, and allow the
+            // NakamotoTenureDownloaderSet instance that manages a collection of these
+            // state-machines to make the call to require this one to fetch the block directly.
+            self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
+                self.tenure_end_block_id.clone(),
+                Instant::now()
+                    .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
+                    .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
+            );
+        }
+        Ok(())
+    }
+
+    /// Transition this state-machine from waiting for its tenure-end block from another
+    /// state-machine to directly fetching it. This only needs to happen if the tenure this state
+    /// machine is downloading contains the PoX anchor block, and it's also the last confirmed
+    /// tenure in this reward cycle.
+    ///
+    /// This function is called by `NakamotoTenureDownloaderSet`, which instantiates, schedules, and
+    /// runs a set of these machines based on the peers' inventory vectors. But because we don't
+    /// know if this is the PoX anchor block tenure (or even the last tenure) until we have
+    /// inventory vectors for this tenure's reward cycle, this state-transition must be driven
+    /// after this machine's instantiation.
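+    ///
+    /// Sketch of the intended call pattern (the driving logic lives in
+    /// `NakamotoTenureDownloaderSet::try_transition_fetch_tenure_end_blocks()`; the
+    /// `must_fetch_directly` flag below is a hypothetical stand-in for that set-level decision):
+    /// ```ignore
+    /// if downloader.is_waiting() && must_fetch_directly {
+    ///     // Flips WaitForTenureEndBlock(..) into GetTenureEndBlock(..)
+    ///     downloader.transition_to_fetch_end_block()?;
+    /// }
+    /// ```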
+ pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). 
+        let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else {
+            warn!("Invalid tenure-end block: failed to validate tenure-start";
+                  "block_id" => %tenure_end_block.block_id());
+            return Err(NetError::InvalidMessage);
+        };
+
+        if !valid {
+            warn!("Invalid tenure-end block: not a well-formed tenure-start block";
+                  "block_id" => %tenure_end_block.block_id());
+            return Err(NetError::InvalidMessage);
+        }
+
+        let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else {
+            warn!("Invalid tenure-end block: no tenure-change transaction";
+                  "block_id" => %tenure_end_block.block_id());
+            return Err(NetError::InvalidMessage);
+        };
+
+        // tc_payload must point to the tenure-start block's header
+        if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash {
+            warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block";
+                  "start_block_id" => %tenure_start_block.block_id(),
+                  "end_block_id" => %tenure_end_block.block_id(),
+                  "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash,
+                  "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        debug!(
+            "Accepted tenure-end header for tenure {} block={}; expect {} blocks",
+            &self.tenure_id_consensus_hash,
+            &tenure_end_block.block_id(),
+            tc_payload.previous_tenure_blocks
+        );
+        self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone()));
+        self.state = NakamotoTenureDownloadState::GetTenureBlocks(
+            tenure_end_block.header.parent_block_id.clone(),
+        );
+        Ok(())
+    }
+
+    /// Determine how many blocks must be in this tenure.
+    /// Returns None if we don't have the start and end blocks yet.
+    pub fn tenure_length(&self) -> Option<u64> {
+        self.tenure_end_header
+            .as_ref()
+            .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks))
+    }
+
+    /// Add downloaded tenure blocks to this machine.
+    /// If we have collected all tenure blocks, then return them and transition to the Done state.
+    ///
+    /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in
+    /// ascending order by height, and will include the tenure-start block but exclude the
+    /// tenure-end block.
+    /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to
+    /// the next block to fetch (stored in self.state) will be updated.
+    /// Returns Err(..) if the blocks were invalid.
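+    ///
+    /// Illustrative driving loop (a sketch only; `fetch_next_batch` is a hypothetical helper
+    /// that performs the HTTP round-trip for the current block cursor):
+    /// ```ignore
+    /// let tenure = loop {
+    ///     let batch = fetch_next_batch(&downloader)?;
+    ///     match downloader.try_accept_tenure_blocks(batch)? {
+    ///         Some(blocks) => break blocks, // ascending order; includes the tenure-start block
+    ///         None => continue,             // cursor advanced; fetch the next (lower) batch
+    ///     }
+    /// };
+    /// ```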
+    pub fn try_accept_tenure_blocks(
+        &mut self,
+        mut tenure_blocks: Vec<NakamotoBlock>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
+            warn!("Invalid state for this method";
+                  "state" => %self.state);
+            return Err(NetError::InvalidState);
+        };
+
+        if tenure_blocks.is_empty() {
+            // nothing to do
+            return Ok(None);
+        }
+
+        // blocks must be contiguous and in order from highest to lowest
+        let mut expected_block_id = block_cursor;
+        let mut count = 0;
+        for block in tenure_blocks.iter() {
+            if &block.header.block_id() != expected_block_id {
+                warn!("Unexpected Nakamoto block -- not part of tenure";
+                      "expected_block_id" => %expected_block_id,
+                      "block_id" => %block.header.block_id(),
+                      "state" => %self.state);
+                return Err(NetError::InvalidMessage);
+            }
+
+            if let Err(e) = block
+                .header
+                .verify_signer_signatures(&self.start_signer_keys)
+            {
+                warn!("Invalid block: bad signer signature";
+                      "tenure_id" => %self.tenure_id_consensus_hash,
+                      "block.header.block_id" => %block.header.block_id(),
+                      "state" => %self.state,
+                      "error" => %e);
+                return Err(NetError::InvalidMessage);
+            }
+
+            expected_block_id = &block.header.parent_block_id;
+            count += 1;
+            if self
+                .tenure_blocks
+                .as_ref()
+                .map(|blocks| blocks.len())
+                .unwrap_or(0)
+                .saturating_add(count)
+                > self.tenure_length().unwrap_or(0) as usize
+            {
+                // there are more blocks downloaded than indicated by the end-block's tenure-change
+                // transaction.
+                warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0);
+                      "tenure_id" => %self.tenure_id_consensus_hash,
+                      "count" => %count,
+                      "tenure_length" => self.tenure_length().unwrap_or(0),
+                      "num_blocks" => tenure_blocks.len());
+                return Err(NetError::InvalidMessage);
+            }
+        }
+
+        if let Some(blocks) = self.tenure_blocks.as_mut() {
+            blocks.append(&mut tenure_blocks);
+        } else {
+            self.tenure_blocks = Some(tenure_blocks);
+        }
+
+        // did we reach the tenure start block?
+        let Some(blocks) = self.tenure_blocks.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got None)");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(earliest_block) = blocks.last() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no tenure-start block (infallible)");
+            return Err(NetError::InvalidState);
+        };
+
+        debug!(
+            "Accepted tenure blocks for tenure {} cursor={} ({})",
+            &self.tenure_id_consensus_hash, &block_cursor, count
+        );
+        if earliest_block.block_id() != tenure_start_block.block_id() {
+            // still have more blocks to download
+            let next_block_id = earliest_block.header.parent_block_id.clone();
+            debug!(
+                "Need more blocks for tenure {} (went from {} to {}, next is {})",
+                &self.tenure_id_consensus_hash,
+                &block_cursor,
+                &earliest_block.block_id(),
+                &next_block_id
+            );
+            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
+            return Ok(None);
+        }
+
+        // finished!
+        self.state = NakamotoTenureDownloadState::Done;
+        Ok(self
+            .tenure_blocks
+            .take()
+            .map(|blocks| blocks.into_iter().rev().collect()))
+    }
+
+    /// Produce the next HTTP request that, when successfully executed, will fetch the data needed
+    /// to advance this state machine.
+    /// Not all states require an HTTP request for advancement.
+    ///
+    /// Returns Ok(Some(request)) if a request is needed
+    /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's
+    /// state)
+    /// Returns Err(()) if we're done.
+    pub fn make_next_download_request(
+        &self,
+        peerhost: PeerHost,
+    ) -> Result<Option<StacksHttpRequest>, ()> {
+        let request = match self.state {
+            NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => {
+                debug!("Request tenure-start block {}", &start_block_id);
+                StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
+            }
+            NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => {
+                // we're waiting for some other downloader's block-fetch to complete
+                debug!(
+                    "Waiting for tenure-end block {} until {:?}",
+                    &_block_id, _deadline
+                );
+                return Ok(None);
+            }
+            NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => {
+                debug!("Request tenure-end block {}", &end_block_id);
+                StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
+            }
+            NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => {
+                debug!("Downloading tenure ending at {}", &end_block_id);
+                StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
+            }
+            NakamotoTenureDownloadState::Done => {
+                // nothing more to do
+                return Err(());
+            }
+        };
+        Ok(Some(request))
+    }
+
+    /// Begin the next download request for this state machine. The request will be sent to the
+    /// data URL corresponding to self.naddr.
+    /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The
+    /// caller should try this again until it gets one of the other possible return values.
+    /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken)
+    /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to
+    /// resolve its data URL to a socket address.
+    pub fn send_next_download_request(
+        &mut self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> Result<bool, NetError> {
+        if neighbor_rpc.has_inflight(&self.naddr) {
+            debug!("Peer {} has an inflight request", &self.naddr);
+            return Ok(true);
+        }
+        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+            return Err(NetError::PeerNotConnected);
+        }
+
+        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+            // no conversation open to this neighbor
+            neighbor_rpc.add_dead(network, &self.naddr);
+            return Err(NetError::PeerNotConnected);
+        };
+
+        let request = match self.make_next_download_request(peerhost) {
+            Ok(Some(request)) => request,
+            Ok(None) => {
+                return Ok(true);
+            }
+            Err(_) => {
+                return Ok(false);
+            }
+        };
+
+        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+        self.idle = false;
+        Ok(true)
+    }
+
+    /// Handle a received StacksHttpResponse and advance the state machine.
+    /// If we get the full tenure's blocks, then return them.
+    /// Returns Ok(Some([blocks])) if we successfully complete the state machine.
+    /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done
+    /// yet. The caller should now call `send_next_download_request()`
+    /// Returns Err(..) on failure to process the response.
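+    ///
+    /// The request/response cycle, sketched (the `network`, `neighbor_rpc`, and `response`
+    /// bindings are assumed to come from the surrounding p2p state machine):
+    /// ```ignore
+    /// downloader.send_next_download_request(&mut network, &mut neighbor_rpc)?;
+    /// // ... later, once the peer's reply has been collected ...
+    /// if let Some(blocks) = downloader.handle_next_download_response(response)? {
+    ///     // the full tenure was obtained; hand the blocks to the relayer
+    /// }
+    /// ```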
+    pub fn handle_next_download_response(
+        &mut self,
+        response: StacksHttpResponse,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let handle_result = match self.state {
+            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
+                debug!(
+                    "Got download response for tenure-start block {}",
+                    &_block_id
+                );
+                let block = response.decode_nakamoto_block().map_err(|e| {
+                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+                    e
+                })?;
+                self.try_accept_tenure_start_block(block)?;
+                Ok(None)
+            }
+            NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => {
+                debug!("Invalid state -- Got download response for WaitForTenureEndBlock");
+                Err(NetError::InvalidState)
+            }
+            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
+                debug!("Got download response to tenure-end block {}", &_block_id);
+                let block = response.decode_nakamoto_block().map_err(|e| {
+                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
+                    e
+                })?;
+                self.try_accept_tenure_end_block(&block)?;
+                Ok(None)
+            }
+            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
+                debug!(
+                    "Got download response for tenure blocks ending at {}",
+                    &_end_block_id
+                );
+                let blocks = response.decode_nakamoto_tenure().map_err(|e| {
+                    warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
+                    e
+                })?;
+                let blocks_opt = self.try_accept_tenure_blocks(blocks)?;
+                Ok(blocks_opt)
+            }
+            NakamotoTenureDownloadState::Done => Err(NetError::InvalidState),
+        };
+        self.idle = true;
+        handle_result
+    }
+
+    pub fn is_done(&self) -> bool {
+        self.state == NakamotoTenureDownloadState::Done
+    }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
new file mode 100644
index 00000000000..28a40e7eb50
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs
@@ -0,0 +1,660 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo};
+use crate::chainstate::nakamoto::{
+    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+    AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader,
+    NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// A set of confirmed downloader state machines assigned to one or more neighbors. The block
+/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure
+/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer
+/// connections to downloader state machines, such that each peer is assigned to at most one
+/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at
+/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine
+/// can make progress even if there is only one available peer (in which case, that peer will get
+/// scheduled across multiple machines to drive their progress in the right sequence such that
+/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer).
+pub struct NakamotoTenureDownloaderSet {
+    /// A list of instantiated downloaders that are in progress
+    pub(crate) downloaders: Vec<Option<NakamotoTenureDownloader>>,
+    /// An assignment of peers to downloader machines in the `downloaders` list.
+    pub(crate) peers: HashMap<NeighborAddress, usize>,
+    /// The set of tenures that have been successfully downloaded (but possibly not yet stored or
+    /// processed)
+    pub(crate) completed_tenures: HashSet<ConsensusHash>,
+}
+
+impl NakamotoTenureDownloaderSet {
+    pub fn new() -> Self {
+        Self {
+            downloaders: vec![],
+            peers: HashMap::new(),
+            completed_tenures: HashSet::new(),
+        }
+    }
+
+    /// Assign the given peer to the given downloader state machine. Allocate a slot for it if
+    /// needed.
+    fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) {
+        debug!(
+            "Add downloader for tenure {} driven by {}",
+            &downloader.tenure_id_consensus_hash, &naddr
+        );
+        if let Some(idx) = self.peers.get(&naddr) {
+            self.downloaders[*idx] = Some(downloader);
+        } else {
+            self.downloaders.push(Some(downloader));
+            self.peers.insert(naddr, self.downloaders.len() - 1);
+        }
+    }
+
+    /// Does the given neighbor have an assigned downloader state machine?
+    pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool {
+        let Some(idx) = self.peers.get(naddr) else {
+            return false;
+        };
+        let Some(downloader_opt) = self.downloaders.get(*idx) else {
+            return false;
+        };
+        downloader_opt.is_some()
+    }
+
+    /// Drop the downloader associated with the given neighbor, if any.
+    pub fn clear_downloader(&mut self, naddr: &NeighborAddress) {
+        let Some(index) = self.peers.remove(naddr) else {
+            return;
+        };
+        self.downloaders[index] = None;
+    }
+
+    /// How many downloaders are there?
+    pub fn num_downloaders(&self) -> usize {
+        self.downloaders
+            .iter()
+            .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc })
+    }
+
+    /// How many downloaders are there, which are scheduled?
+    pub fn num_scheduled_downloaders(&self) -> usize {
+        let mut cnt = 0;
+        for (_, idx) in self.peers.iter() {
+            if let Some(Some(_)) = self.downloaders.get(*idx) {
+                cnt += 1;
+            }
+        }
+        cnt
+    }
+
+    /// Add a sequence of (address, downloader) pairs to this downloader set.
+    pub(crate) fn add_downloaders(
+        &mut self,
+        iter: impl IntoIterator<Item = (NeighborAddress, NakamotoTenureDownloader)>,
+    ) {
+        for (naddr, downloader) in iter {
+            if self.has_downloader(&naddr) {
+                debug!("Already have downloader for {}", &naddr);
+                continue;
+            }
+            self.add_downloader(naddr, downloader);
+        }
+    }
+
+    /// Count up the number of in-flight messages, based on the states of each instantiated
+    /// downloader.
+    pub fn inflight(&self) -> usize {
+        let mut cnt = 0;
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.idle {
+                continue;
+            }
+            if downloader.is_waiting() {
+                continue;
+            }
+            if downloader.is_done() {
+                continue;
+            }
+            cnt += 1;
+        }
+        cnt
+    }
+
+    /// Determine whether or not there exists a downloader for the given tenure, identified by its
+    /// consensus hash.
+    pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool {
+        self.downloaders
+            .iter()
+            .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch))
+            .is_some()
+    }
+
+    /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders.
+    pub fn is_empty(&self) -> bool {
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.is_done() {
+                continue;
+            }
+            debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state);
+            return false;
+        }
+        true
+    }
+
+    /// Try to resume processing a download state machine with a given peer.
+    /// Since a peer is detached from the machine after a single RPC call, this call is needed to
+    /// re-attach it to a (potentially different, unblocked) machine for the next RPC call to this
+    /// peer.
+    ///
+    /// Returns true if the peer gets scheduled.
+    /// Returns false if not.
+    pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool {
+        debug!("Try resume {}", &naddr);
+        if let Some(idx) = self.peers.get(&naddr) {
+            let Some(Some(_downloader)) = self.downloaders.get(*idx) else {
+                return false;
+            };
+
+            debug!(
+                "Peer {} already bound to downloader for {}",
+                &naddr, &_downloader.tenure_id_consensus_hash
+            );
+            return true;
+        }
+        for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if !downloader.idle {
+                continue;
+            }
+            if downloader.is_waiting() {
+                continue;
+            }
+            if downloader.naddr != naddr {
+                continue;
+            }
+            debug!(
+                "Assign peer {} to work on downloader for {} in state {}",
+                &naddr, &downloader.tenure_id_consensus_hash, &downloader.state
+            );
+            self.peers.insert(naddr, i);
+            return true;
+        }
+        return false;
+    }
+
+    /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to
+    /// blocked downloaders.
+    pub fn clear_available_peers(&mut self) {
+        let mut idled: Vec<NeighborAddress> = vec![];
+        for (naddr, i) in self.peers.iter() {
+            let Some(downloader_opt) = self.downloaders.get(*i) else {
+                // should be unreachable
+                idled.push(naddr.clone());
+                continue;
+            };
+            let Some(downloader) = downloader_opt else {
+                debug!("Remove peer {} for null download {}", &naddr, i);
+                idled.push(naddr.clone());
+                continue;
+            };
+            if downloader.idle || downloader.is_waiting() {
+                debug!(
+                    "Remove idled peer {} for tenure download {}",
+                    &naddr, &downloader.tenure_id_consensus_hash
+                );
+                idled.push(naddr.clone());
+            }
+        }
+        for naddr in idled.into_iter() {
+            self.peers.remove(&naddr);
+        }
+    }
+
+    /// Clear out downloaders (but not their peers) that have finished. The caller should follow
+    /// this up with a call to `clear_available_peers()`.
+    pub fn clear_finished_downloaders(&mut self) {
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.is_done() {
+                *downloader_opt = None;
+            }
+        }
+    }
+
+    /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These
+    /// will be fed into other downloaders which are blocked on needing their tenure-end blocks.
+    pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap<StacksBlockId, NakamotoBlock> {
+        let mut ret = HashMap::new();
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            let Some(block) = downloader.tenure_start_block.as_ref() else {
+                continue;
+            };
+            ret.insert(block.block_id(), block.clone());
+        }
+        ret
+    }
+
+    /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their
+    /// tenure-end blocks.
+    /// Return a list of peers driving downloaders with failing `tenure_start_blocks`
+    pub(crate) fn handle_tenure_end_blocks(
+        &mut self,
+        tenure_start_blocks: &HashMap<StacksBlockId, NakamotoBlock>,
+    ) -> Vec<NeighborAddress> {
+        debug!(
+            "handle tenure-end blocks: {:?}",
+            &tenure_start_blocks.keys().collect::<Vec<_>>()
+        );
+        let mut dead = vec![];
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) =
+                &downloader.state
+            else {
+                continue;
+            };
+            let Some(end_block) = tenure_start_blocks.get(end_block_id) else {
+                continue;
+            };
+            if let Err(e) = downloader.try_accept_tenure_end_block(end_block) {
+                warn!(
+                    "Failed to accept tenure end-block {} for tenure {}: {:?}",
+                    &end_block.block_id(),
+                    &downloader.tenure_id_consensus_hash,
+                    &e
+                );
+                dead.push(downloader.naddr.clone());
+            }
+        }
+        dead
+    }
+
+    /// Does there exist a downloader (possibly unscheduled) for the given tenure?
+    pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool {
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if &downloader.tenure_id_consensus_hash == tenure_id {
+                debug!(
+                    "Have downloader for tenure {} already (idle={}, waiting={}, state={})",
+                    tenure_id,
+                    downloader.idle,
+                    downloader.is_waiting(),
+                    &downloader.state
+                );
+                return true;
+            }
+        }
+        false
+    }
+
+    /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor
+    /// block, we need to go and directly fetch its end block instead of waiting for another
+    /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method
+    /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block.
+    pub(crate) fn try_transition_fetch_tenure_end_blocks(
+        &mut self,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+    ) {
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt.as_mut() else {
+                continue;
+            };
+            downloader.transition_to_fetch_end_block_on_timeout();
+        }
+
+        // find tenures in which we need to fetch the tenure-end block directly.
+        let mut last_available_tenures: HashSet<StacksBlockId> = HashSet::new();
+        for (_, all_available) in tenure_block_ids.iter() {
+            for (_, available) in all_available.iter() {
+                if available.fetch_end_block {
+                    last_available_tenures.insert(available.end_block_id.clone());
+                }
+            }
+        }
+
+        // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to
+        // fetching
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt.as_mut() else {
+                continue;
+            };
+            if !downloader.idle {
+                continue;
+            }
+            if !downloader.is_waiting() {
+                continue;
+            }
+            if !last_available_tenures.contains(&downloader.tenure_end_block_id) {
+                continue;
+            }
+            debug!(
+                "Transition downloader for {} from waiting to fetching",
+                &downloader.tenure_id_consensus_hash
+            );
+            if let Err(e) = downloader.transition_to_fetch_end_block() {
+                warn!(
+                    "Downloader for {} failed to transition to fetch end block: {:?}",
+                    &downloader.tenure_id_consensus_hash, &e
+                );
+            }
+        }
+    }
+
+    /// Create a given number of downloads from a schedule and availability set.
+    /// Removes items from the schedule, and neighbors from the availability set.
+    /// A neighbor will be issued at most one request.
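+    ///
+    /// Call-site sketch (hedged; the arguments come from the inventory state machine and the
+    /// p2p network, so the bindings below are hypothetical):
+    /// ```ignore
+    /// set.make_tenure_downloaders(
+    ///     &mut schedule,           // VecDeque<ConsensusHash>, highest priority first
+    ///     &mut available,          // HashMap<ConsensusHash, Vec<NeighborAddress>>
+    ///     &tenure_block_ids,       // HashMap<NeighborAddress, AvailableTenures>
+    ///     max_inflight,            // budget for concurrently-inflight downloads
+    ///     &current_reward_cycles,  // BTreeMap<u64, CurrentRewardSet>
+    /// );
+    /// ```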
+    pub(crate) fn make_tenure_downloaders(
+        &mut self,
+        schedule: &mut VecDeque<ConsensusHash>,
+        available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+        count: usize,
+        current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>,
+    ) {
+        debug!("make_tenure_downloaders";
+               "schedule" => ?schedule,
+               "available" => ?available,
+               "tenure_block_ids" => ?tenure_block_ids,
+               "inflight" => %self.inflight(),
+               "count" => count,
+               "running" => self.num_downloaders(),
+               "scheduled" => self.num_scheduled_downloaders());
+
+        self.clear_finished_downloaders();
+        self.clear_available_peers();
+        self.try_transition_fetch_tenure_end_blocks(tenure_block_ids);
+        while self.inflight() < count {
+            let Some(ch) = schedule.front() else {
+                break;
+            };
+            if self.completed_tenures.contains(&ch) {
+                debug!("Already successfully downloaded tenure {}", &ch);
+                schedule.pop_front();
+                continue;
+            }
+            let Some(neighbors) = available.get_mut(ch) else {
+                // not found on any neighbors, so stop trying this tenure
+                debug!("No neighbors have tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            };
+            if neighbors.is_empty() {
+                // no more neighbors to try
+                debug!("No more neighbors can serve tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            }
+            let Some(naddr) = neighbors.pop() else {
+                debug!("No more neighbors can serve tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            };
+            if self.try_resume_peer(naddr.clone()) {
+                continue;
+            };
+            if self.has_downloader_for_tenure(&ch) {
+                schedule.pop_front();
+                continue;
+            }
+
+            let Some(available_tenures) = tenure_block_ids.get(&naddr) else {
+                // this peer doesn't have any known tenures, so try the others
+                debug!("No tenures available from {}", &naddr);
+                continue;
+            };
+            let Some(tenure_info) = available_tenures.get(ch) else {
+                // this peer does not have a tenure start/end block for this tenure, so try the
+                // others.
+                debug!("Neighbor {} does not serve tenure {}", &naddr, ch);
+                continue;
+            };
+            let Some(Some(start_reward_set)) = current_reward_cycles
+                .get(&tenure_info.start_reward_cycle)
+                .map(|cycle_info| cycle_info.reward_set())
+            else {
+                debug!(
+                    "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}",
+                    tenure_info.start_reward_cycle,
+                    &tenure_info
+                );
+                schedule.pop_front();
+                continue;
+            };
+            let Some(Some(end_reward_set)) = current_reward_cycles
+                .get(&tenure_info.end_reward_cycle)
+                .map(|cycle_info| cycle_info.reward_set())
+            else {
+                debug!(
+                    "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}",
+                    tenure_info.end_reward_cycle,
+                    &tenure_info
+                );
+                schedule.pop_front();
+                continue;
+            };
+
+            debug!(
+                "Download tenure {} (start={}, end={}) (rc {},{})",
+                &ch,
+                &tenure_info.start_block_id,
+                &tenure_info.end_block_id,
+                tenure_info.start_reward_cycle,
+                tenure_info.end_reward_cycle
+            );
+            let tenure_download = NakamotoTenureDownloader::new(
+                ch.clone(),
+                tenure_info.start_block_id.clone(),
+                tenure_info.end_block_id.clone(),
+                naddr.clone(),
+                start_reward_set.clone(),
+                end_reward_set.clone(),
+            );
+
+            debug!("Request tenure {} from neighbor {}", ch, &naddr);
+            self.add_downloader(naddr, tenure_download);
+            schedule.pop_front();
+        }
+    }
+
+    /// Run all confirmed downloaders.
+    /// * Identify neighbors for which we do not have an inflight request
+    /// * Get each such neighbor's downloader, and generate its next HTTP request. Send that
+    ///   request to the neighbor and begin driving the underlying socket I/O.
+    /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance
+    ///   its state.
+    /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken.
+    ///
+    /// Returns the set of downloaded blocks obtained for completed downloaders. These will be
+    /// full confirmed tenures.
+    pub fn run(
+        &mut self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> {
+        let addrs: Vec<_> = self.peers.keys().cloned().collect();
+        let mut finished = vec![];
+        let mut finished_tenures = vec![];
+        let mut new_blocks = HashMap::new();
+
+        // send requests
+        for (naddr, index) in self.peers.iter() {
+            if neighbor_rpc.has_inflight(&naddr) {
+                debug!("Peer {} has an inflight request", &naddr);
+                continue;
+            }
+            let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+                debug!("No downloader for {}", &naddr);
+                continue;
+            };
+            if downloader.is_done() {
+                debug!("Downloader for {} is done", &naddr);
+                finished.push(naddr.clone());
+                finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+                continue;
+            }
+            debug!(
+                "Send request to {} for tenure {} (state {})",
+                &naddr, &downloader.tenure_id_consensus_hash, &downloader.state
+            );
+            let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else {
+                debug!("Downloader for {} failed; this peer is dead", &naddr);
+                neighbor_rpc.add_dead(network, naddr);
+                continue;
+            };
+            if !sent {
+                // this downloader is dead or broken
+                finished.push(naddr.clone());
+                continue;
+            }
+        }
+
+        // clear dead, broken, and done
+        for naddr in addrs.iter() {
+            if neighbor_rpc.is_dead_or_broken(network, naddr) {
+                debug!("Remove dead/broken downloader for {}", &naddr);
+                self.clear_downloader(&naddr);
+            }
+        }
+        for done_naddr in finished.drain(..) {
+            debug!("Remove finished downloader for {}", &done_naddr);
+            self.clear_downloader(&done_naddr);
+        }
+        for done_tenure in finished_tenures.drain(..) {
+            self.completed_tenures.insert(done_tenure);
+        }
+
+        // handle responses
+        for (naddr, response) in neighbor_rpc.collect_replies(network) {
+            let Some(index) = self.peers.get(&naddr) else {
+                debug!("No downloader for {}", &naddr);
+                continue;
+            };
+            let Some(Some(downloader)) = self.downloaders.get_mut(*index) else {
+                debug!("No downloader for {}", &naddr);
+                continue;
+            };
+            debug!("Got response from {}", &naddr);
+
+            let Ok(blocks_opt) = downloader
+                .handle_next_download_response(response)
+                .map_err(|e| {
+                    debug!("Failed to handle response from {}: {:?}", &naddr, &e);
+                    e
+                })
+            else {
+                debug!("Failed to handle download response from {}", &naddr);
+                neighbor_rpc.add_dead(network, &naddr);
+                continue;
+            };
+
+            let Some(blocks) = blocks_opt else {
+                continue;
+            };
+
+            debug!(
+                "Got {} blocks for tenure {}",
+                blocks.len(),
+                &downloader.tenure_id_consensus_hash
+            );
+            new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks);
+            if downloader.is_done() {
+                finished.push(naddr.clone());
+                finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
+                continue;
+            }
+        }
+
+        // clear dead, broken, and done
+        for naddr in addrs.iter() {
+            if neighbor_rpc.is_dead_or_broken(network, naddr) {
+                debug!("Remove dead/broken downloader for {}", &naddr);
+                self.clear_downloader(naddr);
+            }
+        }
+        for done_naddr in finished.drain(..) {
+            debug!("Remove finished downloader for {}", &done_naddr);
+            self.clear_downloader(&done_naddr);
+        }
+        for done_tenure in finished_tenures.drain(..) {
+            self.completed_tenures.insert(done_tenure);
+        }
+
+        new_blocks
+    }
+}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs
new file mode 100644
index 00000000000..28a40e7eb50
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs
@@ -0,0 +1,660 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::to_hex;
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo};
+use crate::chainstate::nakamoto::{
+    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+    AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader,
+    NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// A set of confirmed downloader state machines assigned to one or more neighbors.
+/// The block downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st
+/// tenure needs to feed data into the Nth tenure. This struct is responsible for scheduling peer
+/// connections to downloader state machines, such that each peer is assigned to at most one
+/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at
+/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine
+/// can make progress even if there is only one available peer (in which case, that peer will get
+/// scheduled across multiple machines to drive their progress in the right sequence such that
+/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer).
+pub struct NakamotoTenureDownloaderSet {
+    /// A list of instantiated downloaders that are in progress
+    pub(crate) downloaders: Vec<Option<NakamotoTenureDownloader>>,
+    /// An assignment of peers to downloader machines in the `downloaders` list.
+    pub(crate) peers: HashMap<NeighborAddress, usize>,
+    /// The set of tenures that have been successfully downloaded (but possibly not yet stored or
+    /// processed)
+    pub(crate) completed_tenures: HashSet<ConsensusHash>,
+}
+
+impl NakamotoTenureDownloaderSet {
+    pub fn new() -> Self {
+        Self {
+            downloaders: vec![],
+            peers: HashMap::new(),
+            completed_tenures: HashSet::new(),
+        }
+    }
+
+    /// Assign the given peer to the given downloader state machine. Allocate a slot for it if
+    /// needed.
+    fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) {
+        debug!(
+            "Add downloader for tenure {} driven by {}",
+            &downloader.tenure_id_consensus_hash, &naddr
+        );
+        if let Some(idx) = self.peers.get(&naddr) {
+            self.downloaders[*idx] = Some(downloader);
+        } else {
+            self.downloaders.push(Some(downloader));
+            self.peers.insert(naddr, self.downloaders.len() - 1);
+        }
+    }
+
+    /// Does the given neighbor have an assigned downloader state machine?
+    pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool {
+        let Some(idx) = self.peers.get(naddr) else {
+            return false;
+        };
+        let Some(downloader_opt) = self.downloaders.get(*idx) else {
+            return false;
+        };
+        downloader_opt.is_some()
+    }
+
+    /// Drop the downloader associated with the given neighbor, if any.
+    pub fn clear_downloader(&mut self, naddr: &NeighborAddress) {
+        let Some(index) = self.peers.remove(naddr) else {
+            return;
+        };
+        self.downloaders[index] = None;
+    }
+
+    /// How many downloaders are there?
+    pub fn num_downloaders(&self) -> usize {
+        self.downloaders
+            .iter()
+            .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc })
+    }
+
+    /// How many downloaders are there, which are scheduled?
+    pub fn num_scheduled_downloaders(&self) -> usize {
+        let mut cnt = 0;
+        for (_, idx) in self.peers.iter() {
+            if let Some(Some(_)) = self.downloaders.get(*idx) {
+                cnt += 1;
+            }
+        }
+        cnt
+    }
+
+    /// Add a sequence of (address, downloader) pairs to this downloader set.
+    pub(crate) fn add_downloaders(
+        &mut self,
+        iter: impl IntoIterator<Item = (NeighborAddress, NakamotoTenureDownloader)>,
+    ) {
+        for (naddr, downloader) in iter {
+            if self.has_downloader(&naddr) {
+                debug!("Already have downloader for {}", &naddr);
+                continue;
+            }
+            self.add_downloader(naddr, downloader);
+        }
+    }
+
+    /// Count up the number of in-flight messages, based on the states of each instantiated
+    /// downloader.
+    pub fn inflight(&self) -> usize {
+        let mut cnt = 0;
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.idle {
+                continue;
+            }
+            if downloader.is_waiting() {
+                continue;
+            }
+            if downloader.is_done() {
+                continue;
+            }
+            cnt += 1;
+        }
+        cnt
+    }
+
+    /// Determine whether or not there exists a downloader for the given tenure, identified by its
+    /// consensus hash.
+    pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool {
+        self.downloaders
+            .iter()
+            .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch))
+            .is_some()
+    }
+
+    /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders.
+    pub fn is_empty(&self) -> bool {
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.is_done() {
+                continue;
+            }
+            debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state);
+            return false;
+        }
+        true
+    }
+
+    /// Try to resume processing a download state machine with a given peer. Since a peer is
+    /// detached from the machine after a single RPC call, this call is needed to re-attach it to a
+    /// (potentially different, unblocked) machine for the next RPC call to this peer.
+    ///
+    /// Returns true if the peer gets scheduled.
+    /// Returns false if not.
+    pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool {
+        debug!("Try resume {}", &naddr);
+        if let Some(idx) = self.peers.get(&naddr) {
+            let Some(Some(_downloader)) = self.downloaders.get(*idx) else {
+                return false;
+            };
+
+            debug!(
+                "Peer {} already bound to downloader for {}",
+                &naddr, &_downloader.tenure_id_consensus_hash
+            );
+            return true;
+        }
+        for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if !downloader.idle {
+                continue;
+            }
+            if downloader.is_waiting() {
+                continue;
+            }
+            if downloader.naddr != naddr {
+                continue;
+            }
+            debug!(
+                "Assign peer {} to work on downloader for {} in state {}",
+                &naddr, &downloader.tenure_id_consensus_hash, &downloader.state
+            );
+            self.peers.insert(naddr, i);
+            return true;
+        }
+        return false;
+    }
+
+    /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to
+    /// blocked downloaders.
+    pub fn clear_available_peers(&mut self) {
+        let mut idled: Vec<NeighborAddress> = vec![];
+        for (naddr, i) in self.peers.iter() {
+            let Some(downloader_opt) = self.downloaders.get(*i) else {
+                // should be unreachable
+                idled.push(naddr.clone());
+                continue;
+            };
+            let Some(downloader) = downloader_opt else {
+                debug!("Remove peer {} for null download {}", &naddr, i);
+                idled.push(naddr.clone());
+                continue;
+            };
+            if downloader.idle || downloader.is_waiting() {
+                debug!(
+                    "Remove idled peer {} for tenure download {}",
+                    &naddr, &downloader.tenure_id_consensus_hash
+                );
+                idled.push(naddr.clone());
+            }
+        }
+        for naddr in idled.into_iter() {
+            self.peers.remove(&naddr);
+        }
+    }
+
+    /// Clear out downloaders (but not their peers) that have finished. The caller should follow
+    /// this up with a call to `clear_available_peers()`.
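+    ///
+    /// The documented cleanup sequence, sketched:
+    /// ```ignore
+    /// set.clear_finished_downloaders(); // vacate slots whose machines reached Done
+    /// set.clear_available_peers();      // then unbind peers pointing at vacant or idle slots
+    /// ```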
+    pub fn clear_finished_downloaders(&mut self) {
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if downloader.is_done() {
+                *downloader_opt = None;
+            }
+        }
+    }
+
+    /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These
+    /// will be fed into other downloaders which are blocked on needing their tenure-end blocks.
+    pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap<StacksBlockId, NakamotoBlock> {
+        let mut ret = HashMap::new();
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            let Some(block) = downloader.tenure_start_block.as_ref() else {
+                continue;
+            };
+            ret.insert(block.block_id(), block.clone());
+        }
+        ret
+    }
+
+    /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their
+    /// tenure-end blocks.
+    /// Return a list of peers driving downloaders with failing `tenure_start_blocks`
+    pub(crate) fn handle_tenure_end_blocks(
+        &mut self,
+        tenure_start_blocks: &HashMap<StacksBlockId, NakamotoBlock>,
+    ) -> Vec<NeighborAddress> {
+        debug!(
+            "handle tenure-end blocks: {:?}",
+            &tenure_start_blocks.keys().collect::<Vec<_>>()
+        );
+        let mut dead = vec![];
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) =
+                &downloader.state
+            else {
+                continue;
+            };
+            let Some(end_block) = tenure_start_blocks.get(end_block_id) else {
+                continue;
+            };
+            if let Err(e) = downloader.try_accept_tenure_end_block(end_block) {
+                warn!(
+                    "Failed to accept tenure end-block {} for tenure {}: {:?}",
+                    &end_block.block_id(),
+                    &downloader.tenure_id_consensus_hash,
+                    &e
+                );
+                dead.push(downloader.naddr.clone());
+            }
+        }
+        dead
+    }
+
+    /// Does there exist a downloader (possibly unscheduled) for the given tenure?
+    pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool {
+        for downloader_opt in self.downloaders.iter() {
+            let Some(downloader) = downloader_opt else {
+                continue;
+            };
+            if &downloader.tenure_id_consensus_hash == tenure_id {
+                debug!(
+                    "Have downloader for tenure {} already (idle={}, waiting={}, state={})",
+                    tenure_id,
+                    downloader.idle,
+                    downloader.is_waiting(),
+                    &downloader.state
+                );
+                return true;
+            }
+        }
+        false
+    }
+
+    /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor
+    /// block, we need to go and directly fetch its end block instead of waiting for another
+    /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method
+    /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block.
+    pub(crate) fn try_transition_fetch_tenure_end_blocks(
+        &mut self,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+    ) {
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt.as_mut() else {
+                continue;
+            };
+            downloader.transition_to_fetch_end_block_on_timeout();
+        }
+
+        // find tenures in which we need to fetch the tenure-end block directly.
+        let mut last_available_tenures: HashSet<StacksBlockId> = HashSet::new();
+        for (_, all_available) in tenure_block_ids.iter() {
+            for (_, available) in all_available.iter() {
+                if available.fetch_end_block {
+                    last_available_tenures.insert(available.end_block_id.clone());
+                }
+            }
+        }
+
+        // is anyone downloading this tenure, and if so, are they waiting?
+        // If so, then flip to fetching
+        for downloader_opt in self.downloaders.iter_mut() {
+            let Some(downloader) = downloader_opt.as_mut() else {
+                continue;
+            };
+            if !downloader.idle {
+                continue;
+            }
+            if !downloader.is_waiting() {
+                continue;
+            }
+            if !last_available_tenures.contains(&downloader.tenure_end_block_id) {
+                continue;
+            }
+            debug!(
+                "Transition downloader for {} from waiting to fetching",
+                &downloader.tenure_id_consensus_hash
+            );
+            if let Err(e) = downloader.transition_to_fetch_end_block() {
+                warn!(
+                    "Downloader for {} failed to transition to fetch end block: {:?}",
+                    &downloader.tenure_id_consensus_hash, &e
+                );
+            }
+        }
+    }
+
+    /// Create a given number of downloads from a schedule and availability set.
+    /// Removes items from the schedule, and neighbors from the availability set.
+    /// A neighbor will be issued at most one request.
+    pub(crate) fn make_tenure_downloaders(
+        &mut self,
+        schedule: &mut VecDeque<ConsensusHash>,
+        available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>,
+        tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
+        count: usize,
+        current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>,
+    ) {
+        debug!("make_tenure_downloaders";
+               "schedule" => ?schedule,
+               "available" => ?available,
+               "tenure_block_ids" => ?tenure_block_ids,
+               "inflight" => %self.inflight(),
+               "count" => count,
+               "running" => self.num_downloaders(),
+               "scheduled" => self.num_scheduled_downloaders());
+
+        self.clear_finished_downloaders();
+        self.clear_available_peers();
+        self.try_transition_fetch_tenure_end_blocks(tenure_block_ids);
+        while self.inflight() < count {
+            let Some(ch) = schedule.front() else {
+                break;
+            };
+            if self.completed_tenures.contains(&ch) {
+                debug!("Already successfully downloaded tenure {}", &ch);
+                schedule.pop_front();
+                continue;
+            }
+            let Some(neighbors) = available.get_mut(ch) else {
+                // not found on any neighbors, so stop trying this tenure
+                debug!("No neighbors have tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            };
+            if neighbors.is_empty() {
+                // no more neighbors to try
+                debug!("No more neighbors can serve tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            }
+            let Some(naddr) = neighbors.pop() else {
+                debug!("No more neighbors can serve tenure {}", ch);
+                schedule.pop_front();
+                continue;
+            };
+            if self.try_resume_peer(naddr.clone()) {
+                continue;
+            };
+            if self.has_downloader_for_tenure(&ch) {
+                schedule.pop_front();
+                continue;
+            }
+
+            let Some(available_tenures) = tenure_block_ids.get(&naddr) else {
+                // this peer doesn't have any known tenures, so try the others
+                debug!("No tenures available from {}", &naddr);
+                continue;
+            };
+            let Some(tenure_info) = available_tenures.get(ch) else {
+                // this peer does not have a tenure start/end block for this tenure, so try the
+                // others.
+ debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + continue; + }; + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + + debug!( + "Download tenure {} (start={}, end={}) (rc {},{})", + &ch, + &tenure_info.start_block_id, + &tenure_info.end_block_id, + tenure_info.start_reward_cycle, + tenure_info.end_reward_cycle + ); + let tenure_download = NakamotoTenureDownloader::new( + ch.clone(), + tenure_info.start_block_id.clone(), + tenure_info.end_block_id.clone(), + naddr.clone(), + start_reward_set.clone(), + end_reward_set.clone(), + ); + + debug!("Request tenure {} from neighbor {}", ch, &naddr); + self.add_downloader(naddr, tenure_download); + schedule.pop_front(); + } + } + + /// Run all confirmed downloaders. + /// * Identify neighbors for which we do not have an inflight request + /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that + /// request to the neighbor and begin driving the underlying socket I/O. + /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance + /// its state. + /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. + /// + /// Returns the set of downloaded blocks obtained for completed downloaders. These will be + /// full confirmed tenures. + pub fn run( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> HashMap> { + let addrs: Vec<_> = self.peers.keys().cloned().collect(); + let mut finished = vec![]; + let mut finished_tenures = vec![]; + let mut new_blocks = HashMap::new(); + + // send requests + for (naddr, index) in self.peers.iter() { + if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", &naddr); + continue; + } + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + if downloader.is_done() { + debug!("Downloader for {} is done", &naddr); + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + debug!( + "Send request to {} for tenure {} (state {})", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { + debug!("Downloader for {} failed; this peer is dead", &naddr); + neighbor_rpc.add_dead(network, naddr); + continue; + }; + if !sent { + // this downloader is dead or broken + finished.push(naddr.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(&naddr); + } + } + for done_naddr in finished.drain(..) 
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs
new file mode 100644
index 00000000000..c96f718d2b9
--- /dev/null
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs
@@ -0,0 +1,867 @@
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::convert::TryFrom;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::io::{Read, Write};
+use std::net::{IpAddr, SocketAddr};
+use std::time::{Duration, Instant};
+
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore};
+use stacks_common::types::chainstate::{
+    BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId,
+};
+use stacks_common::types::net::{PeerAddress, PeerHost};
+use stacks_common::types::StacksEpochId;
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
+use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
+
+use crate::burnchains::{Burnchain, BurnchainView, PoxConstants};
+use crate::chainstate::burn::db::sortdb::{
+    BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn,
+};
+use crate::chainstate::burn::BlockSnapshot;
+use crate::chainstate::coordinator::RewardCycleInfo;
+use crate::chainstate::nakamoto::{
+    NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::{
+    Error as chainstate_error, StacksBlockHeader, TenureChangePayload,
+};
+use crate::core::{
+    EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
+};
+use crate::net::api::gettenureinfo::RPCGetTenureInfo;
+use crate::net::chat::ConversationP2P;
+use crate::net::db::{LocalPeer, PeerDB};
+use crate::net::download::nakamoto::{
+    AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd,
+    WantedTenure,
+};
+use crate::net::http::HttpRequestContents;
+use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
+use crate::net::inv::epoch2x::InvState;
+use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv};
+use crate::net::neighbors::rpc::NeighborRPC;
+use crate::net::neighbors::NeighborComms;
+use crate::net::p2p::{CurrentRewardSet, PeerNetwork};
+use crate::net::server::HttpPeer;
+use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+/// Download states for the unconfirmed tenures. These include the ongoing tenure, as well as the
+/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but
+/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure).
+#[derive(Debug, Clone, PartialEq)]
+pub enum NakamotoUnconfirmedDownloadState {
+    /// Getting the tenure tip information
+    GetTenureInfo,
+    /// Get the tenure start block for the ongoing tenure.
+    /// The inner value is the tenure-start block ID of the ongoing tenure.
+    GetTenureStartBlock(StacksBlockId),
+    /// Receiving unconfirmed tenure blocks.
+    /// The inner value is the block ID of the next block to fetch.
+    GetUnconfirmedTenureBlocks(StacksBlockId),
+    /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block
+    /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`).
+    Done,
+}
+
+impl fmt::Display for NakamotoUnconfirmedDownloadState {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+/// Download state machine for the unconfirmed tenures. It operates in the following steps:
+///
+/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip
+/// 2. Get the tenure-start block for the unconfirmed chain tip
+/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the
+///    immediate child of the one obtained in (2)
+///
+/// Once this state-machine finishes execution, the tenure-start block is used to construct a
+/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure.
+///
+#[derive(Debug, Clone, PartialEq)]
+pub struct NakamotoUnconfirmedTenureDownloader {
+    /// state of this machine
+    pub state: NakamotoUnconfirmedDownloadState,
+    /// Address of who we're asking
+    pub naddr: NeighborAddress,
+    /// reward set of the highest confirmed tenure
+    pub confirmed_signer_keys: Option<RewardSet>,
+    /// reward set of the unconfirmed (ongoing) tenure
+    pub unconfirmed_signer_keys: Option<RewardSet>,
+    /// Block ID of this node's highest-processed block.
+    /// We will not download any blocks lower than this, if it's set.
+    pub highest_processed_block_id: Option<StacksBlockId>,
+    /// Highest processed block height (which may not need to be loaded)
+    pub highest_processed_block_height: Option<u64>,
+
+    /// Tenure tip info we obtained for this peer
+    pub tenure_tip: Option<RPCGetTenureInfo>,
+    /// Tenure start block for the ongoing tenure.
+    /// This is also the tenure-end block for the highest-complete tenure.
+    pub unconfirmed_tenure_start_block: Option<NakamotoBlock>,
+    /// Unconfirmed tenure blocks obtained
+    pub unconfirmed_tenure_blocks: Option<Vec<NakamotoBlock>>,
+}
+
+impl NakamotoUnconfirmedTenureDownloader {
+    /// Make a new downloader which will download blocks from the tip back down to the optional
+    /// `highest_processed_block_id` (so we don't re-download the same blocks over and over).
+    pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option<StacksBlockId>) -> Self {
+        Self {
+            state: NakamotoUnconfirmedDownloadState::GetTenureInfo,
+            naddr,
+            confirmed_signer_keys: None,
+            unconfirmed_signer_keys: None,
+            highest_processed_block_id,
+            highest_processed_block_height: None,
+            tenure_tip: None,
+            unconfirmed_tenure_start_block: None,
+            unconfirmed_tenure_blocks: None,
+        }
+    }
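The constructor starts the machine in `GetTenureInfo`, and each subsequent state maps to exactly one HTTP request (see `make_next_download_request()` further down). A compressed, self-contained sketch of that mapping; the `/v3/blocks/{id}` and `/v3/tenures/{id}` paths are assumptions inferred from the request constructors used below, not verified endpoints:

    #[derive(Debug, PartialEq)]
    enum DownloadStep {
        GetTenureInfo,
        GetTenureStartBlock(String),
        GetUnconfirmedTenureBlocks(String),
        Done,
    }

    fn next_path(step: &DownloadStep) -> Option<String> {
        match step {
            DownloadStep::GetTenureInfo => Some("/v3/tenures/info".to_string()),
            // assumed block-fetch path, standing in for new_get_nakamoto_block()
            DownloadStep::GetTenureStartBlock(block_id) => Some(format!("/v3/blocks/{block_id}")),
            // assumed tenure-stream path, standing in for new_get_nakamoto_tenure()
            DownloadStep::GetUnconfirmedTenureBlocks(block_id) => {
                Some(format!("/v3/tenures/{block_id}"))
            }
            DownloadStep::Done => None, // nothing left to fetch
        }
    }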
+
+    /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenures/info, which is
+    /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote
+    /// node).
+    pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> {
+        self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash)
+    }
+
+    /// Set the highest-processed block.
+    /// This can be performed by the downloader itself in order to inform ongoing requests for
+    /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node
+    /// has already handled.
+    pub fn set_highest_processed_block(
+        &mut self,
+        highest_processed_block_id: StacksBlockId,
+        highest_processed_block_height: u64,
+    ) {
+        self.highest_processed_block_id = Some(highest_processed_block_id);
+        self.highest_processed_block_height = Some(highest_processed_block_height);
+    }
+
+    /// Try and accept the tenure info. It will be validated against the sortition DB and its tip.
+    ///
+    /// * tenure_tip.consensus_hash
+    ///     This is the consensus hash of the remote node's ongoing tenure. It may not be the
+    ///     sortition tip, e.g. if the tenure spans multiple sortitions.
+    /// * tenure_tip.tenure_start_block_id
+    ///     This is the first block ID of the ongoing unconfirmed tenure.
+    /// * tenure_tip.parent_consensus_hash
+    ///     This is the consensus hash of the parent of the ongoing tenure. It's the node's highest
+    ///     complete tenure, for which we know the start and end block IDs.
+    /// * tenure_tip.parent_tenure_start_block_id
+    ///     This is the tenure start block for the highest complete tenure. It should be equal to
+    ///     the winning Stacks block hash of the snapshot for the ongoing tenure.
+    ///
+    /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go
+    /// fetch it again; just get the new unconfirmed blocks.
+    pub fn try_accept_tenure_info(
+        &mut self,
+        sortdb: &SortitionDB,
+        local_sort_tip: &BlockSnapshot,
+        chainstate: &StacksChainState,
+        remote_tenure_tip: RPCGetTenureInfo,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+    ) -> Result<(), NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo {
+            return Err(NetError::InvalidState);
+        }
+        if self.tenure_tip.is_some() {
+            return Err(NetError::InvalidState);
+        }
+
+        debug!("Got tenure info {:?}", remote_tenure_tip);
+        debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash);
+
+        // authenticate consensus hashes against canonical chain history
+        let local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &remote_tenure_tip.consensus_hash,
+        )?
+        .ok_or_else(|| {
+            debug!(
+                "No snapshot for tenure {}",
+                &remote_tenure_tip.consensus_hash
+            );
+            NetError::DBError(DBError::NotFoundError)
+        })?;
+        let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &remote_tenure_tip.parent_consensus_hash,
+        )?
+        .ok_or_else(|| {
+            debug!(
+                "No snapshot for parent tenure {}",
+                &remote_tenure_tip.parent_consensus_hash
+            );
+            NetError::DBError(DBError::NotFoundError)
+        })?;
+
+        let ih = sortdb.index_handle(&local_sort_tip.sortition_id);
+        let ancestor_local_tenure_sn = ih
+            .get_block_snapshot_by_height(local_tenure_sn.block_height)?
+            .ok_or_else(|| {
+                debug!(
+                    "No tenure snapshot at burn block height {} off of sortition {} ({})",
+                    local_tenure_sn.block_height,
+                    &local_tenure_sn.sortition_id,
+                    &local_tenure_sn.consensus_hash
+                );
+                NetError::DBError(DBError::NotFoundError)
+            })?;
+
+        if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id {
+            // .consensus_hash is not on the canonical fork
+            warn!("Unconfirmed tenure consensus hash is not canonical";
+                "peer" => %self.naddr,
+                "consensus_hash" => %remote_tenure_tip.consensus_hash);
+            return Err(DBError::NotFoundError.into());
+        }
+        let ancestor_parent_local_tenure_sn = ih
+            .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)?
+            .ok_or_else(|| {
+                debug!(
+                    "No parent tenure snapshot at burn block height {} off of sortition {} ({})",
+                    parent_local_tenure_sn.block_height,
+                    &parent_local_tenure_sn.sortition_id,
+                    &parent_local_tenure_sn.consensus_hash
+                );
+                NetError::DBError(DBError::NotFoundError)
+            })?;
+
+        if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id {
+            // .parent_consensus_hash is not on the canonical fork
+            warn!("Parent unconfirmed tenure consensus hash is not canonical";
+                "peer" => %self.naddr,
+                "consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+            return Err(DBError::NotFoundError.into());
+        }
+
+        // parent tenure sortition must precede the ongoing tenure sortition
+        if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height {
+            warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot";
+                "peer" => %self.naddr,
+                "consensus_hash" => %remote_tenure_tip.consensus_hash,
+                "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // parent tenure start block ID must be the winning block hash for the ongoing tenure's
+        // snapshot
+        if local_tenure_sn.winning_stacks_block_hash.0
+            != remote_tenure_tip.parent_tenure_start_block_id.0
+        {
+            debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr;
+                "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id,
+                "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash);
+            return Err(NetError::StaleView);
+        }
+
+        if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+            // we've synchronized this tenure before, so don't get any more blocks before it.
+            let highest_processed_block = chainstate
+                .nakamoto_blocks_db()
+                .get_nakamoto_block(highest_processed_block_id)?
+                .ok_or_else(|| {
+                    debug!("No such Nakamoto block {}", &highest_processed_block_id);
+                    NetError::DBError(DBError::NotFoundError)
+                })?
+                .0;
+
+            let highest_processed_block_height = highest_processed_block.header.chain_length;
+            self.highest_processed_block_height = Some(highest_processed_block_height);
+
+            if &remote_tenure_tip.tip_block_id == highest_processed_block_id
+                || highest_processed_block_height > remote_tenure_tip.tip_height
+            {
+                // nothing to do -- we're at or ahead of the remote peer, so finish up.
+                // If we don't have the tenure-start block for the confirmed tenure that the remote
+                // peer claims to have, then the remote peer has sent us invalid data and we should
+                // treat it as such.
+                let unconfirmed_tenure_start_block = chainstate
+                    .nakamoto_blocks_db()
+                    .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+                    .ok_or(NetError::InvalidMessage)?
+                    .0;
+                self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+                self.state = NakamotoUnconfirmedDownloadState::Done;
+            }
+        }
+
+        if self.state == NakamotoUnconfirmedDownloadState::Done {
+            // only need to remember the tenure tip
+            self.tenure_tip = Some(remote_tenure_tip);
+            return Ok(());
+        }
+
+        // we're not finished
+        let tenure_rc = sortdb
+            .pox_constants
+            .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height)
+            .expect("FATAL: sortition from before system start");
+        let parent_tenure_rc = sortdb
+            .pox_constants
+            .block_height_to_reward_cycle(
+                sortdb.first_block_height,
+                parent_local_tenure_sn.block_height,
+            )
+            .expect("FATAL: sortition from before system start");
+
+        // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions
+        let Some(Some(confirmed_reward_set)) = current_reward_sets
+            .get(&parent_tenure_rc)
+            .map(|cycle_info| cycle_info.reward_set())
+        else {
+            warn!(
+                "No signer public keys for confirmed tenure {} (rc {})",
+                &parent_local_tenure_sn.consensus_hash, parent_tenure_rc
+            );
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(Some(unconfirmed_reward_set)) = current_reward_sets
+            .get(&tenure_rc)
+            .map(|cycle_info| cycle_info.reward_set())
+        else {
+            warn!(
+                "No signer public keys for unconfirmed tenure {} (rc {})",
+                &local_tenure_sn.consensus_hash, tenure_rc
+            );
+            return Err(NetError::InvalidState);
+        };
+
+        if chainstate
+            .nakamoto_blocks_db()
+            .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id)?
+        {
+            // proceed to get unconfirmed blocks. We already have the tenure-start block.
+            let unconfirmed_tenure_start_block = chainstate
+                .nakamoto_blocks_db()
+                .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)?
+                .ok_or_else(|| {
+                    debug!(
+                        "No such tenure-start Nakamoto block {}",
+                        &remote_tenure_tip.tenure_start_block_id
+                    );
+                    NetError::DBError(DBError::NotFoundError)
+                })?
+                .0;
+            self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+            self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+                remote_tenure_tip.tip_block_id.clone(),
+            );
+        } else {
+            // get the tenure-start block first
+            self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock(
+                remote_tenure_tip.tenure_start_block_id.clone(),
+            );
+        }
+
+        debug!(
+            "Will validate unconfirmed blocks with reward sets in ({},{})",
+            parent_tenure_rc, tenure_rc
+        );
+        self.confirmed_signer_keys = Some(confirmed_reward_set.clone());
+        self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone());
+        self.tenure_tip = Some(remote_tenure_tip);
+
+        Ok(())
+    }
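The canonicality checks above all reduce to one question: does walking the canonical fork down to the claimed snapshot's height land on the same snapshot? A toy illustration of that rule, with a plain slice standing in for the sortition index:

    fn is_canonical(canonical_chain: &[&str], claimed: &str, claimed_height: usize) -> bool {
        // accept the claimed snapshot only if the canonical fork has the
        // same entry at the claimed height
        canonical_chain.get(claimed_height) == Some(&claimed)
    }

    fn main() {
        let chain = ["genesis", "a", "b", "c"]; // canonical fork, indexed by height
        assert!(is_canonical(&chain, "b", 2));
        assert!(!is_canonical(&chain, "x", 2)); // a forked entry is rejected
    }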
+
+    /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state.
+    /// Returns Ok(()) if the unconfirmed tenure start block was valid
+    /// Returns Err(..) if it was not valid, or if this function was called out of sequence.
+    pub fn try_accept_unconfirmed_tenure_start_block(
+        &mut self,
+        unconfirmed_tenure_start_block: NakamotoBlock,
+    ) -> Result<(), NetError> {
+        let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) =
+            &self.state
+        else {
+            warn!("Invalid state for this method";
+                "state" => %self.state);
+            return Err(NetError::InvalidState);
+        };
+        let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+            warn!("tenure_tip is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            warn!("unconfirmed_signer_keys is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        // stacker signature has to match the current reward set
+        if let Err(e) = unconfirmed_tenure_start_block
+            .header
+            .verify_signer_signatures(unconfirmed_signer_keys)
+        {
+            warn!("Invalid tenure-start block: bad signer signature";
+                "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(),
+                "state" => %self.state,
+                "error" => %e);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // block has to match the expected hash
+        if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() {
+            warn!("Invalid tenure-start block";
+                "tenure_id_start_block" => %tenure_start_block_id,
+                "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(),
+                "state" => %self.state);
+            return Err(NetError::InvalidMessage);
+        }
+
+        // furthermore, the block has to match the expected tenure ID
+        if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash {
+            warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch";
+                "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash,
+                "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash);
+            return Err(NetError::InvalidMessage);
+        }
+
+        self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block);
+        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(
+            tenure_tip.tip_block_id.clone(),
+        );
+        Ok(())
+    }
+
+    /// Add downloaded unconfirmed tenure blocks.
+    /// If we have collected all tenure blocks, then return them.
+    /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the
+    /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come
+    /// after the highest-processed block (if set).
+    /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call
+    /// `send_next_download_request()`.
+    /// Returns Err(..) on invalid state or invalid block.
+    pub fn try_accept_unconfirmed_tenure_blocks(
+        &mut self,
+        mut tenure_blocks: Vec<NakamotoBlock>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) =
+            &self.state
+        else {
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(tenure_tip) = self.tenure_tip.as_ref() else {
+            warn!("tenure_tip is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            warn!("unconfirmed_signer_keys is not set");
+            return Err(NetError::InvalidState);
+        };
+
+        if tenure_blocks.is_empty() {
+            // nothing to do
+            debug!("No tenure blocks obtained");
+            return Ok(None);
+        }
+
+        // blocks must be contiguous and in order from highest to lowest.
+        // If there's a tenure-start block, it must be last.
+        let mut expected_block_id = last_block_id;
+        let mut finished_download = false;
+        let mut last_block_index = None;
+        for (cnt, block) in tenure_blocks.iter().enumerate() {
+            if &block.header.block_id() != expected_block_id {
+                warn!("Unexpected Nakamoto block -- not part of tenure";
+                    "expected_block_id" => %expected_block_id,
+                    "block_id" => %block.header.block_id());
+                return Err(NetError::InvalidMessage);
+            }
+            if let Err(e) = block
+                .header
+                .verify_signer_signatures(unconfirmed_signer_keys)
+            {
+                warn!("Invalid block: bad signer signature";
+                    "tenure_id" => %tenure_tip.consensus_hash,
+                    "block.header.block_id" => %block.header.block_id(),
+                    "state" => %self.state,
+                    "error" => %e);
+                return Err(NetError::InvalidMessage);
+            }
+
+            // we may or may not need the tenure-start block for the unconfirmed tenure. But if we
+            // do, make sure it's valid, and it's the last block we receive.
+            let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else {
+                warn!("Invalid tenure-start block";
+                    "tenure_id" => %tenure_tip.consensus_hash,
+                    "block.header.block_id" => %block.header.block_id(),
+                    "state" => %self.state);
+                return Err(NetError::InvalidMessage);
+            };
+            if is_tenure_start {
+                // this is the tenure-start block, so make sure it matches our /v3/tenures/info
+                if block.header.block_id() != tenure_tip.tenure_start_block_id {
+                    warn!("Unexpected tenure-start block";
+                        "tenure_id" => %tenure_tip.consensus_hash,
+                        "block.header.block_id" => %block.header.block_id(),
+                        "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id);
+                    return Err(NetError::InvalidMessage);
+                }
+
+                if cnt.saturating_add(1) != tenure_blocks.len() {
+                    warn!("Invalid tenure stream -- got tenure-start before end of tenure";
+                        "tenure_id" => %tenure_tip.consensus_hash,
+                        "block.header.block_id" => %block.header.block_id(),
+                        "cnt" => cnt,
+                        "len" => tenure_blocks.len(),
+                        "state" => %self.state);
+                    return Err(NetError::InvalidMessage);
+                }
+
+                finished_download = true;
+                last_block_index = Some(cnt);
+                break;
+            }
+
+            debug!("Got unconfirmed tenure block {}", &block.header.block_id());
+
+            // NOTE: this field can get updated by the downloader while this state-machine is in
+            // this state.
+            if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() {
+                if expected_block_id == highest_processed_block_id {
+                    // got all the blocks we asked for
+                    debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id);
+                    finished_download = true;
+                    last_block_index = Some(cnt);
+                    break;
+                }
+            }
+
+            // NOTE: this field can get updated by the downloader while this state-machine is in
+            // this state.
+            if let Some(highest_processed_block_height) =
+                self.highest_processed_block_height.as_ref()
+            {
+                if &block.header.chain_length <= highest_processed_block_height {
+                    // no need to continue this download
+                    debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height);
+                    finished_download = true;
+                    last_block_index = Some(cnt);
+                    break;
+                }
+            }
+
+            expected_block_id = &block.header.parent_block_id;
+            last_block_index = Some(cnt);
+        }
+
+        // blocks after the last_block_index were not processed, so should be dropped
+        if let Some(last_block_index) = last_block_index {
+            tenure_blocks.truncate(last_block_index + 1);
+        }
+
+        if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() {
+            blocks.append(&mut tenure_blocks);
+        } else {
+            self.unconfirmed_tenure_blocks = Some(tenure_blocks);
+        }
+
+        if finished_download {
+            // we have all of the unconfirmed tenure blocks that were requested.
+            // only return those newer than the highest block.
+            self.state = NakamotoUnconfirmedDownloadState::Done;
+            let highest_processed_block_height =
+                *self.highest_processed_block_height.as_ref().unwrap_or(&0);
+
+            debug!("Finished receiving unconfirmed tenure");
+            return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| {
+                blocks
+                    .into_iter()
+                    .filter(|block| block.header.chain_length > highest_processed_block_height)
+                    .rev()
+                    .collect()
+            }));
+        }
+
+        let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+
+        // still have more to get
+        let Some(earliest_block) = blocks.last() else {
+            // unreachable but be defensive
+            warn!("Invalid state: no blocks (infallible -- got empty vec)");
+            return Err(NetError::InvalidState);
+        };
+        let next_block_id = earliest_block.header.parent_block_id.clone();
+
+        debug!(
+            "Will resume fetching unconfirmed tenure blocks starting at {}",
+            &next_block_id
+        );
+        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id);
+        Ok(None)
+    }
+
+    /// Once this machine runs to completion, examine its state to see if we still need to fetch
+    /// the highest complete tenure. We may not need to, especially if we're just polling for new
+    /// unconfirmed blocks.
+    ///
+    /// Return Ok(true) if we need it still
+    /// Return Ok(false) if we already have it
+    /// Return Err(..) if we encounter a DB error or if this function was called out of sequence.
+    pub fn need_highest_complete_tenure(
+        &self,
+        chainstate: &StacksChainState,
+    ) -> Result<bool, NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::Done {
+            return Err(NetError::InvalidState);
+        }
+        let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
+        else {
+            return Err(NetError::InvalidState);
+        };
+
+        // if we've processed the unconfirmed tenure-start block already, then we've necessarily
+        // downloaded and processed the highest-complete tenure already.
+        Ok(!NakamotoChainState::has_block_header(
+            chainstate.db(),
+            &unconfirmed_tenure_start_block.header.block_id(),
+            false,
+        )?)
+    }
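`try_accept_unconfirmed_tenure_blocks()` consumes blocks newest-first, stops once it reaches a block this node has already processed, and reverses the survivors into ascending order before returning them. The same rule on bare heights, as a runnable sketch (heights stand in for full Nakamoto blocks):

    fn accept_blocks(newest_first: Vec<u64>, highest_processed: u64) -> Vec<u64> {
        let kept: Vec<u64> = newest_first
            .into_iter()
            .take_while(|height| *height > highest_processed) // stop at a block we already have
            .collect();
        kept.into_iter().rev().collect() // hand back lowest-to-highest
    }

    fn main() {
        assert_eq!(accept_blocks(vec![10, 9, 8, 7], 8), vec![9, 10]);
    }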
+
+    /// Determine if we can produce a highest-complete tenure request.
+    /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure
+    pub fn can_make_highest_complete_tenure_downloader(
+        &self,
+        sortdb: &SortitionDB,
+    ) -> Result<bool, NetError> {
+        let Some(tenure_tip) = &self.tenure_tip else {
+            return Ok(false);
+        };
+
+        let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus(
+            sortdb.conn(),
+            &tenure_tip.parent_consensus_hash,
+        )?
+        else {
+            return Ok(false);
+        };
+
+        let Some(tip_sn) =
+            SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)?
+        else {
+            return Ok(false);
+        };
+
+        let Some(parent_tenure) =
+            SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)?
+        else {
+            return Ok(false);
+        };
+
+        let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)?
+        else {
+            return Ok(false);
+        };
+
+        if parent_tenure.epoch_id < StacksEpochId::Epoch30
+            || tip_tenure.epoch_id < StacksEpochId::Epoch30
+        {
+            debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block";
+                "start_tenure" => %tenure_tip.parent_consensus_hash,
+                "end_tenure" => %tenure_tip.consensus_hash,
+                "start_tenure_epoch" => %parent_tenure.epoch_id,
+                "end_tenure_epoch" => %tip_tenure.epoch_id
+            );
+            return Ok(false);
+        }
+
+        Ok(true)
+    }
+
+    /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
+    /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
+    /// its tenure-start block.
+    ///
+    /// Returns Ok(downloader) on success
+    /// Returns Err(..) if we call this function out of sequence.
+    pub fn make_highest_complete_tenure_downloader(
+        &self,
+    ) -> Result<NakamotoTenureDownloader, NetError> {
+        if self.state != NakamotoUnconfirmedDownloadState::Done {
+            return Err(NetError::InvalidState);
+        }
+        let Some(tenure_tip) = &self.tenure_tip else {
+            return Err(NetError::InvalidState);
+        };
+        let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else {
+            return Err(NetError::InvalidState);
+        };
+        let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else {
+            return Err(NetError::InvalidState);
+        };
+
+        debug!(
+            "Create downloader for highest complete tenure {} known by {}",
+            &tenure_tip.parent_consensus_hash, &self.naddr,
+        );
+        let ntd = NakamotoTenureDownloader::new(
+            tenure_tip.parent_consensus_hash.clone(),
+            tenure_tip.parent_tenure_start_block_id.clone(),
+            tenure_tip.tenure_start_block_id.clone(),
+            self.naddr.clone(),
+            confirmed_signer_keys.clone(),
+            unconfirmed_signer_keys.clone(),
+        );
+
+        Ok(ntd)
+    }
+
+    /// Produce the next HTTP request that, when successfully executed, will advance this state
+    /// machine.
+    ///
+    /// Returns Some(request) if a request must be sent.
+    /// Returns None if we're done.
+    pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> {
+        match &self.state {
+            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+                // need to get the tenure tip
+                return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
+            }
+            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
+                return Some(StacksHttpRequest::new_get_nakamoto_block(
+                    peerhost,
+                    block_id.clone(),
+                ));
+            }
+            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
+                return Some(StacksHttpRequest::new_get_nakamoto_tenure(
+                    peerhost,
+                    tip_block_id.clone(),
+                    self.highest_processed_block_id.clone(),
+                ));
+            }
+            NakamotoUnconfirmedDownloadState::Done => {
+                // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed
+                // tenure downloader using the earliest unconfirmed tenure block.
+                return None;
+            }
+        }
+    }
+
+    /// Begin the next download request for this state machine.
+    /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The
+    /// caller should try this again until it gets one of the other possible return values. It's
+    /// up to the caller to determine when it's appropriate to convert this state machine into a
+    /// `NakamotoTenureDownloader`.
+    /// Returns Err(..) if the neighbor is dead or broken.
+    pub fn send_next_download_request(
+        &self,
+        network: &mut PeerNetwork,
+        neighbor_rpc: &mut NeighborRPC,
+    ) -> Result<(), NetError> {
+        if neighbor_rpc.has_inflight(&self.naddr) {
+            debug!("Peer {} has an inflight request", &self.naddr);
+            return Ok(());
+        }
+        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
+            return Err(NetError::PeerNotConnected);
+        }
+
+        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
+            // no conversation open to this neighbor
+            neighbor_rpc.add_dead(network, &self.naddr);
+            return Err(NetError::PeerNotConnected);
+        };
+
+        let Some(request) = self.make_next_download_request(peerhost) else {
+            // treat this downloader as still in-flight since the overall state machine will need
+            // to keep it around long enough to convert it into a tenure downloader for the highest
+            // complete tenure.
+            return Ok(());
+        };
+
+        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
+        Ok(())
+    }
+
+    /// Handle a received StacksHttpResponse and advance this machine's state.
+    /// If we get the full tenure, return it.
+    ///
+    /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
+    /// Returns Ok(None) if we're still working, in which case the caller should call
+    /// `send_next_download_request()`
+    /// Returns Err(..) on unrecoverable failure to advance state
+    pub fn handle_next_download_response(
+        &mut self,
+        response: StacksHttpResponse,
+        sortdb: &SortitionDB,
+        local_sort_tip: &BlockSnapshot,
+        chainstate: &StacksChainState,
+        current_reward_sets: &BTreeMap<u64, CurrentRewardSet>,
+    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
+        match &self.state {
+            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
+                debug!("Got tenure-info response");
+                let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
+                debug!("Got tenure-info response: {:?}", &remote_tenure_info);
+                self.try_accept_tenure_info(
+                    sortdb,
+                    local_sort_tip,
+                    chainstate,
+                    remote_tenure_info,
+                    current_reward_sets,
+                )?;
+                Ok(None)
+            }
+            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
+                debug!("Got tenure start-block response");
+                let block = response.decode_nakamoto_block()?;
+                self.try_accept_unconfirmed_tenure_start_block(block)?;
+                Ok(None)
+            }
+            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
+                debug!("Got unconfirmed tenure blocks response");
+                let blocks = response.decode_nakamoto_tenure()?;
+                let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?;
+                debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some());
+                Ok(accepted_opt)
+            }
+            NakamotoUnconfirmedDownloadState::Done => {
+                return Err(NetError::InvalidState);
+            }
+        }
+    }
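Each response is decoded according to the current state and advances the machine by one step; only the final step yields blocks. A compressed, self-contained analogue of that dispatch (the real machine can remain in the block-fetching state across several responses, which this sketch elides; `u64` heights stand in for Nakamoto blocks):

    enum Machine {
        Info,
        StartBlock,
        Blocks(Vec<u64>),
        Done,
    }

    fn on_response(machine: Machine, reply: Vec<u64>) -> (Machine, Option<Vec<u64>>) {
        match machine {
            // tenure tip learned; go fetch the tenure-start block
            Machine::Info => (Machine::StartBlock, None),
            // start block verified; begin walking the tenure backwards
            Machine::StartBlock => (Machine::Blocks(Vec::new()), None),
            // accumulate the reply and finish, yielding everything collected
            Machine::Blocks(mut acc) => {
                acc.extend(reply);
                (Machine::Done, Some(acc))
            }
            // a response while Done is a protocol error upstream; ignore it here
            Machine::Done => (Machine::Done, None),
        }
    }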
+ pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs new file mode 100644 index 00000000000..c96f718d2b9 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs @@ -0,0 +1,867 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, + WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for a unconfirmed tenures. 
These include the ongoing tenure, as well as the +/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but +/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoUnconfirmedDownloadState { + /// Getting the tenure tip information + GetTenureInfo, + /// Get the tenure start block for the ongoing tenure. + /// The inner value is tenure-start block ID of the ongoing tenure. + GetTenureStartBlock(StacksBlockId), + /// Receiving unconfirmed tenure blocks. + /// The inner value is the block ID of the next block to fetch. + GetUnconfirmedTenureBlocks(StacksBlockId), + /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block + /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). + Done, +} + +impl fmt::Display for NakamotoUnconfirmedDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for the unconfirmed tenures. It operates in the following steps: +/// +/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip +/// 2. Get the tenure-start block for the unconfirmed chain tip +/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the +/// immediate child of the one obtained in (2) +/// +/// Once this state-machine finishes execution, the tenure-start block is used to construct a +/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. +/// +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoUnconfirmedTenureDownloader { + /// state of this machine + pub state: NakamotoUnconfirmedDownloadState, + /// Address of who we're asking + pub naddr: NeighborAddress, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, + /// Block ID of this node's highest-processed block. + /// We will not download any blocks lower than this, if it's set. + pub highest_processed_block_id: Option, + /// Highest processed block height (which may not need to be loaded) + pub highest_processed_block_height: Option, + + /// Tenure tip info we obtained for this peer + pub tenure_tip: Option, + /// Tenure start block for the ongoing tip. + /// This is also the tenure-end block for the highest-complete tip. + pub unconfirmed_tenure_start_block: Option, + /// Unconfirmed tenure blocks obtained + pub unconfirmed_tenure_blocks: Option>, +} + +impl NakamotoUnconfirmedTenureDownloader { + /// Make a new downloader which will download blocks from the tip back down to the optional + /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). + pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { + Self { + state: NakamotoUnconfirmedDownloadState::GetTenureInfo, + naddr, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, + highest_processed_block_id, + highest_processed_block_height: None, + tenure_tip: None, + unconfirmed_tenure_start_block: None, + unconfirmed_tenure_blocks: None, + } + } + + /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is + /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote + /// node). 
+ pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { + self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) + } + + /// Set the highest-processed block. + /// This can be performed by the downloader itself in order to inform ongoing requests for + /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node + /// has already handled. + pub fn set_highest_processed_block( + &mut self, + highest_processed_block_id: StacksBlockId, + highest_processed_block_height: u64, + ) { + self.highest_processed_block_id = Some(highest_processed_block_id); + self.highest_processed_block_height = Some(highest_processed_block_height); + } + + /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. + /// + /// * tenure_tip.consensus_hash + /// This is the consensus hash of the remote node's ongoing tenure. It may not be the + /// sortition tip, e.g. if the tenure spans multiple sortitions. + /// * tenure_tip.tenure_start_block_id + /// This is the first block ID of the ongoing unconfirmed tenure. + /// * tenure_tip.parent_consensus_hash + /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest + /// complete tenure, for which we know the start and end block IDs. + /// * tenure_tip.parent_tenure_start_block_id + /// This is the tenure start block for the highest complete tenure. It should be equal to + /// the winning Stacks block hash of the snapshot for the ongoing tenure. + /// + /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go + /// fetch it again; just get the new unconfirmed blocks. + pub fn try_accept_tenure_info( + &mut self, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + remote_tenure_tip: RPCGetTenureInfo, + current_reward_sets: &BTreeMap, + ) -> Result<(), NetError> { + if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { + return Err(NetError::InvalidState); + } + if self.tenure_tip.is_some() { + return Err(NetError::InvalidState); + } + + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + + // authenticate consensus hashes against canonical chain history + let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.parent_consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + let ih = sortdb.index_handle(&local_sort_tip.sortition_id); + let ancestor_local_tenure_sn = ih + .get_block_snapshot_by_height(local_tenure_sn.block_height)? 
+ .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { + // .consensus_hash is not on the canonical fork + warn!("Unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash); + return Err(DBError::NotFoundError.into()); + } + let ancestor_parent_local_tenure_sn = ih + .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; + + if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { + // .parent_consensus_hash is not on the canonical fork + warn!("Parent unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(DBError::NotFoundError.into()); + } + + // parent tenure sortition must precede the ongoing tenure sortition + if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { + warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash, + "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(NetError::InvalidMessage); + } + + // parent tenure start block ID must be the winning block hash for the ongoing tenure's + // snapshot + if local_tenure_sn.winning_stacks_block_hash.0 + != remote_tenure_tip.parent_tenure_start_block_id.0 + { + debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, + "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); + return Err(NetError::StaleView); + } + + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + // we've synchronized this tenure before, so don't get anymore blocks before it. + let highest_processed_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(highest_processed_block_id)? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? + .0; + + let highest_processed_block_height = highest_processed_block.header.chain_length; + self.highest_processed_block_height = Some(highest_processed_block_height); + + if &remote_tenure_tip.tip_block_id == highest_processed_block_id + || highest_processed_block_height > remote_tenure_tip.tip_height + { + // nothing to do -- we're at or ahead of the remote peer, so finish up. + // If we don't have the tenure-start block for the confirmed tenure that the remote + // peer claims to have, then the remote peer has sent us invalid data and we should + // treat it as such. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or(NetError::InvalidMessage)? 
+ .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::Done; + } + } + + if self.state == NakamotoUnconfirmedDownloadState::Done { + // only need to remember the tenure tip + self.tenure_tip = Some(remote_tenure_tip); + return Ok(()); + } + + // we're not finished + let tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for confirmed tenure {} (rc {})", + &parent_local_tenure_sn.consensus_hash, parent_tenure_rc + ); + return Err(NetError::InvalidState); + }; + + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for unconfirmed tenure {} (rc {})", + &local_tenure_sn.consensus_hash, tenure_rc + ); + return Err(NetError::InvalidState); + }; + + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? + { + // proceed to get unconfirmed blocks. We already have the tenure-start block. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + remote_tenure_tip.tip_block_id.clone(), + ); + } else { + // get the tenure-start block first + self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( + remote_tenure_tip.tenure_start_block_id.clone(), + ); + } + + debug!( + "Will validate unconfirmed blocks with reward sets in ({},{})", + parent_tenure_rc, tenure_rc + ); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); + self.tenure_tip = Some(remote_tenure_tip); + + Ok(()) + } + + /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the unconfirmed tenure start block was valid + /// Returns Err(..) if it was not valid, or if this function was called out of sequence. 
+ pub fn try_accept_unconfirmed_tenure_start_block( + &mut self, + unconfirmed_tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = + &self.state + else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // block has to match the expected hash + if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { + warn!("Invalid tenure-start block"; + "tenure_id_start_block" => %tenure_start_block_id, + "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + // furthermore, the block has to match the expected tenure ID + if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { + warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); + return Err(NetError::InvalidMessage); + } + + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + tenure_tip.tip_block_id.clone(), + ); + Ok(()) + } + + /// Add downloaded unconfirmed tenure blocks. + /// If we have collected all tenure blocks, then return them. + /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the + /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come + /// after the highest-processed block (if set). + /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on invalid state or invalid block. 
+ pub fn try_accept_unconfirmed_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = + &self.state + else { + return Err(NetError::InvalidState); + }; + + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + debug!("No tenure blocks obtained"); + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest. + // If there's a tenure-start block, it must be last. + let mut expected_block_id = last_block_id; + let mut finished_download = false; + let mut last_block_index = None; + for (cnt, block) in tenure_blocks.iter().enumerate() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id()); + return Err(NetError::InvalidMessage); + } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // we may or may not need the tenure-start block for the unconfirmed tenure. But if we + // do, make sure it's valid, and it's the last block we receive. + let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + }; + if is_tenure_start { + // this is the tenure-start block, so make sure it matches our /v3/tenure/info + if block.header.block_id() != tenure_tip.tenure_start_block_id { + warn!("Unexpected tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); + return Err(NetError::InvalidMessage); + } + + if cnt.saturating_add(1) != tenure_blocks.len() { + warn!("Invalid tenure stream -- got tenure-start before end of tenure"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "cnt" => cnt, + "len" => tenure_blocks.len(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + finished_download = true; + last_block_index = Some(cnt); + break; + } + + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + if expected_block_id == highest_processed_block_id { + // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. 
+ if let Some(highest_processed_block_height) = + self.highest_processed_block_height.as_ref() + { + if &block.header.chain_length <= highest_processed_block_height { + // no need to continue this download + debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); + } + + if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.unconfirmed_tenure_blocks = Some(tenure_blocks); + } + + if finished_download { + // we have all of the unconfirmed tenure blocks that were requested. + // only return those newer than the highest block. + self.state = NakamotoUnconfirmedDownloadState::Done; + let highest_processed_block_height = + *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + debug!("Finished receiving unconfirmed tenure"); + return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { + blocks + .into_iter() + .filter(|block| block.header.chain_length > highest_processed_block_height) + .rev() + .collect() + })); + } + + let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + // still have more to get + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + let next_block_id = earliest_block.header.parent_block_id.clone(); + + debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); + Ok(None) + } + + /// Once this machine runs to completion, examine its state to see if we still need to fetch + /// the highest complete tenure. We may not need to, especially if we're just polling for new + /// unconfirmed blocks. + /// + /// Return Ok(true) if we need it still + /// Return Ok(false) if we already have it + /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. + pub fn need_highest_complete_tenure( + &self, + chainstate: &StacksChainState, + ) -> Result<bool, NetError> { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() + else { + return Err(NetError::InvalidState); + }; + + // if we've processed the unconfirmed tenure-start block already, then we've necessarily + // downloaded and processed the highest-complete tenure already. + Ok(!NakamotoChainState::has_block_header( + chainstate.db(), + &unconfirmed_tenure_start_block.header.block_id(), + false, + )?) + } + + /// Determine if we can produce a highest-complete tenure request.
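+ /// In practice: both the parent tenure and the tenure tip must fall in epoch 3.0 or later.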
+ /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result<bool, NetError> { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the + /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get + /// its tenure-start block. + /// + /// Returns Ok(downloader) on success + /// Returns Err(..) if we call this function out of sequence. + pub fn make_highest_complete_tenure_downloader( + &self, + ) -> Result<NakamotoTenureDownloader, NetError> { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(tenure_tip) = &self.tenure_tip else { + return Err(NetError::InvalidState); + }; + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + debug!( + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, &self.naddr, + ); + let ntd = NakamotoTenureDownloader::new( + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), + self.naddr.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); + + Ok(ntd) + } + + /// Produce the next HTTP request that, when successfully executed, will advance this state + /// machine. + /// + /// Returns Some(request) if a request must be sent. + /// Returns None if we're done + pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // need to get the tenure tip + return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_block( + peerhost, + block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_tenure( + peerhost, + tip_block_id.clone(), + self.highest_processed_block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::Done => { + // got all unconfirmed blocks!
Next step is to turn this downloader into a confirmed + // tenure downloader using the earliest unconfirmed tenure block. + return None; + } + } + } + + /// Begin the next download request for this state machine. + /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. It's + /// up to the caller to determine when it's appropriate to convert this state machine into a + /// `NakamotoTenureDownloader`. + /// Returns Err(..) if the neighbor is dead or broken. + pub fn send_next_download_request( + &self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result<(), NetError> { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(()); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let Some(request) = self.make_next_download_request(peerhost) else { + // treat this downloader as still in-flight since the overall state machine will need + // to keep it around long enough to convert it into a tenure downloader for the highest + // complete tenure. + return Ok(()); + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + Ok(()) + } + + /// Handle a received StacksHttpResponse and advance this machine's state + /// If we get the full tenure, return it. + /// + /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure + /// Returns Ok(None) if we're still working, in which case the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on unrecoverable failure to advance state + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + current_reward_sets: &BTreeMap<u64, CurrentRewardSet>, + ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + debug!("Got tenure-info response"); + let remote_tenure_info = response.decode_nakamoto_tenure_info()?; + debug!("Got tenure-info response: {:?}", &remote_tenure_info); + self.try_accept_tenure_info( + sortdb, + local_sort_tip, + chainstate, + remote_tenure_info, + current_reward_sets, + )?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { + debug!("Got tenure start-block response"); + let block = response.decode_nakamoto_block()?; + self.try_accept_unconfirmed_tenure_start_block(block)?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + debug!("Got unconfirmed tenure blocks response"); + let blocks = response.decode_nakamoto_tenure()?; + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + Ok(accepted_opt) + } + NakamotoUnconfirmedDownloadState::Done => { + return Err(NetError::InvalidState); + } + } + } + + /// Is this machine finished?
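+ /// Callers should check this before converting the machine into a downloader for the + /// highest complete tenure via make_highest_complete_tenure_downloader().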
+ pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} From 70afa9fd654eb74a58ea549b5de653eadb6e98d8 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 17:35:02 +0300 Subject: [PATCH 314/910] update ignore timeout only on workflow dispatch by team member && update the job name --- .github/workflows/pr-differences-mutants.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index ca2bac5081a..d53e2ca661a 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -17,13 +17,13 @@ concurrency: cancel-in-progress: true jobs: - check-right-permissions: - name: Check Right Permissions + check-access-permissions: + name: Check Access Permissions runs-on: ubuntu-latest steps: - - name: Check Right Permissions To Trigger This - id: check_right_permissions + - name: Check Access Permissions To Trigger This + id: check_access_permissions uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} @@ -31,12 +31,12 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} outputs: - ignore_timeout: ${{ steps.check_right_permissions.outputs.is_team_member == 'true' }} + ignore_timeout: ${{ steps.check_access_permissions.outputs.is_team_member == 'true' && github.event_name == 'workflow_dispatch' }} # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards - needs: check-right-permissions + needs: check-access-permissions runs-on: ubuntu-latest @@ -54,7 +54,7 @@ jobs: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main with: - ignore_timeout: ${{ needs.check-right-permissions.outputs.ignore_timeout }} + ignore_timeout: ${{ needs.check-access-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: From 238367c0504e6077bb5e9b3b5c66c382b7db95f9 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 17:39:48 +0300 Subject: [PATCH 315/910] modify files to trigger testing --- stackslib/src/net/download/nakamoto/mod.rs | 12 ++++++------ ...ownloader_copy.rs => tenure_downloader_copy_2.rs} | 0 ..._downloader_opy.rs => tenure_downloader_opy_2.rs} | 0 ...r_set_copy.rs => tenure_downloader_set_copy_2.rs} | 0 ...der_set_opy.rs => tenure_downloader_set_opy_2.rs} | 0 ...py.rs => tenure_downloader_unconfirmed_copy_2.rs} | 0 ...opy.rs => tenure_downloader_unconfirmed_opy_2.rs} | 0 7 files changed, 6 insertions(+), 6 deletions(-) rename stackslib/src/net/download/nakamoto/{tenure_downloader_copy.rs => tenure_downloader_copy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_opy.rs => tenure_downloader_opy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_set_copy.rs => tenure_downloader_set_copy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_set_opy.rs => tenure_downloader_set_opy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_unconfirmed_copy.rs => tenure_downloader_unconfirmed_copy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_unconfirmed_opy.rs => tenure_downloader_unconfirmed_opy_2.rs} (100%) diff --git 
a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 7643c54ff7d..d97eecafe2e 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,14 +161,14 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; -mod tenure_downloader_copy; -mod tenure_downloader_opy; +mod tenure_downloader_copy_2; +mod tenure_downloader_opy_2; mod tenure_downloader_set; -mod tenure_downloader_set_copy; -mod tenure_downloader_set_opy; +mod tenure_downloader_set_copy_2; +mod tenure_downloader_set_opy_2; mod tenure_downloader_unconfirmed; -mod tenure_downloader_unconfirmed_copy; -mod tenure_downloader_unconfirmed_opy; +mod tenure_downloader_unconfirmed_copy_2; +mod tenure_downloader_unconfirmed_opy_2; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs From 3aed5cb499c90a3234b530062110bffd67d0d1e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 21 Aug 2024 10:52:24 -0400 Subject: [PATCH 316/910] fix: fix compile issues in tests --- stackslib/src/net/stackerdb/tests/sync.rs | 310 ++++++++++++---------- 1 file changed, 173 insertions(+), 137 deletions(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index d1ac5e58bed..69bdad93d9d 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -258,40 +258,48 @@ fn 
test_stackerdb_replica_2_neighbors_1_chunk() { test_reconnect(&mut peer_1.network); test_reconnect(&mut peer_2.network); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } let db1 = load_stackerdb(&peer_1, idx_1); @@ -379,7 +387,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); @@ -387,23 +395,28 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { peer_1_stale = true; } } - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); @@ -411,20 +424,24 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { peer_2_stale = true; } } - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + 
res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } if peer_1_stale && peer_2_stale { @@ -455,40 +472,48 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } let db1 = load_stackerdb(&peer_1, idx_1); @@ -579,40 +604,48 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + 
.relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } let db1 = load_stackerdb(&peer_1, idx_1); @@ -719,24 +752,27 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, peers[i].network.stacker_db_configs = peer_db_configs[i].clone(); let res = peers[i].step_with_ibd(false); - if let Ok(mut res) = res { + if let Ok(res) = res { check_sync_results(&res); - let rc_consensus_hash = - peers[i].network.get_chain_view().rc_consensus_hash.clone(); - Relayer::process_stacker_db_chunks( - &mut peers[i].network.stackerdbs, - &peer_db_configs[i], - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peers[i].network.stackerdbs, - &peer_db_configs[i], - &mut res.unhandled_messages, - None, - ) - .unwrap(); + let rc_ch = peers[i].network.get_chain_view().rc_consensus_hash.clone(); + peers[i] + .relayer + .process_stacker_db_chunks( + &rc_ch, + &peer_db_configs[i], + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peers[i] + .relayer + .process_pushed_stacker_db_chunks( + &rc_ch, + &peer_db_configs[i], + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } } From abf5544f2191e8d86ff3e659c26215295d00107d Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 17:52:53 +0300 Subject: [PATCH 317/910] remove extra copied files --- stackslib/src/net/download/nakamoto/mod.rs | 6 - .../nakamoto/tenure_downloader_copy_2.rs | 693 -------------- .../nakamoto/tenure_downloader_opy_2.rs | 693 -------------- .../nakamoto/tenure_downloader_set_copy_2.rs | 660 ------------- .../nakamoto/tenure_downloader_set_opy_2.rs | 660 ------------- .../tenure_downloader_unconfirmed_copy_2.rs | 867 ------------------ .../tenure_downloader_unconfirmed_opy_2.rs | 867 ------------------ 7 files changed, 4446 deletions(-) delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index d97eecafe2e..dd440ac110f 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,14 +161,8 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; -mod tenure_downloader_copy_2; -mod tenure_downloader_opy_2; mod tenure_downloader_set; -mod tenure_downloader_set_copy_2; -mod tenure_downloader_set_opy_2; mod tenure_downloader_unconfirmed; -mod tenure_downloader_unconfirmed_copy_2; -mod tenure_downloader_unconfirmed_opy_2; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs deleted file mode 100644 index f7fb970bb6f..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 
2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is its block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle.
In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block.
-/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. - pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option<NakamotoBlock>, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option<NakamotoBlock>, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option<Vec<NakamotoBlock>>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid.
- /// Returns Err(..) if it is not valid. - pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenure in this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation.
- pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). 
- let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option<u64> { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid.
- pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec<NakamotoBlock>, - ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-blocks tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, &block_cursor, count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine. - /// Not all states require an HTTP request for advancement.
- /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result<Option<StacksHttpRequest>, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<bool, NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response.
- pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { - let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e - })?; - let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - Ok(blocks_opt) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - }; - self.idle = true; - handle_result - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs deleted file mode 100644 index f7fb970bb6f..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>.
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is its block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock.
-    ///
-    /// The two fields here are:
-    /// * the block ID of the last block in the tenure (which happens to be the block ID of the
-    /// start block of the next tenure)
-    /// * the deadline by which this state machine needs to have obtained the tenure end-block
-    /// before transitioning to `GetTenureEndBlock`.
-    WaitForTenureEndBlock(StacksBlockId, Instant),
-    /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks
-    /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in
-    /// which we cannot quickly get the tenure-end block.
-    ///
-    /// The field here is the block ID of the tenure end block.
-    GetTenureEndBlock(StacksBlockId),
-    /// Receiving tenure blocks.
-    /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This
-    /// is because a tenure is fetched in order from highest block to lowest block.
-    GetTenureBlocks(StacksBlockId),
-    /// We have gotten all the blocks for this tenure
-    Done,
-}
-
-pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1;
-
-impl fmt::Display for NakamotoTenureDownloadState {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{:?}", self)
-    }
-}
-
-/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs
-/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent
-/// tenures).
-///
-/// This state machine works as follows:
-///
-/// 1. Fetch the first block in the given tenure
-/// 2. Obtain the last block in the given tenure, via one of the following means:
-///    a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this
-///       machine's tenure, and can be copied into this machine.
-///    b. This machine is configured to directly fetch the end-block. This only happens if this
-///       tenure both contains the anchor block for the next reward cycle and happens to be the last
-///       tenure in the current reward cycle.
-///    c. This machine is given the end-block on instantiation. This only happens when the machine
-///       is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure);
-///       in this case, the end-block is the start-block of the ongoing tenure.
-/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse
-///    order. As blocks are found, their signer signatures will be validated against the signer
-///    public keys for this tenure; their hash-chain continuity will be validated against the start
-///    and end block hashes; their quantity will be validated against the tenure-change transaction
-///    in the end-block.
-///
-/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto
-/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of
-/// whether or not it straddles a reward cycle boundary).
-#[derive(Debug, Clone, PartialEq)]
-pub struct NakamotoTenureDownloader {
-    /// Consensus hash that identifies this tenure
-    pub tenure_id_consensus_hash: ConsensusHash,
-    /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and
-    /// sortition DB.
-    pub tenure_start_block_id: StacksBlockId,
-    /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID
-    /// for some other tenure). Learned from the inventory state machine and sortition DB.
-    pub tenure_end_block_id: StacksBlockId,
-    /// Address of who we're asking for blocks
-    pub naddr: NeighborAddress,
-    /// Signer public keys that signed the start-block of this tenure, in reward cycle order
-    pub start_signer_keys: RewardSet,
-    /// Signer public keys that signed the end-block of this tenure
-    pub end_signer_keys: RewardSet,
-    /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with
-    /// this state machine.
-    pub idle: bool,
-
-    /// What state we're in for downloading this tenure
-    pub state: NakamotoTenureDownloadState,
-    /// Tenure-start block
-    pub tenure_start_block: Option<NakamotoBlock>,
-    /// Pre-stored tenure end block (used by the unconfirmed block downloader).
-    /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once
-    /// the start-block for the current tenure is downloaded. This is that start-block, which is
-    /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step.
-    pub tenure_end_block: Option<NakamotoBlock>,
-    /// Tenure-end block header and TenureChange
-    pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>,
-    /// Tenure blocks
-    pub tenure_blocks: Option<Vec<NakamotoBlock>>,
-}
-
-impl NakamotoTenureDownloader {
-    pub fn new(
-        tenure_id_consensus_hash: ConsensusHash,
-        tenure_start_block_id: StacksBlockId,
-        tenure_end_block_id: StacksBlockId,
-        naddr: NeighborAddress,
-        start_signer_keys: RewardSet,
-        end_signer_keys: RewardSet,
-    ) -> Self {
-        debug!(
-            "Instantiate downloader to {} for tenure {}: {}-{}",
-            &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id,
-        );
-        Self {
-            tenure_id_consensus_hash,
-            tenure_start_block_id,
-            tenure_end_block_id,
-            naddr,
-            start_signer_keys,
-            end_signer_keys,
-            idle: false,
-            state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()),
-            tenure_start_block: None,
-            tenure_end_header: None,
-            tenure_end_block: None,
-            tenure_blocks: None,
-        }
-    }
-
-    /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed
-    /// tenure. This supplies the tenure end-block if known in advance.
-    pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self {
-        self.tenure_end_block = Some(tenure_end_block);
-        self
-    }
-
-    /// Is this downloader waiting for the tenure-end block data from some other downloader? Per
-    /// the struct documentation, this is case 2(a).
-    pub fn is_waiting(&self) -> bool {
-        if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state {
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    /// Validate and accept a given tenure-start block. If accepted, then advance the state.
-    /// Returns Ok(()) if the start-block is valid.
-    /// Returns Err(..) if it is not valid.
-    pub fn try_accept_tenure_start_block(
-        &mut self,
-        tenure_start_block: NakamotoBlock,
-    ) -> Result<(), NetError> {
-        let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else {
-            // not the right state for this
-            warn!("Invalid state for this method";
-                  "state" => %self.state);
-            return Err(NetError::InvalidState);
-        };
-
-        if self.tenure_start_block_id != tenure_start_block.header.block_id() {
-            // not the block we were expecting
-            warn!("Invalid tenure-start block: unexpected";
-                  "tenure_id" => %self.tenure_id_consensus_hash,
-                  "tenure_id_start_block" => %self.tenure_start_block_id,
-                  "tenure_start_block ID" => %tenure_start_block.header.block_id(),
-                  "state" => %self.state);
-            return Err(NetError::InvalidMessage);
-        }
-
-        if let Err(e) = tenure_start_block
-            .header
-            .verify_signer_signatures(&self.start_signer_keys)
-        {
-            // signature verification failed
-            warn!("Invalid tenure-start block: bad signer signature";
-                  "tenure_id" => %self.tenure_id_consensus_hash,
-                  "block.header.block_id" => %tenure_start_block.header.block_id(),
-                  "state" => %self.state,
-                  "error" => %e);
-            return Err(NetError::InvalidMessage);
-        }
-
-        debug!(
-            "Accepted tenure-start block for tenure {} block={}",
-            &self.tenure_id_consensus_hash,
-            &tenure_start_block.block_id()
-        );
-        self.tenure_start_block = Some(tenure_start_block);
-
-        if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() {
-            // tenure_end_header supplied externally
-            self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone());
-        } else if let Some(tenure_end_block) = self.tenure_end_block.take() {
-            // we already have the tenure-end block, so immediately proceed to accept it.
-            debug!(
-                "Preemptively process tenure-end block {} for tenure {}",
-                tenure_end_block.block_id(),
-                &self.tenure_id_consensus_hash
-            );
-            self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
-                tenure_end_block.block_id(),
-                Instant::now()
-                    .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
-                    .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
-            );
-            self.try_accept_tenure_end_block(&tenure_end_block)?;
-        } else {
-            // need to get tenure_end_header. By default, assume that another
-            // NakamotoTenureDownloader will provide this block, and allow the
-            // NakamotoTenureDownloaderSet instance that manages a collection of these
-            // state-machines to make the call to require this one to fetch the block directly.
-            self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock(
-                self.tenure_end_block_id.clone(),
-                Instant::now()
-                    .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0))
-                    .ok_or(NetError::OverflowError("Deadline is too big".into()))?,
-            );
-        }
-        Ok(())
-    }
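The waiting state carries a deadline precisely so the set-level scheduler can force a transition even when no sibling downloader ever shows up with the end-block. A hedged, self-contained sketch of that timeout path (stand-in types with integer block IDs; only the deadline check mirrors the `transition_to_fetch_end_block_on_timeout` method that follows):

```rust
use std::time::{Duration, Instant};

// Simplified stand-in for the WaitForTenureEndBlock -> GetTenureEndBlock
// timeout transition; not the real NakamotoTenureDownloadState enum.
#[derive(Debug, PartialEq, Clone, Copy)]
enum WaitState {
    WaitForEndBlock { end_block_id: u64, deadline: Instant },
    GetEndBlock(u64),
}

fn transition_on_timeout(state: &mut WaitState) {
    if let WaitState::WaitForEndBlock { end_block_id, deadline } = *state {
        if deadline < Instant::now() {
            // waited too long -- go fetch the end block directly
            *state = WaitState::GetEndBlock(end_block_id);
        }
    }
}

fn main() {
    let mut st = WaitState::WaitForEndBlock {
        end_block_id: 7,
        deadline: Instant::now() - Duration::from_secs(1), // already expired
    };
    transition_on_timeout(&mut st);
    assert_eq!(st, WaitState::GetEndBlock(7));
}
```

-    /// Transition this state-machine from waiting for its tenure-end block from another
-    /// state-machine to directly fetching it. This only needs to happen if the tenure this state
-    /// machine is downloading contains the PoX anchor block, and it's also the last confirmed
-    /// tenure in this reward cycle.
-    ///
-    /// This function is called by `NakamotoTenureDownloaderSet`, which instantiates, schedules, and
-    /// runs a set of these machines based on the peers' inventory vectors. But because we don't
-    /// know if this is the PoX anchor block tenure (or even the last tenure) until we have
-    /// inventory vectors for this tenure's reward cycle, this state-transition must be driven
-    /// after this machine's instantiation.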
-    pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> {
-        let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state
-        else {
-            return Err(NetError::InvalidState);
-        };
-        debug!(
-            "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)",
-            &self.naddr, &end_block_id
-        );
-        self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id);
-        Ok(())
-    }
-
-    /// Transition to fetching the tenure-end block directly if waiting has taken too long.
-    pub fn transition_to_fetch_end_block_on_timeout(&mut self) {
-        if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) =
-            self.state
-        {
-            if wait_deadline < Instant::now() {
-                debug!(
-                    "Transition downloader to {} to directly fetch tenure-end block {} (timed out)",
-                    &self.naddr, &end_block_id
-                );
-                self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id);
-            }
-        }
-    }
-
-    /// Validate and accept a tenure-end block. If accepted, then advance the state.
-    /// Once accepted, this function extracts the tenure-change transaction and block header from
-    /// this block (it does not need the entire block).
-    ///
-    /// Returns Ok(()) if the block was valid
-    /// Returns Err(..) if the block was invalid
-    pub fn try_accept_tenure_end_block(
-        &mut self,
-        tenure_end_block: &NakamotoBlock,
-    ) -> Result<(), NetError> {
-        if !matches!(
-            &self.state,
-            NakamotoTenureDownloadState::WaitForTenureEndBlock(..)
-                | NakamotoTenureDownloadState::GetTenureEndBlock(_)
-        ) {
-            warn!("Invalid state for this method";
-                  "state" => %self.state);
-            return Err(NetError::InvalidState);
-        };
-        let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
-            warn!("Invalid state -- tenure_start_block is not set");
-            return Err(NetError::InvalidState);
-        };
-
-        if self.tenure_end_block_id != tenure_end_block.header.block_id() {
-            // not the block we asked for
-            warn!("Invalid tenure-end block: unexpected";
-                  "tenure_id" => %self.tenure_id_consensus_hash,
-                  "tenure_id_end_block" => %self.tenure_end_block_id,
-                  "block.header.block_id" => %tenure_end_block.header.block_id(),
-                  "state" => %self.state);
-            return Err(NetError::InvalidMessage);
-        }
-
-        if let Err(e) = tenure_end_block
-            .header
-            .verify_signer_signatures(&self.end_signer_keys)
-        {
-            // bad signature
-            warn!("Invalid tenure-end block: bad signer signature";
-                  "tenure_id" => %self.tenure_id_consensus_hash,
-                  "block.header.block_id" => %tenure_end_block.header.block_id(),
-                  "state" => %self.state,
-                  "error" => %e);
-            return Err(NetError::InvalidMessage);
-        }
-
-        // extract the needful -- need the tenure-change payload (which proves that the tenure-end
-        // block is the tenure-start block for the next tenure) and the parent block ID (which is
-        // the next block to download).
-        let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else {
-            warn!("Invalid tenure-end block: failed to validate tenure-start";
-                  "block_id" => %tenure_end_block.block_id());
-            return Err(NetError::InvalidMessage);
-        };
-
-        if !valid {
-            warn!("Invalid tenure-end block: not a well-formed tenure-start block";
-                  "block_id" => %tenure_end_block.block_id());
-            return Err(NetError::InvalidMessage);
-        }
-
-        let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else {
-            warn!("Invalid tenure-end block: no tenure-change transaction";
-                  "block_id" => %tenure_end_block.block_id());
-            return Err(NetError::InvalidMessage);
-        };
-
-        // tc_payload must point to the tenure-start block's header
-        if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash {
-            warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block";
-                  "start_block_id" => %tenure_start_block.block_id(),
-                  "end_block_id" => %tenure_end_block.block_id(),
-                  "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash,
-                  "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash);
-            return Err(NetError::InvalidMessage);
-        }
-
-        debug!(
-            "Accepted tenure-end header for tenure {} block={}; expect {} blocks",
-            &self.tenure_id_consensus_hash,
-            &tenure_end_block.block_id(),
-            tc_payload.previous_tenure_blocks
-        );
-        self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone()));
-        self.state = NakamotoTenureDownloadState::GetTenureBlocks(
-            tenure_end_block.header.parent_block_id.clone(),
-        );
-        Ok(())
-    }
-
-    /// Determine how many blocks must be in this tenure.
-    /// Returns None if we don't have the start and end blocks yet.
-    pub fn tenure_length(&self) -> Option<u64> {
-        self.tenure_end_header
-            .as_ref()
-            .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks))
-    }
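The block-acceptance routine that follows validates contiguity by walking from the highest block down, matching each block against a running cursor that then moves to that block's parent. A self-contained sketch of just that check, with integer IDs standing in for `StacksBlockId`:

```rust
// Assumed simplification: blocks arrive ordered from highest to lowest, and
// each block's ID must equal the expected cursor, which then advances to the
// block's parent. Returns the final cursor (the next block still needed).
struct Block {
    id: u64,
    parent_id: u64,
}

fn check_contiguous(blocks: &[Block], mut expected_id: u64) -> Result<u64, String> {
    for block in blocks {
        if block.id != expected_id {
            return Err(format!("unexpected block {} (wanted {})", block.id, expected_id));
        }
        expected_id = block.parent_id; // the next block must be this one's parent
    }
    Ok(expected_id)
}

fn main() {
    let blocks = vec![Block { id: 3, parent_id: 2 }, Block { id: 2, parent_id: 1 }];
    // the cursor starts at the highest block and walks down toward the tenure start
    assert_eq!(check_contiguous(&blocks, 3), Ok(1));
    assert!(check_contiguous(&blocks, 9).is_err());
}
```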
-    /// Add downloaded tenure blocks to this machine.
-    /// If we have collected all tenure blocks, then return them and transition to the Done state.
-    ///
-    /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in
-    /// ascending order by height, and will include the tenure-start block but exclude the
-    /// tenure-end block.
-    /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to
-    /// the next block to fetch (stored in self.state) will be updated.
-    /// Returns Err(..) if the blocks were invalid.
-    pub fn try_accept_tenure_blocks(
-        &mut self,
-        mut tenure_blocks: Vec<NakamotoBlock>,
-    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
-            warn!("Invalid state for this method";
-                  "state" => %self.state);
-            return Err(NetError::InvalidState);
-        };
-
-        if tenure_blocks.is_empty() {
-            // nothing to do
-            return Ok(None);
-        }
-
-        // blocks must be contiguous and in order from highest to lowest
-        let mut expected_block_id = block_cursor;
-        let mut count = 0;
-        for block in tenure_blocks.iter() {
-            if &block.header.block_id() != expected_block_id {
-                warn!("Unexpected Nakamoto block -- not part of tenure";
-                      "expected_block_id" => %expected_block_id,
-                      "block_id" => %block.header.block_id(),
-                      "state" => %self.state);
-                return Err(NetError::InvalidMessage);
-            }
-
-            if let Err(e) = block
-                .header
-                .verify_signer_signatures(&self.start_signer_keys)
-            {
-                warn!("Invalid block: bad signer signature";
-                      "tenure_id" => %self.tenure_id_consensus_hash,
-                      "block.header.block_id" => %block.header.block_id(),
-                      "state" => %self.state,
-                      "error" => %e);
-                return Err(NetError::InvalidMessage);
-            }
-
-            expected_block_id = &block.header.parent_block_id;
-            count += 1;
-            if self
-                .tenure_blocks
-                .as_ref()
-                .map(|blocks| blocks.len())
-                .unwrap_or(0)
-                .saturating_add(count)
-                > self.tenure_length().unwrap_or(0) as usize
-            {
-                // there are more blocks downloaded than indicated by the end-block's tenure-change
-                // transaction.
-                warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0);
-                      "tenure_id" => %self.tenure_id_consensus_hash,
-                      "count" => %count,
-                      "tenure_length" => self.tenure_length().unwrap_or(0),
-                      "num_blocks" => tenure_blocks.len());
-                return Err(NetError::InvalidMessage);
-            }
-        }
-
-        if let Some(blocks) = self.tenure_blocks.as_mut() {
-            blocks.append(&mut tenure_blocks);
-        } else {
-            self.tenure_blocks = Some(tenure_blocks);
-        }
-
-        // did we reach the tenure start block?
-        let Some(blocks) = self.tenure_blocks.as_ref() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no blocks (infallible -- got None)");
-            return Err(NetError::InvalidState);
-        };
-
-        let Some(earliest_block) = blocks.last() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no blocks (infallible -- got empty vec)");
-            return Err(NetError::InvalidState);
-        };
-
-        let Some(tenure_start_block) = self.tenure_start_block.as_ref() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no tenure-start block (infallible)");
-            return Err(NetError::InvalidState);
-        };
-
-        debug!(
-            "Accepted tenure blocks for tenure {} cursor={} ({})",
-            &self.tenure_id_consensus_hash, &block_cursor, count
-        );
-        if earliest_block.block_id() != tenure_start_block.block_id() {
-            // still have more blocks to download
-            let next_block_id = earliest_block.header.parent_block_id.clone();
-            debug!(
-                "Need more blocks for tenure {} (went from {} to {}, next is {})",
-                &self.tenure_id_consensus_hash,
-                &block_cursor,
-                &earliest_block.block_id(),
-                &next_block_id
-            );
-            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
-            return Ok(None);
-        }
-
-        // finished!
-        self.state = NakamotoTenureDownloadState::Done;
-        Ok(self
-            .tenure_blocks
-            .take()
-            .map(|blocks| blocks.into_iter().rev().collect()))
-    }
-
-    /// Produce the next HTTP request that, when successfully executed, will fetch the data needed
-    /// to advance this state machine.
-    /// Not all states require an HTTP request for advancement.
-    ///
-    /// Returns Ok(Some(request)) if a request is needed
-    /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's
-    /// state)
-    /// Returns Err(()) if we're done.
-    pub fn make_next_download_request(
-        &self,
-        peerhost: PeerHost,
-    ) -> Result<Option<StacksHttpRequest>, ()> {
-        let request = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => {
-                debug!("Request tenure-start block {}", &start_block_id);
-                StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
-            }
-            NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => {
-                // we're waiting for some other downloader's block-fetch to complete
-                debug!(
-                    "Waiting for tenure-end block {} until {:?}",
-                    &_block_id, _deadline
-                );
-                return Ok(None);
-            }
-            NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => {
-                debug!("Request tenure-end block {}", &end_block_id);
-                StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
-            }
-            NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => {
-                debug!("Downloading tenure ending at {}", &end_block_id);
-                StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
-            }
-            NakamotoTenureDownloadState::Done => {
-                // nothing more to do
-                return Err(());
-            }
-        };
-        Ok(Some(request))
-    }
-
-    /// Begin the next download request for this state machine. The request will be sent to the
-    /// data URL corresponding to self.naddr.
-    /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The
-    /// caller should try this again until it gets one of the other possible return values.
-    /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken)
-    /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to
-    /// resolve its data URL to a socket address.
-    pub fn send_next_download_request(
-        &mut self,
-        network: &mut PeerNetwork,
-        neighbor_rpc: &mut NeighborRPC,
-    ) -> Result<bool, NetError> {
-        if neighbor_rpc.has_inflight(&self.naddr) {
-            debug!("Peer {} has an inflight request", &self.naddr);
-            return Ok(true);
-        }
-        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
-            return Err(NetError::PeerNotConnected);
-        }
-
-        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
-            // no conversation open to this neighbor
-            neighbor_rpc.add_dead(network, &self.naddr);
-            return Err(NetError::PeerNotConnected);
-        };
-
-        let request = match self.make_next_download_request(peerhost) {
-            Ok(Some(request)) => request,
-            Ok(None) => {
-                return Ok(true);
-            }
-            Err(_) => {
-                return Ok(false);
-            }
-        };
-
-        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
-        self.idle = false;
-        Ok(true)
-    }
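The request constructor above maps each state to at most one HTTP request: fetch states yield a concrete request, the waiting state yields `Ok(None)`, and the terminal state yields `Err(())`. A simplified stand-in of that mapping (the URL paths here are illustrative placeholders, not the exact endpoints `StacksHttpRequest` builds):

```rust
// Stand-in for the per-state request mapping; integer IDs replace
// StacksBlockId, and the paths are assumptions for illustration only.
#[derive(Debug, Clone, PartialEq)]
enum ReqState {
    GetStartBlock(u64),
    WaitForEndBlock(u64),
    GetBlocks(u64),
    Done,
}

fn make_request(state: &ReqState) -> Result<Option<String>, ()> {
    match state {
        ReqState::GetStartBlock(id) => Ok(Some(format!("/blocks/{}", id))),
        ReqState::WaitForEndBlock(_) => Ok(None), // another machine will supply it
        ReqState::GetBlocks(id) => Ok(Some(format!("/tenures/{}", id))),
        ReqState::Done => Err(()), // nothing left to request
    }
}

fn main() {
    assert_eq!(make_request(&ReqState::GetStartBlock(1)), Ok(Some("/blocks/1".to_string())));
    assert_eq!(make_request(&ReqState::WaitForEndBlock(2)), Ok(None));
    assert!(make_request(&ReqState::Done).is_err());
}
```

The three-way return type is what lets the caller distinguish "idle but blocked on a sibling machine" from "finished", without inspecting the state directly.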
-    /// Handle a received StacksHttpResponse and advance the state machine.
-    /// If we get the full tenure's blocks, then return them.
-    /// Returns Ok(Some([blocks])) if we successfully complete the state machine.
-    /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done
-    /// yet. The caller should now call `send_next_download_request()`
-    /// Returns Err(..) on failure to process the response.
-    pub fn handle_next_download_response(
-        &mut self,
-        response: StacksHttpResponse,
-    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        let handle_result = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
-                debug!(
-                    "Got download response for tenure-start block {}",
-                    &_block_id
-                );
-                let block = response.decode_nakamoto_block().map_err(|e| {
-                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
-                    e
-                })?;
-                self.try_accept_tenure_start_block(block)?;
-                Ok(None)
-            }
-            NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => {
-                debug!("Invalid state -- Got download response for WaitForTenureEndBlock");
-                Err(NetError::InvalidState)
-            }
-            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
-                debug!("Got download response for tenure-end block {}", &_block_id);
-                let block = response.decode_nakamoto_block().map_err(|e| {
-                    warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
-                    e
-                })?;
-                self.try_accept_tenure_end_block(&block)?;
-                Ok(None)
-            }
-            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
-                debug!(
-                    "Got download response for tenure blocks ending at {}",
-                    &_end_block_id
-                );
-                let blocks = response.decode_nakamoto_tenure().map_err(|e| {
-                    warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
-                    e
-                })?;
-                let blocks_opt = self.try_accept_tenure_blocks(blocks)?;
-                Ok(blocks_opt)
-            }
-            NakamotoTenureDownloadState::Done => Err(NetError::InvalidState),
-        };
-        self.idle = true;
-        handle_result
-    }
-
-    pub fn is_done(&self) -> bool {
-        self.state == NakamotoTenureDownloadState::Done
-    }
-}
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs
deleted file mode 100644
index 28a40e7eb50..00000000000
--- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs
+++ /dev/null
@@ -1,660 +0,0 @@
-// Copyright (C) 2020-2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
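The `NakamotoTenureDownloaderSet` deleted below multiplexes many downloaders over few peers. Its core bookkeeping is a peers-to-index map into a vector of optional downloader slots, so clearing one downloader never invalidates another peer's index. A minimal sketch of that pattern, with string stand-ins for the real peer and downloader types:

```rust
use std::collections::HashMap;

// Simplified stand-in for the set's slot bookkeeping; `String` replaces both
// NeighborAddress and NakamotoTenureDownloader.
struct DownloaderSet {
    downloaders: Vec<Option<String>>,
    peers: HashMap<String, usize>,
}

impl DownloaderSet {
    fn new() -> Self {
        Self { downloaders: vec![], peers: HashMap::new() }
    }

    /// Assign a downloader to a peer, reusing the peer's slot if it has one.
    fn add_downloader(&mut self, peer: &str, dl: String) {
        if let Some(&idx) = self.peers.get(peer) {
            self.downloaders[idx] = Some(dl); // reuse the existing slot
        } else {
            self.downloaders.push(Some(dl));
            self.peers.insert(peer.to_string(), self.downloaders.len() - 1);
        }
    }

    /// Drop a peer's downloader without disturbing other peers' indexes.
    fn clear_downloader(&mut self, peer: &str) {
        if let Some(idx) = self.peers.remove(peer) {
            self.downloaders[idx] = None; // vacate, but keep indexes stable
        }
    }
}

fn main() {
    let mut set = DownloaderSet::new();
    set.add_downloader("peer-a", "tenure-1".to_string());
    set.add_downloader("peer-a", "tenure-2".to_string()); // replaces in place
    assert_eq!(set.downloaders.len(), 1);
    set.clear_downloader("peer-a");
    assert_eq!(set.downloaders[0], None);
}
```

Keeping vacated slots as `None` rather than removing them is what lets a single peer be re-bound across many machines over time, which the file's own documentation calls out as the way progress is made even with one available peer.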
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. 
- pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. 
Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) 
= - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. 
- pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque, - available: &mut HashMap>, - tenure_block_ids: &HashMap, - count: usize, - current_reward_cycles: &BTreeMap, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. 
- /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) 
{ - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs deleted file mode 100644 index 28a40e7eb50..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. 
The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. - pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. 
- pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. 
- pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? 
If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. - pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque<ConsensusHash>, - available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>, - tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>, - count: usize, - current_reward_cycles: &BTreeMap<u64, CurrentRewardSet>, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others.
- debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP request. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. - /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..)
{ - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs deleted file mode 100644 index c96f718d2b9..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
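For orientation, the request/response cadence that the `run` method above follows (at most one in-flight request per peer, replies advance per-peer state machines, and dead or finished machines are pruned between phases) can be sketched generically. `ToyMachine` and `drive` below are hypothetical stand-ins, not APIs from this codebase.

```rust
/// Illustrative trait standing in for a per-peer download state machine.
trait ToyMachine {
    /// Compute the next request body, if any (None = nothing to send).
    fn next_request(&mut self) -> Option<String>;
    /// Feed a reply back in; returns true once the machine is finished.
    fn handle_reply(&mut self, reply: String) -> bool;
}

/// One pass: send phase, then reply phase, then prune finished machines.
fn drive<M: ToyMachine>(machines: &mut Vec<M>, replies: Vec<(usize, String)>) {
    // send phase: each machine gets a chance to produce its next request
    for m in machines.iter_mut() {
        if let Some(_request) = m.next_request() {
            // in the real code this is handed to the RPC/transport layer
        }
    }
    // reply phase: advance each machine that received a response
    let mut done: Vec<usize> = Vec::new();
    for (idx, reply) in replies {
        if let Some(m) = machines.get_mut(idx) {
            if m.handle_reply(reply) {
                done.push(idx);
            }
        }
    }
    // prune finished machines from the back so indices stay valid
    done.sort_unstable();
    done.dedup();
    for idx in done.into_iter().rev() {
        machines.remove(idx);
    }
}
```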
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::RewardCycleInfo; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for unconfirmed tenures. These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is the tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1.
Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option<RewardSet>, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option<RewardSet>, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option<StacksBlockId>, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option<u64>, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option<RPCGetTenureInfo>, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option<NakamotoBlock>, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option<Vec<NakamotoBlock>>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option<StacksBlockId>) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure.
It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? 
- .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? 
- .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. 
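A hedged summary of the three checks the method below performs on a candidate tenure-start block, using simplified stand-in types (`ToyBlock` and its `signature_ok` flag approximate `NakamotoBlock` and `verify_signer_signatures`; the real code returns `NetError` variants rather than strings):

```rust
/// Toy stand-in for a block header; not the real NakamotoBlock.
struct ToyBlock {
    block_id: [u8; 32],        // stand-in for StacksBlockId
    consensus_hash: [u8; 20],  // stand-in for ConsensusHash
    signature_ok: bool,        // stand-in for verify_signer_signatures(...)
}

fn accept_tenure_start_block(
    expected_block_id: &[u8; 32],
    tip_consensus_hash: &[u8; 20],
    block: &ToyBlock,
) -> Result<(), &'static str> {
    // 1. stacker signatures must validate against the current reward set
    if !block.signature_ok {
        return Err("bad signer signature");
    }
    // 2. the block must be exactly the one we asked for
    if &block.block_id != expected_block_id {
        return Err("unexpected block id");
    }
    // 3. the block must belong to the ongoing tenure
    if &block.consensus_hash != tip_consensus_hash {
        return Err("consensus hash mismatch");
    }
    Ok(())
}
```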
- pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. 
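The contiguity rule the next method enforces (blocks arrive highest-first, and each block's ID must equal the parent pointer of the block before it) reduces to a short walk. A sketch with toy types; the real code additionally checks signer signatures, the tenure-start condition, and the highest-processed cutoffs:

```rust
/// Toy header: a block ID plus a link to its parent (u64 stands in for
/// StacksBlockId). Illustrative only.
struct ToyHeader {
    block_id: u64,
    parent_block_id: u64,
}

/// Walk a highest-first batch, verifying each block is the one the
/// previous block's parent pointer promised. Returns the next ID to
/// fetch (the last parent seen) so the caller can resume the download.
fn validate_descending_chain(
    mut expected_id: u64,
    batch: &[ToyHeader],
) -> Result<u64, &'static str> {
    for hdr in batch {
        if hdr.block_id != expected_id {
            return Err("block is not part of the requested tenure");
        }
        expected_id = hdr.parent_block_id;
    }
    Ok(expected_id)
}
```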
- pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. 
- if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. 
- /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure. - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result<bool, NetError> { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result<NakamotoTenureDownloader, NetError> { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done. - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks!
Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? 
- pub fn is_done(&self) -> bool { - self.state == NakamotoUnconfirmedDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs deleted file mode 100644 index c96f718d2b9..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::RewardCycleInfo; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for a unconfirmed tenures. 
These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). 
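What "checked upon receipt against the burnchain state" amounts to is a canonicality test: the snapshot found for a claimed consensus hash must also be the snapshot at that height on the local canonical fork. A hedged sketch with toy types (`ToySnapshot` stands in for `BlockSnapshot`; the closure stands in for the sortition index handle's ancestor lookup):

```rust
/// Toy stand-in for a sortition identifier.
#[derive(PartialEq, Clone, Copy)]
struct ToySortitionId(u64);

/// Toy stand-in for a burnchain block snapshot.
struct ToySnapshot {
    height: u64,
    sortition_id: ToySortitionId,
}

/// The claimed snapshot is trusted only if the ancestor of the local tip
/// at the same height is the same sortition; otherwise the claimed
/// consensus hash lives on a different fork.
fn is_canonical(
    claimed: &ToySnapshot,
    snapshot_at_height: impl Fn(u64) -> Option<ToySnapshot>,
) -> bool {
    snapshot_at_height(claimed.height)
        .map_or(false, |anc| anc.sortition_id == claimed.sortition_id)
}
```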
- pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? 
- .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? 
- .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. 
- pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. 
- pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. 
- if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. 
- /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks! 
Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? 
- pub fn is_done(&self) -> bool { - self.state == NakamotoUnconfirmedDownloadState::Done - } -} From 387dbc349dc056b988c711d38dbb9ceb76866689 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 11:02:44 -0400 Subject: [PATCH 318/910] fix: replace async code in event dispatcher The reason behind this is that with the async code we had, we were unable to successfully implement a timeout, so when there was a network glitch, it would get stuck at `TcpStream::connect`, and crash after 60 minutes. This new implementation ensures that all parts of the event dispatcher networking code are able to fail with a timeout. --- testnet/stacks-node/src/event_dispatcher.rs | 357 +++++++++++++++++--- 1 file changed, 308 insertions(+), 49 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ad0b70a2f1d..fcfc0849695 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,20 +16,18 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::io::{Read, Write}; +use std::net::{TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Mutex; use std::thread::sleep; -use std::time::Duration; +use std::time::{Duration, Instant}; -use async_h1::client; -use async_std::future::timeout; -use async_std::net::TcpStream; -use async_std::task; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; -use http_types::{Method, Request, Url}; +use http_types::Url; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -313,12 +311,112 @@ impl RewardSetEventPayload { } } +fn send_request( + host: &str, + port: u16, + body: &[u8], + url: &Url, + timeout: Duration, +) -> Result { + let addr = format!("{}:{}", host, port) + .to_socket_addrs()? 
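+        // Take the first address that host:port resolves to; TcpStream::connect_timeout() below bounds only the connect phase, while the read/write timeouts cover the rest of the exchange.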
+ .next() + .ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::NotFound, "No valid address found") + })?; + let mut stream = TcpStream::connect_timeout(&addr, timeout)?; + stream.set_read_timeout(Some(timeout))?; + stream.set_write_timeout(Some(timeout))?; + + let request = format!( + "POST {} HTTP/1.1\r\n\ + Host: {}\r\n\ + Content-Type: application/json\r\n\ + Content-Length: {}\r\n\ + Connection: close\r\n\ + \r\n", + url.path(), + host, + body.len(), + ); + debug!("Event dispatcher: Sending request"; "request" => &request); + + stream.write_all(request.as_bytes())?; + stream.write_all(body)?; + stream.flush()?; + debug!("Event dispatcher: Request sent"); + + let mut response = Vec::new(); + let mut buffer = [0; 512]; + let mut headers_parsed = false; + let mut content_length = None; + let mut total_read = 0; + + let start_time = Instant::now(); + + while total_read < content_length.unwrap_or(usize::MAX) { + if start_time.elapsed() >= timeout { + return Err(std::io::Error::new( + std::io::ErrorKind::TimedOut, + "Response reading timed out", + )); + } + + let bytes_read = stream.read(&mut buffer)?; + if bytes_read == 0 { + // Connection closed + break; + } + + response.extend_from_slice(&buffer[..bytes_read]); + + // Parse headers if not already done + if !headers_parsed { + if let Some(headers_end) = response.windows(4).position(|window| window == b"\r\n\r\n") + { + headers_parsed = true; + // Parse Content-Length header + let headers = &response[..headers_end]; + let headers_str = String::from_utf8_lossy(headers); + if let Some(content_length_line) = headers_str + .lines() + .find(|line| line.to_lowercase().starts_with("content-length:")) + { + let length_str = content_length_line + .split(":") + .nth(1) + // This is safe because we already know the line starts with "Content-Length:" + .expect("unreachable"); + match length_str.trim().parse::() { + Ok(len) => content_length = Some(len), + Err(_) => { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid Content-Length header", + )) + } + } + } + total_read = response[headers_end + 4..].len(); + } + } else { + total_read += bytes_read; + } + } + + let response_str = String::from_utf8_lossy(&response).to_string(); + debug!("Event dispatcher: Response received"; "response" => &response_str); + + Ok(response_str) +} + impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { debug!( "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload ); - let body = match serde_json::to_vec(&payload) { + + let body = match serde_json::to_vec(payload) { Ok(body) => body, Err(err) => { error!("Event dispatcher: serialization failed - {:?}", err); @@ -327,57 +425,37 @@ impl EventObserver { }; let url = { - let joined_components = match path.starts_with('/') { - true => format!("{}{}", &self.endpoint, path), - false => format!("{}/{}", &self.endpoint, path), + let joined_components = if path.starts_with('/') { + format!("{}{}", &self.endpoint, path) + } else { + format!("{}/{}", &self.endpoint, path) }; let url = format!("http://{}", joined_components); Url::parse(&url) .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", url)) }; - let backoff = Duration::from_millis((1.0 * 1_000.0) as u64); - let connection_timeout = Duration::from_secs(5); + let host = url.host_str().expect("Invalid URL: missing host"); + let port = url.port_or_known_default().unwrap_or(80); - loop { - let body = body.clone(); - let mut req = Request::new(Method::Post, 
url.clone()); - req.append_header("Content-Type", "application/json"); - req.set_body(body); - - let response = task::block_on(async { - let stream = - match timeout(connection_timeout, TcpStream::connect(&self.endpoint)).await { - Ok(Ok(stream)) => stream, - Ok(Err(err)) => { - warn!("Event dispatcher: connection failed - {:?}", err); - return None; - } - Err(_) => { - error!("Event dispatcher: connection attempt timed out"); - return None; - } - }; + let backoff = Duration::from_millis(1000); // 1 second - match client::connect(stream, req).await { - Ok(response) => Some(response), - Err(err) => { - warn!("Event dispatcher: rpc invocation failed - {:?}", err); - None + loop { + match send_request(host, port, &body, &url, backoff) { + Ok(response) => { + if response.starts_with("HTTP/1.1 200") { + debug!( + "Event dispatcher: Successful POST"; "url" => %url + ); + break; + } else { + error!( + "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response + ); } } - }); - - if let Some(response) = response { - if response.status().is_success() { - debug!( - "Event dispatcher: Successful POST"; "url" => %url - ); - break; - } else { - error!( - "Event dispatcher: Failed POST"; "url" => %url, "err" => ?response - ); + Err(err) => { + warn!("Event dispatcher: connection or request failed - {:?}", err); } } sleep(backoff); @@ -1483,6 +1561,10 @@ impl EventDispatcher { #[cfg(test)] mod test { + use std::net::TcpListener; + use std::thread; + use std::time::Instant; + use clarity::vm::costs::ExecutionCost; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; @@ -1494,7 +1576,7 @@ mod test { use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; - use crate::event_dispatcher::EventObserver; + use super::*; #[test] fn build_block_processed_event() { @@ -1615,4 +1697,181 @@ mod test { .expect("Unable to deserialize array of MessageSignature"); assert_eq!(event_signer_signature, signer_signature); } + + #[test] + fn test_send_request_connect_timeout() { + let timeout_duration = Duration::from_secs(3); + + // Start measuring time + let start_time = Instant::now(); + + // Attempt to send a request with a timeout + let result = send_request( + "10.255.255.1", // Non-routable IP for timeout + 80, // HTTP port + b"{}", // Example empty JSON body + &Url::parse("http://10.255.255.1/").expect("Failed to parse URL"), + timeout_duration, + ); + + // Measure the elapsed time + let elapsed_time = start_time.elapsed(); + + // Assert that the connection attempt timed out + assert!( + result.is_err(), + "Expected a timeout error, but got {:?}", + result + ); + assert_eq!( + result.unwrap_err().kind(), + std::io::ErrorKind::TimedOut, + "Expected a TimedOut error" + ); + + // Assert that the elapsed time is within an acceptable range + assert!( + elapsed_time >= timeout_duration, + "Timeout occurred too quickly" + ); + assert!( + elapsed_time < timeout_duration + Duration::from_secs(1), + "Timeout took too long" + ); + } + + #[test] + fn test_send_request_timeout() { + // Set up a TcpListener that accepts a connection but delays response + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); + let addr = listener.local_addr().unwrap(); + + // Spawn a thread that will accept the connection and do nothing, simulating a long delay + thread::spawn(move || { + let (stream, _addr) = listener.accept().unwrap(); + // Hold the connection open to simulate a delay 
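+            // Sleeping longer than the client's 2-second timeout below guarantees that the read path, not the connect path, is what times out.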
+ thread::sleep(Duration::from_secs(10)); + drop(stream); // Close the stream + }); + + // Set a timeout shorter than the sleep duration to force a timeout + let connection_timeout = Duration::from_secs(2); + + // Attempt to connect, expecting a timeout error + let result = send_request( + "127.0.0.1", + addr.port(), + b"{}", + &Url::parse("http://127.0.0.1/").unwrap(), + connection_timeout, + ); + + // Assert that the result is an error, specifically a timeout + assert!( + result.is_err(), + "Expected a timeout error, got: {:?}", + result + ); + + if let Err(err) = result { + assert_eq!( + err.kind(), + std::io::ErrorKind::WouldBlock, + "Expected TimedOut error, got: {:?}", + err + ); + } + } + + fn start_mock_server(response: &str, client_done_signal: Receiver<()>) -> String { + // Bind to an available port on localhost + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); + let addr = listener.local_addr().unwrap(); + + debug!("Mock server listening on {}", addr); + + // Start the server in a new thread + let response = response.to_string(); + thread::spawn(move || { + for stream in listener.incoming() { + debug!("Mock server accepted connection"); + let mut stream = stream.expect("Failed to accept connection"); + + // Read the client's request (even if we don't do anything with it) + let mut buffer = [0; 512]; + let _ = stream.read(&mut buffer); + debug!("Mock server received request"); + + // Simulate a basic HTTP response + stream + .write_all(response.as_bytes()) + .expect("Failed to write response"); + stream.flush().expect("Failed to flush stream"); + debug!("Mock server sent response"); + + // Wait for the client to signal that it's done reading + client_done_signal + .recv() + .expect("Failed to receive client done signal"); + + debug!("Mock server closing connection"); + + // Explicitly drop the stream after signaling to ensure the client finishes + drop(stream); + break; // Close after the first request + } + }); + + // Return the address of the mock server + format!("{}:{}", addr.ip(), addr.port()) + } + + fn parse_http_response(response: &str) -> &str { + let parts: Vec<&str> = response.split("\r\n\r\n").collect(); + if parts.len() == 2 { + parts[1] // The body is after the second \r\n\r\n + } else { + "" + } + } + + #[test] + fn test_send_request_success() { + // Prepare the mock server to return a successful HTTP response + let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; + + // Create a channel to signal when the client is done reading + let (tx_client_done, rx_client_done) = channel(); + let server_addr = start_mock_server(mock_response, rx_client_done); + let timeout_duration = Duration::from_secs(5); + + // Attempt to send a request to the mock server + let result = send_request( + &server_addr.split(':').collect::>()[0], // Host part + server_addr.split(':').collect::>()[1] + .parse() + .unwrap(), // Port part + b"{}", // Example JSON body + &Url::parse(&format!("http://{}/", server_addr)).expect("Failed to parse URL"), + timeout_duration, + ); + debug!("Got result: {:?}", result); + + // Ensure the server only closes after the client has finished processing + if let Ok(response) = &result { + let body = parse_http_response(response); + assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); + } + + tx_client_done + .send(()) + .expect("Failed to send close signal"); + + // Assert that the connection was successful + assert!( + result.is_ok(), + "Expected a successful request, but got 
{:?}", + result + ); + } } From 175009bc6c11ec99d767a66e9b83cd4d619aaa11 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 21 Aug 2024 11:33:17 -0400 Subject: [PATCH 319/910] fix: Make config opts in `stop_bitcoind()` match those in `start_bitcoind()` --- .../stacks-node/src/tests/bitcoin_regtest.rs | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 3fbfa519862..d829b76a83b 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -58,12 +58,9 @@ impl BitcoinCoreController { .arg("-server=1") .arg("-listenonion=0") .arg("-rpcbind=127.0.0.1") - .arg(&format!("-port={}", self.config.burnchain.peer_port)) - .arg(&format!( - "-datadir={}", - self.config.get_burnchain_path_str() - )) - .arg(&format!("-rpcport={}", self.config.burnchain.rpc_port)); + .arg(format!("-port={}", self.config.burnchain.peer_port)) + .arg(format!("-datadir={}", self.config.get_burnchain_path_str())) + .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); match ( &self.config.burnchain.username, @@ -71,8 +68,8 @@ impl BitcoinCoreController { ) { (Some(username), Some(password)) => { command - .arg(&format!("-rpcuser={}", username)) - .arg(&format!("-rpcpassword={}", password)); + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); } _ => {} } @@ -81,7 +78,7 @@ impl BitcoinCoreController { let mut process = match command.spawn() { Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), + Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), }; let mut out_reader = BufReader::new(process.stdout.take().unwrap()); @@ -111,14 +108,25 @@ impl BitcoinCoreController { command .stdout(Stdio::piped()) .arg("-rpcconnect=127.0.0.1") - .arg("-rpcport=8332") - .arg("-rpcuser=neon-tester") - .arg("-rpcpassword=neon-tester-pass") - .arg("stop"); + .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + + match ( + &self.config.burnchain.username, + &self.config.burnchain.password, + ) { + (Some(username), Some(password)) => { + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); + } + _ => {} + } + + command.arg("stop"); let mut process = match command.spawn() { Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), + Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), }; let mut out_reader = BufReader::new(process.stdout.take().unwrap()); @@ -127,7 +135,7 @@ impl BitcoinCoreController { if bytes_read == 0 { break; } - eprintln!("{}", &line); + eprintln!("{line}"); } } Ok(()) From 728177e57c86d67fb4e9541fdd1f97db146bb359 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 18:49:23 +0300 Subject: [PATCH 320/910] update the workflow dispatch to match the develop branch one --- .github/workflows/pr-differences-mutants.yml | 30 ++++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 85b2a49ea66..d53e2ca661a 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -10,15 +10,6 @@ on: paths: - '**.rs' workflow_dispatch: - inputs: - ignore_timeout: - description: "Ignore mutants timeout limit" - required: false - type: choice - options: - - true - - false 
- default: 'true' concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -26,9 +17,26 @@ concurrency: cancel-in-progress: true jobs: + check-access-permissions: + name: Check Access Permissions + runs-on: ubuntu-latest + + steps: + - name: Check Access Permissions To Trigger This + id: check_access_permissions + uses: stacks-network/actions/team-membership@main + with: + username: ${{ github.actor }} + team: 'blockchain-team' + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + + outputs: + ignore_timeout: ${{ steps.check_access_permissions.outputs.is_team_member == 'true' && github.event_name == 'workflow_dispatch' }} + # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards + needs: check-access-permissions runs-on: ubuntu-latest @@ -40,10 +48,13 @@ jobs: run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }} small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }} run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }} steps: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + with: + ignore_timeout: ${{ needs.check-access-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: @@ -230,3 +241,4 @@ jobs: small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }} shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }} stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }} From e42d6abddd4cef3c395f32fe5eb030e50c653735 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 12:06:23 -0400 Subject: [PATCH 321/910] test: add `send_payload` integration tests --- Cargo.lock | 1 + testnet/stacks-node/Cargo.toml | 1 + testnet/stacks-node/src/event_dispatcher.rs | 92 +++++++++++++++++++++ 3 files changed, 94 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 357a9def709..1e7e6b6b422 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3421,6 +3421,7 @@ dependencies = [ "stackslib", "stx-genesis", "tikv-jemallocator", + "tiny_http", "tokio", "toml 0.5.11", "tracing", diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index aa72f814db8..8708aea2ce4 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -49,6 +49,7 @@ tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" +tiny_http = "0.12.0" [[bin]] name = "stacks-node" diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index fcfc0849695..00920db88dc 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1575,6 +1575,7 @@ mod test { use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; + use tiny_http::{Method, Response, Server, StatusCode}; use super::*; @@ -1874,4 +1875,95 @@ mod test { 
result ); } + + fn get_random_port() -> u16 { + // Bind to a random port by specifying port 0, then retrieve the port assigned by the OS + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind to a random port"); + listener.local_addr().unwrap().port() + } + + #[test] + fn test_send_payload_success() { + let port = get_random_port(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let request = server.recv().unwrap(); + assert_eq!(request.url(), "/test"); + assert_eq!(request.method(), &Method::Post); + + // Simulate a successful response + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed + tx.send(()).unwrap(); + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + }; + + let payload = json!({"key": "value"}); + + observer.send_payload(&payload, "/test"); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } + + #[test] + fn test_send_payload_retry() { + let port = get_random_port(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + while let Ok(request) = server.recv() { + attempt += 1; + if attempt == 1 { + debug!("Mock server received request attempt 1"); + // Simulate a failure on the first attempt + let response = Response::new( + StatusCode(500), + vec![], + "Internal Server Error".as_bytes(), + Some(21), + None, + ); + request.respond(response).unwrap(); + } else { + debug!("Mock server received request attempt 2"); + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + } + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + }; + + let payload = json!({"key": "value"}); + + observer.send_payload(&payload, "/test"); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } From b480038810a356e60f66102f9acb7e0c6a8a98a3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 12:06:47 -0400 Subject: [PATCH 322/910] chore: fix warning --- testnet/stacks-node/src/nakamoto_node/peer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index facb1dd8357..004023ea263 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -16,8 +16,8 @@ use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; +use std::thread; use std::time::Duration; -use std::{cmp, thread}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::PoxConstants; From 7bc0b8be8241fbd9d286a5ed99244a3a922935b7 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 21 Aug 2024 11:52:30 -0400 Subject: [PATCH 323/910] 
refactor: Move some shared code out into separate function --- .../stacks-node/src/tests/bitcoin_regtest.rs | 52 ++++++++----------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index d829b76a83b..6619152f9ff 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -44,6 +44,22 @@ impl BitcoinCoreController { } } + fn add_rpc_cli_args(&self, command: &mut Command) { + command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + + match ( + &self.config.burnchain.username, + &self.config.burnchain.password, + ) { + (Some(username), Some(password)) => { + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); + } + _ => {} + } + } + pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); @@ -59,22 +75,11 @@ impl BitcoinCoreController { .arg("-listenonion=0") .arg("-rpcbind=127.0.0.1") .arg(format!("-port={}", self.config.burnchain.peer_port)) - .arg(format!("-datadir={}", self.config.get_burnchain_path_str())) - .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + .arg(format!("-datadir={}", self.config.get_burnchain_path_str())); - match ( - &self.config.burnchain.username, - &self.config.burnchain.password, - ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} - } + self.add_rpc_cli_args(&mut command); - eprintln!("bitcoind spawn: {:?}", command); + eprintln!("bitcoind spawn: {command:?}"); let mut process = match command.spawn() { Ok(child) => child, @@ -105,22 +110,9 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { let mut command = Command::new("bitcoin-cli"); - command - .stdout(Stdio::piped()) - .arg("-rpcconnect=127.0.0.1") - .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); - - match ( - &self.config.burnchain.username, - &self.config.burnchain.password, - ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} - } + command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); + + self.add_rpc_cli_args(&mut command); command.arg("stop"); From e4f59e59a2253a31be7ea909eca32595b93dfcf9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 21 Aug 2024 12:55:49 -0400 Subject: [PATCH 324/910] CRC: rather than wait, just do a retry for mining Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 65 +++++++++++-------- testnet/stacks-node/src/tests/signer/v0.rs | 34 ++++------ 2 files changed, 50 insertions(+), 49 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index dc57ca16de3..cd811a9346d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,8 +45,8 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; -use stacks::util::{get_epoch_time_secs, sleep_ms}; use stacks_common::codec::read_next; use 
stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -318,10 +318,17 @@ impl BlockMinerThread { } } } - self.wait_min_time_between_blocks()?; match self.mine_block(&stackerdbs) { - Ok(x) => break Some(x), + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. Will try again."; + "block_timestamp" => x.header.timestamp, + ); + continue; + } + break Some(x); + } Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { info!("Miner interrupted while mining, will try again"); // sleep, and try again. if the miner was interrupted because the burnchain @@ -1040,34 +1047,40 @@ impl BlockMinerThread { Some(vrf_proof) } - /// Wait the minimum time between blocks before mining a new block (if necessary) + /// Check that the provided block is not mined too quickly after the parent block. /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. - fn wait_min_time_between_blocks(&self) -> Result<(), NakamotoNodeError> { - let burn_db_path = self.config.get_burn_db_file_path(); - let mut burn_db = - SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) - .expect("FATAL: could not open sortition DB"); - - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + fn validate_timestamp(&self, x: &NakamotoBlock) -> Result { + let chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; - let time_since_parent_ms = get_epoch_time_secs() - .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp) - / 1000; + let stacks_parent_header = + NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) + .map_err(|e| { + error!( + "Could not query header info for parent block ID {}: {:?}", + &x.header.parent_block_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for parent block ID {}", + &x.header.parent_block_id + ); + NakamotoNodeError::ParentNotFound + })?; + let current_timestamp = get_epoch_time_secs(); + let time_since_parent_ms = + current_timestamp.saturating_sub(stacks_parent_header.burn_header_timestamp) * 1000; if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { - let wait_ms = self - .config - .miner - .min_time_between_blocks_ms - .saturating_sub(time_since_parent_ms); - info!("Parent block mined {} ms ago, waiting {} ms before mining a new block", time_since_parent_ms, wait_ms; - "parent_block_id" => %parent_block_info.stacks_parent_header.index_block_hash(), - "parent_block_height" => parent_block_info.stacks_parent_header.stacks_block_height, - "parent_block_timestamp" => parent_block_info.stacks_parent_header.burn_header_timestamp, + debug!("Parent block mined {time_since_parent_ms} ms ago. 
Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; + "current_timestamp" => current_timestamp, + "parent_block_id" => %stacks_parent_header.index_block_hash(), + "parent_block_height" => stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, ); - sleep_ms(wait_ms); + return Ok(false); } - Ok(()) + Ok(true) } // TODO: add tests from mutation testing results #4869 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 049dcad379b..1f637b6b8d0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2890,11 +2890,6 @@ fn min_gap_between_blocks() { .nakamoto_blocks_proposed .load(Ordering::SeqCst); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); // submit a tx so that the miner will mine a block @@ -2904,19 +2899,6 @@ fn min_gap_between_blocks() { submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal. Ensure it does not arrive before the gap is exceeded"); - let start_time = Instant::now(); - while start_time.elapsed().as_millis() < (time_between_blocks_ms - 1000).into() { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - assert_eq!( - blocks_proposed, proposals_before, - "Block proposed before gap was exceeded" - ); - std::thread::sleep(Duration::from_millis(100)); - } - let start_time = Instant::now(); loop { let blocks_proposed = signer_test @@ -2924,12 +2906,12 @@ fn min_gap_between_blocks() { .nakamoto_blocks_proposed .load(Ordering::SeqCst); if blocks_proposed > proposals_before { + assert!( + start_time.elapsed().as_millis() >= time_between_blocks_ms.into(), + "Block proposed before gap was exceeded" + ); break; } - assert!( - start_time.elapsed().as_secs() < 30, - "Block not proposed after gap was exceeded within timeout" - ); std::thread::sleep(Duration::from_millis(100)); } @@ -2937,6 +2919,10 @@ fn min_gap_between_blocks() { let start = Instant::now(); let duration = 30; + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); loop { let blocks_mined = signer_test .running_nodes @@ -2944,7 +2930,9 @@ fn min_gap_between_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height { + if blocks_mined > blocks_before + && info.stacks_tip_height == info_before.stacks_tip_height + 1 + { break; } From 1dc17102ea6f5877485c8382c08a341d2f392711 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 13:38:31 -0400 Subject: [PATCH 325/910] chore: syntax improvements Co-authored-by: Jeff Bencin --- testnet/stacks-node/src/event_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 00920db88dc..f7371248c60 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -318,7 +318,7 @@ fn send_request( url: &Url, timeout: Duration, ) -> Result { - let addr = format!("{}:{}", host, port) + let addr = format!("{host}:{port}") .to_socket_addrs()? 
 .next()
 .ok_or_else(|| {

From 383f15b1960713ccf066a553ed07d867f7161a4a Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Wed, 21 Aug 2024 23:53:53 -0400
Subject: [PATCH 326/910] fix: fix failing unit test

---
 stackslib/src/net/download/nakamoto/tenure.rs | 5 ++++-
 stackslib/src/net/tests/download/nakamoto.rs | 4 ++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs
index 4fb050e5919..80065dc0c6b 100644
--- a/stackslib/src/net/download/nakamoto/tenure.rs
+++ b/stackslib/src/net/download/nakamoto/tenure.rs
@@ -331,7 +331,10 @@ impl TenureStartEnd {
                     first_burn_height,
                     wt_start.burn_height,
                 )
-                .expect("FATAL: tenure from before system start"),
+                .expect(&format!(
+                    "FATAL: tenure from before system start ({} <= {})",
+                    wt_start.burn_height, first_burn_height
+                )),
                 wt.processed,
             );
             tenure_start_end.fetch_end_block = true;
diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs
index 6b10a95c92c..afba1e90e7d 100644
--- a/stackslib/src/net/tests/download/nakamoto.rs
+++ b/stackslib/src/net/tests/download/nakamoto.rs
@@ -1086,12 +1086,12 @@ fn test_tenure_start_end_from_inventory() {
         wanted_tenures.push(WantedTenure::new(
             ConsensusHash([i as u8; 20]),
             StacksBlockId([i as u8; 32]),
-            u64::from(i) + first_burn_height,
+            u64::from(i) + first_burn_height + 1,
         ));
         next_wanted_tenures.push(WantedTenure::new(
             ConsensusHash([(i + 128) as u8; 20]),
             StacksBlockId([(i + 128) as u8; 32]),
-            u64::from(i) + first_burn_height,
+            u64::from(i) + first_burn_height + 1,
         ));
     }
     let mut all_tenures = wanted_tenures.clone();

From ee28db9f47db4ebc5beee2aa4d40e3ad3cda4010 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 22 Aug 2024 09:19:17 -0400
Subject: [PATCH 327/910] Reuse BlockResponse slot for MockSignature message type

Signed-off-by: Jacinta Ferrant
---
 libsigner/src/v0/messages.rs | 3 +--
 testnet/stacks-node/src/tests/signer/v0.rs | 4 ++--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs
index 7d411f89b5b..b0f470ee2ab 100644
--- a/libsigner/src/v0/messages.rs
+++ b/libsigner/src/v0/messages.rs
@@ -185,8 +185,7 @@ impl SignerMessage {
     pub fn msg_id(&self) -> Option<MessageSlotID> {
         match self {
             Self::BlockProposal(_) | Self::BlockPushed(_) => None,
-            Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse),
-            Self::MockSignature(_) => Some(MessageSlotID::MockSignature),
+            Self::BlockResponse(_) | Self::MockSignature(_) => Some(MessageSlotID::BlockResponse), // Mock signature reuses the same slot as block response since it's exclusively used in Epoch 2.5
         }
     }
 }
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 2ee2e417eeb..210dcf22c96 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -2212,8 +2212,8 @@ fn mock_sign_epoch_25() {
         std::thread::sleep(Duration::from_millis(100));
         let messages: Vec<SignerMessage> = StackerDB::get_messages(
             stackerdb
-                .get_session_mut(&MessageSlotID::MockSignature)
-                .expect("Failed to get BlockResponse stackerdb session"),
+                .get_session_mut(&MessageSlotID::BlockResponse)
+                .expect("Failed to get BlockResponse stackerdb session"), // Epoch 2.5 MockSignatures use the BlockResponse slot
             &signer_slot_ids,
         )
         .expect("Failed to get message from stackerdb");

From 3303f479449f9f0310dc3b905b6931c363dc24eb Mon Sep 17 00:00:00 2001
From: Hank Stoever
Date: Thu, 22 Aug 2024
06:45:53 -0700 Subject: [PATCH 328/910] fix: use permanent backoff when stacker set not found --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914bd..0473b68ee10 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -542,7 +542,7 @@ impl StacksClient { backoff::Error::permanent(e.into()) })?; if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + return Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)); } else { warn!("Got error response ({status}): {}", error_data.err_msg); return Err(backoff::Error::permanent(ClientError::RequestFailure( From 611eec22df944dcdd68619b8554f2dd49607b54b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 13:38:42 -0400 Subject: [PATCH 329/910] fix: address PR feedback --- .../download/nakamoto/download_state_machine.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 2e7be7f9772..8cef43a9aa3 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -252,7 +252,7 @@ impl NakamotoDownloadStateMachine { ) .expect("FATAL: tip.block_height before system start"); - // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at + // careful -- need .saturating_add(1) since this calculation puts the reward cycle start at // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len let first_block_height = if let Some(highest_wanted_tenure) = loaded_so_far.last() { highest_wanted_tenure.burn_height.saturating_add(1) @@ -1505,15 +1505,13 @@ impl NakamotoDownloadStateMachine { /// Run and process all unconfirmed tenure downloads, and highest complete tenure downloads. /// Do the needful bookkeeping to remove dead peers. - /// Returns map of tenure IDs to blocks we fetched, plus whether or not we returned because we - /// were throttled fn download_unconfirmed_tenures( &mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, chainstate: &StacksChainState, highest_processed_block_id: Option, - ) -> (HashMap>, bool) { + ) -> HashMap> { // queue up more downloaders self.update_unconfirmed_tenure_downloaders( network.get_connection_opts(), @@ -1589,7 +1587,7 @@ impl NakamotoDownloadStateMachine { }) .collect(); - (tenure_blocks, false) + tenure_blocks } /// Top-level download state machine execution. 
@@ -1669,7 +1667,7 @@ impl NakamotoDownloadStateMachine { &network.stacks_tip.block_hash, ); - let (new_blocks, throttled) = self.download_unconfirmed_tenures( + let new_blocks = self.download_unconfirmed_tenures( network, sortdb, chainstate, @@ -1680,11 +1678,6 @@ impl NakamotoDownloadStateMachine { }, ); - if throttled { - // stay in this state - return new_blocks; - } - if !self.tenure_downloads.is_empty() { // need to go get this scheduled tenure debug!( From 76c91359cfb7dca65e3b19539b2038440d7c7ebc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 22 Aug 2024 15:53:09 -0400 Subject: [PATCH 330/910] refactor: add release version of `fault_injection` --- stackslib/src/net/relay.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 412d5ed0a1a..33e19025b9c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -104,6 +104,17 @@ pub mod fault_injection { } } +#[cfg(not(any(test, feature = "testing")))] +pub mod fault_injection { + pub fn ignore_block(_height: u64, _working_dir: &str) -> bool { + false + } + + pub fn set_ignore_block(_height: u64, _working_dir: &str) {} + + pub fn clear_ignore_block() {} +} + pub struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, @@ -879,7 +890,6 @@ impl Relayer { &obtained_method, ); - #[cfg(any(test, feature = "testing"))] if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { return Ok(false); } From e347fb8ad2190ab86bd04df66738d41bf69ebb7a Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 22 Aug 2024 23:42:55 +0300 Subject: [PATCH 331/910] mutants add manual workflow_dispatch no timeout --- .github/workflows/pr-differences-mutants.yml | 22 ++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a7256873..d53e2ca661a 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,7 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -16,9 +17,26 @@ concurrency: cancel-in-progress: true jobs: + check-access-permissions: + name: Check Access Permissions + runs-on: ubuntu-latest + + steps: + - name: Check Access Permissions To Trigger This + id: check_access_permissions + uses: stacks-network/actions/team-membership@main + with: + username: ${{ github.actor }} + team: 'blockchain-team' + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + + outputs: + ignore_timeout: ${{ steps.check_access_permissions.outputs.is_team_member == 'true' && github.event_name == 'workflow_dispatch' }} + # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards + needs: check-access-permissions runs-on: ubuntu-latest @@ -30,10 +48,13 @@ jobs: run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }} small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }} run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }} steps: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + with: + ignore_timeout: 
${{ needs.check-access-permissions.outputs.ignore_timeout }}

 # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards)
 pr-differences-mutants-small-normal:
@@ -220,3 +241,4 @@ jobs:
 small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }}
 shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }}
 stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }}
+ too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }}

From afab2837d0ec6b9795db004654f8e09fa9163461 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 22 Aug 2024 22:38:26 -0400
Subject: [PATCH 332/910] chore: use `url` (which we use in dependencies already) instead of `http-types`

---
 Cargo.lock | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Cargo.lock b/Cargo.lock
index 1e7e6b6b422..10b65eb7458 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3426,6 +3426,7 @@ dependencies = [
 "toml 0.5.11",
 "tracing",
 "tracing-subscriber",
+ "url",
 "warp",
 "wsts",
 ]

From 15d81622dbcb71947e465ac9530a255525e9d9ac Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 22 Aug 2024 22:38:48 -0400
Subject: [PATCH 333/910] fix: expand receive buffer size to 64k

---
 stackslib/src/net/connection.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs
index 36b1fc18ff0..78c15e08330 100644
--- a/stackslib/src/net/connection.rs
+++ b/stackslib/src/net/connection.rs
@@ -971,7 +971,7 @@ impl<P: ProtocolFamily> ConnectionInbox<P>
{ // NOTE: it's important that buf not be too big, since up to buf.len()-1 bytes may need // to be copied if a message boundary isn't aligned with buf (which is usually the // case). - let mut buf = [0u8; 4096]; + let mut buf = [0u8; 65536]; let num_read = match fd.read(&mut buf) { Ok(0) => { // remote fd is closed, but do try to consume all remaining bytes in the buffer From f45f1d312cf63feec7fc843b1110b81cb8ffeec1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:39:07 -0400 Subject: [PATCH 334/910] fix: handle `text/plain; ...` --- stackslib/src/net/http/mod.rs | 2 +- stackslib/src/net/http/response.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index cc6355ca315..33935fdb04f 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -178,7 +178,7 @@ impl FromStr for HttpContentType { let s = header.to_string().to_lowercase(); if s == "application/octet-stream" { Ok(HttpContentType::Bytes) - } else if s == "text/plain" { + } else if s == "text/plain" || s.starts_with("text/plain;") { Ok(HttpContentType::Text) } else if s == "application/json" { Ok(HttpContentType::JSON) diff --git a/stackslib/src/net/http/response.rs b/stackslib/src/net/http/response.rs index f6f17762118..77bcaa730f0 100644 --- a/stackslib/src/net/http/response.rs +++ b/stackslib/src/net/http/response.rs @@ -582,7 +582,7 @@ impl StacksMessageCodec for HttpResponsePreamble { )); } - if content_type.is_none() || (content_length.is_none() && !chunked_encoding) { + if content_length.is_none() && !chunked_encoding { return Err(CodecError::DeserializeError( "Invalid HTTP response: missing Content-Type, Content-Length".to_string(), )); @@ -593,7 +593,7 @@ impl StacksMessageCodec for HttpResponsePreamble { status_code: status_code, reason: reason, keep_alive: keep_alive, - content_type: content_type.unwrap(), + content_type: content_type.unwrap_or(HttpContentType::Bytes), // per the RFC content_length: content_length, headers: headers, }) From 238fbe6229a05ee87a5a22f4fecc2ffe071bf603 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:39:25 -0400 Subject: [PATCH 335/910] feat: implement `StacksHttp::new_client()` so we can use it to request and receive arbitrary well-formed HTTP messages without having to bind a dedicated request handler to them first. 
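
A minimal usage sketch of the client mode (illustrative only, not part of this
patch's diff; it assumes only the crate-internal paths shown below):

    use std::net::SocketAddr;

    use crate::net::connection::ConnectionOptions;
    use crate::net::httpcore::StacksHttp;

    /// Build a client-mode protocol instance. Unlike `StacksHttp::new()`, this
    /// registers no RPC handlers; it sets `allow_arbitrary_response`, so any
    /// well-formed response is decoded by its Content-Type alone.
    fn client_http(peer_addr: SocketAddr) -> StacksHttp {
        // Default connection options are sufficient for a one-off client.
        let conn_opts = ConnectionOptions::default();
        StacksHttp::new_client(peer_addr, &conn_opts)
    }

The returned instance is fed to a `NetworkConnection` exactly like the
server-mode instance; only response dispatch differs.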
--- stackslib/src/net/httpcore.rs | 194 ++++++++++++++++++++++++++-------- 1 file changed, 152 insertions(+), 42 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 88ee0365b27..39ce1e64ac0 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -44,11 +44,12 @@ use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::core::{MemPoolDB, StacksEpoch}; use crate::net::connection::ConnectionOptions; -use crate::net::http::common::HTTP_PREAMBLE_MAX_ENCODED_SIZE; +use crate::net::http::common::{parse_raw_bytes, HTTP_PREAMBLE_MAX_ENCODED_SIZE}; use crate::net::http::{ - http_reason, Error as HttpError, HttpBadRequest, HttpContentType, HttpErrorResponse, - HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, + http_reason, parse_bytes, parse_json, Error as HttpError, HttpBadRequest, HttpContentType, + HttpErrorResponse, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, + HttpVersion, }; use crate::net::p2p::PeerNetwork; use crate::net::server::HttpPeer; @@ -855,6 +856,44 @@ struct StacksHttpReplyData { stream: StacksHttpRecvStream, } +/// Default response handler, for when using StacksHttp to issue arbitrary requests +#[derive(Clone)] +struct RPCArbitraryResponseHandler {} +impl HttpResponse for RPCArbitraryResponseHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + match preamble.content_type { + HttpContentType::Bytes => { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } + HttpContentType::JSON => { + if body.len() > MAX_MESSAGE_LEN as usize { + return Err(HttpError::DecodeError( + "Message is too long to decode".into(), + )); + } + + let json = parse_json(preamble, body)?; + Ok(HttpResponsePayload::JSON(json)) + } + HttpContentType::Text => { + let text_bytes = parse_raw_bytes( + preamble, + body, + MAX_MESSAGE_LEN.into(), + HttpContentType::Text, + )?; + let text = String::from_utf8_lossy(&text_bytes).to_string(); + Ok(HttpResponsePayload::Text(text)) + } + } + } +} + /// Stacks HTTP state machine implementation, for bufferring up data. /// One of these exists per Connection. /// There can be at most one HTTP request in-flight (i.e. we don't do pipelining). @@ -890,9 +929,13 @@ pub struct StacksHttp { pub read_only_call_limit: ExecutionCost, /// The authorization token to enable access to privileged features, such as the block proposal RPC endpoint pub auth_token: Option, + /// Allow arbitrary responses to be handled in addition to request handlers + allow_arbitrary_response: bool, } impl StacksHttp { + /// Create an HTTP protocol state machine that handles the built-in RPC API. 
+ /// Used for building the RPC server pub fn new(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { let mut http = StacksHttp { peer_addr, @@ -906,11 +949,31 @@ impl StacksHttp { maximum_call_argument_size: conn_opts.maximum_call_argument_size, read_only_call_limit: conn_opts.read_only_call_limit.clone(), auth_token: conn_opts.auth_token.clone(), + allow_arbitrary_response: false, }; http.register_rpc_methods(); http } + /// Create an HTTP protocol state machine that can handle arbitrary responses. + /// Used for building clients. + pub fn new_client(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { + StacksHttp { + peer_addr, + body_start: None, + num_preamble_bytes: 0, + last_four_preamble_bytes: [0u8; 4], + reply: None, + chunk_size: 8192, + request_handler_index: None, + request_handlers: vec![], + maximum_call_argument_size: conn_opts.maximum_call_argument_size, + read_only_call_limit: conn_opts.read_only_call_limit.clone(), + auth_token: conn_opts.auth_token.clone(), + allow_arbitrary_response: true, + } + } + /// Register an API RPC endpoint pub fn register_rpc_endpoint( &mut self, @@ -1164,7 +1227,7 @@ impl StacksHttp { match preamble { StacksHttpPreamble::Response(ref http_response_preamble) => { // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() { + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { return Err(NetError::DeserializeError( "Unexpected HTTP response: no active request handler".to_string(), )); @@ -1293,13 +1356,15 @@ impl StacksHttp { &ConnectionOptions::default(), ); - let response_handler_index = - http.find_response_handler(verb, request_path) - .ok_or(NetError::SendError(format!( - "No such handler for '{} {}'", - verb, request_path - )))?; - http.request_handler_index = Some(response_handler_index); + if !self.allow_arbitrary_response { + let response_handler_index = + http.find_response_handler(verb, request_path) + .ok_or(NetError::SendError(format!( + "No such handler for '{} {}'", + verb, request_path + )))?; + http.request_handler_index = Some(response_handler_index); + } let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { @@ -1417,9 +1482,9 @@ impl ProtocolFamily for StacksHttp { } // sanity check -- if we're receiving a response, then we must have earlier issued - // a request. Thus, we must already know which response handler to use. - // Otherwise, someone sent us malforemd data. - if self.request_handler_index.is_none() { + // a request, or we must be in client mode. Thus, we must already know which + // response handler to use. Otherwise, someone sent us malforemd data. + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { self.reset(); return Err(NetError::DeserializeError( "Unsolicited HTTP response".to_string(), @@ -1442,18 +1507,28 @@ impl ProtocolFamily for StacksHttp { num_read, ); - // we now know the content-length, so pass it into the parser. 
- let handler_index = - self.request_handler_index - .ok_or(NetError::DeserializeError( - "Unknown HTTP response handler".to_string(), - ))?; - - let parse_res = self.try_parse_response( - handler_index, - http_response_preamble, - &message_bytes[..], - ); + let parse_res = if self.allow_arbitrary_response { + let arbitrary_parser = RPCArbitraryResponseHandler {}; + let response_payload = arbitrary_parser + .try_parse_response(http_response_preamble, &message_bytes[..])?; + Ok(StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + )) + } else { + // we now know the content-length, so pass it into the parser. + let handler_index = + self.request_handler_index + .ok_or(NetError::DeserializeError( + "Unknown HTTP response handler".to_string(), + ))?; + + self.try_parse_response( + handler_index, + http_response_preamble, + &message_bytes[..], + ) + }; // done parsing self.reset(); @@ -1538,6 +1613,32 @@ impl ProtocolFamily for StacksHttp { // message of known length test_debug!("read http response payload of {} bytes", buf.len(),); + if self.allow_arbitrary_response { + let arbitrary_parser = RPCArbitraryResponseHandler {}; + let response_payload = + arbitrary_parser.try_parse_response(http_response_preamble, buf)?; + if http_response_preamble.status_code >= 400 { + return Ok(( + StacksHttpMessage::Error( + "(client-given)".into(), + StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + ), + ), + buf.len(), + )); + } else { + return Ok(( + StacksHttpMessage::Response(StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + )), + buf.len(), + )); + } + } + // sanity check -- if we're receiving a response, then we must have earlier issued // a request. Thus, we must already know which response handler to use. // Otherwise, someone sent us malformed data. @@ -1576,27 +1677,36 @@ impl ProtocolFamily for StacksHttp { ) -> Result<(), NetError> { match *message { StacksHttpMessage::Request(ref req) => { - // client cannot send more than one request in parallel - if self.request_handler_index.is_some() { - test_debug!("Have pending request already"); - return Err(NetError::InProgress); - } + // the node cannot send more than one request in parallel, unless the client is + // directing it + let handler_index = if !self.allow_arbitrary_response { + if self.request_handler_index.is_some() { + test_debug!("Have pending request already"); + return Err(NetError::InProgress); + } - // find the response handler we'll use - let (decoded_path, _) = decode_request_path(&req.preamble().path_and_query_str)?; - let handler_index = self - .find_response_handler(&req.preamble().verb, &decoded_path) - .ok_or(NetError::SendError(format!( - "No response handler found for `{} {}`", - &req.preamble().verb, - &decoded_path - )))?; + // find the response handler we'll use + let (decoded_path, _) = + decode_request_path(&req.preamble().path_and_query_str)?; + let handler_index = self + .find_response_handler(&req.preamble().verb, &decoded_path) + .ok_or(NetError::SendError(format!( + "No response handler found for `{} {}`", + &req.preamble().verb, + &decoded_path + )))?; + handler_index + } else { + 0 + }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! 
- self.request_handler_index = Some(handler_index); + if !self.allow_arbitrary_response { + self.request_handler_index = Some(handler_index); + } Ok(()) } StacksHttpMessage::Response(ref resp) => resp.send(fd), From 626136e1de0b41f4496bd362a494caefb69ccb2d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:40:33 -0400 Subject: [PATCH 336/910] fix: async-h1, async-std, and http-types are only required for the prometheus feature --- testnet/stacks-node/Cargo.toml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 8708aea2ce4..e11096fbf21 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -15,9 +15,6 @@ serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} toml = "0.5.6" -async-h1 = "2.3.2" -async-std = { version = "1.6", features = ["attributes"] } -http-types = "2.12" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" @@ -28,10 +25,13 @@ chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } wsts = { workspace = true } +url = "2.1.0" rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } +async-h1 = { version = "2.3.2", optional = true } +async-std = { version = "1.6", optional = true, features = ["attributes"] } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -40,7 +40,7 @@ tikv-jemallocator = {workspace = true} ring = "0.16.19" warp = "0.3.5" tokio = "1.15" -reqwest = { version = "0.11", default_features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } clarity = { path = "../../clarity", features = ["default", "testing"]} stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] } @@ -50,6 +50,7 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" +http-types = "2.12" [[bin]] name = "stacks-node" @@ -60,7 +61,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "dep:async-h1", "dep:async-std"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] From 32cdefef032e5292f5a61f6d7624dcb07141ec56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:41:03 -0400 Subject: [PATCH 337/910] feat: `send_request()` function for issuing synchronous HTTP requests using StacksHttp --- testnet/stacks-node/src/event_dispatcher.rs | 376 ++++++++++++++------ 1 file changed, 261 insertions(+), 115 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index f7371248c60..56bba0eadff 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,6 +16,7 @@ use std::collections::hash_map::Entry; use 
std::collections::{HashMap, HashSet}; +use std::io; use std::io::{Read, Write}; use std::net::{TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -27,7 +28,6 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; -use http_types::Url; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -55,13 +55,20 @@ use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; use stacks::net::atlas::{Attachment, AttachmentInstance}; +use stacks::net::connection::{ConnectionOptions, NetworkConnection}; +use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; +use stacks::net::httpcore::{StacksHttp, StacksHttpMessage, StacksHttpRequest, StacksHttpResponse}; use stacks::net::stackerdb::StackerDBEventDispatcher; +use stacks::net::Error as NetError; use stacks::util::hash::to_hex; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; +use url::Url; use super::config::{EventKeyType, EventObserverConfig}; @@ -311,103 +318,212 @@ impl RewardSetEventPayload { } } -fn send_request( +/// Convert a NetError into an io::Error if appropriate. +fn handle_net_error(e: NetError, msg: &str) -> io::Error { + if let NetError::ReadError(ioe) = e { + ioe + } else if let NetError::WriteError(ioe) = e { + ioe + } else if let NetError::RecvTimeout = e { + io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") + } else { + io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + } +} + +/// Send an HTTP request to the given host:port. Returns the decoded response. +/// Interanlly, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// response. It is a blocking operation. +/// +/// If the request encounters a network error, then return an error. Don't retry. +/// If the request times out after `timeout`, then return an error. +pub fn send_request( host: &str, port: u16, - body: &[u8], - url: &Url, + request: StacksHttpRequest, timeout: Duration, -) -> Result { - let addr = format!("{host}:{port}") - .to_socket_addrs()? - .next() - .ok_or_else(|| { - std::io::Error::new(std::io::ErrorKind::NotFound, "No valid address found") - })?; - let mut stream = TcpStream::connect_timeout(&addr, timeout)?; +) -> Result { + // Find the host:port that works. + // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 + // addresses, but usually, Stacks services like event observers are only bound to ipv4 + // addresses. So, be sure to use an address that will lead to a socket connection! + let mut stream_and_addr = None; + let mut last_err = None; + for addr in format!("{host}:{port}").to_socket_addrs()? 
{ + debug!("send_request: connect to {}", &addr); + match TcpStream::connect_timeout(&addr, timeout) { + Ok(sock) => { + stream_and_addr = Some((sock, addr)); + break; + } + Err(e) => { + last_err = Some(e); + } + } + } + + let Some((mut stream, addr)) = stream_and_addr else { + return Err(last_err.unwrap_or(io::Error::new( + io::ErrorKind::Other, + "Unable to connect to {host}:{port}", + ))); + }; + stream.set_read_timeout(Some(timeout))?; stream.set_write_timeout(Some(timeout))?; + stream.set_nodelay(true)?; + + let start = Instant::now(); + + debug!("send_request: Sending request"; "request" => %request.request_path()); + + // Some explanation of what's going on here is in order. + // + // The networking stack in Stacks is designed to operate on non-blocking sockets, and + // furthermore, it operates in a way that the call site in which a network request is issued can + // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary + // within the Stacks node, using it to issue a single blocking request imposes a lot of + // overhead. + // + // First, we will create the network connection and give it a ProtocolFamily implementation + // (StacksHttp), which gets used by the connection to encode and deocde messages. + // + // Second, we'll create a _handle_ to the network connection into which we will write requests + // and read responses. The connection itself is an opaque black box that, internally, + // implements a state machine around the ProtocolFamily implementation to incrementally read + // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is + // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, + // as well as underfull socket buffers. + // + // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network + // handle (which contains the buffered bytes from the message to be fed into the socket), and + // (2) drive bytes from the handle into the socket iself via the network connection. This is a + // two-step process mainly because the handle is expected to live in a separate stack (or even + // a separate thread). + // + // Fourth, we need to _drive_ data from the socket. We have to repeatedly (1) pull data from + // the socket into the network connection, and (2) drive parsed messages from the connection to + // the handle. Then, the call site that owns the handle simply polls the handle for new + // messages. Once we have received a message, we can proceed to handle it. + // + // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert + // it into an error. If it's a request (i.e. not a response), we also return an error. We + // only return the message if it was a well-formed non-error HTTP response. 
+ + // Step 1-2: set up the connection and request handle + // NOTE: we don't need anything special for connection options, so just use the default + let conn_opts = ConnectionOptions::default(); + let http = StacksHttp::new_client(addr, &conn_opts); + let mut connection = NetworkConnection::new(http, &conn_opts, None); + let mut request_handle = connection + .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) + .map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("Failed to create request handle: {:?}", &e).as_str(), + ) + })?; - let request = format!( - "POST {} HTTP/1.1\r\n\ - Host: {}\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - Connection: close\r\n\ - \r\n", - url.path(), - host, - body.len(), - ); - debug!("Event dispatcher: Sending request"; "request" => &request); - - stream.write_all(request.as_bytes())?; - stream.write_all(body)?; - stream.flush()?; - debug!("Event dispatcher: Request sent"); - - let mut response = Vec::new(); - let mut buffer = [0; 512]; - let mut headers_parsed = false; - let mut content_length = None; - let mut total_read = 0; - - let start_time = Instant::now(); - - while total_read < content_length.unwrap_or(usize::MAX) { - if start_time.elapsed() >= timeout { - return Err(std::io::Error::new( - std::io::ErrorKind::TimedOut, - "Response reading timed out", - )); - } + // Step 3: load up the request with the message we're gonna send, and iteratively dump its + // bytes from the handle into the socket (the connection does internall buffering and + // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send + // anymore because the socket buffer is currently full). + request + .send(&mut request_handle) + .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; + + debug!("send_request(sending data)"); + loop { + let flushed = request_handle + .try_flush() + .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; + + // send it out + let num_sent = connection + .send_data(&mut stream) + .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; - let bytes_read = stream.read(&mut buffer)?; - if bytes_read == 0 { - // Connection closed + debug!( + "send_request(sending data): flushed = {}, num_sent = {}", + flushed, num_sent + ); + if flushed && num_sent == 0 { break; } + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); + } + } - response.extend_from_slice(&buffer[..bytes_read]); - - // Parse headers if not already done - if !headers_parsed { - if let Some(headers_end) = response.windows(4).position(|window| window == b"\r\n\r\n") - { - headers_parsed = true; - // Parse Content-Length header - let headers = &response[..headers_end]; - let headers_str = String::from_utf8_lossy(headers); - if let Some(content_length_line) = headers_str - .lines() - .find(|line| line.to_lowercase().starts_with("content-length:")) - { - let length_str = content_length_line - .split(":") - .nth(1) - // This is safe because we already know the line starts with "Content-Length:" - .expect("unreachable"); - match length_str.trim().parse::() { - Ok(len) => content_length = Some(len), - Err(_) => { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Invalid Content-Length header", - )) - } - } - } - total_read = response[headers_end + 4..].len(); + // Step 4: pull bytes from the socket back into the handle, and see if the connection 
decoded + // and dispatched any new messages to the request handle. If so, then extract the message and + // check that it's a well-formed HTTP response. + debug!("send_request(receiving data)"); + let response; + loop { + // get back the reply + debug!("send_request(receiving data): try to receive data"); + match connection.recv_data(&mut stream) { + Ok(nr) => { + debug!("send_request(receiving data): received {} bytes", nr); + } + Err(e) => { + return Err(handle_net_error(e, "Failed to receive socket data")); } - } else { - total_read += bytes_read; + } + + // fullfill the request -- send it to its corresponding handle + debug!("send_request(receiving data): drain inbox"); + connection.drain_inbox(); + + // see of we got a message that was fulfilled in our handle + debug!("send_request(receiving data): try receive response"); + let rh = match request_handle.try_recv() { + Ok(resp) => { + response = resp; + break; + } + Err(e) => match e { + Ok(handle) => handle, + Err(e) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); + } + }, + }; + request_handle = rh; + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); } } - let response_str = String::from_utf8_lossy(&response).to_string(); - debug!("Event dispatcher: Response received"; "response" => &response_str); + // Step 5: decode the HTTP message and return it if it's not an error. + let response_data = match response { + StacksHttpMessage::Response(response_data) => response_data, + StacksHttpMessage::Error(path, response) => { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Request did not succeed ({} != 200). Path: '{}'", + response.preamble().status_code, + &path + ) + .as_str(), + )); + } + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "Did not receive an HTTP response", + )); + } + }; - Ok(response_str) + Ok(response_data) } impl EventObserver { @@ -416,14 +532,6 @@ impl EventObserver { "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload ); - let body = match serde_json::to_vec(payload) { - Ok(body) => body, - Err(err) => { - error!("Event dispatcher: serialization failed - {:?}", err); - return; - } - }; - let url = { let joined_components = if path.starts_with('/') { format!("{}{}", &self.endpoint, path) @@ -437,25 +545,40 @@ impl EventObserver { let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); let backoff = Duration::from_millis(1000); // 1 second loop { - match send_request(host, port, &body, &url, backoff) { + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + match send_request(host, port, request, backoff) { Ok(response) => { - if response.starts_with("HTTP/1.1 200") { + if response.preamble().status_code == 200 { debug!( "Event dispatcher: Successful POST"; "url" => %url ); break; } else { error!( - "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response + "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response.preamble() ); } 
} Err(err) => { - warn!("Event dispatcher: connection or request failed - {:?}", err); + warn!( + "Event dispatcher: connection or request failed to {}:{} - {:?}", + &host, &port, err + ); } } sleep(backoff); @@ -1571,6 +1694,7 @@ mod test { use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; + use stacks::net::httpcore::StacksHttpResponse; use stacks::types::chainstate::BlockHeaderHash; use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; @@ -1579,6 +1703,22 @@ mod test { use super::*; + fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + path.into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + request + } + #[test] fn build_block_processed_event() { let observer = EventObserver { @@ -1710,8 +1850,7 @@ mod test { let result = send_request( "10.255.255.1", // Non-routable IP for timeout 80, // HTTP port - b"{}", // Example empty JSON body - &Url::parse("http://10.255.255.1/").expect("Failed to parse URL"), + json_body("10.255.255.1", 80, "/", b"{}"), timeout_duration, ); @@ -1762,8 +1901,7 @@ mod test { let result = send_request( "127.0.0.1", addr.port(), - b"{}", - &Url::parse("http://127.0.0.1/").unwrap(), + json_body("127.0.0.1", 80, "/", b"{}"), connection_timeout, ); @@ -1814,11 +1952,14 @@ mod test { client_done_signal .recv() .expect("Failed to receive client done signal"); + + // Explicitly drop the stream after signaling to ensure the client finishes + // NOTE: this will cause the test to slow down, since `send_request` expects + // `Connection: close` + drop(stream); debug!("Mock server closing connection"); - // Explicitly drop the stream after signaling to ensure the client finishes - drop(stream); break; // Close after the first request } }); @@ -1827,13 +1968,16 @@ mod test { format!("{}:{}", addr.ip(), addr.port()) } - fn parse_http_response(response: &str) -> &str { - let parts: Vec<&str> = response.split("\r\n\r\n").collect(); - if parts.len() == 2 { - parts[1] // The body is after the second \r\n\r\n - } else { - "" - } + fn parse_http_response(response: StacksHttpResponse) -> String { + let response_txt = match response.destruct().1 { + HttpResponsePayload::Text(s) => s, + HttpResponsePayload::Empty => "".to_string(), + HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), + HttpResponsePayload::Bytes(bytes) => { + String::from_utf8_lossy(bytes.as_slice()).to_string() + } + }; + response_txt } #[test] @@ -1846,21 +1990,23 @@ mod test { let server_addr = start_mock_server(mock_response, rx_client_done); let timeout_duration = Duration::from_secs(5); + let host = server_addr.split(':').collect::>()[0]; // Host part + let port = server_addr.split(':').collect::>()[1] + .parse() + .unwrap(); // Port part + // Attempt to send a request to the mock server let result = send_request( - &server_addr.split(':').collect::>()[0], // Host part - server_addr.split(':').collect::>()[1] - .parse() - .unwrap(), // Port part - b"{}", // Example JSON body - 
&Url::parse(&format!("http://{}/", server_addr)).expect("Failed to parse URL"), + host, + port, + json_body(host, port, "/", b"{}"), timeout_duration, ); debug!("Got result: {:?}", result); // Ensure the server only closes after the client has finished processing if let Ok(response) = &result { - let body = parse_http_response(response); + let body = parse_http_response(response.clone()); assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); } From 184403396ba104d1589d2f4bee1f1224409dfe57 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:41:46 -0400 Subject: [PATCH 338/910] feat: drop async_h1 in favor of send_request() --- .../burnchains/bitcoin_regtest_controller.rs | 129 ++++++++---------- 1 file changed, 55 insertions(+), 74 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 39ef40490b4..19c948bde90 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,14 +1,11 @@ -use std::cmp; +use std::convert::From; use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use std::time::Instant; +use std::time::{Duration, Instant}; +use std::{cmp, io}; -use async_h1::client; -use async_std::io::ReadExt; -use async_std::net::TcpStream; use base64::encode; -use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; use serde_json::value::RawValue; @@ -38,6 +35,9 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::address::PoxAddress; use stacks::core::{StacksEpoch, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; +use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; +use stacks::net::httpcore::StacksHttpRequest; +use stacks::net::Error as NetError; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::blockdata::opcodes; use stacks_common::deps_common::bitcoin::blockdata::script::{Builder, Script}; @@ -50,9 +50,11 @@ use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_ use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; +use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; @@ -62,6 +64,7 @@ use crate::config::{ OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; +use crate::event_dispatcher::send_request; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2396,8 +2399,20 @@ pub enum RPCError { type RPCResult = Result; +impl From for RPCError { + fn from(ioe: io::Error) -> Self { + Self::Network(format!("IO Error: {:?}", &ioe)) + } +} + +impl From for RPCError { + fn from(ne: NetError) -> Self { + Self::Network(format!("Net Error: {:?}", &ne)) + } +} + impl BitcoinRPCRequest { - fn build_rpc_request(config: &Config, payload: &BitcoinRPCRequest) -> Request { + fn build_rpc_request(config: &Config, payload: &BitcoinRPCRequest) -> 
StacksHttpRequest { let url = { // some methods require a wallet ID let wallet_id = match payload.method.as_str() { @@ -2412,16 +2427,35 @@ impl BitcoinRPCRequest { &payload.method, &config.burnchain.username, &config.burnchain.password, &url ); - let mut req = Request::new(Method::Post, url); + let host = url + .host_str() + .expect("Invalid bitcoin RPC URL: missing host"); + let port = url.port_or_known_default().unwrap_or(8333); + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or_else(|_| panic!("FATAL: could not parse URL into PeerHost")); + + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json( + serde_json::to_value(payload).unwrap_or_else(|_| { + panic!("FATAL: failed to encode Bitcoin RPC request as JSON") + }), + ), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); match (&config.burnchain.username, &config.burnchain.password) { (Some(username), Some(password)) => { let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - req.append_header("Authorization", auth_token); + request.add_header("Authorization".into(), auth_token); } (_, _) => {} }; - req + request } #[cfg(test)] @@ -2506,10 +2540,10 @@ impl BitcoinRPCRequest { .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let bhh = BurnchainHeaderHash::from_hex(&bhh) .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - Ok(bhh) + bhh } _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), - }?; + }; let min_conf = 0i64; let max_conf = 9999999i64; @@ -2731,71 +2765,18 @@ impl BitcoinRPCRequest { } fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let mut request = BitcoinRPCRequest::build_rpc_request(&config, &payload); - - let body = match serde_json::to_vec(&json!(payload)) { - Ok(body) => body, - Err(err) => { - return Err(RPCError::Network(format!("RPC Error: {}", err))); - } - }; + let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let timeout = Duration::from_secs(60); - request.append_header("Content-Type", "application/json"); - request.set_body(body); - - let mut response = async_std::task::block_on(async move { - let stream = match TcpStream::connect(config.burnchain.get_rpc_socket_addr()).await { - Ok(stream) => stream, - Err(err) => { - return Err(RPCError::Network(format!( - "Bitcoin RPC: connection failed - {:?}", - err - ))) - } - }; + let host = request.preamble().host.hostname(); + let port = request.preamble().host.port(); - match client::connect(stream, request).await { - Ok(response) => Ok(response), - Err(err) => { - return Err(RPCError::Network(format!( - "Bitcoin RPC: invoking procedure failed - {:?}", - err - ))) - } - } - })?; - - let status = response.status(); - - let (res, buffer) = async_std::task::block_on(async move { - let mut buffer = Vec::new(); - let mut body = response.take_body(); - let res = body.read_to_end(&mut buffer).await; - (res, buffer) - }); - - if !status.is_success() { - return Err(RPCError::Network(format!( - "Bitcoin RPC: status({}) != success, body is '{:?}'", - status, - match serde_json::from_slice::(&buffer[..]) { - Ok(v) => v, - Err(_e) => serde_json::from_str("\"(unparseable)\"") - .expect("Failed to parse JSON literal"), - } - ))); - } - - if res.is_err() { - return Err(RPCError::Network(format!( - "Bitcoin RPC: unable to 
read body - {:?}", - res - ))); + let response = send_request(&host, port, request, timeout)?; + if let HttpResponsePayload::JSON(js) = response.destruct().1 { + return Ok(js); + } else { + return Err(RPCError::Parsing("Did not get a JSON response".into())); } - - let payload = serde_json::from_slice::(&buffer[..]) - .map_err(|e| RPCError::Parsing(format!("Bitcoin RPC: {}", e)))?; - Ok(payload) } } From adb750334996946af5c6ffe13d0a128d98ac6cf5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:43:27 -0400 Subject: [PATCH 339/910] chore: cargo fmt --- testnet/stacks-node/src/event_dispatcher.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 56bba0eadff..d3243e8cd48 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -394,7 +394,7 @@ pub fn send_request( // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, // as well as underfull socket buffers. - // + // // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network // handle (which contains the buffered bytes from the message to be fed into the socket), and // (2) drive bytes from the handle into the socket iself via the network connection. This is a @@ -450,9 +450,12 @@ pub fn send_request( if flushed && num_sent == 0 { break; } - + if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); } } @@ -497,7 +500,10 @@ pub fn send_request( request_handle = rh; if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); } } @@ -1952,7 +1958,7 @@ mod test { client_done_signal .recv() .expect("Failed to receive client done signal"); - + // Explicitly drop the stream after signaling to ensure the client finishes // NOTE: this will cause the test to slow down, since `send_request` expects // `Connection: close` From a29ff4266c573484b906c48c8a02097fddb0b0b5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 22 Aug 2024 23:57:27 -0400 Subject: [PATCH 340/910] chore: add info to error for debugging --- stackslib/src/net/http/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index 33935fdb04f..17bf1d49786 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -183,9 +183,9 @@ impl FromStr for HttpContentType { } else if s == "application/json" { Ok(HttpContentType::JSON) } else { - Err(CodecError::DeserializeError( - "Unsupported HTTP content type".to_string(), - )) + Err(CodecError::DeserializeError(format!( + "Unsupported HTTP content type: {header}" + ))) } } } From 98b1b3f9e9bef039b759ca866a90d75e614d2d9c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 23 Aug 2024 00:07:51 -0400 Subject: [PATCH 341/910] chore: minor changes from code review --- testnet/stacks-node/src/event_dispatcher.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git 
a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index d3243e8cd48..c2d8d3c2c34 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -332,7 +332,7 @@ fn handle_net_error(e: NetError, msg: &str) -> io::Error { } /// Send an HTTP request to the given host:port. Returns the decoded response. -/// Interanlly, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP /// response. It is a blocking operation. /// /// If the request encounters a network error, then return an error. Don't retry. @@ -425,7 +425,7 @@ pub fn send_request( })?; // Step 3: load up the request with the message we're gonna send, and iteratively dump its - // bytes from the handle into the socket (the connection does internall buffering and + // bytes from the handle into the socket (the connection does internal buffering and // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send // anymore because the socket buffer is currently full). request @@ -480,7 +480,7 @@ pub fn send_request( debug!("send_request(receiving data): drain inbox"); connection.drain_inbox(); - // see of we got a message that was fulfilled in our handle + // see if we got a message that was fulfilled in our handle debug!("send_request(receiving data): try receive response"); let rh = match request_handle.try_recv() { Ok(resp) => { @@ -1928,7 +1928,7 @@ mod test { } } - fn start_mock_server(response: &str, client_done_signal: Receiver<()>) -> String { + fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { // Bind to an available port on localhost let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); let addr = listener.local_addr().unwrap(); @@ -1936,7 +1936,6 @@ mod test { debug!("Mock server listening on {}", addr); // Start the server in a new thread - let response = response.to_string(); thread::spawn(move || { for stream in listener.incoming() { debug!("Mock server accepted connection"); @@ -1993,13 +1992,12 @@ mod test { // Create a channel to signal when the client is done reading let (tx_client_done, rx_client_done) = channel(); - let server_addr = start_mock_server(mock_response, rx_client_done); + let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); let timeout_duration = Duration::from_secs(5); - let host = server_addr.split(':').collect::>()[0]; // Host part - let port = server_addr.split(':').collect::>()[1] - .parse() - .unwrap(); // Port part + let parts = server_addr.split(':').collect::>(); + let host = parts[0]; + let port = parts[1].parse().unwrap(); // Attempt to send a request to the mock server let result = send_request( From 67a9d791223bf1586ba6637ee297847130145a17 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 23 Aug 2024 00:12:41 -0400 Subject: [PATCH 342/910] fix: add support for extended content types For example, `application/json; charset=utf-8` was not handled. 
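
A standalone sketch of the matching rule (the real check lives in
`HttpContentType`'s `FromStr` impl; `is_json` is a hypothetical helper used
only for illustration):

    // A parameter list introduced by ';' is accepted; any other suffix is
    // still rejected because it matches neither the exact type nor the ';' form.
    fn is_json(header: &str) -> bool {
        let s = header.to_lowercase();
        s == "application/json" || s.starts_with("application/json;")
    }

    assert!(is_json("application/json"));
    assert!(is_json("application/json; charset=utf-8")); // rejected before this fix
    assert!(!is_json("application/jsonx")); // still rejected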
--- stackslib/src/net/http/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index 17bf1d49786..ca7a97c5beb 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -180,7 +180,7 @@ impl FromStr for HttpContentType { Ok(HttpContentType::Bytes) } else if s == "text/plain" || s.starts_with("text/plain;") { Ok(HttpContentType::Text) - } else if s == "application/json" { + } else if s == "application/json" || s.starts_with("application/json;") { Ok(HttpContentType::JSON) } else { Err(CodecError::DeserializeError(format!( From 0d1ed2873a6dd9818cf3250c6bbbb54658b53e7c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 23 Aug 2024 00:34:11 -0400 Subject: [PATCH 343/910] fix: prometheus build --- testnet/stacks-node/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index e11096fbf21..5128f17f03a 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ hashbrown = { workspace = true } rusqlite = { workspace = true } async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } +http-types = { version = "2.12", optional = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -61,7 +62,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "dep:async-h1", "dep:async-std"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "async-h1", "async-std", "http-types"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] From 31ee50db96628d466826c91a5fd33e315f490f56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 10:33:14 -0400 Subject: [PATCH 344/910] fix: remove client-only code that didn't compile and wasn't necessary --- stackslib/src/net/httpcore.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 39ce1e64ac0..f916a19cc11 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1356,16 +1356,6 @@ impl StacksHttp { &ConnectionOptions::default(), ); - if !self.allow_arbitrary_response { - let response_handler_index = - http.find_response_handler(verb, request_path) - .ok_or(NetError::SendError(format!( - "No such handler for '{} {}'", - verb, request_path - )))?; - http.request_handler_index = Some(response_handler_index); - } - let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { StacksHttpPreamble::Response(ref resp) => resp.is_chunked(), From 68f97c5df1aa0cb94244b21580df20c384d1e8bd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 11:23:33 -0400 Subject: [PATCH 345/910] refactor: put send_request into httpcore --- stackslib/src/net/httpcore.rs | 221 +++++++++- stackslib/src/net/tests/httpcore.rs | 170 +++++++- .../burnchains/bitcoin_regtest_controller.rs | 5 +- testnet/stacks-node/src/event_dispatcher.rs | 407 +----------------- 4 files changed, 403 insertions(+), 400 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 
f916a19cc11..b173925dc88 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -17,7 +17,8 @@ /// This module binds the http library to Stacks as a `ProtocolFamily` implementation use std::collections::{BTreeMap, HashMap}; use std::io::{Read, Write}; -use std::net::SocketAddr; +use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; +use std::time::{Duration, Instant}; use std::{fmt, io, mem}; use clarity::vm::costs::ExecutionCost; @@ -32,8 +33,8 @@ use stacks_common::types::chainstate::{ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use stacks_common::util::chunked_encoding::*; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::retry::{BoundReader, RetryReader}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use url::Url; use super::rpc::ConversationHttp; @@ -43,7 +44,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::core::{MemPoolDB, StacksEpoch}; -use crate::net::connection::ConnectionOptions; +use crate::net::connection::{ConnectionOptions, NetworkConnection}; use crate::net::http::common::{parse_raw_bytes, HTTP_PREAMBLE_MAX_ENCODED_SIZE}; use crate::net::http::{ http_reason, parse_bytes, parse_json, Error as HttpError, HttpBadRequest, HttpContentType, @@ -1764,3 +1765,217 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { query_str.unwrap_or("").to_string(), )) } + +/// Convert a NetError into an io::Error if appropriate. +fn handle_net_error(e: NetError, msg: &str) -> io::Error { + if let NetError::ReadError(ioe) = e { + ioe + } else if let NetError::WriteError(ioe) = e { + ioe + } else if let NetError::RecvTimeout = e { + io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") + } else { + io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + } +} + +/// Send an HTTP request to the given host:port. Returns the decoded response. +/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// response. It is a blocking operation. +/// +/// If the request encounters a network error, then return an error. Don't retry. +/// If the request times out after `timeout`, then return an error. +pub fn send_http_request( + host: &str, + port: u16, + request: StacksHttpRequest, + timeout: Duration, +) -> Result { + // Find the host:port that works. + // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 + // addresses, but usually, Stacks services like event observers are only bound to ipv4 + // addresses. So, be sure to use an address that will lead to a socket connection! + let mut stream_and_addr = None; + let mut last_err = None; + for addr in format!("{host}:{port}").to_socket_addrs()? 
{ + debug!("send_request: connect to {}", &addr); + match TcpStream::connect_timeout(&addr, timeout) { + Ok(sock) => { + stream_and_addr = Some((sock, addr)); + break; + } + Err(e) => { + last_err = Some(e); + } + } + } + + let Some((mut stream, addr)) = stream_and_addr else { + return Err(last_err.unwrap_or(io::Error::new( + io::ErrorKind::Other, + "Unable to connect to {host}:{port}", + ))); + }; + + stream.set_read_timeout(Some(timeout))?; + stream.set_write_timeout(Some(timeout))?; + stream.set_nodelay(true)?; + + let start = Instant::now(); + + debug!("send_request: Sending request"; "request" => %request.request_path()); + + // Some explanation of what's going on here is in order. + // + // The networking stack in Stacks is designed to operate on non-blocking sockets, and + // furthermore, it operates in a way that the call site in which a network request is issued can + // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary + // within the Stacks node, using it to issue a single blocking request imposes a lot of + // overhead. + // + // First, we will create the network connection and give it a ProtocolFamily implementation + // (StacksHttp), which gets used by the connection to encode and deocde messages. + // + // Second, we'll create a _handle_ to the network connection into which we will write requests + // and read responses. The connection itself is an opaque black box that, internally, + // implements a state machine around the ProtocolFamily implementation to incrementally read + // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is + // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, + // as well as underfull socket buffers. + // + // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network + // handle (which contains the buffered bytes from the message to be fed into the socket), and + // (2) drive bytes from the handle into the socket iself via the network connection. This is a + // two-step process mainly because the handle is expected to live in a separate stack (or even + // a separate thread). + // + // Fourth, we need to _drive_ data from the socket. We have to repeatedly (1) pull data from + // the socket into the network connection, and (2) drive parsed messages from the connection to + // the handle. Then, the call site that owns the handle simply polls the handle for new + // messages. Once we have received a message, we can proceed to handle it. + // + // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert + // it into an error. If it's a request (i.e. not a response), we also return an error. We + // only return the message if it was a well-formed non-error HTTP response. 
+ + // Step 1-2: set up the connection and request handle + // NOTE: we don't need anything special for connection options, so just use the default + let conn_opts = ConnectionOptions::default(); + let http = StacksHttp::new_client(addr, &conn_opts); + let mut connection = NetworkConnection::new(http, &conn_opts, None); + let mut request_handle = connection + .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) + .map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("Failed to create request handle: {:?}", &e).as_str(), + ) + })?; + + // Step 3: load up the request with the message we're gonna send, and iteratively dump its + // bytes from the handle into the socket (the connection does internal buffering and + // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send + // anymore because the socket buffer is currently full). + request + .send(&mut request_handle) + .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; + + debug!("send_request(sending data)"); + loop { + let flushed = request_handle + .try_flush() + .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; + + // send it out + let num_sent = connection + .send_data(&mut stream) + .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; + + debug!( + "send_request(sending data): flushed = {}, num_sent = {}", + flushed, num_sent + ); + if flushed && num_sent == 0 { + break; + } + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); + } + } + + // Step 4: pull bytes from the socket back into the handle, and see if the connection decoded + // and dispatched any new messages to the request handle. If so, then extract the message and + // check that it's a well-formed HTTP response. + debug!("send_request(receiving data)"); + let response; + loop { + // get back the reply + debug!("send_request(receiving data): try to receive data"); + match connection.recv_data(&mut stream) { + Ok(nr) => { + debug!("send_request(receiving data): received {} bytes", nr); + } + Err(e) => { + return Err(handle_net_error(e, "Failed to receive socket data")); + } + } + + // fullfill the request -- send it to its corresponding handle + debug!("send_request(receiving data): drain inbox"); + connection.drain_inbox(); + + // see if we got a message that was fulfilled in our handle + debug!("send_request(receiving data): try receive response"); + let rh = match request_handle.try_recv() { + Ok(resp) => { + response = resp; + break; + } + Err(e) => match e { + Ok(handle) => handle, + Err(e) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); + } + }, + }; + request_handle = rh; + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); + } + } + + // Step 5: decode the HTTP message and return it if it's not an error. + let response_data = match response { + StacksHttpMessage::Response(response_data) => response_data, + StacksHttpMessage::Error(path, response) => { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Request did not succeed ({} != 200). 
Path: '{}'", + response.preamble().status_code, + &path + ) + .as_str(), + )); + } + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "Did not receive an HTTP response", + )); + } + }; + + Ok(response_data) +} diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 1837d8e1c47..d9e13883a1c 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::io::Write; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::str; +use std::io::{Read, Write}; +use std::net::{SocketAddr, TcpListener, TcpStream, ToSocketAddrs}; +use std::sync::mpsc::{channel, Receiver}; +use std::time::{Duration, Instant}; +use std::{str, thread}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; @@ -38,12 +40,12 @@ use crate::net::api::getneighbors::{RPCNeighbor, RPCNeighborsInfo}; use crate::net::connection::ConnectionOptions; use crate::net::http::{ http_error_from_code_and_text, http_reason, HttpContentType, HttpErrorResponse, - HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePreamble, - HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, + HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePayload, + HttpResponsePreamble, HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, }; use crate::net::httpcore::{ - HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpMessage, - StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, + send_http_request, HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, + StacksHttpMessage, StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, }; use crate::net::rpc::ConversationHttp; use crate::net::{ProtocolFamily, TipRequest}; @@ -1118,3 +1120,157 @@ fn test_metrics_identifiers() { assert_eq!(response_handler_index.is_some(), should_have_handler); } } + +fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + path.into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + request +} + +#[test] +fn test_send_request_timeout() { + // Set up a TcpListener that accepts a connection but delays response + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); + let addr = listener.local_addr().unwrap(); + + // Spawn a thread that will accept the connection and do nothing, simulating a long delay + thread::spawn(move || { + let (stream, _addr) = listener.accept().unwrap(); + // Hold the connection open to simulate a delay + thread::sleep(Duration::from_secs(10)); + drop(stream); // Close the stream + }); + + // Set a timeout shorter than the sleep duration to force a timeout + let connection_timeout = Duration::from_secs(2); + + // Attempt to connect, expecting a timeout error + let result = send_http_request( + "127.0.0.1", + addr.port(), + json_body("127.0.0.1", 80, "/", b"{}"), + connection_timeout, + ); + + // Assert that the result is an error, specifically a 
timeout + assert!( + result.is_err(), + "Expected a timeout error, got: {:?}", + result + ); + + if let Err(err) = result { + assert_eq!( + err.kind(), + std::io::ErrorKind::WouldBlock, + "Expected WouldBlock error, got: {:?}", + err + ); + } +} + +fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { + // Bind to an available port on localhost + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); + let addr = listener.local_addr().unwrap(); + + debug!("Mock server listening on {}", addr); + + // Start the server in a new thread + thread::spawn(move || { + for stream in listener.incoming() { + debug!("Mock server accepted connection"); + let mut stream = stream.expect("Failed to accept connection"); + + // Read the client's request (even if we don't do anything with it) + let mut buffer = [0; 512]; + let _ = stream.read(&mut buffer); + debug!("Mock server received request"); + + // Simulate a basic HTTP response + stream + .write_all(response.as_bytes()) + .expect("Failed to write response"); + stream.flush().expect("Failed to flush stream"); + debug!("Mock server sent response"); + + // Wait for the client to signal that it's done reading + client_done_signal + .recv() + .expect("Failed to receive client done signal"); + + // Explicitly drop the stream after signaling to ensure the client finishes + // NOTE: this will cause the test to slow down, since `send_http_request` expects + // `Connection: close` + drop(stream); + + debug!("Mock server closing connection"); + + break; // Close after the first request + } + }); + + // Return the address of the mock server + format!("{}:{}", addr.ip(), addr.port()) +} + +fn parse_http_response(response: StacksHttpResponse) -> String { + let response_txt = match response.destruct().1 { + HttpResponsePayload::Text(s) => s, + HttpResponsePayload::Empty => "".to_string(), + HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), + HttpResponsePayload::Bytes(bytes) => String::from_utf8_lossy(bytes.as_slice()).to_string(), + }; + response_txt +} + +#[test] +fn test_send_request_success() { + // Prepare the mock server to return a successful HTTP response + let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; + + // Create a channel to signal when the client is done reading + let (tx_client_done, rx_client_done) = channel(); + let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); + let timeout_duration = Duration::from_secs(5); + + let parts = server_addr.split(':').collect::<Vec<&str>>(); + let host = parts[0]; + let port = parts[1].parse().unwrap(); + + // Attempt to send a request to the mock server + let result = send_http_request( + host, + port, + json_body(host, port, "/", b"{}"), + timeout_duration, + ); + debug!("Got result: {:?}", result); + + // Ensure the server only closes after the client has finished processing + if let Ok(response) = &result { + let body = parse_http_response(response.clone()); + assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); + } + + tx_client_done + .send(()) + .expect("Failed to send close signal"); + + // Assert that the connection was successful + assert!( + result.is_ok(), + "Expected a successful request, but got {:?}", + result + ); +} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 36336bdbf38..0a4b2556bb8 --- 
a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -36,7 +36,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::core::{StacksEpoch, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; -use stacks::net::httpcore::StacksHttpRequest; +use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::Error as NetError; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::blockdata::opcodes; @@ -64,7 +64,6 @@ use crate::config::{ OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; -use crate::event_dispatcher::send_request; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2794,7 +2793,7 @@ impl BitcoinRPCRequest { let host = request.preamble().host.hostname(); let port = request.preamble().host.port(); - let response = send_request(&host, port, request, timeout)?; + let response = send_http_request(&host, port, request, timeout)?; if let HttpResponsePayload::JSON(js) = response.destruct().1 { return Ok(js); } else { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index c2d8d3c2c34..34e42501ace 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,13 +16,10 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use std::io; -use std::io::{Read, Write}; -use std::net::{TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Mutex; use std::thread::sleep; -use std::time::{Duration, Instant}; +use std::time::Duration; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; @@ -55,17 +52,14 @@ use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; use stacks::net::atlas::{Attachment, AttachmentInstance}; -use stacks::net::connection::{ConnectionOptions, NetworkConnection}; -use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; -use stacks::net::httpcore::{StacksHttp, StacksHttpMessage, StacksHttpRequest, StacksHttpResponse}; +use stacks::net::http::HttpRequestContents; +use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; -use stacks::net::Error as NetError; use stacks::util::hash::to_hex; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::net::PeerHost; -use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; @@ -318,220 +312,6 @@ impl RewardSetEventPayload { } } -/// Convert a NetError into an io::Error if appropriate. 
-fn handle_net_error(e: NetError, msg: &str) -> io::Error { - if let NetError::ReadError(ioe) = e { - ioe - } else if let NetError::WriteError(ioe) = e { - ioe - } else if let NetError::RecvTimeout = e { - io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") - } else { - io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) - } -} - -/// Send an HTTP request to the given host:port. Returns the decoded response. -/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP -/// response. It is a blocking operation. -/// -/// If the request encounters a network error, then return an error. Don't retry. -/// If the request times out after `timeout`, then return an error. -pub fn send_request( - host: &str, - port: u16, - request: StacksHttpRequest, - timeout: Duration, -) -> Result { - // Find the host:port that works. - // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 - // addresses, but usually, Stacks services like event observers are only bound to ipv4 - // addresses. So, be sure to use an address that will lead to a socket connection! - let mut stream_and_addr = None; - let mut last_err = None; - for addr in format!("{host}:{port}").to_socket_addrs()? { - debug!("send_request: connect to {}", &addr); - match TcpStream::connect_timeout(&addr, timeout) { - Ok(sock) => { - stream_and_addr = Some((sock, addr)); - break; - } - Err(e) => { - last_err = Some(e); - } - } - } - - let Some((mut stream, addr)) = stream_and_addr else { - return Err(last_err.unwrap_or(io::Error::new( - io::ErrorKind::Other, - "Unable to connect to {host}:{port}", - ))); - }; - - stream.set_read_timeout(Some(timeout))?; - stream.set_write_timeout(Some(timeout))?; - stream.set_nodelay(true)?; - - let start = Instant::now(); - - debug!("send_request: Sending request"; "request" => %request.request_path()); - - // Some explanation of what's going on here is in order. - // - // The networking stack in Stacks is designed to operate on non-blocking sockets, and - // furthermore, it operates in a way that the call site in which a network request is issued can - // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary - // within the Stacks node, using it to issue a single blocking request imposes a lot of - // overhead. - // - // First, we will create the network connection and give it a ProtocolFamily implementation - // (StacksHttp), which gets used by the connection to encode and deocde messages. - // - // Second, we'll create a _handle_ to the network connection into which we will write requests - // and read responses. The connection itself is an opaque black box that, internally, - // implements a state machine around the ProtocolFamily implementation to incrementally read - // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is - // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, - // as well as underfull socket buffers. - // - // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network - // handle (which contains the buffered bytes from the message to be fed into the socket), and - // (2) drive bytes from the handle into the socket iself via the network connection. This is a - // two-step process mainly because the handle is expected to live in a separate stack (or even - // a separate thread). - // - // Fourth, we need to _drive_ data from the socket. 
We have to repeatedly (1) pull data from - // the socket into the network connection, and (2) drive parsed messages from the connection to - // the handle. Then, the call site that owns the handle simply polls the handle for new - // messages. Once we have received a message, we can proceed to handle it. - // - // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert - // it into an error. If it's a request (i.e. not a response), we also return an error. We - // only return the message if it was a well-formed non-error HTTP response. - - // Step 1-2: set up the connection and request handle - // NOTE: we don't need anything special for connection options, so just use the default - let conn_opts = ConnectionOptions::default(); - let http = StacksHttp::new_client(addr, &conn_opts); - let mut connection = NetworkConnection::new(http, &conn_opts, None); - let mut request_handle = connection - .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) - .map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("Failed to create request handle: {:?}", &e).as_str(), - ) - })?; - - // Step 3: load up the request with the message we're gonna send, and iteratively dump its - // bytes from the handle into the socket (the connection does internal buffering and - // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send - // anymore because the socket buffer is currently full). - request - .send(&mut request_handle) - .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; - - debug!("send_request(sending data)"); - loop { - let flushed = request_handle - .try_flush() - .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; - - // send it out - let num_sent = connection - .send_data(&mut stream) - .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; - - debug!( - "send_request(sending data): flushed = {}, num_sent = {}", - flushed, num_sent - ); - if flushed && num_sent == 0 { - break; - } - - if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new( - io::ErrorKind::WouldBlock, - "Timed out while receiving request", - )); - } - } - - // Step 4: pull bytes from the socket back into the handle, and see if the connection decoded - // and dispatched any new messages to the request handle. If so, then extract the message and - // check that it's a well-formed HTTP response. 
- debug!("send_request(receiving data)"); - let response; - loop { - // get back the reply - debug!("send_request(receiving data): try to receive data"); - match connection.recv_data(&mut stream) { - Ok(nr) => { - debug!("send_request(receiving data): received {} bytes", nr); - } - Err(e) => { - return Err(handle_net_error(e, "Failed to receive socket data")); - } - } - - // fullfill the request -- send it to its corresponding handle - debug!("send_request(receiving data): drain inbox"); - connection.drain_inbox(); - - // see if we got a message that was fulfilled in our handle - debug!("send_request(receiving data): try receive response"); - let rh = match request_handle.try_recv() { - Ok(resp) => { - response = resp; - break; - } - Err(e) => match e { - Ok(handle) => handle, - Err(e) => { - return Err(handle_net_error( - e, - "Failed to receive message after socket has been drained", - )); - } - }, - }; - request_handle = rh; - - if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new( - io::ErrorKind::WouldBlock, - "Timed out while receiving request", - )); - } - } - - // Step 5: decode the HTTP message and return it if it's not an error. - let response_data = match response { - StacksHttpMessage::Response(response_data) => response_data, - StacksHttpMessage::Error(path, response) => { - return Err(io::Error::new( - io::ErrorKind::Other, - format!( - "Request did not succeed ({} != 200). Path: '{}'", - response.preamble().status_code, - &path - ) - .as_str(), - )); - } - _ => { - return Err(io::Error::new( - io::ErrorKind::Other, - "Did not receive an HTTP response", - )); - } - }; - - Ok(response_data) -} - impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { debug!( @@ -567,7 +347,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match send_request(host, port, request, backoff) { + match send_http_request(host, port, request, backoff) { Ok(response) => { if response.preamble().status_code == 200 { debug!( @@ -1700,7 +1480,6 @@ mod test { use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; - use stacks::net::httpcore::StacksHttpResponse; use stacks::types::chainstate::BlockHeaderHash; use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; @@ -1709,22 +1488,6 @@ mod test { use super::*; - fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { - let peerhost: PeerHost = format!("{host}:{port}") - .parse() - .unwrap_or(PeerHost::DNS(host.to_string(), port)); - let mut request = StacksHttpRequest::new_for_peer( - peerhost, - "POST".into(), - path.into(), - HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), - ) - .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); - request.add_header("Connection".into(), "close".into()); - - request - } - #[test] fn build_block_processed_event() { let observer = EventObserver { @@ -1852,13 +1615,23 @@ mod test { // Start measuring time let start_time = Instant::now(); + let host = "10.255.255.1"; // non-routable IP for timeout + let port = 80; + + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = 
StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + "/".into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(b"{}").unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + // Attempt to send a request with a timeout - let result = send_request( - "10.255.255.1", // Non-routable IP for timeout - 80, // HTTP port - json_body("10.255.255.1", 80, "/", b"{}"), - timeout_duration, - ); + let result = send_http_request(host, port, request, timeout_duration); // Measure the elapsed time let elapsed_time = start_time.elapsed(); @@ -1886,146 +1659,6 @@ mod test { ); } - #[test] - fn test_send_request_timeout() { - // Set up a TcpListener that accepts a connection but delays response - let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); - let addr = listener.local_addr().unwrap(); - - // Spawn a thread that will accept the connection and do nothing, simulating a long delay - thread::spawn(move || { - let (stream, _addr) = listener.accept().unwrap(); - // Hold the connection open to simulate a delay - thread::sleep(Duration::from_secs(10)); - drop(stream); // Close the stream - }); - - // Set a timeout shorter than the sleep duration to force a timeout - let connection_timeout = Duration::from_secs(2); - - // Attempt to connect, expecting a timeout error - let result = send_request( - "127.0.0.1", - addr.port(), - json_body("127.0.0.1", 80, "/", b"{}"), - connection_timeout, - ); - - // Assert that the result is an error, specifically a timeout - assert!( - result.is_err(), - "Expected a timeout error, got: {:?}", - result - ); - - if let Err(err) = result { - assert_eq!( - err.kind(), - std::io::ErrorKind::WouldBlock, - "Expected TimedOut error, got: {:?}", - err - ); - } - } - - fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { - // Bind to an available port on localhost - let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); - let addr = listener.local_addr().unwrap(); - - debug!("Mock server listening on {}", addr); - - // Start the server in a new thread - thread::spawn(move || { - for stream in listener.incoming() { - debug!("Mock server accepted connection"); - let mut stream = stream.expect("Failed to accept connection"); - - // Read the client's request (even if we don't do anything with it) - let mut buffer = [0; 512]; - let _ = stream.read(&mut buffer); - debug!("Mock server received request"); - - // Simulate a basic HTTP response - stream - .write_all(response.as_bytes()) - .expect("Failed to write response"); - stream.flush().expect("Failed to flush stream"); - debug!("Mock server sent response"); - - // Wait for the client to signal that it's done reading - client_done_signal - .recv() - .expect("Failed to receive client done signal"); - - // Explicitly drop the stream after signaling to ensure the client finishes - // NOTE: this will cause the test to slow down, since `send_request` expects - // `Connection: close` - drop(stream); - - debug!("Mock server closing connection"); - - break; // Close after the first request - } - }); - - // Return the address of the mock server - format!("{}:{}", addr.ip(), addr.port()) - } - - fn parse_http_response(response: StacksHttpResponse) -> String { - let response_txt = match response.destruct().1 { - HttpResponsePayload::Text(s) => s, - HttpResponsePayload::Empty => "".to_string(), - 
HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), - HttpResponsePayload::Bytes(bytes) => { - String::from_utf8_lossy(bytes.as_slice()).to_string() - } - }; - response_txt - } - - #[test] - fn test_send_request_success() { - // Prepare the mock server to return a successful HTTP response - let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; - - // Create a channel to signal when the client is done reading - let (tx_client_done, rx_client_done) = channel(); - let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); - let timeout_duration = Duration::from_secs(5); - - let parts = server_addr.split(':').collect::>(); - let host = parts[0]; - let port = parts[1].parse().unwrap(); - - // Attempt to send a request to the mock server - let result = send_request( - host, - port, - json_body(host, port, "/", b"{}"), - timeout_duration, - ); - debug!("Got result: {:?}", result); - - // Ensure the server only closes after the client has finished processing - if let Ok(response) = &result { - let body = parse_http_response(response.clone()); - assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); - } - - tx_client_done - .send(()) - .expect("Failed to send close signal"); - - // Assert that the connection was successful - assert!( - result.is_ok(), - "Expected a successful request, but got {:?}", - result - ); - } - fn get_random_port() -> u16 { // Bind to a random port by specifying port 0, then retrieve the port assigned by the OS let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind to a random port"); From f968d548f14991cb596ab86ec110d29d56aaeaa3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 13:16:13 -0400 Subject: [PATCH 346/910] chore: address PR feedback --- stackslib/src/net/api/getstackers.rs | 2 +- stackslib/src/net/atlas/db.rs | 8 +++- stackslib/src/net/httpcore.rs | 44 ++++++++----------- .../burnchains/bitcoin_regtest_controller.rs | 17 ++++++- 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 581f989c7ee..3b253aeb21c 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -120,7 +120,7 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,10})$"#).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index f971344a28a..d6bdbb301eb 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -494,8 +494,12 @@ impl AtlasDB { page_index: u32, block_id: &StacksBlockId, ) -> Result, db_error> { - let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; - let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; + let min = page_index + .checked_mul(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; + let max = min + .checked_add(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; let args = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; diff --git a/stackslib/src/net/httpcore.rs 
b/stackslib/src/net/httpcore.rs index b173925dc88..5e90261e89d 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1686,17 +1686,17 @@ impl ProtocolFamily for StacksHttp { &req.preamble().verb, &decoded_path )))?; - handler_index + Some(handler_index) } else { - 0 + None }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! - if !self.allow_arbitrary_response { - self.request_handler_index = Some(handler_index); + if handler_index.is_some() { + self.request_handler_index = handler_index; } Ok(()) } @@ -1768,14 +1768,10 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { /// Convert a NetError into an io::Error if appropriate. fn handle_net_error(e: NetError, msg: &str) -> io::Error { - if let NetError::ReadError(ioe) = e { - ioe - } else if let NetError::WriteError(ioe) = e { - ioe - } else if let NetError::RecvTimeout = e { - io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") - } else { - io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + match e { + NetError::ReadError(ioe) | NetError::WriteError(ioe) => ioe, + NetError::RecvTimeout => io::Error::new(io::ErrorKind::WouldBlock, "recv timeout"), + _ => io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()), } } @@ -1911,8 +1907,7 @@ pub fn send_http_request( // and dispatched any new messages to the request handle. If so, then extract the message and // check that it's a well-formed HTTP response. debug!("send_request(receiving data)"); - let response; - loop { + let response = loop { // get back the reply debug!("send_request(receiving data): try to receive data"); match connection.recv_data(&mut stream) { @@ -1932,18 +1927,15 @@ pub fn send_http_request( debug!("send_request(receiving data): try receive response"); let rh = match request_handle.try_recv() { Ok(resp) => { - response = resp; - break; + break resp; + } + Err(Ok(handle)) => handle, + Err(Err(e)) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); } - Err(e) => match e { - Ok(handle) => handle, - Err(e) => { - return Err(handle_net_error( - e, - "Failed to receive message after socket has been drained", - )); - } - }, }; request_handle = rh; @@ -1953,7 +1945,7 @@ pub fn send_http_request( "Timed out while receiving request", )); } - } + }; // Step 5: decode the HTTP message and return it if it's not an error. let response_data = match response { diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 0a4b2556bb8..32d590dd396 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,4 +1,19 @@ -use std::convert::From; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; From 705ebf3af4e8e5fe6e525aab28c91c42469ee588 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 13:16:13 -0400 Subject: [PATCH 347/910] chore: address PR feedback --- stackslib/src/net/api/getstackers.rs | 2 +- stackslib/src/net/atlas/db.rs | 8 +++- stackslib/src/net/httpcore.rs | 44 ++++++++----------- .../burnchains/bitcoin_regtest_controller.rs | 17 ++++++- 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 69961dbe143..c2605adf611 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -121,7 +121,7 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,10})$"#).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index f971344a28a..d6bdbb301eb 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -494,8 +494,12 @@ impl AtlasDB { page_index: u32, block_id: &StacksBlockId, ) -> Result, db_error> { - let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; - let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; + let min = page_index + .checked_mul(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; + let max = min + .checked_add(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; let args = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index b173925dc88..5e90261e89d 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1686,17 +1686,17 @@ impl ProtocolFamily for StacksHttp { &req.preamble().verb, &decoded_path )))?; - handler_index + Some(handler_index) } else { - 0 + None }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! - if !self.allow_arbitrary_response { - self.request_handler_index = Some(handler_index); + if handler_index.is_some() { + self.request_handler_index = handler_index; } Ok(()) } @@ -1768,14 +1768,10 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { /// Convert a NetError into an io::Error if appropriate. 
fn handle_net_error(e: NetError, msg: &str) -> io::Error { - if let NetError::ReadError(ioe) = e { - ioe - } else if let NetError::WriteError(ioe) = e { - ioe - } else if let NetError::RecvTimeout = e { - io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") - } else { - io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + match e { + NetError::ReadError(ioe) | NetError::WriteError(ioe) => ioe, + NetError::RecvTimeout => io::Error::new(io::ErrorKind::WouldBlock, "recv timeout"), + _ => io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()), } } @@ -1911,8 +1907,7 @@ pub fn send_http_request( // and dispatched any new messages to the request handle. If so, then extract the message and // check that it's a well-formed HTTP response. debug!("send_request(receiving data)"); - let response; - loop { + let response = loop { // get back the reply debug!("send_request(receiving data): try to receive data"); match connection.recv_data(&mut stream) { @@ -1932,18 +1927,15 @@ pub fn send_http_request( debug!("send_request(receiving data): try receive response"); let rh = match request_handle.try_recv() { Ok(resp) => { - response = resp; - break; + break resp; + } + Err(Ok(handle)) => handle, + Err(Err(e)) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); } - Err(e) => match e { - Ok(handle) => handle, - Err(e) => { - return Err(handle_net_error( - e, - "Failed to receive message after socket has been drained", - )); - } - }, }; request_handle = rh; @@ -1953,7 +1945,7 @@ pub fn send_http_request( "Timed out while receiving request", )); } - } + }; // Step 5: decode the HTTP message and return it if it's not an error. let response_data = match response { diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 0a4b2556bb8..32d590dd396 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,4 +1,19 @@ -use std::convert::From; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; From f43ca1af89283fd19ba1152048962f6877f2f6c7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 15:03:29 -0400 Subject: [PATCH 348/910] fix: allow arbitrary response handler only if there is no handler found, and turn it on by default in the parse_response() helper. 
Also, it's no longer an error to omit content-type; per the RFC, this defaults to application/octet-stream --- stackslib/src/net/http/tests.rs | 2 -- stackslib/src/net/httpcore.rs | 7 +++++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/http/tests.rs b/stackslib/src/net/http/tests.rs index 508ca55c6e0..a17635bc59d 100644 --- a/stackslib/src/net/http/tests.rs +++ b/stackslib/src/net/http/tests.rs @@ -368,8 +368,6 @@ fn test_parse_http_response_preamble_err() { "Unsupported HTTP content type"), ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", "Invalid Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "missing Content-Type, Content-Length"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", "missing Content-Type, Content-Length"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 5e90261e89d..bd406345b98 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1356,6 +1356,7 @@ impl StacksHttp { "127.0.0.1:20443".parse().unwrap(), &ConnectionOptions::default(), ); + http.allow_arbitrary_response = true; let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { @@ -1498,7 +1499,9 @@ impl ProtocolFamily for StacksHttp { num_read, ); - let parse_res = if self.allow_arbitrary_response { + let parse_res = if self.request_handler_index.is_none() + && self.allow_arbitrary_response + { let arbitrary_parser = RPCArbitraryResponseHandler {}; let response_payload = arbitrary_parser .try_parse_response(http_response_preamble, &message_bytes[..])?; @@ -1604,7 +1607,7 @@ impl ProtocolFamily for StacksHttp { // message of known length test_debug!("read http response payload of {} bytes", buf.len(),); - if self.allow_arbitrary_response { + if self.request_handler_index.is_none() && self.allow_arbitrary_response { let arbitrary_parser = RPCArbitraryResponseHandler {}; let response_payload = arbitrary_parser.try_parse_response(http_response_preamble, buf)?; From 83253a08ac36a2174ade61e40355bab03335d188 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 15:31:44 -0400 Subject: [PATCH 349/910] fix: failing unit test (omitted content-type is not a problem anymore) --- stackslib/src/net/tests/httpcore.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index d9e13883a1c..d9c62eedf67 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -120,8 +120,6 @@ fn test_parse_stacks_http_preamble_response_err() { "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", From e8672eb215b47170b90830847bb6330e789c219c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:13:41 -0400 Subject: [PATCH 350/910] fix: more compact stackerdb output --- libstackerdb/src/libstackerdb.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/libstackerdb/src/libstackerdb.rs 
b/libstackerdb/src/libstackerdb.rs index 507d2249f71..8b3fe9db0fd 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -82,7 +82,7 @@ pub struct SlotMetadata { } /// Stacker DB chunk (i.e. as a reply to a chunk request) -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct StackerDBChunkData { /// slot ID pub slot_id: u32, @@ -98,6 +98,17 @@ pub struct StackerDBChunkData { pub data: Vec<u8>, } +impl fmt::Debug for StackerDBChunkData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.data.len() < 128 { + write!(f, "StackerDBChunkData({},{},{},{})", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data)) + } + else { + write!(f, "StackerDBChunkData({},{},{},{}...({}))", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data[..128]), self.data.len()) + } + } +} + /// StackerDB post chunk acknowledgement #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StackerDBChunkAckData { From 5879e5a3aa30dc130f5b2ef135825243c853c4f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:13:51 -0400 Subject: [PATCH 351/910] chore: docstring --- stackslib/src/burnchains/db.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 384047ccd44..72ca2e8bf11 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1411,6 +1411,8 @@ impl BurnchainDB { Ok(()) } + /// Stores a newly-parsed burnchain block's relevant data into the DB. + /// The given block's operations will be validated. pub fn store_new_burnchain_block( &mut self, burnchain: &Burnchain, From 498395dae5b43b04b7822a9d8dccd69064223575 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:14:06 -0400 Subject: [PATCH 352/910] chore: add test coverage for stackerdb push and modify downloader tests to rely only on the chains coordinator to advance state (thereby testing reward cycle boundary conditions) --- stackslib/src/net/connection.rs | 3 + stackslib/src/net/mod.rs | 14 ++ stackslib/src/net/stackerdb/sync.rs | 8 + stackslib/src/net/stackerdb/tests/sync.rs | 178 ++++++++++++++++++- stackslib/src/net/tests/download/nakamoto.rs | 166 ----------------- 5 files changed, 197 insertions(+), 172 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index afeebe54c3f..35779002793 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -439,6 +439,8 @@ pub struct ConnectionOptions { pub disable_inbound_handshakes: bool, /// Disable getting chunks from StackerDB (e.g. to test push-only) pub disable_stackerdb_get_chunks: bool, + /// Disable running stackerdb sync altogether (e.g. 
to test push-only) + pub disable_stackerdb_sync: bool, /// Unconditionally disconnect a peer after this amount of time pub force_disconnect_interval: Option<u64>, /// If set to true, this forces the p2p state machine to believe that it is running in @@ -548,6 +550,7 @@ impl std::default::Default for ConnectionOptions { disable_natpunch: false, disable_inbound_handshakes: false, disable_stackerdb_get_chunks: false, + disable_stackerdb_sync: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 5cedc4e0680..96c5be3d30e 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2918,6 +2918,20 @@ pub mod test { ret } + pub fn get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { + let burnchain_db = + BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), readwrite) + .unwrap(); + burnchain_db + } + + pub fn get_sortition_at_height(&self, height: u64) -> Option<BlockSnapshot> { + let sortdb = self.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + sort_handle.get_block_snapshot_by_height(height).unwrap() + } + pub fn get_burnchain_block_ops( &self, burn_block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 85e76ea5242..53a5e13e487 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -1204,6 +1204,14 @@ impl StackerDBSync { network: &mut PeerNetwork, config: &StackerDBConfig, ) -> Result<Option<StackerDBSyncResult>, net_error> { + if network.get_connection_opts().disable_stackerdb_sync { + test_debug!( + "{:?}: stacker DB sync is disabled", + network.get_local_peer() + ); + return Ok(None); + } + // throttle to write_freq if self.last_run_ts + config.write_freq.max(1) > get_epoch_time_secs() { debug!( diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 69bdad93d9d..9227eedecc7 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -319,8 +319,8 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { with_timeout(600, || { std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); - let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT); - let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 2); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT + 4); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 8); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -532,13 +532,13 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { #[test] #[ignore] fn test_stackerdb_replica_2_neighbors_10_chunks() { - inner_test_stackerdb_replica_2_neighbors_10_chunks(false, BASE_PORT + 4); + inner_test_stackerdb_replica_2_neighbors_10_chunks(false, BASE_PORT + 10); } #[test] #[ignore] fn test_stackerdb_replica_2_neighbors_10_push_chunks() { - inner_test_stackerdb_replica_2_neighbors_10_chunks(true, BASE_PORT + 8); + inner_test_stackerdb_replica_2_neighbors_10_chunks(true, BASE_PORT + 30); } fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port: u16) { @@ -663,16 +663,182 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port }) } +/// Verify that the relayer will push stackerdb chunks. +/// Replica A has the data. 
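+/// (In the test below, replica A is peer 1, B is peer 2, and C is peer 3. The peers are wired 1 <-> 2 <-> 3, and peer 3 sets disable_stackerdb_sync, so it can only learn chunks via pushes from peer 2.)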
+/// Replica B receives the data via StackerDB sync +/// Replica C receives the data from B's relayer pushes +#[test] +fn test_stackerdb_push_relayer() { + with_timeout(600, move || { + std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT + 100); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 102); + let mut peer_3_config = TestPeerConfig::from_port(BASE_PORT + 104); + + peer_1_config.allowed = -1; + peer_2_config.allowed = -1; + peer_3_config.allowed = -1; + + // short-lived walks... + peer_1_config.connection_opts.walk_max_duration = 10; + peer_2_config.connection_opts.walk_max_duration = 10; + peer_3_config.connection_opts.walk_max_duration = 10; + + peer_3_config.connection_opts.disable_stackerdb_sync = true; + + // peer 1 crawls peer 2, and peer 2 crawls peer 1 and peer 3, and peer 3 crawls peer 2 + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_3_config.to_neighbor()); + peer_3_config.add_neighbor(&peer_2_config.to_neighbor()); + + // set up stacker DBs for both peers + let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); + let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); + let idx_3 = add_stackerdb(&mut peer_3_config, Some(StackerDBConfig::template())); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_3 = TestPeer::new(peer_3_config); + + // peer 1 gets the DB + setup_stackerdb(&mut peer_1, idx_1, true, 10); + setup_stackerdb(&mut peer_2, idx_2, false, 10); + setup_stackerdb(&mut peer_3, idx_2, false, 10); + + // verify that peer 1 got the data + let peer_1_db_chunks = load_stackerdb(&peer_1, idx_1); + assert_eq!(peer_1_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); + assert!(peer_1_db_chunks[i].1.len() > 0); + } + + // verify that peer 2 and 3 did NOT get the data + let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); + assert_eq!(peer_2_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); + assert!(peer_2_db_chunks[i].1.len() == 0); + } + + let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); + assert_eq!(peer_3_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); + assert!(peer_3_db_chunks[i].1.len() == 0); + } + + let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); + let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); + let peer_3_db_configs = peer_3.config.get_stacker_db_configs(); + + let mut i = 0; + loop { + // run peer network state-machines + peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); + peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); + peer_3.network.stacker_db_configs = peer_3_db_configs.clone(); + + let res_1 = peer_1.step_with_ibd(false); + let res_2 = peer_2.step_with_ibd(false); + let res_3 = peer_3.step_with_ibd(false); + + if let Ok(res) = res_1 { + check_sync_results(&res); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + 
.process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_2 { + check_sync_results(&res); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_3 { + check_sync_results(&res); + peer_3 + .relayer + .process_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_3 + .relayer + .process_pushed_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + let db1 = load_stackerdb(&peer_1, idx_1); + let db2 = load_stackerdb(&peer_2, idx_2); + let db3 = load_stackerdb(&peer_3, idx_3); + + if db1 == db2 && db2 == db3 { + break; + } + i += 1; + + debug!("StackerDB sync step {}", i); + } + + debug!("Completed stacker DB sync in {} step(s)", i); + }) +} + #[test] #[ignore] fn test_stackerdb_10_replicas_10_neighbors_line_10_chunks() { - inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(false, BASE_PORT + 28); + inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(false, BASE_PORT + 50); } #[test] #[ignore] fn test_stackerdb_10_replicas_10_neighbors_line_push_10_chunks() { - inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(true, BASE_PORT + 68); + inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(true, BASE_PORT + 70); } fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, base_port: u16) { diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index afba1e90e7d..31f88b50f8c 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2111,38 +2111,6 @@ fn test_nakamoto_download_run_2_peers() { let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); - let all_burn_block_ops: Vec<(u64, Vec<_>)> = (26..=tip.block_height) - .map(|height| { - ( - height, - peer.get_burnchain_block_ops_at_height(height) - .unwrap_or(vec![]), - ) - }) - .collect(); - - let all_sortitions: Vec = all_burn_block_ops - .iter() - .map(|(height, ops)| { - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let sn = ih.get_block_snapshot_by_height(*height).unwrap().unwrap(); - sn - }) - .collect(); - - let mut all_block_headers: HashMap = HashMap::new(); - for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_tenure_start_block_header( - &mut peer.chainstate().index_conn(), - &nakamoto_tip, - &sn.consensus_hash, - ) - .unwrap() - { - all_block_headers.insert(sn.consensus_hash.clone(), header); - } - } - let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); @@ -2178,19 +2146,9 @@ fn test_nakamoto_download_run_2_peers() { let (term_sx, term_rx) = sync_channel(1); thread::scope(|s| { s.spawn(move || { - let mut burnchain_ptr = 0; - - // kick things off - let (_burn_height, burn_ops) = 
all_burn_block_ops.get(burnchain_ptr).unwrap(); - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - let mut last_burnchain_sync = get_epoch_time_secs(); - let deadline = 5; - loop { boot_peer .run_with_ibd(true, Some(&mut boot_dns_client)) @@ -2200,47 +2158,6 @@ fn test_nakamoto_download_run_2_peers() { SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - if burnchain_ptr < all_burn_block_ops.len() { - let (burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - let expected_sortition = all_sortitions.get(burnchain_ptr).unwrap(); - if !expected_sortition.sortition { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - if !all_block_headers.contains_key(&expected_sortition.consensus_hash) { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - - let header = all_block_headers - .get(&expected_sortition.consensus_hash) - .unwrap(); - debug!( - "Waiting for Stacks block {} (sortition {} height {} burn height {})", - &header.index_block_hash(), - &expected_sortition.consensus_hash, - &header.anchored_header.height(), - expected_sortition.block_height - ); - - if stacks_tip_ch != last_stacks_tip_ch - || stacks_tip_ch == header.consensus_hash - || last_burnchain_sync + deadline < get_epoch_time_secs() - { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - } - last_stacks_tip_ch = stacks_tip_ch; last_stacks_tip_bhh = stacks_tip_bhh; @@ -2305,38 +2222,6 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); - let all_burn_block_ops: Vec<(u64, Vec<_>)> = (26..=tip.block_height) - .map(|height| { - ( - height, - peer.get_burnchain_block_ops_at_height(height) - .unwrap_or(vec![]), - ) - }) - .collect(); - - let all_sortitions: Vec = all_burn_block_ops - .iter() - .map(|(height, ops)| { - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let sn = ih.get_block_snapshot_by_height(*height).unwrap().unwrap(); - sn - }) - .collect(); - - let mut all_block_headers: HashMap = HashMap::new(); - for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_tenure_start_block_header( - &mut peer.chainstate().index_conn(), - &nakamoto_tip, - &sn.consensus_hash, - ) - .unwrap() - { - all_block_headers.insert(sn.consensus_hash.clone(), header); - } - } - let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); @@ -2372,19 +2257,9 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let (term_sx, term_rx) = sync_channel(1); thread::scope(|s| { s.spawn(move || { - let mut burnchain_ptr = 0; - - // kick things off - let (_burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - let (mut 
last_stacks_tip_ch, mut last_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - let mut last_burnchain_sync = get_epoch_time_secs(); - let deadline = 5; - loop { boot_peer .run_with_ibd(true, Some(&mut boot_dns_client)) @@ -2394,47 +2269,6 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - if burnchain_ptr < all_burn_block_ops.len() { - let (burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - let expected_sortition = all_sortitions.get(burnchain_ptr).unwrap(); - if !expected_sortition.sortition { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - if !all_block_headers.contains_key(&expected_sortition.consensus_hash) { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - - let header = all_block_headers - .get(&expected_sortition.consensus_hash) - .unwrap(); - debug!( - "Waiting for Stacks block {} (sortition {} height {} burn height {})", - &header.index_block_hash(), - &expected_sortition.consensus_hash, - &header.anchored_header.height(), - expected_sortition.block_height - ); - - if stacks_tip_ch != last_stacks_tip_ch - || stacks_tip_ch == header.consensus_hash - || last_burnchain_sync + deadline < get_epoch_time_secs() - { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - } - last_stacks_tip_ch = stacks_tip_ch; last_stacks_tip_bhh = stacks_tip_bhh; From ec1b9138b69f33bb973c7c476264562710d2d4cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:14:34 -0400 Subject: [PATCH 353/910] chore: log block rejection based on signature threshold more faithfully --- stackslib/src/chainstate/nakamoto/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 49ced5f916a..1ec2bb4656a 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2280,7 +2280,14 @@ impl NakamotoChainState { "signing_weight" => signing_weight); true } else { - debug!("Will not store alternative copy of block {} ({}) with block hash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &block_hash); + if existing_signing_weight > signing_weight { + debug!("Will not store alternative copy of block {} ({}) with block hash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &block_hash); + } else { + debug!( + "Will not store duplicate copy of block {} ({}) with block hash {}", + &block_id, &block.header.consensus_hash, &block_hash + ); + } false }; From fb1e3d61d200893a008eb6f7c2b39c3eba86fa0c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:14:47 -0400 Subject: [PATCH 354/910] chore: debug instead of warn on block rejection since the cause is usually normal --- testnet/stacks-node/src/nakamoto_node/miner.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 
cd811a9346d..fb79c6abc72 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -738,7 +738,11 @@ impl BlockMinerThread { staging_tx.commit()?; if !accepted { - warn!("Did NOT accept block {} we mined", &block.block_id()); + // this can happen if the p2p network and relayer manage to receive this block prior to + // the thread reaching this point -- this can happen because the signers broadcast the + // signed block to the nodes independent of the miner, so the miner itself can receive + // and store its own block outside of this thread. + debug!("Did NOT accept block {} we mined", &block.block_id()); // not much we can do here, but try and mine again and hope we produce a valid one. return Ok(()); From 6475f6510f8c71d905a8c10bdbcde480d39d8993 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:37:25 -0400 Subject: [PATCH 355/910] chore: rust-fmt --- libstackerdb/src/libstackerdb.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/libstackerdb/src/libstackerdb.rs b/libstackerdb/src/libstackerdb.rs index 8b3fe9db0fd..714ef838c45 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -101,10 +101,24 @@ pub struct StackerDBChunkData { impl fmt::Debug for StackerDBChunkData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.data.len() < 128 { - write!(f, "StackerDBChunkData({},{},{},{})", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data)) - } - else { - write!(f, "StackerDBChunkData({},{},{},{}...({}))", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data[..128]), self.data.len()) + write!( + f, + "StackerDBChunkData({},{},{},{})", + self.slot_id, + self.slot_version, + &self.sig, + &to_hex(&self.data) + ) + } else { + write!( + f, + "StackerDBChunkData({},{},{},{}...({}))", + self.slot_id, + self.slot_version, + &self.sig, + &to_hex(&self.data[..128]), + self.data.len() + ) } } } From 334d0373c32c19a31b180d62a48c79ecc84ef792 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 25 Aug 2024 20:01:19 -0400 Subject: [PATCH 356/910] fix: catch error on tx submission --- .../src/tests/neon_integrations.rs | 17 ++++-- testnet/stacks-node/src/tests/signer/v0.rs | 61 ++++++++++++++----- 2 files changed, 57 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0905fb1f60a..b651f405c32 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -749,8 +749,8 @@ pub fn wait_for_microblocks(microblocks_processed: &Arc, timeout: u64 return true; } -/// returns Txid string -pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { +/// returns Txid string upon success +pub fn submit_tx_fallible(http_origin: &str, tx: &Vec) -> Result { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/transactions", http_origin); let res = client @@ -768,13 +768,20 @@ pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { .txid() .to_string() ); - return res; + Ok(res) } else { - eprintln!("Submit tx error: {}", res.text().unwrap()); - panic!(""); + Err(res.text().unwrap()) } } +/// returns Txid string +pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { + submit_tx_fallible(http_origin, tx).unwrap_or_else(|e| { + eprintln!("Submit tx error: {}", e); + panic!(""); + }) +} + pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option 
{ let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/transactions/unconfirmed/{}", http_origin, txid); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d41483a3643..702a136d1e5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -69,7 +69,7 @@ use crate::tests::nakamoto_integrations::{ }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, - test_observer, + submit_tx_fallible, test_observer, }; use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController, Config, Keychain}; @@ -3165,6 +3165,7 @@ fn partial_tenure_fork() { &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_proposed = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); @@ -3231,6 +3232,8 @@ fn partial_tenure_fork() { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_before = blocks_proposed.load(Ordering::SeqCst); + info!("proposed_blocks: {proposed_before}, proposed_blocks2: {proposed_before_2}"); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3238,6 +3241,10 @@ fn partial_tenure_fork() { let mined_1 = blocks_mined1.load(Ordering::SeqCst); let mined_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_1 = blocks_proposed.load(Ordering::SeqCst); + info!( + "Fork initiated: {fork_initiated}, Mined 1 blocks: {mined_1}, Mined 2 blocks {mined_2}, Proposed blocks: {proposed_1}, Proposed blocks 2: {proposed_2}", + ); Ok((fork_initiated && proposed_2 > proposed_before_2) || mined_1 > mined_before_1 @@ -3279,18 +3286,30 @@ fn partial_tenure_fork() { let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - submit_tx(&http_origin, &transfer_tx); - - wait_for(60, || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - - Ok((fork_initiated && proposed_2 > proposed_before_2) - || mined_1 > mined_before_1 - || mined_2 > mined_before_2) - }) - .unwrap(); + // This may fail if the forking miner wins too many tenures and this account's + // nonces get too high (TooMuchChaining) + match submit_tx_fallible(&http_origin, &transfer_tx) { + Ok(_) => { + wait_for(60, || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2) + }) + .unwrap(); + } + Err(e) => { + if e.to_string().contains("TooMuchChaining") { + info!("TooMuchChaining error, skipping block"); + continue; + } else { + panic!("Failed to submit tx: {}", e); + } + } + } info!( "Attempted to mine interim block {}:{}", btc_blocks_mined, interim_block_ix @@ -3317,9 +3336,6 @@ fn partial_tenure_fork() { } else if miner_2_tenures == min_miner_2_tenures { 
// If this is the forking tenure, miner 2 should have mined 0 blocks assert_eq!(mined_2, mined_before_2); - - // Clear the ignore block - clear_ignore_block(); } } } @@ -3364,5 +3380,18 @@ fn partial_tenure_fork() { .unwrap(); assert_eq!(tip.stacks_block_height, ignore_block - 1); + let (chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let blocks = chainstate + .get_stacks_chain_tips_at_height(ignore_block) + .unwrap(); + info!("blocks: {:?}", blocks); + signer_test.shutdown(); } From 06efc09c1dde5f8deae87c16c54cf93ab89a7c21 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 26 Aug 2024 10:38:55 -0400 Subject: [PATCH 357/910] WIP: Cleanup test for easier logic Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 51 ++++++++++------------ 1 file changed, 23 insertions(+), 28 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1f637b6b8d0..0e38f13ea87 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1915,7 +1915,10 @@ fn end_of_tenure() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); - + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); // Advance to one before the next reward cycle to ensure we are on the reward cycle boundary @@ -1928,7 +1931,18 @@ fn end_of_tenure() { - 2; // give the system a chance to mine a Nakamoto block - sleep_ms(30_000); + // But it doesn't have to mine one for this test to succeed? 
+ let start = Instant::now(); + while start.elapsed() <= short_timeout { + let mined_blocks = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if mined_blocks > blocks_before { + break; + } + sleep_ms(100); + } info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -1936,7 +1950,7 @@ fn end_of_tenure() { final_reward_cycle_height_boundary, num_signers, ); - println!("Advanced to nexct reward cycle boundary: {final_reward_cycle_height_boundary}"); + println!("Advanced to next reward cycle boundary: {final_reward_cycle_height_boundary}"); assert_eq!( signer_test.get_current_reward_cycle(), final_reward_cycle - 1 @@ -1977,39 +1991,20 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - info!("Triggering a new block to be mined"); - - // Mine a block into the next reward cycle - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 10, - || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, - ) - .unwrap(); - - // Mine a few blocks so we are well into the next reward cycle - for _ in 0..2 { + while signer_test.get_current_reward_cycle() != final_reward_cycle { next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 10, || Ok(true), ) .unwrap(); + assert!( + start_time.elapsed() <= short_timeout, + "Timed out waiting to enter the next reward cycle" + ); + std::thread::sleep(Duration::from_millis(100)); } - sleep_ms(10_000); - assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); - while test_observer::get_burn_blocks() .last() .unwrap() From 24b2d840a1216b7d557e54d454c53d4b2c5c38e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 26 Aug 2024 13:57:36 -0400 Subject: [PATCH 358/910] fix: remove deadlock condition between p2p and relayer threads that could arise when they both try to read the reward set. In short -- don't use a StacksDBTx<'a> to read chainstate when a StacksDBConn<'a> will do. Patch the relevant files to make it so the offending code can take a StacksDBIndexed trait impl instead of a transaction. --- .../chainstate/nakamoto/coordinator/mod.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 37 +++++++++++++++---- stackslib/src/chainstate/nakamoto/tenure.rs | 4 +- 3 files changed, 33 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index de884a8d9c9..058025ee1c2 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -188,7 +188,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { debug_log: bool, ) -> Result { let Some(reward_set_block) = NakamotoChainState::get_header_by_coinbase_height( - &mut chainstate.index_tx_begin(), + &mut chainstate.index_conn(), block_id, coinbase_height_of_calculation, )? 
diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 52ab3632976..28ba89d59da 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -300,6 +300,13 @@ pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; fn sqlite(&self) -> &Connection; + /// Get the ancestor block hash given a height + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError>; + /// Get the block ID for a specific coinbase height in the fork identified by `tip` fn get_nakamoto_block_id_at_coinbase_height( &mut self, @@ -452,6 +459,14 @@ impl StacksDBIndexed for StacksDBConn<'_> { fn sqlite(&self) -> &Connection { self.conn() } + + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError> { + self.get_ancestor_block_hash(coinbase_height, tip_index_hash) + } } impl StacksDBIndexed for StacksDBTx<'_> { @@ -462,6 +477,14 @@ impl StacksDBIndexed for StacksDBTx<'_> { fn sqlite(&self) -> &Connection { self.tx().deref() } + + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError> { + self.get_ancestor_block_hash(coinbase_height, tip_index_hash) + } } impl<'a> ChainstateTx<'a> { @@ -2406,22 +2429,22 @@ impl NakamotoChainState { /// Return a Nakamoto StacksHeaderInfo at a given coinbase height in the fork identified by `tip_index_hash`. /// * For Stacks 2.x, this is the Stacks block's header /// * For Stacks 3.x (Nakamoto), this is the first block in the miner's tenure. - pub fn get_header_by_coinbase_height( - tx: &mut StacksDBTx, + pub fn get_header_by_coinbase_height( + conn: &mut SDBI, tip_index_hash: &StacksBlockId, coinbase_height: u64, ) -> Result, ChainstateError> { // nakamoto block? if let Some(block_id) = - tx.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? + conn.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? { - return Self::get_block_header_nakamoto(tx.sqlite(), &block_id); + return Self::get_block_header_nakamoto(conn.sqlite(), &block_id); } // epcoh2 block? - let Some(ancestor_at_height) = tx - .get_ancestor_block_hash(coinbase_height, tip_index_hash)? - .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor)) + let Some(ancestor_at_height) = conn + .get_ancestor_block_id(coinbase_height, tip_index_hash)? + .map(|ancestor| Self::get_block_header(conn.sqlite(), &ancestor)) .transpose()? .flatten() else { diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index bff030be8f8..81380cc93d0 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -372,7 +372,7 @@ impl NakamotoChainState { let matured_coinbase_height = coinbase_height - MINER_REWARD_MATURITY; let matured_tenure_block_header = Self::get_header_by_coinbase_height( - chainstate_tx, + chainstate_tx.deref_mut(), &tip_index_hash, matured_coinbase_height, )? @@ -964,7 +964,7 @@ impl NakamotoChainState { let total_coinbase = coinbase_at_block.saturating_add(accumulated_rewards); let parent_tenure_start_header: StacksHeaderInfo = Self::get_header_by_coinbase_height( - chainstate_tx, + chainstate_tx.deref_mut(), &block.header.parent_block_id, parent_coinbase_height, )? 
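
The refactor above generalizes the chainstate read path: `get_header_by_coinbase_height` now accepts any `StacksDBIndexed` impl, so read-only callers can pass a `StacksDBConn<'a>` instead of opening a `StacksDBTx<'a>` and contending for the write lock. A minimal standalone sketch of this trait-based pattern follows; the names (`KvIndexed`, `Conn`, `Tx`, `get_header_at`) are hypothetical stand-ins for `StacksDBIndexed`, `StacksDBConn`, `StacksDBTx`, and `get_header_by_coinbase_height`, not the real API:

```rust
// Sketch: readers accept any impl of a shared lookup trait instead of a
// concrete transaction type (hypothetical names throughout).
use std::collections::HashMap;

trait KvIndexed {
    fn get(&mut self, key: &str) -> Option<String>;
}

struct Conn {
    store: HashMap<String, String>,
}

struct Tx<'a> {
    conn: &'a mut Conn,
}

impl KvIndexed for Conn {
    fn get(&mut self, key: &str) -> Option<String> {
        self.store.get(key).cloned()
    }
}

impl KvIndexed for Tx<'_> {
    fn get(&mut self, key: &str) -> Option<String> {
        self.conn.store.get(key).cloned()
    }
}

// Generic over the trait: callable with a plain connection (read path) or a
// transaction (write path), so read-only callers never take the write lock.
fn get_header_at<DB: KvIndexed>(db: &mut DB, height: u64) -> Option<String> {
    db.get(&format!("header:{height}"))
}

fn main() {
    let mut conn = Conn {
        store: HashMap::from([("header:1".into(), "h1".into())]),
    };
    // Read-only caller: no transaction needed.
    assert_eq!(get_header_at(&mut conn, 1).as_deref(), Some("h1"));
    // Write path still works through the same generic reader.
    let mut tx = Tx { conn: &mut conn };
    assert_eq!(get_header_at(&mut tx, 1).as_deref(), Some("h1"));
    println!("both read paths work");
}
```

Both call sites compile against the same generic reader; only the write path ever constructs a transaction, which is what closes the deadlock window between the p2p and relayer threads described in the commit message.
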
From 56746d0bfbb660c133fa86749e6c6e7ca57803f5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 26 Aug 2024 15:22:39 -0400 Subject: [PATCH 359/910] Fix test compilation Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 1b971869bcb..188e2e5a3e8 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -1086,8 +1086,6 @@ fn test_nakamoto_chainstate_getters() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_ref().unwrap(); - let (mut stacks_db_tx, _) = chainstate.chainstate_tx_begin().unwrap(); - for coinbase_height in 0..=((tip .anchored_header .as_stacks_nakamoto() @@ -1097,7 +1095,7 @@ fn test_nakamoto_chainstate_getters() { + 1) { let header_opt = NakamotoChainState::get_header_by_coinbase_height( - &mut stacks_db_tx, + &mut chainstate.index_conn(), &tip.index_block_hash(), coinbase_height, ) From 8ba1a3555290f505f4555ebf1de10b1d6eeb9672 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Tue, 27 Aug 2024 11:43:19 +0200 Subject: [PATCH 360/910] feat: add `block_time` to `/new_block` event payload --- stackslib/src/chainstate/coordinator/mod.rs | 1 + stackslib/src/chainstate/coordinator/tests.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 3 +++ stackslib/src/chainstate/stacks/db/blocks.rs | 2 ++ stackslib/src/net/mod.rs | 1 + testnet/stacks-node/src/event_dispatcher.rs | 6 ++++++ testnet/stacks-node/src/run_loop/mod.rs | 1 + testnet/stacks-node/src/tests/nakamoto_integrations.rs | 7 +++++++ 8 files changed, 22 insertions(+) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 72e44f981c0..d5ffe3e55dd 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -179,6 +179,7 @@ pub trait BlockEventDispatcher { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec: &Option>, + block_timestamp: Option, ); /// called whenever a burn block is about to be diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 50127af1760..be5f862839d 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -430,6 +430,7 @@ impl BlockEventDispatcher for NullEventDispatcher { _pox_constants: &PoxConstants, _reward_set_data: &Option, _signer_bitvec: &Option>, + _block_timestamp: Option, ) { assert!( false, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 28ba89d59da..2ccf6c11577 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2049,6 +2049,8 @@ impl NakamotoChainState { let signer_bitvec = (&next_ready_block).header.pox_treatment.clone(); + let block_timestamp = next_ready_block.header.timestamp; + // set stacks block accepted let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; sort_tx.set_stacks_block_accepted( @@ -2088,6 +2090,7 @@ impl NakamotoChainState { &pox_constants, &reward_set_data, &Some(signer_bitvec), + Some(block_timestamp), ); } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 47cace8c4b0..a45a8d60cb4 100644 --- 
a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -190,6 +190,7 @@ impl BlockEventDispatcher for DummyEventDispatcher { _pox_constants: &PoxConstants, _reward_set_data: &Option, _signer_bitvec: &Option>, + _block_timestamp: Option, ) { assert!( false, @@ -6409,6 +6410,7 @@ impl StacksChainState { &pox_constants, &reward_set_data, &None, + None, ); } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3ba4292f1c2..ba8575a0326 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2017,6 +2017,7 @@ pub mod test { pox_constants: &PoxConstants, reward_set_data: &Option, _signer_bitvec: &Option>, + _block_timestamp: Option, ) { self.blocks.lock().unwrap().push(TestEventObserverBlock { block: block.clone(), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 34e42501ace..99b500dc523 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -594,6 +594,7 @@ impl EventObserver { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec_opt: &Option>, + block_timestamp: Option, ) -> serde_json::Value { // Serialize events to JSON let serialized_events: Vec = filtered_events @@ -631,6 +632,7 @@ impl EventObserver { let mut payload = json!({ "block_hash": format!("0x{}", block.block_hash), "block_height": metadata.stacks_block_height, + "block_time": block_timestamp, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, "miner_txid": format!("0x{}", winner_txid), @@ -852,6 +854,7 @@ impl BlockEventDispatcher for EventDispatcher { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec: &Option>, + block_timestamp: Option, ) { self.process_chain_tip( block, @@ -869,6 +872,7 @@ impl BlockEventDispatcher for EventDispatcher { pox_constants, reward_set_data, signer_bitvec, + block_timestamp, ); } @@ -1051,6 +1055,7 @@ impl EventDispatcher { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec: &Option>, + block_timestamp: Option, ) { let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); @@ -1102,6 +1107,7 @@ impl EventDispatcher { pox_constants, reward_set_data, signer_bitvec, + block_timestamp, ); // Send payload diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index b824793e172..2be02659cd0 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -197,5 +197,6 @@ pub fn announce_boot_receipts( pox_constants, &None, &None, + None, ); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf64..5bd9ba87e71 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2271,6 +2271,13 @@ fn correct_burn_outs() { "Blocks should be sorted by cycle number already" ); + let block_times: Vec = new_blocks_with_reward_set + .iter() + .filter_map(|block| block.get("block_time").and_then(|cn| cn.as_u64())) + .collect(); + // Assert that block_times are all greater than 0 + assert!(block_times.iter().all(|&t| t > 0)); + for block in new_blocks_with_reward_set.iter() { let cycle_number = block["cycle_number"].as_u64().unwrap(); let reward_set = block["reward_set"].as_object().unwrap(); From 
880b345f2ddcf13a6094303b5b2d4f320156f600 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Tue, 27 Aug 2024 12:09:59 +0200 Subject: [PATCH 361/910] chore: fix tests --- testnet/stacks-node/src/event_dispatcher.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 99b500dc523..53a17aca97c 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1514,6 +1514,7 @@ mod test { let mblock_confirmed_consumed = ExecutionCost::zero(); let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + let block_timestamp = Some(123456); let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1531,6 +1532,7 @@ mod test { &pox_constants, &None, &Some(signer_bitvec.clone()), + block_timestamp, ); assert_eq!( payload @@ -1582,6 +1584,7 @@ mod test { let mblock_confirmed_consumed = ExecutionCost::zero(); let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + let block_timestamp = Some(123456); let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1599,6 +1602,7 @@ mod test { &pox_constants, &None, &Some(signer_bitvec.clone()), + block_timestamp, ); let event_signer_signature = payload From cc9ab54a6e4a99ee8527f93ace1243918180a3dd Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 27 Aug 2024 10:10:35 -0700 Subject: [PATCH 362/910] Update changelog for 2.5.0.0.5.2 --- stacks-signer/CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 1476d56ad01..dabe0b346a6 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [2.5.0.0.5.2] + +### Added + +### Changed + +- Reuse BlockResponse slot for MockSignature message type (#5103) + ## [2.5.0.0.5.2-rc1] ### Added From 072be56e5dafd8ab35556d66a11b2cfac2623506 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 28 Aug 2024 09:10:55 -0400 Subject: [PATCH 363/910] Fix typo in httpcore.rs StacksHttpMessage Signed-off-by: Jacinta Ferrant --- stackslib/src/net/httpcore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index bd406345b98..804add6f331 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -764,7 +764,7 @@ impl MessageSequence for StacksHttpMessage { } fn get_message_name(&self) -> &'static str { - "StachsHttpMessage" + "StacksHttpMessage" } } From 30575476e50f35918ce21f68385a2b30e7d5a7c8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 22 Aug 2024 16:07:58 -0400 Subject: [PATCH 364/910] test(signer): Add test with duplicate signer config --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 56 ++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 97 +++++++++++++++++++-- 3 files changed, 121 insertions(+), 33 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 27e76a646d6..894aed3cf34 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -99,6 +99,7 @@ jobs: - tests::signer::v0::reloads_signer_set_in - 
tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks + - tests::signer::v0::duplicate_signers - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 57613728038..42407a1a76b 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -125,7 +125,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest, mut signer_config_modifier: F, mut node_config_modifier: G, - btc_miner_pubkeys: &[Secp256k1PublicKey], + btc_miner_pubkeys: Option>, + signer_stacks_private_keys: Option>, ) -> Self { // Generate Signer Data - let signer_stacks_private_keys = (0..num_signers) - .map(|_| StacksPrivateKey::new()) - .collect::>(); + let signer_stacks_private_keys = signer_stacks_private_keys + .inspect(|keys| { + assert_eq!( + keys.len(), + num_signers, + "Number of private keys does not match number of signers" + ) + }) + .unwrap_or_else(|| (0..num_signers).map(|_| StacksPrivateKey::new()).collect()); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); @@ -159,11 +167,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); + + // First two signers have same private key + signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + None, + |_| {}, + |_| {}, + None, + Some(signer_stacks_private_keys), + ); + let timeout = Duration::from_secs(30); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); + + signer_test.boot_to_epoch_3(); + + // give the system a chance to reach the Nakamoto start tip + // mine a Nakamoto block + wait_for(30, || { + let blocks_mined = mined_blocks.load(Ordering::SeqCst); + Ok(blocks_mined > blocks_mined_before) + }) + .unwrap(); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + + // Test prometheus metrics response + #[cfg(feature = "monitoring_prom")] + { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `5`, even though there is only one block proposed. 
+ let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); + assert!(metrics_response.contains(&expected_result)); + let expected_result = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers + ); + assert!(metrics_response.contains(&expected_result)); + } +} From c30162159d6573133cc8e7a540bb3afc3780abef Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 27 Aug 2024 08:53:05 -0400 Subject: [PATCH 365/910] test(signer): Add checks for duplicate signing keys --- .../src/tests/neon_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 47 ++++++------------- 2 files changed, 16 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0905fb1f60a..3dc9669a9ea 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -574,7 +574,7 @@ pub mod test_observer { pub fn contains_burn_block_range(range: impl RangeBounds) -> Result<(), String> { // Get set of all burn block heights let burn_block_heights = get_blocks() - .iter() + .into_iter() .map(|x| x.get("burn_block_height").unwrap().as_u64().unwrap()) .collect::>(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 60b6c879377..877c6b71ae4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -321,6 +321,7 @@ impl SignerTest { // Verify that the signers signed the proposed block let mut signer_index = 0; let mut signature_index = 0; + let mut signing_keys = HashSet::new(); let validated = loop { // Since we've already checked `signature.len()`, this means we've // validated all the signatures in this loop @@ -331,6 +332,9 @@ impl SignerTest { error!("Failed to validate the mined nakamoto block: ran out of signers to try to validate signatures"); break false; }; + if !signing_keys.insert(signer.signing_key) { + panic!("Duplicate signing key detected: {:?}", signer.signing_key); + } let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) .expect("Failed to convert signing key to StacksPublicKey"); let valid = stacks_public_key @@ -488,11 +492,7 @@ fn block_proposal_rejection() { while !found_signer_signature_hash_1 && !found_signer_signature_hash_2 { std::thread::sleep(Duration::from_secs(1)); let chunks = test_observer::get_stackerdb_chunks(); - for chunk in chunks - .into_iter() - .map(|chunk| chunk.modified_slots) - .flatten() - { + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) else { continue; @@ -2982,6 +2982,13 @@ fn duplicate_signers() { // First two signers have same private key signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + let duplicate_pubkey = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[0]); + let duplicate_pubkey_from_copy = + Secp256k1PublicKey::from_private(&signer_stacks_private_keys[1]); + assert_eq!( + duplicate_pubkey, duplicate_pubkey_from_copy, + "Recovered pubkeys don't match" + ); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -2992,37 +2999,13 @@ fn duplicate_signers() { None, Some(signer_stacks_private_keys), ); - let timeout = Duration::from_secs(30); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let 
blocks_mined_before = mined_blocks.load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); + let timeout = Duration::from_secs(30); - // give the system a chance to reach the Nakamoto start tip - // mine a Nakamoto block - wait_for(30, || { - let blocks_mined = mined_blocks.load(Ordering::SeqCst); - Ok(blocks_mined > blocks_mined_before) - }) - .unwrap(); + info!("------------------------- Try mining one block -------------------------"); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); - // Test prometheus metrics response - #[cfg(feature = "monitoring_prom")] - { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. - let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - let expected_result = format!( - "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", - num_signers - ); - assert!(metrics_response.contains(&expected_result)); - } + signer_test.shutdown(); } From bf860f4d0fc41c34abc418632d778cefcc8fc2c3 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 28 Aug 2024 12:56:01 -0400 Subject: [PATCH 366/910] test(signer): Add check that duplicate signers produce identical signatures and recovered pubkeys --- stacks-common/src/util/secp256k1.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 49 ++++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 0274f41b025..034a5a4941a 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -38,7 +38,7 @@ use crate::util::hash::{hex_bytes, to_hex}; // per-thread Secp256k1 context thread_local!(static _secp256k1: Secp256k1 = Secp256k1::new()); -#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Hash)] pub struct Secp256k1PublicKey { // serde is broken for secp256k1, so do it ourselves #[serde( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 877c6b71ae4..7c7412a6e4f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2982,6 +2982,7 @@ fn duplicate_signers() { // First two signers have same private key signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + let unique_signers = num_signers - 1; let duplicate_pubkey = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[0]); let duplicate_pubkey_from_copy = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[1]); @@ -3007,5 +3008,53 @@ fn duplicate_signers() { signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + info!("------------------------- Read all `BlockResponse::Accepted` messages -------------------------"); + + let mut signer_accepted_responses = vec![]; + let start_polling = Instant::now(); + while start_polling.elapsed() <= timeout { + std::thread::sleep(Duration::from_secs(1)); + let messages = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + 
SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()).ok() + }) + .filter_map(|message| match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { + info!("Message(accepted): {message:?}"); + Some(m) + } + _ => { + debug!("Message(ignored): {message:?}"); + None + } + }); + signer_accepted_responses.extend(messages); + } + + info!("------------------------- Assert there are {unique_signers} unique signatures and recovered pubkeys -------------------------"); + + // Pick a message hash + let (selected_sighash, _) = signer_accepted_responses + .iter() + .min_by_key(|(sighash, _)| *sighash) + .copied() + .expect("No `BlockResponse::Accepted` messages received"); + + // Filter only responses for selected block and collect unique pubkeys and signatures + let (pubkeys, signatures): (HashSet<_>, HashSet<_>) = signer_accepted_responses + .into_iter() + .filter(|(hash, _)| *hash == selected_sighash) + .map(|(msg, sig)| { + let pubkey = Secp256k1PublicKey::recover_to_pubkey(msg.bits(), &sig) + .expect("Failed to recover pubkey"); + (pubkey, sig) + }) + .unzip(); + + assert_eq!(pubkeys.len(), unique_signers); + assert_eq!(signatures.len(), unique_signers); + signer_test.shutdown(); } From 3bd185f82481b070e3f8431aef38c92221a081f3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:32:11 -0700 Subject: [PATCH 367/910] Adding changes for 2.5.0.0.6 --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb6061cb9d4..2cca1d273ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,27 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-tenure-info?` added - `get-block-info?` removed +## [2.5.0.0.7] + +### Added + +feat: Neon mock miner replay (#5060) +chore: add warn logs for block validate rejections (#5079) + +### Changed + +- bugfix/boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) +- Fix: Revert BurnchainHeaderHash serialization change (#5094) +- Feat/mock signing revamp (#5070) +- Fix block proposal rejection test (#5084) +- Fix/multi miner fixes jude (#5040) + +## [2.5.0.0.6] + +### Changed + +- If there is a getchunk/putchunk that fails due to a stale (or future) version NACK, the StackerDB sync state machine should immediately retry sync (#5066) + ## [2.5.0.0.5] ### Added From 88c95c6817f3412d5dc7b41c3d0dc8a9b3070ed3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:51:47 -0700 Subject: [PATCH 368/910] Update changelog for release --- CHANGELOG.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cca1d273ab..0f9a419f1d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,16 +21,16 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -feat: Neon mock miner replay (#5060) -chore: add warn logs for block validate rejections (#5079) +- Neon mock miner replay (#5060) +- Add warn logs for block validate rejections (#5079) ### Changed -- bugfix/boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) -- Fix: Revert BurnchainHeaderHash serialization change (#5094) -- Feat/mock signing revamp (#5070) +- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) +- Revert BurnchainHeaderHash serialization change (#5094) +- Mock signing revamp (#5070) - Fix block proposal rejection test (#5084) -- Fix/multi miner fixes jude
(#5040) +- Multi miner fixes jude (#5040) ## [2.5.0.0.6] From 1584e9c422782e6e5e3f342f2e334f0e3d9e41b1 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:52:51 -0700 Subject: [PATCH 369/910] Update changelog for release --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f9a419f1d9..2add5b99f65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,15 +21,15 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- Neon mock miner replay (#5060) - Add warn logs for block validate rejections (#5079) +- Neon mock miner replay (#5060) ### Changed -- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) - Revert BurnchainHeaderHash serialization change (#5094) -- Mock signing revamp (#5070) +- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) - Fix block proposal rejection test (#5084) +- Mock signing revamp (#5070) - Multi miner fixes jude (#5040) ## [2.5.0.0.6] From 03b373d837b7b3dc2a38efd0b8304b2d1ea33090 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:49:59 -0400 Subject: [PATCH 370/910] chore: remove spurious deadlock condition arising from needlessly opening a transaction whenever we open the sortition DB --- stackslib/src/chainstate/burn/db/sortdb.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3cf13a8a55e..808cb73c1f0 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3456,6 +3456,14 @@ impl SortitionDB { SortitionDB::apply_schema_9(&tx.deref(), epochs)?; tx.commit()?; } else if version == expected_version { + // this transaction is almost never needed + let validated_epochs = StacksEpoch::validate_epochs(epochs); + let existing_epochs = Self::get_stacks_epochs(self.conn())?; + if existing_epochs == validated_epochs { + return Ok(()); + } + + // epochs are out of date let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; tx.commit()?; From 3d56c79f28ecac584a4ac0ce843c0d2a26401859 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:50:23 -0400 Subject: [PATCH 371/910] fix: invoke both the epoch2 and nakamoto block announcement handlers when in the transition reward cycle --- stackslib/src/chainstate/coordinator/mod.rs | 31 ++++++++++----------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 72e44f981c0..2849b749047 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -543,20 +543,24 @@ impl< in_nakamoto_epoch: false, }; - let mut nakamoto_available = false; loop { - if nakamoto_available - || inst - .can_process_nakamoto() - .expect("FATAL: could not determine if Nakamoto is available") - { - // short-circuit to avoid gratuitous I/O - nakamoto_available = true; - if !inst.handle_comms_nakamoto(&comms, miner_status.clone()) { + let bits = comms.wait_on(); + if inst.in_subsequent_nakamoto_reward_cycle() { + debug!("Coordinator: in subsequent Nakamoto reward cycle"); + if !inst.handle_comms_nakamoto(bits, miner_status.clone()) { + return; + } + } else if inst.in_first_nakamoto_reward_cycle() { + debug!("Coordinator: in first Nakamoto reward cycle"); + if !inst.handle_comms_nakamoto(bits, 
miner_status.clone()) { + return; + } + if !inst.handle_comms_epoch2(bits, miner_status.clone()) { return; } } else { - if !inst.handle_comms_epoch2(&comms, miner_status.clone()) { + debug!("Coordinator: in epoch2 reward cycle"); + if !inst.handle_comms_epoch2(bits, miner_status.clone()) { return; } } @@ -566,13 +570,8 @@ impl< /// This is the Stacks 2.x coordinator loop body, which handles communications /// from the given `comms`. It returns `true` if the coordinator is still running, and `false` /// if not. - pub fn handle_comms_epoch2( - &mut self, - comms: &CoordinatorReceivers, - miner_status: Arc>, - ) -> bool { + pub fn handle_comms_epoch2(&mut self, bits: u8, miner_status: Arc>) -> bool { // timeout so that we handle Ctrl-C a little gracefully - let bits = comms.wait_on(); if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { signal_mining_blocked(miner_status.clone()); debug!("Received new stacks block notice"); From bcb0abc310ceb7f5813024eb0be8438dc7a2b4a7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:50:49 -0400 Subject: [PATCH 372/910] fix: remove buggy check to see if we're in nakamoto, and just rely on reward cycles --- .../chainstate/nakamoto/coordinator/mod.rs | 94 +++++++------------ 1 file changed, 35 insertions(+), 59 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index de884a8d9c9..cfbf18f0629 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -614,38 +614,11 @@ impl< B: BurnchainHeaderReader, > ChainsCoordinator<'a, T, N, U, CE, FE, B> { - /// Check to see if we're in the last of the 2.x epochs, and we have the first PoX anchor block - /// for epoch 3. - /// NOTE: the first block in epoch3 must be after the first block in the reward phase, so as - /// to ensure that the PoX stackers have been selected for this cycle. This means that we - /// don't proceed to process Nakamoto blocks until the reward cycle has begun. Also, the last - /// reward cycle of epoch2 _must_ be PoX so we have stackers who can sign. - pub fn can_process_nakamoto(&mut self) -> Result { - let canonical_sortition_tip = self - .canonical_sortition_tip - .clone() - .expect("FAIL: checking epoch status, but we don't have a canonical sortition tip"); - - let canonical_sn = - SortitionDB::get_block_snapshot(self.sortition_db.conn(), &canonical_sortition_tip)? - .expect("FATAL: canonical sortition tip has no sortition"); + /// Get the first nakamoto reward cycle + fn get_first_nakamoto_reward_cycle(&self) -> u64 { + let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn()) + .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB for epochs: {:?}", &e)); - // what epoch are we in? - let cur_epoch = - SortitionDB::get_stacks_epoch(self.sortition_db.conn(), canonical_sn.block_height)? 
- .unwrap_or_else(|| { - panic!( - "BUG: no epoch defined at height {}", - canonical_sn.block_height - ) - }); - - if cur_epoch.epoch_id < StacksEpochId::Epoch30 { - return Ok(false); - } - - // in epoch3 - let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; let epoch_3_idx = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) .expect("FATAL: epoch3 not defined"); @@ -655,32 +628,36 @@ impl< .block_height_to_reward_cycle(epoch3.start_height) .expect("FATAL: epoch3 block height has no reward cycle"); - // NOTE(safety): this is not guaranteed to be the canonical best Stacks tip. - // However, it's safe to use here because we're only interested in loading up the first - // Nakamoto reward set, which uses the epoch2 anchor block selection algorithm. There will - // only be one such reward set in epoch2 rules, since it's tied to a specific block-commit - // (note that this is not true for reward sets generated in Nakamoto prepare phases). - let (local_best_stacks_ch, local_best_stacks_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortition_db.conn())?; - let local_best_stacks_tip = - StacksBlockId::new(&local_best_stacks_ch, &local_best_stacks_bhh); - - // only proceed if we have processed the _anchor block_ for this reward cycle. - let Some((rc_info, _)) = load_nakamoto_reward_set( - self.burnchain - .block_height_to_reward_cycle(canonical_sn.block_height) - .expect("FATAL: snapshot has no reward cycle"), - &canonical_sn.sortition_id, - &self.burnchain, - &mut self.chain_state_db, - &local_best_stacks_tip, - &self.sortition_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(false); - }; - Ok(rc_info.reward_cycle >= first_epoch3_reward_cycle) + first_epoch3_reward_cycle + } + + /// Get the current reward cycle + fn get_current_reward_cycle(&self) -> u64 { + let canonical_sortition_tip = self.canonical_sortition_tip.clone().unwrap_or_else(|| { + panic!("FAIL: checking epoch status, but we don't have a canonical sortition tip") + }); + + let canonical_sn = + SortitionDB::get_block_snapshot(self.sortition_db.conn(), &canonical_sortition_tip) + .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB: {:?}", &e)) + .unwrap_or_else(|| panic!("FATAL: canonical sortition tip has no sortition")); + + let cur_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"); + + cur_reward_cycle + } + + /// Are we in the first-ever Nakamoto reward cycle? + pub fn in_first_nakamoto_reward_cycle(&self) -> bool { + self.get_current_reward_cycle() == self.get_first_nakamoto_reward_cycle() + } + + /// Are we in the second or later Nakamoto reward cycle? + pub fn in_subsequent_nakamoto_reward_cycle(&self) -> bool { + self.get_current_reward_cycle() > self.get_first_nakamoto_reward_cycle() } /// This is the main loop body for the coordinator in epoch 3. @@ -688,11 +665,10 @@ impl< /// Returns false otherwise. 
pub fn handle_comms_nakamoto( &mut self, - comms: &CoordinatorReceivers, + bits: u8, miner_status: Arc>, ) -> bool { // timeout so that we handle Ctrl-C a little gracefully - let bits = comms.wait_on(); if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { signal_mining_blocked(miner_status.clone()); debug!("Received new Nakamoto stacks block notice"); From 0946b3dfe8b9808e0f38ba08c1adfb4f5e57ca28 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:51:11 -0400 Subject: [PATCH 373/910] chore: doc fix --- stackslib/src/net/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 96c5be3d30e..0fa8688abb3 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1502,7 +1502,7 @@ pub struct NetworkResult { pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, - /// The consensus hash of the burnchain tip (prefixed `rc_` for historical reasons) + /// The consensus hash of the stacks tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap, From e7a93c953e74734de75866a53286cd7e4e376fda Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:51:26 -0400 Subject: [PATCH 374/910] chore: log stacks tip height --- .../stacks-node/src/nakamoto_node/relayer.rs | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 435305472a9..47016565876 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -812,6 +812,8 @@ impl RelayerThread { reason, )?; + debug!("Relayer: starting new tenure thread"); + let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}",)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) @@ -1036,6 +1038,25 @@ impl RelayerThread { return Err(NakamotoNodeError::StacksTipChanged); } + let Some(tip_height) = NakamotoChainState::get_block_header( + self.chainstate.db(), + &StacksBlockId::new(&tip_block_ch, &tip_block_bh), + ) + .map_err(|e| { + warn!( + "Relayer: failed to load tip {}/{}: {:?}", + &tip_block_ch, &tip_block_bh, &e + ); + NakamotoNodeError::ParentNotFound + })? 
+ .map(|header| header.stacks_block_height) else { + warn!( + "Relayer: failed to load height for tip {}/{} (got None)", + &tip_block_ch, &tip_block_bh + ); + return Err(NakamotoNodeError::ParentNotFound); + }; + // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let txid = self @@ -1057,6 +1078,7 @@ impl RelayerThread { "Relayer: Submitted block-commit"; "tip_consensus_hash" => %tip_block_ch, "tip_block_hash" => %tip_block_bh, + "tip_height" => %tip_height, "tip_block_id" => %StacksBlockId::new(&tip_block_ch, &tip_block_bh), "txid" => %txid, ); From cd4b3cd74bff2cd0c7c44e7c47e2f5949b6f585a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:51:43 -0400 Subject: [PATCH 375/910] chore: add integration test to see that a follower can cold-boot into nakamoto --- .../src/tests/nakamoto_integrations.rs | 208 ++++++++++++++++++ 1 file changed, 208 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf64..ea431d9cd59 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3340,6 +3340,214 @@ fn follower_bootup() { follower_thread.join().unwrap(); } +/// This test boots a follower node using the block downloader, but the follower will be multiple +/// Nakamoto reward cycles behind. +#[test] +#[ignore] +fn follower_bootup_across_multiple_cycles() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.burnchain.max_rbf = 10_000_000; + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // mine two reward cycles + for _ in 0..btc_regtest_controller + .get_burnchain() + .pox_constants + .reward_cycle_length + * 2 + { + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + } + + info!("Nakamoto miner has advanced two reward cycles"); + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + "block_height_pre_3_0" => block_height_pre_3_0 + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + + // spawn follower + let mut follower_conf = naka_conf.clone(); + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.pox_sync_sample_secs = 30; + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: 
neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + wait_for(300, || { + sleep_ms(1000); + let Ok(follower_node_info) = get_chain_info_result(&follower_conf) else { + return Ok(false); + }; + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + Ok( + follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash(), + ) + }) + .unwrap(); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} + #[test] #[ignore] fn stack_stx_burn_op_integration_test() { From 8825583e429447a44f3c9269544e0dc186040548 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:52:33 -0400 Subject: [PATCH 376/910] chore: add follower cold-boot integration test to CI --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 27e76a646d6..7da1fe010df 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -108,6 +108,7 @@ jobs: - tests::nakamoto_integrations::continue_tenure_extend - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners + - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From dd98a2cc5591d722ef662976981da1eb926338cd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 23:22:33 -0400 Subject: [PATCH 377/910] fix: some tests don't have epoch3 defined, so don't panic while testing if that's the case --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index cfbf18f0629..309d02dd545 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -619,8 +619,15 @@ impl< let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn()) .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB for epochs: {:?}", &e)); - let epoch_3_idx = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) - .expect("FATAL: epoch3 not defined"); + let Some(epoch_3_idx) = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) else { + // this is only reachable in tests + if cfg!(any(test, feature = "testing")) { + return u64::MAX; + } + else { + panic!("FATAL: epoch3 not defined"); + } + }; let epoch3 = &all_epochs[epoch_3_idx]; let first_epoch3_reward_cycle = self From 93a5c71de28d0cd1549ff11bab479690838a24be Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 23:34:47 -0400 Subject: [PATCH 378/910] fix: cargo fmt --- 
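A note on the epoch-3 fallback introduced above (and reformatted by the patch below): the let-else control flow reduces to the following sketch. This is a minimal stand-alone sketch with simplified stand-ins for `StacksEpoch::find_epoch_by_id` and the epoch table, not the actual stackslib API.

```rust
// Minimal sketch of the let-else fallback from the coordinator patch above:
// if epoch 3 is missing (reachable only under test configurations), return a
// sentinel reward cycle instead of panicking. Types here are simplified
// stand-ins, not the actual stackslib definitions.
fn find_epoch_by_id(epochs: &[(u32, u64)], epoch_id: u32) -> Option<usize> {
    epochs.iter().position(|(id, _)| *id == epoch_id)
}

fn first_epoch3_reward_cycle(epochs: &[(u32, u64)]) -> u64 {
    let Some(epoch_3_idx) = find_epoch_by_id(epochs, 30) else {
        // this branch is only reachable in tests
        if cfg!(any(test, feature = "testing")) {
            return u64::MAX;
        } else {
            panic!("FATAL: epoch3 not defined");
        }
    };
    // the real code feeds the epoch's start height into a reward-cycle lookup
    epochs[epoch_3_idx].1
}

fn main() {
    let epochs = [(24u32, 100u64), (25, 200), (30, 300)];
    assert_eq!(first_epoch3_reward_cycle(&epochs), 300);
}
```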
stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 309d02dd545..82977cadc1c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -619,12 +619,12 @@ impl< let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn()) .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB for epochs: {:?}", &e)); - let Some(epoch_3_idx) = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) else { + let Some(epoch_3_idx) = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) + else { // this is only reachable in tests if cfg!(any(test, feature = "testing")) { return u64::MAX; - } - else { + } else { panic!("FATAL: epoch3 not defined"); } }; From fd9f569dc52bf4ef9139079c44d92bae986e7441 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:54:37 -0400 Subject: [PATCH 379/910] fix: handle rc_consensus_hash mismatch in one place --- stackslib/src/net/chat.rs | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 95d6fbac82a..4becf3891d3 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1917,25 +1917,14 @@ impl ConversationP2P { /// Generates a Nack if we don't have this DB, or if the request's consensus hash is invalid. fn make_stacker_db_getchunkinv_response( network: &PeerNetwork, + sortdb: &SortitionDB, getchunkinv: &StackerDBGetChunkInvData, ) -> Result<StacksMessageType, net_error> { - let local_peer = network.get_local_peer(); - let burnchain_view = network.get_chain_view(); - - // remote peer's Stacks chain tip is different from ours, meaning it might have a different - // stackerdb configuration view (and we won't be able to authenticate their chunks, and - // vice versa) - if burnchain_view.rc_consensus_hash != getchunkinv.rc_consensus_hash { - debug!( - "{:?}: NACK StackerDBGetChunkInv; {} != {}", - local_peer, &burnchain_view.rc_consensus_hash, &getchunkinv.rc_consensus_hash - ); - return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleView, - ))); - } - - Ok(network.make_StackerDBChunksInv_or_Nack(&getchunkinv.contract_id)) + Ok(network.make_StackerDBChunksInv_or_Nack( + sortdb, + &getchunkinv.contract_id, + &getchunkinv.rc_consensus_hash, + )) } /// Handle an inbound StackerDBGetChunkInv request.
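To summarize the view-mismatch handling that this patch centralizes (and that later patches in this series extend with a `FutureView` code), the decision reduces to the sketch below. It uses simplified stand-in types rather than the real `ConversationP2P`/`PeerNetwork` API, and `FUTURE_VIEW` anticipates a NACK code added a few patches later.

```rust
// Sketch of the centralized view check: a request carrying a different
// rc_consensus_hash than ours is NACKed as StaleView when we recognize the
// sender's view (the sender is behind us) and as FutureView when we do not
// (we may be the stale side). Simplified stand-ins, not the real API.
#[derive(PartialEq)]
struct ConsensusHash([u8; 20]);

const STALE_VIEW: u32 = 8;
const FUTURE_VIEW: u32 = 10;

enum Reply {
    ChunksInv,                // views agree: answer with the slot inventory
    Nack { error_code: u32 }, // views disagree: report which side looks stale
}

fn reply_for_view(ours: &ConsensusHash, theirs: &ConsensusHash, theirs_known: bool) -> Reply {
    if ours == theirs {
        Reply::ChunksInv
    } else if theirs_known {
        // we recognize the sender's view, so the sender is behind us
        Reply::Nack { error_code: STALE_VIEW }
    } else {
        // we do not recognize it, so we may be the stale side
        Reply::Nack { error_code: FUTURE_VIEW }
    }
}

fn main() {
    let ours = ConsensusHash([1; 20]);
    let theirs = ConsensusHash([2; 20]);
    assert!(matches!(
        reply_for_view(&ours, &theirs, false),
        Reply::Nack { error_code: FUTURE_VIEW }
    ));
    assert!(matches!(reply_for_view(&ours, &ours, true), Reply::ChunksInv));
}
```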
@@ -1943,10 +1932,12 @@ impl ConversationP2P { fn handle_stacker_db_getchunkinv( &mut self, network: &PeerNetwork, + sortdb: &SortitionDB, preamble: &Preamble, getchunkinv: &StackerDBGetChunkInvData, ) -> Result<ReplyHandleP2P, net_error> { - let response = ConversationP2P::make_stacker_db_getchunkinv_response(network, getchunkinv)?; + let response = + ConversationP2P::make_stacker_db_getchunkinv_response(network, sortdb, getchunkinv)?; self.sign_and_reply( network.get_local_peer(), network.get_chain_view(), @@ -2363,7 +2354,7 @@ impl ConversationP2P { } } StacksMessageType::StackerDBGetChunkInv(ref getchunkinv) => { - self.handle_stacker_db_getchunkinv(network, &msg.preamble, getchunkinv) + self.handle_stacker_db_getchunkinv(network, sortdb, &msg.preamble, getchunkinv) } StacksMessageType::StackerDBGetChunk(ref getchunk) => { self.handle_stacker_db_getchunk(network, &msg.preamble, getchunk) From dcdb3aa2e963ff942dd8fa72009ac9ca35e3730d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:54:51 -0400 Subject: [PATCH 380/910] chore: document more connection options, and add maximum stackerdb message buffer size --- stackslib/src/net/connection.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 35779002793..0d4d5aafd6e 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -378,11 +378,18 @@ pub struct ConnectionOptions { pub max_microblock_push: u64, pub antientropy_retry: u64, pub antientropy_public: bool, + /// maximum number of Stacks 2.x BlocksAvailable messages that can be buffered before processing pub max_buffered_blocks_available: u64, + /// maximum number of Stacks 2.x MicroblocksAvailable that can be buffered before processing pub max_buffered_microblocks_available: u64, + /// maximum number of Stacks 2.x pushed Block messages we can buffer before processing pub max_buffered_blocks: u64, + /// maximum number of Stacks 2.x pushed Microblock messages we can buffer before processing pub max_buffered_microblocks: u64, + /// maximum number of pushed Nakamoto Block messages we can buffer before processing pub max_buffered_nakamoto_blocks: u64, + /// maximum number of pushed StackerDB chunk messages we can buffer before processing + pub max_buffered_stackerdb_chunks: u64, /// how often to query a remote peer for its mempool, in seconds pub mempool_sync_interval: u64, /// how many transactions to ask for in a mempool query @@ -522,6 +529,7 @@ impl std::default::Default for ConnectionOptions { max_buffered_blocks: 5, max_buffered_microblocks: 1024, max_buffered_nakamoto_blocks: 1024, + max_buffered_stackerdb_chunks: 4096, mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) From ecb28f6a9c2d1622cc2080f30a03fc5f6b502d3e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:55:40 -0400 Subject: [PATCH 381/910] chore: document NACK error codes, and add one for an unrecognized (future) StackerDB view --- stackslib/src/net/mod.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 96c5be3d30e..507a514144e 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -91,7 +91,7 @@ use crate::net::http::{ use crate::net::httpcore::{ HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest,
StacksHttpResponse, TipRequest, }; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{PeerNetwork, PendingMessages}; use crate::util_lib::bloom::{BloomFilter, BloomNodeHasher}; use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; @@ -1039,15 +1039,26 @@ pub struct NackData { pub error_code: u32, } pub mod NackErrorCodes { + /// A handshake is required before the protocol can proceed pub const HandshakeRequired: u32 = 1; + /// The protocol could not find a required burnchain block pub const NoSuchBurnchainBlock: u32 = 2; + /// The requester is sending too many requests pub const Throttled: u32 = 3; + /// The state the requester referenced refers to a PoX fork we do not recognize pub const InvalidPoxFork: u32 = 4; + /// The message is inappropriate for this step of the protocol pub const InvalidMessage: u32 = 5; + /// The referenced StackerDB does not exist on this node pub const NoSuchDB: u32 = 6; + /// The referenced StackerDB chunk is out-of-date with respect to our replica pub const StaleVersion: u32 = 7; + /// The referenced StackerDB state view is out-of-date with respect to our replica pub const StaleView: u32 = 8; + /// The referenced StackerDB chunk is stale locally relative to the requested version pub const FutureVersion: u32 = 9; + /// The referenced StackerDB state view is stale locally relative to the requested version + pub const FutureView: u32 = 10; } #[derive(Debug, Clone, PartialEq)] @@ -1600,11 +1611,8 @@ impl NetworkResult { || self.has_stackerdb_chunks() } - pub fn consume_unsolicited( - &mut self, - unhandled_messages: HashMap<NeighborKey, Vec<StacksMessage>>, - ) { - for (neighbor_key, messages) in unhandled_messages.into_iter() { + pub fn consume_unsolicited(&mut self, unhandled_messages: PendingMessages) { + for ((_event_id, neighbor_key), messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { match message.payload { StacksMessageType::Blocks(block_data) => { From 9857504789be8a8c9482edb4fe781d7732fe9da7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:56:02 -0400 Subject: [PATCH 382/910] feat: buffer messages to be retried when either the sortition view changes, or the stacks tip's tenure changes --- stackslib/src/net/p2p.rs | 71 +++++++++++++++++++++++++++++++++------- 1 file changed, 60 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 72279e41e2e..9ffbda34c27 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -196,7 +196,7 @@ pub enum PeerNetworkWorkState { } pub type PeerMap = HashMap<usize, ConversationP2P>; -pub type PendingMessages = HashMap<usize, Vec<StacksMessage>>; +pub type PendingMessages = HashMap<(usize, NeighborKey), Vec<StacksMessage>>; pub struct ConnectingPeer { socket: mio_net::TcpStream, @@ -416,9 +416,13 @@ pub struct PeerNetwork { /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent - /// chain view update. + /// burnchain view update.
pub pending_messages: PendingMessages, + /// Pending messages (StackerDBPushChunk) that we can't process yet, but might be able + /// to process on a subsequent Stacks view update + pub pending_stacks_messages: PendingMessages, + // fault injection -- force disconnects fault_last_disconnect: u64, @@ -575,6 +579,7 @@ impl PeerNetwork { antientropy_start_reward_cycle: 0, pending_messages: PendingMessages::new(), + pending_stacks_messages: PendingMessages::new(), fault_last_disconnect: 0, @@ -1902,8 +1907,10 @@ impl PeerNetwork { "{:?}: Remove inventory state for Nakamoto {:?}", &self.local_peer, &nk ); - inv_state.del_peer(&NeighborAddress::from_neighbor_key(nk, pubkh)); + inv_state.del_peer(&NeighborAddress::from_neighbor_key(nk.clone(), pubkh)); } + self.pending_messages.remove(&(event_id, nk.clone())); + self.pending_stacks_messages.remove(&(event_id, nk.clone())); } match self.network { @@ -1922,7 +1929,6 @@ impl PeerNetwork { self.relay_handles.remove(&event_id); self.peers.remove(&event_id); - self.pending_messages.remove(&event_id); } /// Deregister by neighbor key @@ -4368,7 +4374,7 @@ impl PeerNetwork { sortdb: &SortitionDB, chainstate: &mut StacksChainState, ibd: bool, - ) -> Result<HashMap<NeighborKey, Vec<StacksMessage>>, net_error> { + ) -> Result<PendingMessages, net_error> { // update burnchain snapshot if we need to (careful -- it's expensive) let canonical_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = @@ -4407,8 +4413,6 @@ impl PeerNetwork { )?; } - let mut ret: HashMap<NeighborKey, Vec<StacksMessage>> = HashMap::new(); - let (parent_stacks_tip, tenure_start_block_id) = if stacks_tip_changed { let tenure_start_block_id = if let Some(header) = NakamotoChainState::get_nakamoto_tenure_start_block_header( @@ -4576,12 +4580,45 @@ impl PeerNetwork { } // can't fail after this point - + let mut ret = PendingMessages::new(); if burnchain_tip_changed { // try processing previously-buffered messages (best-effort) + debug!( + "{:?}: handle unsolicited stacks messages: burnchain changed {} != {}, {} buffered", + self.get_local_peer(), + &self.burnchain_tip.consensus_hash, + &canonical_sn.consensus_hash, + self.pending_messages + .iter() + .fold(0, |acc, (_, msgs)| acc + msgs.len()) + ); let buffered_messages = mem::replace(&mut self.pending_messages, HashMap::new()); - ret = - self.handle_unsolicited_messages(sortdb, chainstate, buffered_messages, ibd, false); + let unhandled = self.handle_unsolicited_sortition_messages( + sortdb, + chainstate, + buffered_messages, + ibd, + false, + ); + ret.extend(unhandled); + } + + if self.stacks_tip.consensus_hash != stacks_tip_ch { + // try processing previously-buffered messages (best-effort) + debug!( + "{:?}: handle unsolicited stacks messages: tenure changed {} != {}, {} buffered", + self.get_local_peer(), + &self.burnchain_tip.consensus_hash, + &canonical_sn.consensus_hash, + self.pending_stacks_messages + .iter() + .fold(0, |acc, (_, msgs)| acc + msgs.len()) + ); + let buffered_stacks_messages = + mem::replace(&mut self.pending_stacks_messages, HashMap::new()); + let unhandled = + self.handle_unsolicited_stacks_messages(sortdb, buffered_stacks_messages, false); + ret.extend(unhandled); } // update cached stacks chain view for /v2/info and /v3/tenures/info @@ -4657,8 +4694,20 @@ impl PeerNetwork { ); self.deregister_peer(error_event); } + + // filter out unsolicited messages and buffer up ones that might become processable + let unhandled_messages = self.authenticate_unsolicited_messages(unsolicited_messages); + let unhandled_messages =
self.handle_unsolicited_sortition_messages( + sortdb, + chainstate, + unhandled_messages, + ibd, + true, + ); + let unhandled_messages = - self.handle_unsolicited_messages(sortdb, chainstate, unsolicited_messages, ibd, true); + self.handle_unsolicited_stacks_messages(sortdb, unhandled_messages, true); + network_result.consume_unsolicited(unhandled_messages); // schedule now-authenticated inbound convos for pingback From 9f308aea7b397369c653321c56b285dd7e281ac7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:56:35 -0400 Subject: [PATCH 383/910] feat: consider buffering an unsolicited stackerdb pushed chunk if its rc_consensus_hash is potentially in the future --- stackslib/src/net/stackerdb/mod.rs | 77 +++++++++++++++++++++++++----- 1 file changed, 65 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index d310998a194..901a0c00478 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -497,11 +497,16 @@ impl PeerNetwork { Ok(results) } - /// Create a StackerDBChunksInv, or a Nack if the requested DB isn't replicated here + /// Create a StackerDBChunksInv, or a Nack if the requested DB isn't replicated here. + /// Runs in response to a received StackerDBGetChunksInv or a StackerDBPushChunk pub fn make_StackerDBChunksInv_or_Nack( &self, + sortdb: &SortitionDB, contract_id: &QualifiedContractIdentifier, + rc_consensus_hash: &ConsensusHash, ) -> StacksMessageType { + // N.B. check that the DB exists first, since we want to report StaleView only if the DB + // exists let slot_versions = match self.stackerdbs.get_slot_versions(contract_id) { Ok(versions) => versions, Err(e) => { @@ -517,6 +522,20 @@ impl PeerNetwork { } }; + // this DB exists, but is the view of this message recent? + if &self.get_chain_view().rc_consensus_hash != rc_consensus_hash { + // do we know about this consensus hash? + if let Ok(true) = + SortitionDB::has_block_snapshot_consensus(sortdb.conn(), rc_consensus_hash) + { + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (remote is stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); + } else { + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (local is potentially stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + return StacksMessageType::Nack(NackData::new(NackErrorCodes::FutureView)); + } + } + let num_outbound_replicas = self.count_outbound_stackerdb_replicas(contract_id) as u32; debug!( @@ -598,8 +617,11 @@ impl PeerNetwork { } /// Handle unsolicited StackerDBPushChunk messages. - /// Generate a reply handle for a StackerDBChunksInv to be sent to the remote peer, in which - /// the inventory vector is updated with this chunk's data. + /// Check to see that the message can be stored or buffered. + /// + /// Optionally, make a reply handle for a StackerDBChunksInv to be sent to the remote peer, in which + /// the inventory vector is updated with this chunk's data. Or, send a NACK if the chunk + /// cannot be buffered or stored. /// /// Note that this can happen *during* a StackerDB sync's execution, so be very careful about /// modifying a state machine's contents! 
The only modification possible here is to wakeup @@ -609,17 +631,30 @@ impl PeerNetwork { /// which this chunk arrived will have already bandwidth-throttled the remote peer, and because /// messages can be arbitrarily delayed (and bunched up) by the network anyway. /// - /// Return Ok(true) if we should store the chunk - /// Return Ok(false) if we should drop it. + /// Returns (true, x) if we should buffer the message and try processing it again later. + /// Returns (false, x) if we should *not* buffer this message, because it either *won't* be valid + /// later, or if it can be stored right now. + /// + /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. + /// Returns (x, false) if we should *not* forward the message to the relayer, because it will + /// *not* be processed. pub fn handle_unsolicited_StackerDBPushChunk( &mut self, + sortdb: &SortitionDB, event_id: usize, preamble: &Preamble, chunk_data: &StackerDBPushChunkData, - ) -> Result<bool, net_error> { - let mut payload = self.make_StackerDBChunksInv_or_Nack(&chunk_data.contract_id); + send_reply: bool, + ) -> Result<(bool, bool), net_error> { + let mut payload = self.make_StackerDBChunksInv_or_Nack( + sortdb, + &chunk_data.contract_id, + &chunk_data.rc_consensus_hash, + ); match payload { StacksMessageType::StackerDBChunkInv(ref mut data) => { + // this message corresponds to an existing DB, and comes from the same view of the + // stacks chain tip let stackerdb_config = if let Some(config) = self.get_stacker_db_configs().get(&chunk_data.contract_id) { @@ -630,7 +665,7 @@ impl PeerNetwork { "StackerDBChunk for {} ID {} is not available locally", &chunk_data.contract_id, chunk_data.chunk_data.slot_id ); - return Ok(false); + return Ok((false, false)); }; // sanity check @@ -640,7 +675,7 @@ impl PeerNetwork { &chunk_data.chunk_data, &data.slot_versions, )? { - return Ok(false); + return Ok((false, false)); } // patch inventory -- we'll accept this chunk @@ -654,10 +689,28 @@ impl PeerNetwork { } } } - _ => {} + StacksMessageType::Nack(ref nack_data) => { + if nack_data.error_code == NackErrorCodes::FutureView { + // chunk corresponds to a known DB but the view of the sender is potentially in + // the future. + // We should buffer this in case it becomes storable, but don't + // store it yet.
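+ // (So in the tuple returned below: (true, false) means "buffer the chunk and
+ // retry it once the Stacks view advances", (false, true) means "storable now,
+ // hand it to the relayer", and (false, false) means "drop it", per the
+ // function's doc comment above.)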
+ return Ok((true, false)); + } else { + return Ok((false, false)); + } + } + _ => { + // don't recognize the message, so don't buffer + return Ok((false, false)); + } + } + + if !send_reply { + return Ok((false, true)); } - // this is a reply to the pushed chunk + // this is a reply to the pushed chunk, and we can store it right now (so don't buffer it) let resp = self.sign_for_p2p_reply(event_id, preamble.seq, payload)?; let handle = self.send_p2p_message( event_id, @@ -665,6 +718,6 @@ impl PeerNetwork { self.connection_opts.neighbor_request_timeout, )?; self.add_relay_handle(event_id, handle); - Ok(true) + Ok((false, true)) } } From 6c53f9a408298090ea90ffb3cbc937cdbf8df529 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:57:13 -0400 Subject: [PATCH 384/910] chore: add test to verify that a Stacks node can receive buffered StackerDB pushed chunks --- stackslib/src/net/stackerdb/tests/sync.rs | 211 ++++++++++++++++++++++ 1 file changed, 211 insertions(+) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 9227eedecc7..e9474d9abfa 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -32,6 +32,7 @@ use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; use crate::net::p2p::PeerNetwork; use crate::net::relay::Relayer; use crate::net::stackerdb::db::SlotValidation; @@ -829,6 +830,216 @@ fn test_stackerdb_push_relayer() { }) } +/// Verify that the relayer will push stackerdb chunks, AND, those chunks will get buffered if the +/// recipient has not yet processed the sortition. +/// Replica A has the data. +/// Replica B receives the data via StackerDB sync +/// Replica C receives the data from B's relayer pushes, but is not yet at the Stacks tip that A +/// and B are on. +/// Replica C processes them all when the Stacks tip advances +#[test] +fn test_stackerdb_push_relayer_late_chunks() { + with_timeout(600, move || { + std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT + 106); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 108); + let mut peer_3_config = TestPeerConfig::from_port(BASE_PORT + 110); + + peer_1_config.allowed = -1; + peer_2_config.allowed = -1; + peer_3_config.allowed = -1; + + // short-lived walks... 
+ peer_1_config.connection_opts.walk_max_duration = 10; + peer_2_config.connection_opts.walk_max_duration = 10; + peer_3_config.connection_opts.walk_max_duration = 10; + + peer_3_config.connection_opts.disable_stackerdb_sync = true; + + // peer 1 crawls peer 2, and peer 2 crawls peer 1 and peer 3, and peer 3 crawls peer 2 + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_3_config.to_neighbor()); + peer_3_config.add_neighbor(&peer_2_config.to_neighbor()); + + // set up stacker DBs for both peers + let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); + let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); + let idx_3 = add_stackerdb(&mut peer_3_config, Some(StackerDBConfig::template())); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_3 = TestPeer::new(peer_3_config); + + // advance peers 1 and 2, but not 3 + let mut peer_1_nonce = 0; + let mut peer_2_nonce = 0; + let mut peer_3_nonce = 0; + peer_1.tenure_with_txs(&vec![], &mut peer_1_nonce); + peer_2.tenure_with_txs(&vec![], &mut peer_2_nonce); + + // sanity check -- peer 1 and 2 are at the same tip, but not 3 + let sn1 = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb().conn()).unwrap(); + let sn2 = SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb().conn()).unwrap(); + let sn3 = SortitionDB::get_canonical_burn_chain_tip(peer_3.sortdb().conn()).unwrap(); + assert_eq!(sn1.consensus_hash, sn2.consensus_hash); + assert_eq!(sn1.block_height, sn2.block_height); + + assert_ne!(sn1.consensus_hash, sn3.consensus_hash); + assert_ne!(sn2.consensus_hash, sn3.consensus_hash); + assert!(sn3.block_height < sn1.block_height); + assert!(sn3.block_height < sn2.block_height); + + let st1 = SortitionDB::get_canonical_stacks_chain_tip_hash(peer_1.sortdb().conn()).unwrap(); + let st2 = SortitionDB::get_canonical_stacks_chain_tip_hash(peer_2.sortdb().conn()).unwrap(); + let st3 = SortitionDB::get_canonical_stacks_chain_tip_hash(peer_3.sortdb().conn()).unwrap(); + + assert_eq!(st1, st2); + assert_ne!(st1, st3); + assert_ne!(st2, st3); + + // peer 1 gets the DB + setup_stackerdb(&mut peer_1, idx_1, true, 10); + setup_stackerdb(&mut peer_2, idx_2, false, 10); + setup_stackerdb(&mut peer_3, idx_2, false, 10); + + // verify that peer 1 got the data + let peer_1_db_chunks = load_stackerdb(&peer_1, idx_1); + assert_eq!(peer_1_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); + assert!(peer_1_db_chunks[i].1.len() > 0); + } + + // verify that peer 2 and 3 did NOT get the data + let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); + assert_eq!(peer_2_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); + assert!(peer_2_db_chunks[i].1.len() == 0); + } + + let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); + assert_eq!(peer_3_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); + assert!(peer_3_db_chunks[i].1.len() == 0); + } + + let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); + let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); + let peer_3_db_configs = peer_3.config.get_stacker_db_configs(); + + let mut i = 0; + 
let mut advanced_tenure = false; + loop { + // run peer network state-machines + peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); + peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); + peer_3.network.stacker_db_configs = peer_3_db_configs.clone(); + + let res_1 = peer_1.step_with_ibd(false); + let res_2 = peer_2.step_with_ibd(false); + let res_3 = peer_3.step_with_ibd(false); + + if let Ok(res) = res_1 { + check_sync_results(&res); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_2 { + check_sync_results(&res); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_3 { + check_sync_results(&res); + peer_3 + .relayer + .process_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_3 + .relayer + .process_pushed_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + let db1 = load_stackerdb(&peer_1, idx_1); + let db2 = load_stackerdb(&peer_2, idx_2); + let db3 = load_stackerdb(&peer_3, idx_3); + + if db1 == db2 && db2 == db3 { + break; + } + i += 1; + + debug!("StackerDB sync step {}", i); + + let num_pending = peer_3 + .network + .pending_stacks_messages + .iter() + .fold(0, |acc, (_, msgs)| acc + msgs.len()); + debug!("peer_3.network.pending_stacks_messages: {}", num_pending); + + if num_pending >= 10 && !advanced_tenure { + debug!("======= Advancing peer 3 tenure ========"); + peer_3.tenure_with_txs(&vec![], &mut peer_3_nonce); + advanced_tenure = true; + } + } + + debug!("Completed stacker DB sync in {} step(s)", i); + }) +} + #[test] #[ignore] fn test_stackerdb_10_replicas_10_neighbors_line_10_chunks() { From fe41be243f653f8fab9d7261fc21eefe52930fbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:23:17 -0400 Subject: [PATCH 385/910] chore: API sync --- stackslib/src/net/chat.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 4becf3891d3..926340d7fef 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1917,11 +1917,11 @@ impl ConversationP2P { /// Generates a Nack if we don't have this DB, or if the request's consensus hash is invalid. 
fn make_stacker_db_getchunkinv_response( network: &PeerNetwork, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, getchunkinv: &StackerDBGetChunkInvData, ) -> Result<StacksMessageType, net_error> { Ok(network.make_StackerDBChunksInv_or_Nack( - sortdb, + chainstate, &getchunkinv.contract_id, &getchunkinv.rc_consensus_hash, )) @@ -1932,12 +1932,15 @@ impl ConversationP2P { fn handle_stacker_db_getchunkinv( &mut self, network: &PeerNetwork, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, preamble: &Preamble, getchunkinv: &StackerDBGetChunkInvData, ) -> Result<ReplyHandleP2P, net_error> { - let response = - ConversationP2P::make_stacker_db_getchunkinv_response(network, sortdb, getchunkinv)?; + let response = ConversationP2P::make_stacker_db_getchunkinv_response( + network, + chainstate, + getchunkinv, + )?; self.sign_and_reply( network.get_local_peer(), network.get_chain_view(), @@ -2354,7 +2357,7 @@ impl ConversationP2P { } } StacksMessageType::StackerDBGetChunkInv(ref getchunkinv) => { - self.handle_stacker_db_getchunkinv(network, sortdb, &msg.preamble, getchunkinv) + self.handle_stacker_db_getchunkinv(network, chainstate, &msg.preamble, getchunkinv) } StacksMessageType::StackerDBGetChunk(ref getchunk) => { self.handle_stacker_db_getchunk(network, &msg.preamble, getchunk) From f3298c06bf11045b5bd725b2e4d09688830e7cc1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:23:30 -0400 Subject: [PATCH 386/910] chore: API sync --- stackslib/src/net/p2p.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 9ffbda34c27..f0693c10a0f 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4616,8 +4616,11 @@ impl PeerNetwork { ); let buffered_stacks_messages = mem::replace(&mut self.pending_stacks_messages, HashMap::new()); - let unhandled = - self.handle_unsolicited_stacks_messages(sortdb, buffered_stacks_messages, false); + let unhandled = self.handle_unsolicited_stacks_messages( + chainstate, + buffered_stacks_messages, + false, + ); ret.extend(unhandled); } @@ -4706,7 +4709,7 @@ impl PeerNetwork { ); let unhandled_messages = - self.handle_unsolicited_stacks_messages(sortdb, unhandled_messages, true); + self.handle_unsolicited_stacks_messages(chainstate, unhandled_messages, true); network_result.consume_unsolicited(unhandled_messages); From d295f45f734493fd12764fe0efe2bbf5aa896019 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:23:38 -0400 Subject: [PATCH 387/910] fix: treat a stackerdb chunk as potentially from the future if its rc_consensus_hash does not correspond to a processed Stacks block (but it may correspond to a sortition) --- stackslib/src/net/stackerdb/mod.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 901a0c00478..a2de124793c 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -501,7 +501,7 @@ impl PeerNetwork { /// Runs in response to a received StackerDBGetChunksInv or a StackerDBPushChunk pub fn make_StackerDBChunksInv_or_Nack( &self, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, contract_id: &QualifiedContractIdentifier, rc_consensus_hash: &ConsensusHash, ) -> StacksMessageType { @@ -524,10 +524,13 @@ impl PeerNetwork { // this DB exists, but is the view of this message recent? if &self.get_chain_view().rc_consensus_hash != rc_consensus_hash { - // do we know about this consensus hash?
- if let Ok(true) = - SortitionDB::has_block_snapshot_consensus(sortdb.conn(), rc_consensus_hash) - { + // is there a Stacks block (or tenure) with this consensus hash? + let tip_block_id = self.stacks_tip.block_id(); + if let Ok(Some(_)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + &rc_consensus_hash, + ) { debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (remote is stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); } else { @@ -640,14 +643,14 @@ impl PeerNetwork { /// *not* be processed. pub fn handle_unsolicited_StackerDBPushChunk( &mut self, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, event_id: usize, preamble: &Preamble, chunk_data: &StackerDBPushChunkData, send_reply: bool, ) -> Result<(bool, bool), net_error> { let mut payload = self.make_StackerDBChunksInv_or_Nack( - sortdb, + chainstate, &chunk_data.contract_id, &chunk_data.rc_consensus_hash, ); From 4b73229ff20c28b9c22b0699b9e066ae7e573dd6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:24:08 -0400 Subject: [PATCH 388/910] chore: API sync --- stackslib/src/net/tests/relay/epoch2x.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index e6a69f5dc02..b234460ddcc 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2592,7 +2592,9 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { ret }; let mut update_sortition = false; - for (event_id, pending) in peers[1].network.pending_messages.iter() { + for ((event_id, _neighbor_key), pending) in + peers[1].network.pending_messages.iter() + { debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); if pending.len() >= 1 { update_sortition = true; @@ -3086,7 +3088,7 @@ fn process_new_blocks_rejects_problematic_asts() { }, ]; let mut unsolicited = HashMap::new(); - unsolicited.insert(nk.clone(), bad_msgs.clone()); + unsolicited.insert((1, nk.clone()), bad_msgs.clone()); let mut network_result = NetworkResult::new( peer.network.stacks_tip.block_id(), From 0012f74b043e6cf13460a3e66adcb5c1013febfd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:31:03 -0400 Subject: [PATCH 389/910] chore: API sync --- stackslib/src/net/tests/relay/nakamoto.rs | 87 ++++++++++++++++++----- 1 file changed, 68 insertions(+), 19 deletions(-) diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 9691a628e20..fb9db70d5b6 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -388,6 +388,7 @@ fn test_buffer_data_message() { let (mut peer, _followers) = make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 5, bitvecs.clone(), 1); + let peer_nk = peer.to_neighbor().addr; let nakamoto_block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, @@ -472,43 +473,89 @@ fn test_buffer_data_message() { blocks: vec![nakamoto_block], }), ); + let stackerdb_chunk = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::StackerDBPushChunk(StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::parse( + "ST000000000000000000002AMW42H.signers-1-4", + ) + .unwrap(), + rc_consensus_hash: 
ConsensusHash([0x01; 20]), + chunk_data: StackerDBChunkData { + slot_id: 0, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![1, 2, 3, 4, 5], + }, + }), + ); for _ in 0..peer.network.connection_opts.max_buffered_blocks_available { assert!(peer .network - .buffer_data_message(0, blocks_available.clone())); + .buffer_sortition_data_message(0, &peer_nk, blocks_available.clone())); } assert!(!peer .network - .buffer_data_message(0, blocks_available.clone())); + .buffer_sortition_data_message(0, &peer_nk, blocks_available.clone())); for _ in 0..peer .network .connection_opts .max_buffered_microblocks_available { + assert!(peer.network.buffer_sortition_data_message( + 0, + &peer_nk, + microblocks_available.clone() + )); + } + assert!(!peer.network.buffer_sortition_data_message( + 0, + &peer_nk, + microblocks_available.clone() + )); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks { assert!(peer .network - .buffer_data_message(0, microblocks_available.clone())); + .buffer_sortition_data_message(0, &peer_nk, block.clone())); } assert!(!peer .network - .buffer_data_message(0, microblocks_available.clone())); - - for _ in 0..peer.network.connection_opts.max_buffered_blocks { - assert!(peer.network.buffer_data_message(0, block.clone())); - } - assert!(!peer.network.buffer_data_message(0, block.clone())); + .buffer_sortition_data_message(0, &peer_nk, block.clone())); for _ in 0..peer.network.connection_opts.max_buffered_microblocks { - assert!(peer.network.buffer_data_message(0, microblocks.clone())); + assert!(peer + .network + .buffer_sortition_data_message(0, &peer_nk, microblocks.clone())); } - assert!(!peer.network.buffer_data_message(0, microblocks.clone())); + assert!(!peer + .network + .buffer_sortition_data_message(0, &peer_nk, microblocks.clone())); for _ in 0..peer.network.connection_opts.max_buffered_nakamoto_blocks { - assert!(peer.network.buffer_data_message(0, nakamoto_block.clone())); + assert!(peer + .network + .buffer_sortition_data_message(0, &peer_nk, nakamoto_block.clone())); } - assert!(!peer.network.buffer_data_message(0, nakamoto_block.clone())); + assert!(!peer + .network + .buffer_sortition_data_message(0, &peer_nk, nakamoto_block.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_stackerdb_chunks { + assert!(peer + .network + .buffer_stacks_data_message(0, &peer_nk, stackerdb_chunk.clone())); + } + assert!(!peer + .network + .buffer_stacks_data_message(0, &peer_nk, stackerdb_chunk.clone())); } /// Verify that Nakamoto blocks whose sortitions are known will *not* be buffered, but instead
- let mut unsolicited_msgs: HashMap<usize, Vec<StacksMessage>> = HashMap::new(); + let mut unsolicited_msgs: HashMap<(usize, NeighborKey), Vec<StacksMessage>> = + HashMap::new(); for (event_id, convo) in follower.network.peers.iter() { for blks in all_blocks.iter() { let msg = StacksMessage::from_chain_view( follower.network.bound_neighbor_key().peer_version, follower.network.bound_neighbor_key().network_id, follower.network.get_chain_view(), StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: blks.clone(), }), ); - - if let Some(msgs) = unsolicited_msgs.get_mut(event_id) { + let nk = convo.to_neighbor_key(); + if let Some(msgs) = unsolicited_msgs.get_mut(&(*event_id, nk)) { msgs.push(msg); } else { - unsolicited_msgs.insert(*event_id, vec![msg]); + unsolicited_msgs + .insert((*event_id, convo.to_neighbor_key()), vec![msg]); } } } - follower.network.handle_unsolicited_messages( + follower.network.handle_unsolicited_sortition_messages( &sortdb, &node.chainstate, unsolicited_msgs, From efc2563704e7a5a427512f42079229427fd4ed16 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:31:15 -0400 Subject: [PATCH 390/910] feat: add code path for buffering unsolicited messages that might become processable after the Stacks tenure changes --- stackslib/src/net/unsolicited.rs | 424 +++++++++++++++++++++++-------- 1 file changed, 317 insertions(+), 107 deletions(-) diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index f9ab5de87ea..d10a6ee3685 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -22,7 +22,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{Error as ChainstateError, StacksBlockHeader}; -use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState}; +use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState, PendingMessages}; use crate::net::{ BlocksAvailableData, BlocksData, BlocksDatum, Error as NetError, MicroblocksData, NakamotoBlocksData, NeighborKey, Preamble, StacksMessage, StacksMessageType, @@ -62,7 +62,7 @@ impl PeerNetwork { else { test_debug!( "{:?}: No such neighbor event={}", - &self.local_peer, + &self.get_local_peer(), event_id ); return None; @@ -72,7 +72,7 @@ impl PeerNetwork { // drop -- a correct peer will have authenticated before sending this message test_debug!( "{:?}: Unauthenticated neighbor {:?}", - &self.local_peer, + &self.get_local_peer(), &remote_neighbor_key ); return None; @@ -116,7 +116,9 @@ impl PeerNetwork { Ok(None) => { debug!( "{:?}: We already know the inventory state in {} for {}", - &self.local_peer, outbound_neighbor_key, consensus_hash + &self.get_local_peer(), + outbound_neighbor_key, + consensus_hash ); return Ok(None); } @@ -124,12 +126,12 @@ impl PeerNetwork { // is this remote node simply ahead of us?
if let Some(convo) = self.peers.get(&event_id) { if self.chain_view.burn_block_height < convo.burnchain_tip_height { - debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); + debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.get_local_peer(), consensus_hash, outbound_neighbor_key); return Err(NetError::NotFoundError); } } // not ahead of us -- it's a bad consensus hash - debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); + debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.get_local_peer(), consensus_hash, outbound_neighbor_key); return Ok(None); } Err(NetError::InvalidMessage) => { @@ -178,6 +180,7 @@ impl PeerNetwork { let mut blocks_data = 0; let mut microblocks_data = 0; let mut nakamoto_blocks_data = 0; + let mut stackerdb_chunks_data = 0; for stored_msg in msgs.iter() { match &stored_msg.payload { StacksMessageType::BlocksAvailable(_) => { @@ -187,7 +190,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer BlocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_available + &self.get_local_peer(), event_id, blocks_available ); return false; } @@ -200,7 +203,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer MicroblocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_available + &self.get_local_peer(), event_id, microblocks_available ); return false; } @@ -212,7 +215,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer BlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_data + &self.get_local_peer(), event_id, blocks_data ); return false; } @@ -224,7 +227,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer MicroblocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_data + &self.get_local_peer(), event_id, microblocks_data ); return false; } @@ -236,7 +239,20 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer NakamotoBlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, nakamoto_blocks_data + &self.get_local_peer(), event_id, nakamoto_blocks_data + ); + return false; + } + } + StacksMessageType::StackerDBPushChunk(_) => { + stackerdb_chunks_data += 1; + if matches!(&msg.payload, StacksMessageType::StackerDBPushChunk(..)) + && stackerdb_chunks_data + >= self.connection_opts.max_buffered_stackerdb_chunks + { + debug!( + "{:?}: Cannot buffer StackerDBPushChunks from event {} -- already have {} buffered", + self.get_local_peer(), event_id, stackerdb_chunks_data ); return false; } @@ -253,12 +269,19 @@ impl PeerNetwork { /// If there is no space for the message, then silently drop it. /// Returns true if buffered. /// Returns false if not. 
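For context on `can_buffer_data_message` above: the per-type cap check amounts to the sketch below, with simplified stand-ins for the `StacksMessageType` variants and `ConnectionOptions` fields (only two kinds shown). The default cap of 4096 for StackerDB chunks comes from the `ConnectionOptions` change earlier in this series.

```rust
// Sketch of the per-type buffering cap: count already-queued messages of the
// incoming message's kind and refuse to buffer once the configured limit is
// hit. MsgKind and the limits are simplified stand-ins, not the real types.
#[derive(PartialEq, Clone, Copy)]
enum MsgKind {
    NakamotoBlocks,
    StackerDBPushChunk,
}

fn can_buffer(queued: &[MsgKind], incoming: MsgKind, max_nakamoto: usize, max_chunks: usize) -> bool {
    // count already-buffered messages of the same kind as the incoming one
    let same_kind = queued.iter().filter(|k| **k == incoming).count();
    let cap = match incoming {
        MsgKind::NakamotoBlocks => max_nakamoto,
        MsgKind::StackerDBPushChunk => max_chunks,
    };
    same_kind < cap
}

fn main() {
    let queued = vec![MsgKind::StackerDBPushChunk; 4096];
    // at the default max_buffered_stackerdb_chunks of 4096, the next chunk is refused
    assert!(!can_buffer(&queued, MsgKind::StackerDBPushChunk, 1024, 4096));
    assert!(can_buffer(&queued, MsgKind::NakamotoBlocks, 1024, 4096));
}
```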
- pub(crate) fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> bool { - let Some(msgs) = self.pending_messages.get(&event_id) else { - self.pending_messages.insert(event_id, vec![msg]); + pub(crate) fn buffer_sortition_data_message( + &mut self, + event_id: usize, + neighbor_key: &NeighborKey, + msg: StacksMessage, + ) -> bool { + let key = (event_id, neighbor_key.clone()); + let Some(msgs) = self.pending_messages.get(&key) else { + self.pending_messages.insert(key.clone(), vec![msg]); debug!( "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id + &self.get_local_peer(), + event_id ); return true; }; @@ -269,15 +292,71 @@ impl PeerNetwork { return false; } - if let Some(msgs) = self.pending_messages.get_mut(&event_id) { + let debug_msg = format!( + "{:?}: buffer message from event {} (buffered: {}): {:?}", + self.get_local_peer(), + event_id, + msgs.len() + 1, + &msg + ); + if let Some(msgs) = self.pending_messages.get_mut(&key) { // should always be reachable + debug!("{}", &debug_msg); msgs.push(msg); + } + true + } + + #[cfg_attr(test, mutants::skip)] + /// Buffer a message for re-processing once the stacks view updates. + /// If there is no space for the message, then silently drop it. + /// Returns true if buffered. + /// Returns false if not. + pub(crate) fn buffer_stacks_data_message( + &mut self, + event_id: usize, + neighbor_key: &NeighborKey, + msg: StacksMessage, + ) -> bool { + let key = (event_id, neighbor_key.clone()); + let Some(msgs) = self.pending_stacks_messages.get(&key) else { + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + if !self.can_buffer_data_message(event_id, &[], &msg) { + return false; + } debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, + "{:?}: buffer message from event {}: {:?}", + self.get_local_peer(), event_id, - msgs.len() + &msg + ); + self.pending_stacks_messages.insert(key.clone(), vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.get_local_peer(), + event_id ); + return true; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. 
+ if !self.can_buffer_data_message(event_id, msgs, &msg) { + return false; + } + + let debug_msg = format!( + "{:?}: buffer message from event {} (buffered: {}): {:?}", + self.get_local_peer(), + event_id, + msgs.len() + 1, + &msg + ); + if let Some(msgs) = self.pending_stacks_messages.get_mut(&key) { + // should always be reachable + debug!("{}", &debug_msg); + msgs.push(msg); } true } @@ -341,7 +420,7 @@ impl PeerNetwork { debug!( "{:?}: Process BlocksAvailable from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), &outbound_neighbor_key, new_blocks.available.len() ); @@ -361,7 +440,7 @@ impl PeerNetwork { } Err(NetError::NotFoundError) => { if buffer { - debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.get_local_peer(), &consensus_hash); to_buffer = true; } continue; @@ -369,7 +448,11 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + &self.get_local_peer(), + &consensus_hash, + &block_hash, + &outbound_neighbor_key, + &e ); continue; } @@ -408,7 +491,7 @@ impl PeerNetwork { // advance straight to download state if we're in inv state if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } self.have_data_to_download = true; } @@ -453,7 +536,7 @@ impl PeerNetwork { debug!( "{:?}: Process MicroblocksAvailable from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), outbound_neighbor_key, new_mblocks.available.len() ); @@ -473,7 +556,7 @@ impl PeerNetwork { } Err(NetError::NotFoundError) => { if buffer { - debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.get_local_peer(), &consensus_hash); to_buffer = true; } continue; @@ -481,7 +564,11 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {:?}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + &self.get_local_peer(), + &consensus_hash, + &block_hash, + &outbound_neighbor_key, + &e ); continue; } @@ -516,7 +603,7 @@ impl PeerNetwork { // advance straight to download state if we're in inv state if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); + debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.get_local_peer(), mblock_sortition_height); } self.have_data_to_download = true; } @@ -551,7 +638,7 @@ impl PeerNetwork { debug!( "{:?}: Process BlocksData from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), outbound_neighbor_key_opt .clone() .or_else(|| { self.check_peer_authenticated(event_id) }), @@ -570,7 +657,7 @@ impl PeerNetwork { if buffer { debug!( "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, + 
&self.get_local_peer(), &consensus_hash, &block.block_hash(), StacksBlockHeader::make_index_block_hash( &consensus_hash, &block.block_hash() ) ); to_buffer = true; } else { debug!( "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", &self.get_local_peer(), &consensus_hash, &block.block_hash(), StacksBlockHeader::make_index_block_hash( &consensus_hash, &block.block_hash() ) ); } } Err(e) => { info!( "{:?}: Failed to query block snapshot for {}: {:?}", &self.get_local_peer(), consensus_hash, &e ); continue; } }; if !sn.pox_valid { info!( "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", &self.get_local_peer(), consensus_hash ); continue; } if sn.winning_stacks_block_hash != block.block_hash() { info!( "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", &self.get_local_peer(), block.block_hash(), sn.winning_stacks_block_hash, sn.sortition ); continue; } debug!( "{:?}: Process MicroblocksData from {:?} for {} with {} entries", &self.get_local_peer(), outbound_neighbor_key_opt.or_else(|| { self.check_peer_authenticated(event_id) }), &new_microblocks.index_anchor_block, new_microblocks.microblocks.len() ); match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { Ok(Some(_)) => { // yup; can process now debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.get_local_peer(), &new_microblocks.index_anchor_block); !buffer } Ok(None) => { if buffer { debug!( "{:?}: Will buffer unsolicited MicroblocksData({})", &self.get_local_peer(), &new_microblocks.index_anchor_block ); true } else { debug!( "{:?}: Will not buffer unsolicited MicroblocksData({})", &self.get_local_peer(), &new_microblocks.index_anchor_block ); false } } Err(e) => { warn!( "{:?}: Failed to get header hashes for {:?}: {:?}", &self.get_local_peer(), &new_microblocks.index_anchor_block, &e ); false } } { debug!( "{:?}: Already have Nakamoto block {}", &self.get_local_peer(), &nakamoto_block.block_id() ); return false; } ) -> bool { debug!( "{:?}: Process NakamotoBlocksData from {:?} with {} entries", &self.get_local_peer(), &remote_neighbor_key_opt, nakamoto_blocks.blocks.len() ); if self.is_nakamoto_block_bufferable(sortdb, chainstate, nakamoto_block) { debug!( "{:?}: Will buffer unsolicited NakamotoBlocksData({}) ({})", &self.get_local_peer(), &nakamoto_block.block_id(), &nakamoto_block.header.consensus_hash, ); /// Handle an unsolicited message, with either the intention of just processing it (in which /// case, `buffer` will be `false`), or with the intention of not only processing it, but also /// determining if it can be buffered and retried later (in which
case, `buffer` will be - /// `true`). + /// `true`). This applies to messages that can be reprocessed after the next sortition (not + /// the next Stacks tenure) + /// + /// This code gets called with `buffer` set to true when the message is first received. If + /// this method returns (true, x), then this code gets called with the same message a + /// subsequent time when the sortition changes (and in that case, `buffer` will be false). /// /// Returns (true, x) if we should buffer the message and try processing it again later. /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid @@ -914,12 +1013,11 @@ impl PeerNetwork { /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. /// Returns (x, false) if we should *not* forward the message to the relayer, because it will /// *not* be processed. - fn handle_unsolicited_message( + fn handle_unsolicited_sortition_message( &mut self, sortdb: &SortitionDB, chainstate: &StacksChainState, event_id: usize, - preamble: &Preamble, payload: &StacksMessageType, ibd: bool, buffer: bool, @@ -984,54 +1082,78 @@ impl PeerNetwork { (to_buffer, true) } + _ => (false, true), + } + } + + #[cfg_attr(test, mutants::skip)] + /// Handle an unsolicited message, with either the intention of just processing it (in which + /// case, `buffer` will be `false`), or with the intention of not only processing it, but also + /// determining if it can be bufferred and retried later (in which case, `buffer` will be + /// `true`). This applies to messages that can be reprocessed after the next Stacks tenure. + /// + /// This code gets called with `buffer` set to true when the message is first received. If + /// this method returns (true, x), then this code gets called with the same message a + /// subsequent time when the sortition changes (and in that case, `buffer` will be false). + /// + /// Returns (true, x) if we should buffer the message and try processing it again later. + /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid + /// later. + /// + /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. + /// Returns (x, false) if we should *not* forward the message to the relayer, because it will + /// *not* be processed. + fn handle_unsolicited_stacks_message( + &mut self, + chainstate: &mut StacksChainState, + event_id: usize, + preamble: &Preamble, + payload: &StacksMessageType, + buffer: bool, + ) -> (bool, bool) { + match payload { StacksMessageType::StackerDBPushChunk(ref data) => { - match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { - Ok(x) => { - // don't buffer, but do reject if invalid - (false, x) - } - Err(e) => { + // N.B. send back a reply if we're calling to buffer, since this would be the first + // time we're seeing this message (instead of a subsequent time on follow-up + // processing). 
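+                // To restate the contract of the call below: `can_buffer` means the
+                // chunk may still become valid later and is worth retrying, while
+                // `can_store` means the chunk was accepted now and will be forwarded
+                // to the relayer.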
+ let (can_buffer, can_store) = self + .handle_unsolicited_StackerDBPushChunk( + chainstate, event_id, preamble, data, buffer, + ) + .unwrap_or_else(|e| { info!( - "{:?}: failed to handle unsolicited {:?}: {:?}", - &self.local_peer, payload, &e + "{:?}: failed to handle unsolicited {:?} when buffer = {}: {:?}", + self.get_local_peer(), + payload, + buffer, + &e ); (false, false) - } + }); + if buffer && can_buffer && !can_store { + debug!( + "{:?}: Buffering {:?} to retry on next sortition", + self.get_local_peer(), + &payload + ); } + (can_buffer, can_store) } _ => (false, true), } } - #[cfg_attr(test, mutants::skip)] - /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. - /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the - /// relayer can do something useful with them. - /// - /// Invalid messages are dropped silently, with an log message. - /// - /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent - /// call if the handler for it deems the message valid. - /// - /// If `buffer` is false, then if the message handler deems the message valid, it will be - /// forwraded to the relayer. - /// - /// Returns the messages to be forward to the relayer, keyed by sender. - pub fn handle_unsolicited_messages( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, + /// Authenticate unsolicited messages -- find the address of the neighbor that sent them. + pub fn authenticate_unsolicited_messages( + &self, unsolicited: HashMap>, - ibd: bool, - buffer: bool, - ) -> HashMap> { - let mut unhandled: HashMap> = HashMap::new(); - for (event_id, messages) in unsolicited.into_iter() { + ) -> PendingMessages { + unsolicited.into_iter().filter_map(|(event_id, messages)| { if messages.len() == 0 { // no messages for this event - continue; + return None; } - if buffer && self.check_peer_authenticated(event_id).is_none() { + if self.check_peer_authenticated(event_id).is_none() { if cfg!(test) && self .connection_opts @@ -1039,14 +1161,11 @@ impl PeerNetwork { { test_debug!( "{:?}: skip unsolicited message authentication", - &self.local_peer + &self.get_local_peer() ); } else { - // do not buffer messages from unknown peers - // (but it's fine to process messages that were previosuly buffered, since the peer - // may have since disconnected) debug!("Will not handle unsolicited messages from unauthenticated or dead event {}", event_id); - continue; + return None; } }; let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { @@ -1054,62 +1173,153 @@ impl PeerNetwork { } else { debug!( "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages", - &self.local_peer, + &self.get_local_peer(), event_id, messages.len() ); - continue; + return None; }; + Some(((event_id, neighbor_key), messages)) + }) + .collect() + } - debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); - - for message in messages.into_iter() { + #[cfg_attr(test, mutants::skip)] + /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. + /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the + /// relayer can do something useful with them. + /// + /// This applies only to messages that might be processable after the next sortition. It does + /// *NOT* apply to messages that might be processable after the next tenure. 
+ /// + /// Invalid messages are dropped silently, with a log message. + /// + /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent + /// call if the handler for it deems the message valid. + /// + /// If `buffer` is false, then if the message handler deems the message valid, it will be + /// forwarded to the relayer. + /// + /// Returns messages we could not buffer, keyed by sender and event ID. This can be fed + /// directly into `handle_unsolicited_stacks_messages()`. + pub fn handle_unsolicited_sortition_messages( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + mut unsolicited: PendingMessages, + ibd: bool, + buffer: bool, + ) -> HashMap<(usize, NeighborKey), Vec<StacksMessage>> { + unsolicited.retain(|(event_id, neighbor_key), messages| { + debug!("{:?}: Process {} unsolicited sortition-bound messages from {:?}", &self.get_local_peer(), messages.len(), neighbor_key; "buffer" => %buffer); + messages.retain(|message| { if buffer && !self.can_buffer_data_message( - event_id, - self.pending_messages.get(&event_id).unwrap_or(&vec![]), + *event_id, + self.pending_messages.get(&(*event_id, neighbor_key.clone())).unwrap_or(&vec![]), &message, ) { - // asked to buffer, but we don't have space - continue; + // unable to store this due to quota being exceeded + return false; } if !buffer { debug!( - "{:?}: Re-try handling buffered message {} from {:?}", - &self.local_peer, + "{:?}: Re-try handling buffered sortition-bound message {} from {:?}", + &self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); } - let (to_buffer, relay) = self.handle_unsolicited_message( + let (to_buffer, relay) = self.handle_unsolicited_sortition_message( sortdb, chainstate, - event_id, - &message.preamble, + *event_id, &message.payload, ibd, buffer, ); if buffer && to_buffer { - self.buffer_data_message(event_id, message); - } else if relay { + self.buffer_sortition_data_message(*event_id, neighbor_key, message.clone()); + return false; + } + if relay { // forward to relayer for processing debug!( "{:?}: Will forward message {} from {:?} to relayer", - &self.local_peer, + &self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); - if let Some(msgs) = unhandled.get_mut(&neighbor_key) { - msgs.push(message); - } else { - unhandled.insert(neighbor_key.clone(), vec![message]); - } } + true + }); + messages.len() > 0 + }); + unsolicited + } + + #[cfg_attr(test, mutants::skip)] + /// Handle unsolicited and unhandled messages returned by + /// `handle_unsolicited_sortition_messages()`, to see if any of them could be processed at the + /// start of the next Stacks tenure. That is, the `unsolicited` map contains messages that + /// came from authenticated peers and do not exceed buffer quotas. + /// + /// Invalid messages are dropped silently, with a log message. + /// + /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent + /// call if the handler for it deems the message valid. + /// + /// If `buffer` is false, then if the message handler deems the message valid, it will be + /// forwarded to the relayer. + /// + /// Returns messages we could not buffer, keyed by sender. 
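The two flags threaded through these handlers are easy to misread, so here is a minimal, self-contained model of the contract the doc comments above describe. This is not code from the patch; `Msg`, `handle`, and `pump` are hypothetical stand-ins for a network message, `handle_unsolicited_sortition_message`, and the retain loop:

```rust
// Minimal model of the two-phase buffer/retry contract (hypothetical types).
use std::collections::VecDeque;

struct Msg;

// Stand-in for the handler: returns (to_buffer, relay).
fn handle(_msg: &Msg, _buffer: bool) -> (bool, bool) {
    (true, false) // e.g. "not yet processable; retry after the view advances"
}

fn pump(pending: &mut VecDeque<Msg>, incoming: Vec<Msg>) -> Vec<Msg> {
    let mut relayed = Vec::new();
    // First pass: each message is seen for the first time, so buffer = true.
    for msg in incoming {
        match handle(&msg, true) {
            (true, _) => pending.push_back(msg), // hold and retry later
            (false, true) => relayed.push(msg),  // processable right now
            (false, false) => {}                 // will never be valid: drop
        }
    }
    relayed
}
// Second pass (after the sortition or tenure changes): drain `pending` and
// call handle(&msg, false); only the relay flag matters then.
```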
+ pub fn handle_unsolicited_stacks_messages( + &mut self, + chainstate: &mut StacksChainState, + mut unsolicited: PendingMessages, + buffer: bool, + ) -> HashMap<(usize, NeighborKey), Vec> { + unsolicited.retain(|(event_id, neighbor_key), messages| { + if messages.len() == 0 { + // no messages for this node + return false; } - } - unhandled + debug!("{:?}: Process {} unsolicited tenure-bound messages from {:?}", &self.get_local_peer(), messages.len(), &neighbor_key; "buffer" => %buffer); + messages.retain(|message| { + if !buffer { + debug!( + "{:?}: Re-try handling buffered tenure-bound message {} from {:?}", + &self.get_local_peer(), + &message.payload.get_message_description(), + neighbor_key + ); + } + let (to_buffer, relay) = self.handle_unsolicited_stacks_message( + chainstate, + *event_id, + &message.preamble, + &message.payload, + buffer, + ); + if buffer && to_buffer { + self.buffer_stacks_data_message(*event_id, neighbor_key, message.clone()); + return false; + } + if relay { + // forward to relayer for processing + debug!( + "{:?}: Will forward message {} from {:?} to relayer", + &self.get_local_peer(), + &message.payload.get_message_description(), + &neighbor_key + ); + } + true + }); + messages.len() > 0 + }); + unsolicited } } From 1e1ae4d314628062afa38f7031d998c9e9cdddcd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:31:38 -0400 Subject: [PATCH 391/910] chore: add fault injection to slow down burnchain block processing in order to test this branch end-to-end --- .../src/burnchains/bitcoin_regtest_controller.rs | 14 ++++++++++++++ testnet/stacks-node/src/config.rs | 8 ++++++++ 2 files changed, 22 insertions(+) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 32d590dd396..4a4f0cad8c8 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -606,9 +606,23 @@ impl BitcoinRegtestController { received_at: Instant::now(), }; + let received = self + .chain_tip + .as_ref() + .map(|tip| tip.block_snapshot.block_height) + .unwrap_or(0) + == burnchain_tip.block_snapshot.block_height; self.chain_tip = Some(burnchain_tip.clone()); debug!("Done receiving blocks"); + if self.config.burnchain.fault_injection_burnchain_block_delay > 0 && received { + info!( + "Fault injection: delaying burnchain blocks by {} milliseconds", + self.config.burnchain.fault_injection_burnchain_block_delay + ); + sleep_ms(self.config.burnchain.fault_injection_burnchain_block_delay); + } + Ok((burnchain_tip, burnchain_height)) } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d1b115d9cf5..e4751a10108 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1440,6 +1440,9 @@ pub struct BurnchainConfig { pub wallet_name: String, pub ast_precheck_size_height: Option, pub affirmation_overrides: HashMap, + /// fault injection to simulate a slow burnchain peer. 
+ /// Delay burnchain block downloads by the given number of milliseconds + pub fault_injection_burnchain_block_delay: u64, } impl BurnchainConfig { @@ -1479,6 +1482,7 @@ impl BurnchainConfig { wallet_name: "".to_string(), ast_precheck_size_height: None, affirmation_overrides: HashMap::new(), + fault_injection_burnchain_block_delay: 0, } } pub fn get_rpc_url(&self, wallet: Option<String>) -> String { @@ -1573,6 +1577,7 @@ pub struct BurnchainConfigFile { pub wallet_name: Option<String>, pub ast_precheck_size_height: Option<u64>, pub affirmation_overrides: Option>, + pub fault_injection_burnchain_block_delay: Option<u64>, } impl BurnchainConfigFile { @@ -1785,6 +1790,9 @@ impl BurnchainConfigFile { .pox_prepare_length .or(default_burnchain_config.pox_prepare_length), affirmation_overrides, + fault_injection_burnchain_block_delay: self + .fault_injection_burnchain_block_delay + .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), }; if let BitcoinNetworkType::Mainnet = config.get_bitcoin_network().1 { From 3cedc0d5134ea2e61d0a6223b667f98015e80b03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 15:59:40 -0400 Subject: [PATCH 392/910] fix: fix failing integration test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf64..c0d05938570 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3942,6 +3942,9 @@ fn forked_tenure_is_ignored() { info!("Starting Tenure C."); + // force the timestamp to be different + sleep_ms(2000); + // Submit a block commit op for tenure C. // It should also build on block A, since the node has paused processing of block B. 
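For readers skimming the fault-injection patch above, the gate reduces to a few lines. A condensed, stand-alone sketch with stand-in types (the real check lives in the burnchain-tip receive path shown in PATCH 391's hunk, keyed off `fault_injection_burnchain_block_delay`):

```rust
// Condensed sketch of the fault-injection gate (stand-in types).
use std::{thread, time::Duration};

struct BurnchainCfg {
    /// milliseconds to stall burnchain block processing; 0 disables the fault
    fault_injection_burnchain_block_delay: u64,
}

fn maybe_delay(cfg: &BurnchainCfg, prev_tip_height: Option<u64>, new_tip_height: u64) {
    // Mirrors the `received` check in the patch: only stall when the tip
    // height did not advance between polls.
    let unchanged = prev_tip_height.unwrap_or(0) == new_tip_height;
    if cfg.fault_injection_burnchain_block_delay > 0 && unchanged {
        thread::sleep(Duration::from_millis(
            cfg.fault_injection_burnchain_block_delay,
        ));
    }
}
```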
let commits_before = commits_submitted.load(Ordering::SeqCst); @@ -3973,6 +3976,7 @@ fn forked_tenure_is_ignored() { let block_c = blocks.last().unwrap(); info!("Tenure C tip block: {}", &block_tenure_c.index_block_hash()); info!("Tenure C last block: {}", &block_c.block_id); + assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash()); // Block C was built AFTER Block B was built, but BEFORE it was broadcasted (processed), so it should be built off of Block A assert_eq!( From 45c9c2cc5ca86671405a3edf67aa5c8859eca63b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 16:00:33 -0400 Subject: [PATCH 393/910] chore: fix comment --- stackslib/src/net/stackerdb/tests/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index e9474d9abfa..f45e3acb93e 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -862,7 +862,7 @@ fn test_stackerdb_push_relayer_late_chunks() { peer_2_config.add_neighbor(&peer_3_config.to_neighbor()); peer_3_config.add_neighbor(&peer_2_config.to_neighbor()); - // set up stacker DBs for both peers + // set up stacker DBs for all peers let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); let idx_3 = add_stackerdb(&mut peer_3_config, Some(StackerDBConfig::template())); From 1f67900e512e7723a7e53059c2197cf4703e71d5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 22:12:14 -0400 Subject: [PATCH 394/910] fix: fix timed-out unit test --- stackslib/src/net/stackerdb/sync.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 53a5e13e487..32d7a7e37ed 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -729,7 +729,9 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } continue; @@ -846,7 +848,9 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } continue; @@ -983,7 +987,9 @@ impl StackerDBSync { &self.smart_contract_id, data.error_code ); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } else if data.error_code == NackErrorCodes::StaleVersion { // try again immediately, without throttling @@ -1129,7 +1135,9 @@ impl StackerDBSync { &naddr, data.error_code ); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } continue; From 0cca6e163bdef46846242295890a523352ecc76d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 09:41:52 -0400 Subject: [PATCH 395/910] Add a sleep to ensure that the proposed stacks block has a different timestamp than its parent Signed-off-by: Jacinta Ferrant --- 
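Both this patch (PATCH 395) and PATCH 392 lean on a 2-second sleep because block timestamps are coarse: a child block assembled in the same second as its parent can end up with a colliding timestamp. A stand-alone illustration of the invariant the sleep protects, assuming second-resolution timestamps as the sleeps suggest (this is not the node's validation code):

```rust
use std::thread::sleep;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn now_secs() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()
}

fn main() {
    let parent_timestamp = now_secs();
    sleep(Duration::from_millis(2000)); // mirrors the tests' sleep_ms(2000)
    let child_timestamp = now_secs();
    // A ~2s gap guarantees the child's timestamp differs from the parent's.
    assert!(child_timestamp > parent_timestamp);
}
```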
testnet/stacks-node/src/tests/nakamoto_integrations.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf64..680fa42843d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -711,6 +711,7 @@ pub fn next_block_and_wait_for_commits( (0..commits_before.len()).map(|_| None).collect(); let mut commit_sent_time: Vec> = (0..commits_before.len()).map(|_| None).collect(); + sleep_ms(2000); // Make sure that the proposed stacks block has a different timestamp than its parent next_block_and(btc_controller, timeout_secs, || { for i in 0..commits_submitted.len() { let commits_sent = commits_submitted[i].load(Ordering::SeqCst); @@ -739,6 +740,7 @@ pub fn next_block_and_wait_for_commits( .as_ref() .ok_or("TEST-ERROR: Processed time wasn't set")?; if commits_sent <= commits_before[i] { + info!("NO COMMITS"); return Ok(false); } let commit_sent_time = commit_sent_time[i] @@ -746,22 +748,28 @@ pub fn next_block_and_wait_for_commits( .ok_or("TEST-ERROR: Processed time wasn't set")?; // try to ensure the commit was sent after the block was processed if commit_sent_time > block_processed_time { + info!("COMMIT NOT SENT AFTER BLOCK PROCESSED TIME"); continue; } // if two commits have been sent, one of them must have been after if commits_sent >= commits_before[i] + 2 { + info!("MORE THAN ENOUGH COMMITS"); continue; } // otherwise, just timeout if the commit was sent and its been long enough // for a new commit pass to have occurred if block_processed_time.elapsed() > Duration::from_secs(10) { + info!("TIMEOUT COMMIT"); continue; } + info!("CONDITIONS OF COMMIT CHECK NOT MET"); return Ok(false); } else { + info!("NO BLOCK PROCESSED IN COMMIT CHECK"); return Ok(false); } } + info!("ALL CONDITIONS MET IN COMMIT CHECK"); Ok(true) }) } From bd1fa7a0cc1b3bc6d8b85d546cc93ab3db45a535 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 29 Aug 2024 14:18:41 -0400 Subject: [PATCH 396/910] chore: `test_debug!` -> `debug!` --- .../burn/operations/leader_key_register.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index b892f7efd8d..0a3c0057a6b 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -136,32 +136,30 @@ impl LeaderKeyRegisterOp { let num_outputs = tx.num_recipients(); if num_inputs == 0 { - test_debug!( + debug!( "Invalid tx: inputs: {}, outputs: {}", - num_inputs, - num_outputs, + num_inputs, num_outputs, ); return Err(op_error::InvalidInput); } if num_outputs < 1 { - test_debug!( + debug!( "Invalid tx: inputs: {}, outputs: {}", - num_inputs, - num_outputs + num_inputs, num_outputs ); return Err(op_error::InvalidInput); } if tx.opcode() != Opcodes::LeaderKeyRegister as u8 { - test_debug!("Invalid tx: invalid opcode {}", tx.opcode()); + debug!("Invalid tx: invalid opcode {}", tx.opcode()); return Err(op_error::InvalidInput); } let data = match LeaderKeyRegisterOp::parse_data(&tx.data()) { Some(data) => data, None => { - test_debug!("Invalid tx data"); + debug!("Invalid tx data"); return Err(op_error::ParseError); } }; From f680c3168f0b9ce25a48d8de6d58b7f7933be9cb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 29 Aug 2024 14:24:29 -0400 
Subject: [PATCH 397/910] refactor: combine `if` statements --- .../chainstate/burn/operations/leader_key_register.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 0a3c0057a6b..44402adc0c8 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -135,7 +135,7 @@ impl LeaderKeyRegisterOp { let num_inputs = tx.num_signers(); let num_outputs = tx.num_recipients(); - if num_inputs == 0 { + if num_inputs == 0 || num_outputs < 1 { debug!( "Invalid tx: inputs: {}, outputs: {}", num_inputs, num_outputs, @@ -143,14 +143,6 @@ impl LeaderKeyRegisterOp { return Err(op_error::InvalidInput); } - if num_outputs < 1 { - debug!( - "Invalid tx: inputs: {}, outputs: {}", - num_inputs, num_outputs - ); - return Err(op_error::InvalidInput); - } - if tx.opcode() != Opcodes::LeaderKeyRegister as u8 { debug!("Invalid tx: invalid opcode {}", tx.opcode()); return Err(op_error::InvalidInput); From 5780503c2fe7b723a78aab4d39fc1b9d34b1705a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 29 Aug 2024 15:49:15 -0400 Subject: [PATCH 398/910] fix: ensure that key register ops have change output --- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 32d590dd396..145e73a3897 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -889,7 +889,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, - false, + true, // key register op requires change output to exist )?; increment_btc_ops_sent_counter(); @@ -1466,7 +1466,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, - true, // only block commit op requires change output to exist + true, // block commit op requires change output to exist )?; let serialized_tx = SerializedTx::new(tx.clone()); From 0508758cc26cba42c149b2f1e906e3ea8eb58964 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 19:13:14 -0400 Subject: [PATCH 399/910] Add a monitor-signers cli command for polling signers stackerdb messages every x number of seconds Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 17 +++ stacks-signer/src/client/stacks_client.rs | 38 +++++-- stacks-signer/src/main.rs | 130 +++++++++++++++++++++- stacks-signer/src/runloop.rs | 40 +------ stacks-signer/src/signerdb.rs | 13 +-- stacks-signer/src/v0/signer.rs | 22 ++-- 6 files changed, 194 insertions(+), 66 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 74e2cd2344c..c83239828b1 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use std::io::{self, Read}; +use std::net::SocketAddr; use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; @@ -97,6 +98,8 @@ pub enum Command { GenerateVote(GenerateVoteArgs), /// Verify the vote for a specified SIP against a public key and vote info VerifyVote(VerifyVoteArgs), + /// Verify signer signatures by checking stackerdb slots contain the correct data + MonitorSigners(MonitorSignersArgs), } /// Basic arguments for all cryptographic and stacker-db functionality @@ -258,6 +261,20 @@ impl TryFrom<u8> for Vote { } } +#[derive(Parser, Debug, Clone)] +/// Arguments for the MonitorSigners command +pub struct MonitorSignersArgs { + /// The Stacks node to connect to + #[arg(long)] + pub host: SocketAddr, + /// Whether the node is mainnet. Default is true + #[arg(long, default_value = "true")] + pub mainnet: bool, + /// Set the polling interval in seconds. Default is 60 seconds. + #[arg(long, short, default_value = "60")] + pub interval: u64, +} + #[derive(Clone, Debug, PartialEq)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914bd..b6e9c8a3819 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,4 +1,4 @@ -use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -19,7 +19,7 @@ use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, @@ -56,6 +56,7 @@ use stacks_common::types::StacksEpochId; use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; +use super::SignerSlotID; use crate::client::{retry_with_exponential_backoff, ClientError}; use crate::config::GlobalConfig; use crate::runloop::RewardCycleInfo; @@ -158,7 +159,7 @@ impl StacksClient { } /// Helper function that attempts to deserialize a clarity hex string as a list of signer slots and their associated number of signer slots - pub fn parse_signer_slots( + fn parse_signer_slots( &self, value: ClarityValue, ) -> Result<Vec<(StacksAddress, u128)>, ClientError> { @@ -180,6 +181,29 @@ impl StacksClient { Ok(signer_slots) } + /// Get the stackerdb signer slots for a specific reward cycle + pub fn get_parsed_signer_slots( + &self, + reward_cycle: u64, + ) -> Result<HashMap<StacksAddress, SignerSlotID>, ClientError> { + let signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); + // Get the signer writers from the stacker-db to find the signer slot id + let stackerdb_signer_slots = + self.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; + let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); + for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { + signer_slot_ids.insert( + address, + SignerSlotID( + u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), 
+ ), + ); + } + Ok(signer_slot_ids) + } + /// Get the vote for a given round, reward cycle, and signer address pub fn get_vote_for_aggregate_public_key( &self, @@ -541,13 +565,13 @@ impl StacksClient { warn!("Failed to parse the GetStackers error response: {e}"); backoff::Error::permanent(e.into()) })?; - if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { + Err(backoff::Error::transient(ClientError::NoSortitionOnChain)) } else { warn!("Got error response ({status}): {}", error_data.err_msg); - return Err(backoff::Error::permanent(ClientError::RequestFailure( + Err(backoff::Error::permanent(ClientError::RequestFailure( status, - ))); + ))) } }; let stackers_response = diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 184876373bd..f0f907bfd94 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -26,22 +26,30 @@ extern crate serde; extern crate serde_json; extern crate toml; +use std::collections::HashMap; use std::io::{self, Write}; +use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; -use clarity::types::chainstate::StacksPublicKey; +use clarity::codec::read_next; +use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::util::sleep_ms; use clarity::vm::types::QualifiedContractIdentifier; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::slog_debug; -use stacks_common::debug; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::{debug, info, warn}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, - GetLatestChunkArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, + GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, + VerifyVoteArgs, }; +use stacks_signer::client::StacksClient; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -188,6 +196,119 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { valid_vote } +fn handle_monitor_signers(args: MonitorSignersArgs) { + info!("Monitoring signers stackerdb..."); + let interval_ms = args.interval * 1000; + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle + args.host, + "FOO".to_string(), // We don't care about authorized paths. 
Just accessing public info + args.mainnet, + ); + let mut reward_cycle = stacks_client + .get_current_reward_cycle_info() + .unwrap() + .reward_cycle; + let mut contract_name = + NakamotoSigners::make_signers_db_name(reward_cycle, MessageSlotID::BlockResponse.to_u32()); + let mut contract_id = boot_code_id(contract_name.as_str(), args.mainnet); + let mut session = stackerdb_session(&args.host.to_string(), contract_id); + let mut signers_slots = stacks_client + .get_parsed_signer_slots(reward_cycle) + .expect("Failed to get signer slots"); + let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); + for (signer_address, slot_id) in signers_slots.iter() { + signers_addresses.insert(*slot_id, *signer_address); + } + let mut slot_ids: Vec<_> = signers_slots.values().map(|value| value.0).collect(); + + // Poll stackerdb slots every 200 ms to check for new mock signatures. + let mut last_messages = HashMap::with_capacity(slot_ids.len()); + let mut signer_last_write_time = HashMap::with_capacity(slot_ids.len()); + loop { + let mut missing_signers = Vec::with_capacity(slot_ids.len()); + let mut stale_signers = Vec::with_capacity(slot_ids.len()); + + let next_reward_cycle = stacks_client + .get_current_reward_cycle_info() + .unwrap() + .reward_cycle; + if next_reward_cycle != reward_cycle { + reward_cycle = next_reward_cycle; + contract_name = NakamotoSigners::make_signers_db_name( + reward_cycle, + MessageSlotID::BlockResponse.to_u32(), + ); + contract_id = boot_code_id(contract_name.as_str(), args.mainnet); + session = stackerdb_session(&args.host.to_string(), contract_id); + signers_slots = stacks_client + .get_parsed_signer_slots(reward_cycle) + .expect("Failed to get signer slots"); + slot_ids = signers_slots.values().map(|value| value.0).collect(); + last_messages = HashMap::with_capacity(slot_ids.len()); + } + let new_messages: Vec> = session + .get_latest_chunks(&slot_ids) + .expect("Failed to get latest signer messages") + .into_iter() + .map(|chunk_opt| { + chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + }) + .collect(); + for ((signer_address, slot_id), signer_message_opt) in + signers_slots.clone().into_iter().zip(new_messages) + { + if let Some(signer_message) = signer_message_opt { + if let Some(last_message) = last_messages.get(&slot_id) { + if last_message == &signer_message { + continue; + } + match last_message { + SignerMessage::MockSignature(_) => { + if args.mainnet { + warn!("Mock signature found for signer {signer_address} in slot {slot_id} but we are on mainnet"); + continue; + } + } + SignerMessage::BlockResponse(_) => { + if args.mainnet { + warn!("Block response found for signer {signer_address} in slot {slot_id} but we are on mainnet"); + continue; + } + } + _ => { + warn!("Unexpected message found for signer {signer_address} in slot {slot_id}"); + continue; + } + } + } + last_messages.insert(slot_id, signer_message); + signer_last_write_time.insert(slot_id, std::time::Instant::now()); + } else { + missing_signers.push(signer_address); + } + } + if !missing_signers.is_empty() { + warn!( + "Missing messages for {} signers: {missing_signers:?}", + missing_signers.len() + ); + } + for (slot_id, last_write_time) in signer_last_write_time.iter() { + if last_write_time.elapsed().as_secs() > 600 { + let address = signers_addresses + .get(slot_id) + .expect("BUG: missing signer address for given slot id"); + stale_signers.push(*address); + } + } + if !stale_signers.is_empty() { + warn!("The following {} signers have not written to stackerdb in over 
10 minutes: {stale_signers:?}", stale_signers.len()); + } + sleep_ms(interval_ms); + } +} + fn main() { let cli = Cli::parse(); @@ -224,6 +345,7 @@ fn main() { Command::VerifyVote(args) => { handle_verify_vote(args, true); } + Command::MonitorSigners(args) => handle_monitor_signers(args), } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 9e1083047b5..a9901c354f7 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -18,19 +18,16 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; use std::time::Duration; -use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; -use blockstack_lib::util_lib::boot::boot_code_id; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; use wsts::common::MerkleRoot; use wsts::state_machine::OperationResult; use crate::chainstate::SortitionsView; -use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, StacksClient}; +use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; use crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; @@ -246,30 +243,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(Some(entries)) } - /// Get the stackerdb signer slots for a specific reward cycle - pub fn get_parsed_signer_slots( - &self, - stacks_client: &StacksClient, - reward_cycle: u64, - ) -> Result, ClientError> { - let signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = - boot_code_id(SIGNERS_NAME, self.config.network.is_mainnet()); - // Get the signer writers from the stacker-db to find the signer slot id - let stackerdb_signer_slots = - stacks_client.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; - let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); - for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { - signer_slot_ids.insert( - address, - SignerSlotID( - u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), - ), - ); - } - Ok(signer_slot_ids) - } /// Get a signer configuration for a specific reward cycle from the stacks node fn get_signer_config( &mut self, @@ -284,8 +257,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo return Err(e); } }; - let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) - { + let signer_slot_ids = match self.stacks_client.get_parsed_signer_slots(reward_cycle) { Ok(x) => x, Err(e) => { warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); @@ -431,10 +403,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) { self.refresh_signer_config(current_reward_cycle); } - if is_in_next_prepare_phase { - if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) { - self.refresh_signer_config(next_reward_cycle); - } + if is_in_next_prepare_phase + && !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) + { + self.refresh_signer_config(next_reward_cycle); } self.cleanup_stale_signers(current_reward_cycle); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 
2d2e9cc22a7..b920b8b176b 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -490,7 +490,7 @@ impl SignerDb { .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); - let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?; + let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, @@ -534,7 +534,7 @@ impl SignerDb { let qry = "INSERT OR REPLACE INTO block_signatures (signer_signature_hash, signature) VALUES (?1, ?2);"; let args = params![ block_sighash, - serde_json::to_string(signature).map_err(|e| DBError::SerializationError(e))? + serde_json::to_string(signature).map_err(DBError::SerializationError)? ]; debug!("Inserting block signature."; @@ -590,7 +590,7 @@ impl SignerDb { if broadcasted == 0 { return Ok(None); } - Ok(u64::try_from(broadcasted).ok()) + Ok(Some(broadcasted)) } } @@ -880,15 +880,12 @@ mod tests { assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![]); db.add_block_signature(&block_id, &sig1).unwrap(); - assert_eq!( - db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone()] - ); + assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![sig1]); db.add_block_signature(&block_id, &sig2).unwrap(); assert_eq!( db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone(), sig2.clone()] + vec![sig1, sig2] ); } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f51..cb1d4f8a6de 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -244,7 +244,7 @@ impl From for Signer { .signer_entries .signer_ids .iter() - .map(|(addr, id)| (*id, addr.clone())) + .map(|(addr, id)| (*id, *addr)) .collect(); let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); @@ -262,7 +262,7 @@ impl From for Signer { signer_id, addr ); }; - (addr.clone(), key_ids.len()) + (*addr, key_ids.len()) }) .collect(); @@ -484,7 +484,7 @@ impl Signer { ( BlockResponse::accepted(signer_signature_hash, signature), block_info, - Some(signature.clone()), + Some(signature), ) } BlockValidateResponse::Reject(block_validate_reject) => { @@ -550,7 +550,7 @@ impl Signer { addrs: impl Iterator, ) -> u32 { let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { - let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0); signing_weight.saturating_add(*stacker_weight) }); u32::try_from(signing_weight) @@ -607,16 +607,12 @@ impl Signer { }; // authenticate the signature -- it must be signed by one of the stacking set - let is_valid_sig = self - .signer_addresses - .iter() - .find(|addr| { - let stacker_address = StacksAddress::p2pkh(true, &public_key); + let is_valid_sig = self.signer_addresses.iter().any(|addr| { + let stacker_address = StacksAddress::p2pkh(true, &public_key); - // it only matters that the address hash bytes match - stacker_address.bytes == addr.bytes - }) - .is_some(); + // it only matters that the address hash bytes match + stacker_address.bytes == addr.bytes + }); if !is_valid_sig { debug!("{self}: Receive invalid signature {signature}. 
Will not store."); From 5b6a96989fc1f6790793b52a19944053fb7fb318 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 20:04:05 -0400 Subject: [PATCH 400/910] Add epoch gating and make message timeout configurable Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 3 +++ stacks-signer/src/main.rs | 51 +++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 21 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index c83239828b1..9707a847d63 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -273,6 +273,9 @@ pub struct MonitorSignersArgs { /// Set the polling interval in seconds. Default is 60 seconds. #[arg(long, short, default_value = "60")] pub interval: u64, + /// Max age in seconds before a signer message is considered stale. Default is 600 seconds. + #[arg(long, short, default_value = "600")] + pub max_age: u64, } #[derive(Clone, Debug, PartialEq)] diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index f0f907bfd94..78ce3d2d6bc 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -35,6 +35,7 @@ use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_ke use clap::Parser; use clarity::codec::read_next; use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::types::StacksEpochId; use clarity::util::sleep_ms; use clarity::vm::types::QualifiedContractIdentifier; use libsigner::v0::messages::{MessageSlotID, SignerMessage}; @@ -197,7 +198,6 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { } fn handle_monitor_signers(args: MonitorSignersArgs) { - info!("Monitoring signers stackerdb..."); let interval_ms = args.interval * 1000; let stacks_client = StacksClient::new( StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle @@ -205,6 +205,12 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { "FOO".to_string(), // We don't care about authorized paths. Just accessing public info args.mainnet, ); + + let epoch = stacks_client.get_node_epoch().unwrap(); + assert!( + epoch >= StacksEpochId::Epoch25, + "Cannot Monitor Signers before Epoch 2.5. Current epoch: {epoch:?}", + ); let mut reward_cycle = stacks_client .get_current_reward_cycle_info() .unwrap() @@ -225,7 +231,13 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { // Poll stackerdb slots every 200 ms to check for new mock signatures. let mut last_messages = HashMap::with_capacity(slot_ids.len()); let mut signer_last_write_time = HashMap::with_capacity(slot_ids.len()); + + info!( + "Monitoring signers stackerdb. 
Polling interval: {} secs, Max message age: {} secs", + args.interval, args.max_age + ); loop { + info!("Polling signers stackerdb for new messages..."); let mut missing_signers = Vec::with_capacity(slot_ids.len()); let mut stale_signers = Vec::with_capacity(slot_ids.len()); @@ -234,6 +246,10 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .unwrap() .reward_cycle; if next_reward_cycle != reward_cycle { + info!( + "Reward cycle has changed from {} to {}.", + reward_cycle, next_reward_cycle + ); reward_cycle = next_reward_cycle; contract_name = NakamotoSigners::make_signers_db_name( reward_cycle, @@ -263,23 +279,16 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { if last_message == &signer_message { continue; } - match last_message { - SignerMessage::MockSignature(_) => { - if args.mainnet { - warn!("Mock signature found for signer {signer_address} in slot {slot_id} but we are on mainnet"); - continue; - } - } - SignerMessage::BlockResponse(_) => { - if args.mainnet { - warn!("Block response found for signer {signer_address} in slot {slot_id} but we are on mainnet"); - continue; - } - } - _ => { - warn!("Unexpected message found for signer {signer_address} in slot {slot_id}"); - continue; - } + if epoch == StacksEpochId::Epoch25 + && !matches!(last_message, SignerMessage::MockSignature(_)) + { + warn!("Epoch 2.5 Signers Should be Sending MockSignature messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); + continue; + } else if epoch > StacksEpochId::Epoch25 + && !matches!(last_message, SignerMessage::BlockResponse(_)) + { + warn!("Nakamoto Signers Should be Sending BlockResponse messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); + continue; } } last_messages.insert(slot_id, signer_message); @@ -290,12 +299,12 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } if !missing_signers.is_empty() { warn!( - "Missing messages for {} signers: {missing_signers:?}", + "Missing expected messages for {} signers: {missing_signers:?}", missing_signers.len() ); } for (slot_id, last_write_time) in signer_last_write_time.iter() { - if last_write_time.elapsed().as_secs() > 600 { + if last_write_time.elapsed().as_secs() > args.max_age { let address = signers_addresses .get(slot_id) .expect("BUG: missing signer address for given slot id"); @@ -303,7 +312,7 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } if !stale_signers.is_empty() { - warn!("The following {} signers have not written to stackerdb in over 10 minutes: {stale_signers:?}", stale_signers.len()); + warn!("The following {} signers have not written to stackerdb in over {} seconds: {stale_signers:?}", stale_signers.len(), args.max_age); } sleep_ms(interval_ms); } From 80fb2935ee3cbf76697fed61c1778cd8dddce0bf Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 20:07:35 -0400 Subject: [PATCH 401/910] Update signer addresses per reward cycle Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 78ce3d2d6bc..4998c704f38 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -262,6 +262,9 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .expect("Failed to get signer slots"); slot_ids = signers_slots.values().map(|value| value.0).collect(); last_messages = HashMap::with_capacity(slot_ids.len()); + for (signer_address, slot_id) in signers_slots.iter() { + signers_addresses.insert(*slot_id, 
*signer_address); + } } let new_messages: Vec> = session .get_latest_chunks(&slot_ids) From 79b72e4af8afb1ea5445e93daf20ac104a52d108 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 20:25:17 -0400 Subject: [PATCH 402/910] Clear last_messages and last_updates every reward cycle Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 52 ++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 4998c704f38..51359da7ce8 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -30,7 +30,6 @@ use std::collections::HashMap; use std::io::{self, Write}; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::codec::read_next; @@ -215,10 +214,6 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .get_current_reward_cycle_info() .unwrap() .reward_cycle; - let mut contract_name = - NakamotoSigners::make_signers_db_name(reward_cycle, MessageSlotID::BlockResponse.to_u32()); - let mut contract_id = boot_code_id(contract_name.as_str(), args.mainnet); - let mut session = stackerdb_session(&args.host.to_string(), contract_id); let mut signers_slots = stacks_client .get_parsed_signer_slots(reward_cycle) .expect("Failed to get signer slots"); @@ -228,10 +223,18 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } let mut slot_ids: Vec<_> = signers_slots.values().map(|value| value.0).collect(); - // Poll stackerdb slots every 200 ms to check for new mock signatures. + // Poll stackerdb slots to check for new expected messages let mut last_messages = HashMap::with_capacity(slot_ids.len()); - let mut signer_last_write_time = HashMap::with_capacity(slot_ids.len()); + let mut last_updates = HashMap::with_capacity(slot_ids.len()); + let mut session = stackerdb_session( + &args.host.to_string(), + NakamotoSigners::make_signers_db_contract_id( + reward_cycle, + MessageSlotID::BlockResponse.to_u32(), + args.mainnet, + ), + ); info!( "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs", args.interval, args.max_age @@ -247,26 +250,31 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .reward_cycle; if next_reward_cycle != reward_cycle { info!( - "Reward cycle has changed from {} to {}.", + "Reward cycle has changed from {} to {}. Updating stacker db session.", reward_cycle, next_reward_cycle ); reward_cycle = next_reward_cycle; - contract_name = NakamotoSigners::make_signers_db_name( - reward_cycle, - MessageSlotID::BlockResponse.to_u32(), - ); - contract_id = boot_code_id(contract_name.as_str(), args.mainnet); - session = stackerdb_session(&args.host.to_string(), contract_id); signers_slots = stacks_client .get_parsed_signer_slots(reward_cycle) .expect("Failed to get signer slots"); slot_ids = signers_slots.values().map(|value| value.0).collect(); - last_messages = HashMap::with_capacity(slot_ids.len()); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); } + session = stackerdb_session( + &args.host.to_string(), + NakamotoSigners::make_signers_db_contract_id( + reward_cycle, + MessageSlotID::BlockResponse.to_u32(), + args.mainnet, + ), + ); + + // Clear the last messages and signer last update times. 
+ last_messages.clear(); + last_updates.clear(); } - let new_messages: Vec> = session + let new_messages: Vec<_> = session .get_latest_chunks(&slot_ids) .expect("Failed to get latest signer messages") .into_iter() @@ -295,7 +303,7 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } last_messages.insert(slot_id, signer_message); - signer_last_write_time.insert(slot_id, std::time::Instant::now()); + last_updates.insert(slot_id, std::time::Instant::now()); } else { missing_signers.push(signer_address); } @@ -306,8 +314,8 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { missing_signers.len() ); } - for (slot_id, last_write_time) in signer_last_write_time.iter() { - if last_write_time.elapsed().as_secs() > args.max_age { + for (slot_id, last_update_time) in last_updates.iter() { + if last_update_time.elapsed().as_secs() > args.max_age { let address = signers_addresses .get(slot_id) .expect("BUG: missing signer address for given slot id"); @@ -315,7 +323,11 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } if !stale_signers.is_empty() { - warn!("The following {} signers have not written to stackerdb in over {} seconds: {stale_signers:?}", stale_signers.len(), args.max_age); + warn!( + "No new updates from {} signers in over {} seconds: {stale_signers:?}", + stale_signers.len(), + args.max_age + ); } sleep_ms(interval_ms); } From 090a4a6b6ce433dccaa557baf86c73165717d1c6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 22:17:07 -0400 Subject: [PATCH 403/910] Retry on errors Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 4 +++ stacks-signer/src/main.rs | 55 ++++++++++++++++++--------------- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 32951d7990e..cfc3bfcb27a 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -23,6 +23,7 @@ use std::time::Duration; use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::serialization::SerializationError; +use libsigner::RPCError; use libstackerdb::Error as StackerDBError; use slog::slog_debug; pub use stackerdb::*; @@ -94,6 +95,9 @@ pub enum ClientError { /// A successful sortition's info response should be parseable into a SortitionState #[error("A successful sortition's info response should be parseable into a SortitionState")] UnexpectedSortitionInfo, + /// An RPC libsigner error occurred + #[error("A libsigner RPC error occurred: {0}")] + RPCError(#[from] RPCError), } /// Retry a function F with an exponential backoff and notification on transient failure diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 51359da7ce8..a6357ba760c 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -40,16 +40,16 @@ use clarity::vm::types::QualifiedContractIdentifier; use libsigner::v0::messages::{MessageSlotID, SignerMessage}; use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_info, slog_warn}; +use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{debug, info, warn}; +use stacks_common::{debug, error, info, warn}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use 
stacks_signer::client::StacksClient; +use stacks_signer::client::{ClientError, StacksClient}; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -197,7 +197,6 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { } fn handle_monitor_signers(args: MonitorSignersArgs) { - let interval_ms = args.interval * 1000; let stacks_client = StacksClient::new( StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle args.host, @@ -205,18 +204,30 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { args.mainnet, ); - let epoch = stacks_client.get_node_epoch().unwrap(); - assert!( - epoch >= StacksEpochId::Epoch25, - "Cannot Monitor Signers before Epoch 2.5. Current epoch: {epoch:?}", - ); - let mut reward_cycle = stacks_client - .get_current_reward_cycle_info() - .unwrap() - .reward_cycle; - let mut signers_slots = stacks_client - .get_parsed_signer_slots(reward_cycle) - .expect("Failed to get signer slots"); + loop { + if let Err(e) = start_monitoring_signers(&stacks_client, &args) { + error!( + "Error occurred monitoring signers: {:?}. Waiting and trying again.", + e + ); + sleep_ms(1000); + } + } +} + +fn start_monitoring_signers( + stacks_client: &StacksClient, + args: &MonitorSignersArgs, +) -> Result<(), ClientError> { + let interval_ms = args.interval * 1000; + let epoch = stacks_client.get_node_epoch()?; + if epoch < StacksEpochId::Epoch25 { + return Err(ClientError::UnsupportedStacksFeature( + "Signer monitoring is only supported for Epoch 2.5 and later".into(), + )); + } + let mut reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; + let mut signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); @@ -244,19 +255,14 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { let mut missing_signers = Vec::with_capacity(slot_ids.len()); let mut stale_signers = Vec::with_capacity(slot_ids.len()); - let next_reward_cycle = stacks_client - .get_current_reward_cycle_info() - .unwrap() - .reward_cycle; + let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; if next_reward_cycle != reward_cycle { info!( "Reward cycle has changed from {} to {}. Updating stacker db session.", reward_cycle, next_reward_cycle ); reward_cycle = next_reward_cycle; - signers_slots = stacks_client - .get_parsed_signer_slots(reward_cycle) - .expect("Failed to get signer slots"); + signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; slot_ids = signers_slots.values().map(|value| value.0).collect(); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); @@ -275,8 +281,7 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { last_updates.clear(); } let new_messages: Vec<_> = session - .get_latest_chunks(&slot_ids) - .expect("Failed to get latest signer messages") + .get_latest_chunks(&slot_ids)? 
            .into_iter()
            .map(|chunk_opt| {
                chunk_opt.and_then(|data| read_next::<SignerMessage, _>(&mut &data[..]).ok())
            })
            .collect();

From e829158752cd6a40143054c943e4c19481437276 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Fri, 30 Aug 2024 00:21:31 -0400
Subject: [PATCH 404/910] Cleanup logs and set mainnet default to false

Signed-off-by: Jacinta Ferrant

---
 stacks-signer/src/cli.rs                   |   7 +-
 stacks-signer/src/client/stacks_client.rs  |   5 +-
 stacks-signer/src/main.rs                  | 105 +++++++++++-------
 stacks-signer/src/tests/chainstate.rs      |   2 +-
 .../src/tests/nakamoto_integrations.rs     |  10 +-
 testnet/stacks-node/src/tests/signer/v1.rs |  11 +-
 6 files changed, 74 insertions(+), 66 deletions(-)

diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs
index 9707a847d63..4006e0a7d14 100644
--- a/stacks-signer/src/cli.rs
+++ b/stacks-signer/src/cli.rs
@@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 use std::io::{self, Read};
-use std::net::SocketAddr;
 use std::path::PathBuf;
 
 use blockstack_lib::chainstate::stacks::address::PoxAddress;
@@ -266,9 +265,9 @@ impl TryFrom<u8> for Vote {
 pub struct MonitorSignersArgs {
     /// The Stacks node to connect to
     #[arg(long)]
-    pub host: SocketAddr,
-    /// Whether the node is mainnet. Default is true
-    #[arg(long, default_value = "true")]
+    pub host: String,
+    /// Whether the node is mainnet. Default is false.
+    #[arg(long, default_value = "false")]
     pub mainnet: bool,
     /// Set the polling interval in seconds. Default is 60 seconds.
     #[arg(long, short, default_value = "60")]
diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs
index b6e9c8a3819..a303c59bd12 100644
--- a/stacks-signer/src/client/stacks_client.rs
+++ b/stacks-signer/src/client/stacks_client.rs
@@ -1,4 +1,5 @@
 use std::collections::{HashMap, VecDeque};
+
 // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
 // Copyright (C) 2020-2024 Stacks Open Internet Foundation
 //
@@ -14,8 +15,6 @@ use std::collections::{HashMap, VecDeque};
 //
 // You should have received a copy of the GNU General Public License
 // along with this program.  If not, see <http://www.gnu.org/licenses/>.
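Patch 403 above wraps the whole monitor in a retry loop: any error is logged, the process naps, and monitoring restarts. A minimal self-contained sketch of that pattern, where the fallible `run_once` and the one-second backoff are hypothetical stand-ins for `start_monitoring_signers` and `sleep_ms(1000)`:

```rust
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for start_monitoring_signers(): any fallible operation.
fn run_once() -> Result<(), String> {
    Err("transient RPC failure".into())
}

fn main() {
    // Bounded to three attempts so the example terminates; the signer retries forever.
    for _ in 0..3 {
        match run_once() {
            Ok(()) => break,
            Err(e) => {
                eprintln!("Error occurred monitoring signers: {e:?}. Waiting and trying again.");
                thread::sleep(Duration::from_secs(1));
            }
        }
    }
}
```
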
-use std::net::SocketAddr; - use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ @@ -107,7 +106,7 @@ impl StacksClient { /// Create a new signer StacksClient with the provided private key, stacks node host endpoint, version, and auth password pub fn new( stacks_private_key: StacksPrivateKey, - node_host: SocketAddr, + node_host: String, auth_password: String, mainnet: bool, ) -> Self { diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index a6357ba760c..75893cb2a0e 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -29,7 +29,6 @@ extern crate toml; use std::collections::HashMap; use std::io::{self, Write}; -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::codec::read_next; @@ -197,9 +196,11 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { } fn handle_monitor_signers(args: MonitorSignersArgs) { + // Verify that the host is a valid URL + url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); let stacks_client = StacksClient::new( StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle - args.host, + args.host.clone(), "FOO".to_string(), // We don't care about authorized paths. Just accessing public info args.mainnet, ); @@ -238,27 +239,22 @@ fn start_monitoring_signers( let mut last_messages = HashMap::with_capacity(slot_ids.len()); let mut last_updates = HashMap::with_capacity(slot_ids.len()); - let mut session = stackerdb_session( - &args.host.to_string(), - NakamotoSigners::make_signers_db_contract_id( - reward_cycle, - MessageSlotID::BlockResponse.to_u32(), - args.mainnet, - ), - ); + let contract = MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle); + let mut session = stackerdb_session(&args.host.to_string(), contract.clone()); info!( - "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs", + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", args.interval, args.max_age ); loop { info!("Polling signers stackerdb for new messages..."); let mut missing_signers = Vec::with_capacity(slot_ids.len()); let mut stale_signers = Vec::with_capacity(slot_ids.len()); + let mut unexpected_messages = HashMap::new(); let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; if next_reward_cycle != reward_cycle { info!( - "Reward cycle has changed from {} to {}. Updating stacker db session.", + "Reward cycle has changed from {} to {}. Updating stacker db session to StackerDB contract {contract}.", reward_cycle, next_reward_cycle ); reward_cycle = next_reward_cycle; @@ -269,11 +265,7 @@ fn start_monitoring_signers( } session = stackerdb_session( &args.host.to_string(), - NakamotoSigners::make_signers_db_contract_id( - reward_cycle, - MessageSlotID::BlockResponse.to_u32(), - args.mainnet, - ), + MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle), ); // Clear the last messages and signer last update times. 
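The hunk above replaces the hand-built `NakamotoSigners::make_signers_db_contract_id` call with `MessageSlotID::BlockResponse.stacker_db_contract`, so callers no longer pass the raw slot-ID integer themselves. A sketch of the resulting session setup, assuming the `stackerdb_session` helper from `stacks_signer::client` used throughout this diff, with hypothetical host and cycle values:

```rust
use libsigner::v0::messages::MessageSlotID;
use stacks_signer::client::stackerdb_session;

fn main() {
    // Hypothetical values for illustration.
    let host = "127.0.0.1:20443";
    let mainnet = false;
    let reward_cycle: u64 = 100;

    // Resolve the .signers StackerDB contract for BlockResponse messages in
    // this reward cycle, then open a session against it.
    let contract = MessageSlotID::BlockResponse.stacker_db_contract(mainnet, reward_cycle);
    let _session = stackerdb_session(host, contract);
}
```
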
@@ -295,17 +287,14 @@ fn start_monitoring_signers( if last_message == &signer_message { continue; } - if epoch == StacksEpochId::Epoch25 - && !matches!(last_message, SignerMessage::MockSignature(_)) - { - warn!("Epoch 2.5 Signers Should be Sending MockSignature messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); - continue; - } else if epoch > StacksEpochId::Epoch25 - && !matches!(last_message, SignerMessage::BlockResponse(_)) - { - warn!("Nakamoto Signers Should be Sending BlockResponse messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); - continue; - } + } + if (epoch == StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::MockSignature(_))) + || (epoch > StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::BlockResponse(_))) + { + unexpected_messages.insert(signer_address, (signer_message, slot_id)); + continue; } last_messages.insert(slot_id, signer_message); last_updates.insert(slot_id, std::time::Instant::now()); @@ -313,12 +302,6 @@ fn start_monitoring_signers( missing_signers.push(signer_address); } } - if !missing_signers.is_empty() { - warn!( - "Missing expected messages for {} signers: {missing_signers:?}", - missing_signers.len() - ); - } for (slot_id, last_update_time) in last_updates.iter() { if last_update_time.elapsed().as_secs() > args.max_age { let address = signers_addresses @@ -327,12 +310,56 @@ fn start_monitoring_signers( stale_signers.push(*address); } } - if !stale_signers.is_empty() { - warn!( - "No new updates from {} signers in over {} seconds: {stale_signers:?}", - stale_signers.len(), - args.max_age + if missing_signers.is_empty() + && stale_signers.is_empty() + && unexpected_messages.is_empty() + && !signers_addresses.is_empty() + { + info!( + "All {} signers are sending messages as expected.", + signers_addresses.len() ); + } else { + if !missing_signers.is_empty() { + let formatted_signers = missing_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + warn!( + "Missing messages for {} of {} signer(s). 
", missing_signers.len(), signers_addresses.len(); + "signers" => formatted_signers + ); + } + if !stale_signers.is_empty() { + let formatted_signers = stale_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + warn!( + "No new updates from {} of {} signer(s) in over {} seconds", + stale_signers.len(), + signers_addresses.len(), + args.max_age; + "signers" => formatted_signers + ); + } + if !unexpected_messages.is_empty() { + let formatted_signers = unexpected_messages + .iter() + .map(|(addr, (msg, slot))| { + format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") + }) + .collect::>() + .join(", "); + warn!( + "Unexpected messages from {} of {} Epoch {epoch} signer(s).", + unexpected_messages.len(), + signers_addresses.len(); + "signers" => formatted_signers + ); + } } sleep_ms(interval_ms); } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d0c7f1d9f3f..b552e8a0a0f 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -92,7 +92,7 @@ fn setup_test_environment( let stacks_client = StacksClient::new( StacksPrivateKey::new(), - SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).into(), + SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).to_string(), "FOO".into(), false, ); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf64..5140f54597a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::{BTreeMap, HashMap, HashSet}; -use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; @@ -5176,16 +5175,9 @@ fn signer_chainstate() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted.clone()); - let socket = naka_conf - .node - .rpc_bind - .to_socket_addrs() - .unwrap() - .next() - .unwrap(); let signer_client = stacks_signer::client::StacksClient::new( StacksPrivateKey::from_seed(&[0, 1, 2, 3]), - socket, + naka_conf.node.rpc_bind.clone(), naka_conf .connection_options .auth_token diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 6e9ed71f365..4134eb7c02e 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -13,7 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use std::collections::HashSet; -use std::net::ToSocketAddrs; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -248,15 +247,7 @@ impl SignerTest { } fn generate_invalid_transactions(&self) -> Vec { - let host = self - .running_nodes - .conf - .node - .rpc_bind - .to_socket_addrs() - .unwrap() - .next() - .unwrap(); + let host = self.running_nodes.conf.node.rpc_bind.clone(); // Get the signer indices let reward_cycle = self.get_current_reward_cycle(); From cb9946e02a5ee977c2198a3d3e5955052a432c82 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 09:37:00 -0400 Subject: [PATCH 405/910] Add additional log messages to know the total reward set Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 75893cb2a0e..0fd5543bb48 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -245,6 +245,9 @@ fn start_monitoring_signers( "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", args.interval, args.max_age ); + info!("Confirming messages for {} registered signers", signers_addresses.len(); + "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); loop { info!("Polling signers stackerdb for new messages..."); let mut missing_signers = Vec::with_capacity(slot_ids.len()); @@ -260,9 +263,13 @@ fn start_monitoring_signers( reward_cycle = next_reward_cycle; signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; slot_ids = signers_slots.values().map(|value| value.0).collect(); + signers_addresses.clear(); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); } + info!("Confirming messages for {} registered signers", signers_addresses.len(); + "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); session = stackerdb_session( &args.host.to_string(), MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle), @@ -328,7 +335,7 @@ fn start_monitoring_signers( .join(", "); warn!( "Missing messages for {} of {} signer(s). 
", missing_signers.len(), signers_addresses.len(); - "signers" => formatted_signers + "signer_addresses" => formatted_signers ); } if !stale_signers.is_empty() { @@ -342,7 +349,7 @@ fn start_monitoring_signers( stale_signers.len(), signers_addresses.len(), args.max_age; - "signers" => formatted_signers + "signer_addresses" => formatted_signers ); } if !unexpected_messages.is_empty() { @@ -357,7 +364,7 @@ fn start_monitoring_signers( "Unexpected messages from {} of {} Epoch {epoch} signer(s).", unexpected_messages.len(), signers_addresses.len(); - "signers" => formatted_signers + "signer_addresses" => formatted_signers ); } } @@ -401,7 +408,9 @@ fn main() { Command::VerifyVote(args) => { handle_verify_vote(args, true); } - Command::MonitorSigners(args) => handle_monitor_signers(args), + Command::MonitorSigners(args) => { + handle_monitor_signers(args); + } } } From edaf99dba8af259150b531f454421fc9d7e25575 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 10:17:04 -0400 Subject: [PATCH 406/910] Change max_age default to 20 minutes Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 4006e0a7d14..37e9218a9d3 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -272,8 +272,8 @@ pub struct MonitorSignersArgs { /// Set the polling interval in seconds. Default is 60 seconds. #[arg(long, short, default_value = "60")] pub interval: u64, - /// Max age in seconds before a signer message is considered stale. Default is 600 seconds. - #[arg(long, short, default_value = "600")] + /// Max age in seconds before a signer message is considered stale. Default is 1200 seconds. + #[arg(long, short, default_value = "1200")] pub max_age: u64, } From 7be5a9147b2061c417d0729b62276323524ef243 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 11:49:46 -0400 Subject: [PATCH 407/910] Print the public keys of the signer addresses as well Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 68 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 63 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 0fd5543bb48..18b9e4277c9 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -32,7 +32,7 @@ use std::io::{self, Write}; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::codec::read_next; -use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use clarity::types::StacksEpochId; use clarity::util::sleep_ms; use clarity::vm::types::QualifiedContractIdentifier; @@ -229,6 +229,16 @@ fn start_monitoring_signers( } let mut reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; let mut signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; + let entries = stacks_client + .get_reward_set_signers(reward_cycle)? 
+ .unwrap_or_else(|| panic!("No signers found for the current reward cycle {reward_cycle}")); + let mut signers_keys = HashMap::with_capacity(entries.len()); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); + signers_keys.insert(stacks_address, public_key); + } let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); @@ -256,6 +266,8 @@ fn start_monitoring_signers( let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; if next_reward_cycle != reward_cycle { + signers_addresses.clear(); + signers_keys.clear(); info!( "Reward cycle has changed from {} to {}. Updating stacker db session to StackerDB contract {contract}.", reward_cycle, next_reward_cycle @@ -263,10 +275,20 @@ fn start_monitoring_signers( reward_cycle = next_reward_cycle; signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; slot_ids = signers_slots.values().map(|value| value.0).collect(); - signers_addresses.clear(); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); } + let entries = stacks_client + .get_reward_set_signers(reward_cycle)? + .unwrap_or_else(|| { + panic!("No signers found for the current reward cycle {reward_cycle}") + }); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); + signers_keys.insert(stacks_address, public_key); + } info!("Confirming messages for {} registered signers", signers_addresses.len(); "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") ); @@ -333,9 +355,21 @@ fn start_monitoring_signers( .map(|addr| format!("{addr}")) .collect::>() .join(", "); + let formatted_keys = signers_keys + .iter() + .filter_map(|(addr, key)| { + if missing_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); warn!( "Missing messages for {} of {} signer(s). 
", missing_signers.len(), signers_addresses.len(); - "signer_addresses" => formatted_signers + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys ); } if !stale_signers.is_empty() { @@ -344,12 +378,24 @@ fn start_monitoring_signers( .map(|addr| format!("{addr}")) .collect::>() .join(", "); + let formatted_keys = signers_keys + .iter() + .filter_map(|(addr, key)| { + if stale_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); warn!( "No new updates from {} of {} signer(s) in over {} seconds", stale_signers.len(), signers_addresses.len(), args.max_age; - "signer_addresses" => formatted_signers + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys ); } if !unexpected_messages.is_empty() { @@ -360,11 +406,23 @@ fn start_monitoring_signers( }) .collect::>() .join(", "); + let formatted_keys = signers_keys + .iter() + .filter_map(|(addr, key)| { + if unexpected_messages.contains_key(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); warn!( "Unexpected messages from {} of {} Epoch {epoch} signer(s).", unexpected_messages.len(), signers_addresses.len(); - "signer_addresses" => formatted_signers + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys ); } } From d37e1873e00c061cf8ab86ba00be76e88555f35f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 16:37:56 -0400 Subject: [PATCH 408/910] Cleanup signer monitor function Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 461 ++++++++++++++++++++++---------------- 1 file changed, 266 insertions(+), 195 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 18b9e4277c9..64e5c48a96e 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -48,7 +48,7 @@ use stacks_signer::cli::{ GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use stacks_signer::client::{ClientError, StacksClient}; +use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -197,16 +197,9 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { fn handle_monitor_signers(args: MonitorSignersArgs) { // Verify that the host is a valid URL - url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); - let stacks_client = StacksClient::new( - StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle - args.host.clone(), - "FOO".to_string(), // We don't care about authorized paths. Just accessing public info - args.mainnet, - ); - + let mut signer_monitor = SignerMonitor::new(args); loop { - if let Err(e) = start_monitoring_signers(&stacks_client, &args) { + if let Err(e) = signer_monitor.start() { error!( "Error occurred monitoring signers: {:?}. 
Waiting and trying again.", e @@ -216,107 +209,259 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } -fn start_monitoring_signers( - stacks_client: &StacksClient, - args: &MonitorSignersArgs, -) -> Result<(), ClientError> { - let interval_ms = args.interval * 1000; - let epoch = stacks_client.get_node_epoch()?; - if epoch < StacksEpochId::Epoch25 { - return Err(ClientError::UnsupportedStacksFeature( - "Signer monitoring is only supported for Epoch 2.5 and later".into(), - )); +struct SignerMonitor { + stacks_client: StacksClient, + cycle_state: RewardCycleState, + args: MonitorSignersArgs, +} + +#[derive(Debug, Default, Clone)] +struct RewardCycleState { + signers_slots: HashMap, + signers_keys: HashMap, + signers_addresses: HashMap, + slot_ids: Vec, + /// Reward cycle is not known until the first successful call to the node + reward_cycle: Option, +} + +impl SignerMonitor { + fn new(args: MonitorSignersArgs) -> Self { + url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), // We don't need a private key to read + args.host.clone(), + "FOO".to_string(), // We don't care about authorized paths. Just accessing public info + args.mainnet, + ); + Self { + stacks_client, + cycle_state: RewardCycleState::default(), + args, + } } - let mut reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; - let mut signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; - let entries = stacks_client - .get_reward_set_signers(reward_cycle)? - .unwrap_or_else(|| panic!("No signers found for the current reward cycle {reward_cycle}")); - let mut signers_keys = HashMap::with_capacity(entries.len()); - for entry in entries { - let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); - signers_keys.insert(stacks_address, public_key); + + fn refresh_state(&mut self) -> Result { + let reward_cycle = self + .stacks_client + .get_current_reward_cycle_info()? + .reward_cycle; + if Some(reward_cycle) == self.cycle_state.reward_cycle { + // The reward cycle has not changed. Nothing to refresh. + return Ok(false); + } + self.cycle_state.reward_cycle = Some(reward_cycle); + + self.cycle_state.signers_keys.clear(); + self.cycle_state.signers_addresses.clear(); + + self.cycle_state.signers_slots = + self.stacks_client.get_parsed_signer_slots(reward_cycle)?; + self.cycle_state.slot_ids = self + .cycle_state + .signers_slots + .values() + .map(|value| value.0) + .collect(); + + let entries = self + .stacks_client + .get_reward_set_signers(reward_cycle)? 
+ .unwrap_or_else(|| { + panic!("No signers found for the current reward cycle {reward_cycle}") + }); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); + self.cycle_state + .signers_keys + .insert(stacks_address, public_key); + } + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + } + + self.cycle_state.signers_slots = + self.stacks_client.get_parsed_signer_slots(reward_cycle)?; + + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + self.cycle_state.slot_ids.push(slot_id.0); + } + Ok(true) } - let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); - for (signer_address, slot_id) in signers_slots.iter() { - signers_addresses.insert(*slot_id, *signer_address); + + fn print_missing_signers(&self, missing_signers: &[StacksAddress]) { + if missing_signers.is_empty() { + return; + } + let formatted_signers = missing_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if missing_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "Missing messages for {} of {} signer(s). ", missing_signers.len(), self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + fn print_stale_signers(&self, stale_signers: &[StacksAddress]) { + if stale_signers.is_empty() { + return; + } + let formatted_signers = stale_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if stale_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "No new updates from {} of {} signer(s) in over {} seconds", + stale_signers.len(), + self.cycle_state.signers_addresses.len(), + self.args.max_age; + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); } - let mut slot_ids: Vec<_> = signers_slots.values().map(|value| value.0).collect(); - - // Poll stackerdb slots to check for new expected messages - let mut last_messages = HashMap::with_capacity(slot_ids.len()); - let mut last_updates = HashMap::with_capacity(slot_ids.len()); - - let contract = MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle); - let mut session = stackerdb_session(&args.host.to_string(), contract.clone()); - info!( - "Monitoring signers stackerdb. 
Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", - args.interval, args.max_age - ); - info!("Confirming messages for {} registered signers", signers_addresses.len(); - "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") - ); - loop { - info!("Polling signers stackerdb for new messages..."); - let mut missing_signers = Vec::with_capacity(slot_ids.len()); - let mut stale_signers = Vec::with_capacity(slot_ids.len()); - let mut unexpected_messages = HashMap::new(); - - let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; - if next_reward_cycle != reward_cycle { - signers_addresses.clear(); - signers_keys.clear(); - info!( - "Reward cycle has changed from {} to {}. Updating stacker db session to StackerDB contract {contract}.", - reward_cycle, next_reward_cycle - ); - reward_cycle = next_reward_cycle; - signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; - slot_ids = signers_slots.values().map(|value| value.0).collect(); - for (signer_address, slot_id) in signers_slots.iter() { - signers_addresses.insert(*slot_id, *signer_address); - } - let entries = stacks_client - .get_reward_set_signers(reward_cycle)? - .unwrap_or_else(|| { - panic!("No signers found for the current reward cycle {reward_cycle}") - }); - for entry in entries { - let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); - signers_keys.insert(stacks_address, public_key); - } - info!("Confirming messages for {} registered signers", signers_addresses.len(); - "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") - ); - session = stackerdb_session( - &args.host.to_string(), - MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle), - ); - // Clear the last messages and signer last update times. - last_messages.clear(); - last_updates.clear(); + fn print_unexpected_messages( + &self, + unexpected_messages: &HashMap, + ) { + if unexpected_messages.is_empty() { + return; } - let new_messages: Vec<_> = session - .get_latest_chunks(&slot_ids)? 
- .into_iter() - .map(|chunk_opt| { - chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + let formatted_signers = unexpected_messages + .iter() + .map(|(addr, (msg, slot))| { + format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") }) - .collect(); - for ((signer_address, slot_id), signer_message_opt) in - signers_slots.clone().into_iter().zip(new_messages) - { - if let Some(signer_message) = signer_message_opt { + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if unexpected_messages.contains_key(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "Unexpected messages from {} of {} signer(s).", + unexpected_messages.len(), + self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + /// Start monitoring the signers stackerdb slots for expected new messages + pub fn start(&mut self) -> Result<(), ClientError> { + self.refresh_state()?; + let nmb_signers = self.cycle_state.signers_keys.len(); + let interval_ms = self.args.interval * 1000; + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = + MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", + self.args.interval, self.args.max_age + ); + let mut session = stackerdb_session(&self.args.host, contract); + info!("Confirming messages for {nmb_signers} registered signers"; + "signer_addresses" => self.cycle_state.signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); + let mut last_messages = HashMap::with_capacity(nmb_signers); + let mut last_updates = HashMap::with_capacity(nmb_signers); + loop { + info!("Polling signers stackerdb for new messages..."); + let mut missing_signers = Vec::with_capacity(nmb_signers); + let mut stale_signers = Vec::with_capacity(nmb_signers); + let mut unexpected_messages = HashMap::new(); + + if self.refresh_state()? { + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = MessageSlotID::BlockResponse + .stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", + ); + session = stackerdb_session(&self.args.host, contract); + // Clear the last messages and signer last update times. + last_messages.clear(); + last_updates.clear(); + } + let new_messages: Vec<_> = session + .get_latest_chunks(&self.cycle_state.slot_ids)? + .into_iter() + .map(|chunk_opt| { + chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + }) + .collect(); + for ((signer_address, slot_id), signer_message_opt) in self + .cycle_state + .signers_slots + .clone() + .into_iter() + .zip(new_messages) + { + let Some(signer_message) = signer_message_opt else { + missing_signers.push(signer_address); + continue; + }; if let Some(last_message) = last_messages.get(&slot_id) { if last_message == &signer_message { continue; } } + let epoch = self.stacks_client.get_node_epoch()?; + if epoch < StacksEpochId::Epoch25 { + return Err(ClientError::UnsupportedStacksFeature(format!("Monitoring signers is only supported for Epoch 2.5 and later. 
Current epoch: {epoch:?}"))); + } if (epoch == StacksEpochId::Epoch25 && !matches!(signer_message, SignerMessage::MockSignature(_))) || (epoch > StacksEpochId::Epoch25 @@ -327,106 +472,32 @@ fn start_monitoring_signers( } last_messages.insert(slot_id, signer_message); last_updates.insert(slot_id, std::time::Instant::now()); - } else { - missing_signers.push(signer_address); - } - } - for (slot_id, last_update_time) in last_updates.iter() { - if last_update_time.elapsed().as_secs() > args.max_age { - let address = signers_addresses - .get(slot_id) - .expect("BUG: missing signer address for given slot id"); - stale_signers.push(*address); - } - } - if missing_signers.is_empty() - && stale_signers.is_empty() - && unexpected_messages.is_empty() - && !signers_addresses.is_empty() - { - info!( - "All {} signers are sending messages as expected.", - signers_addresses.len() - ); - } else { - if !missing_signers.is_empty() { - let formatted_signers = missing_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = signers_keys - .iter() - .filter_map(|(addr, key)| { - if missing_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "Missing messages for {} of {} signer(s). ", missing_signers.len(), signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); } - if !stale_signers.is_empty() { - let formatted_signers = stale_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = signers_keys - .iter() - .filter_map(|(addr, key)| { - if stale_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "No new updates from {} of {} signer(s) in over {} seconds", - stale_signers.len(), - signers_addresses.len(), - args.max_age; - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); + for (slot_id, last_update_time) in last_updates.iter() { + if last_update_time.elapsed().as_secs() > self.args.max_age { + let address = self + .cycle_state + .signers_addresses + .get(slot_id) + .expect("BUG: missing signer address for given slot id"); + stale_signers.push(*address); + } } - if !unexpected_messages.is_empty() { - let formatted_signers = unexpected_messages - .iter() - .map(|(addr, (msg, slot))| { - format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") - }) - .collect::>() - .join(", "); - let formatted_keys = signers_keys - .iter() - .filter_map(|(addr, key)| { - if unexpected_messages.contains_key(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "Unexpected messages from {} of {} Epoch {epoch} signer(s).", - unexpected_messages.len(), - signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys + if missing_signers.is_empty() + && stale_signers.is_empty() + && unexpected_messages.is_empty() + { + info!( + "All {} signers are sending messages as expected.", + nmb_signers ); + } else { + self.print_missing_signers(&missing_signers); + self.print_stale_signers(&stale_signers); + self.print_unexpected_messages(&unexpected_messages); } + sleep_ms(interval_ms); } - sleep_ms(interval_ms); } } From 7ca1ce1e11568c5a9776d7cc66ed19fe51e92472 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 30 Aug 2024 22:48:31 -0400 Subject: [PATCH 409/910] feat: retry check for UTXOs on startup 
Fixes #5124 --- .github/workflows/bitcoin-tests.yml | 2 + testnet/stacks-node/src/run_loop/nakamoto.rs | 35 +++-- testnet/stacks-node/src/run_loop/neon.rs | 35 +++-- .../src/tests/nakamoto_integrations.rs | 144 ++++++++++++++++++ 4 files changed, 190 insertions(+), 26 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 27e76a646d6..68cb6153ffd 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -108,6 +108,8 @@ jobs: - tests::nakamoto_integrations::continue_tenure_extend - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners + - tests::nakamoto_integrations::utxo_check_on_startup_panic + - tests::nakamoto_integrations::utxo_check_on_startup_recover # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 44a6c0fba90..8b206a66abc 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -155,6 +155,11 @@ impl RunLoop { self.miner_status.clone() } + /// Seconds to wait before retrying UTXO check during startup + const UTXO_RETRY_INTERVAL: u64 = 10; + /// Number of times to retry UTXO check during startup + const UTXO_RETRY_COUNT: u64 = 6; + /// Determine if we're the miner. /// If there's a network error, then assume that we're not a miner. fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { @@ -187,22 +192,26 @@ impl RunLoop { )); } - for (epoch_id, btc_addr) in btc_addrs.into_iter() { - info!("Miner node: checking UTXOs at address: {}", &btc_addr); - let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); - if utxos.is_none() { - warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); - } else { - info!("UTXOs found - will run as a Miner node"); + // retry UTXO check a few times, in case bitcoind is still starting up + for _ in 0..Self::UTXO_RETRY_COUNT { + for (epoch_id, btc_addr) in &btc_addrs { + info!("Miner node: checking UTXOs at address: {btc_addr}"); + let utxos = + burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + warn!("UTXOs not found for {btc_addr}. 
If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)"); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.get_node_config(false).mock_mining { + info!("No UTXOs found, but configured to mock mine"); return true; } + thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL)); } - if self.config.get_node_config(false).mock_mining { - info!("No UTXOs found, but configured to mock mine"); - return true; - } else { - return false; - } + panic!("No UTXOs found, exiting"); } else { info!("Will run as a Follower node"); false diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5d5ff3653d4..36777c4912b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -358,6 +358,11 @@ impl RunLoop { } } + /// Seconds to wait before retrying UTXO check during startup + const UTXO_RETRY_INTERVAL: u64 = 10; + /// Number of times to retry UTXO check during startup + const UTXO_RETRY_COUNT: u64 = 6; + /// Determine if we're the miner. /// If there's a network error, then assume that we're not a miner. fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { @@ -390,22 +395,26 @@ impl RunLoop { )); } - for (epoch_id, btc_addr) in btc_addrs.into_iter() { - info!("Miner node: checking UTXOs at address: {}", &btc_addr); - let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); - if utxos.is_none() { - warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); - } else { - info!("UTXOs found - will run as a Miner node"); + // retry UTXO check a few times, in case bitcoind is still starting up + for _ in 0..Self::UTXO_RETRY_COUNT { + for (epoch_id, btc_addr) in &btc_addrs { + info!("Miner node: checking UTXOs at address: {btc_addr}"); + let utxos = + burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)"); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.get_node_config(false).mock_mining { + info!("No UTXOs found, but configured to mock mine"); return true; } + thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL)); } - if self.config.get_node_config(false).mock_mining { - info!("No UTXOs found, but configured to mock mine"); - return true; - } else { - return false; - } + panic!("No UTXOs found, exiting"); } else { info!("Will run as a Follower node"); false diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf64..0eaca052457 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7552,3 +7552,147 @@ fn mock_mining() { run_loop_thread.join().unwrap(); follower_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test checks for the proper handling of the case where UTXOs are not +/// available on startup. After 1 minute, the miner thread should panic. 
+fn utxo_check_on_startup_panic() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + println!("Nakamoto node started with config: {:?}", naka_conf); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); + let (last, rest) = epochs.split_last_mut().unwrap(); + for (index, epoch) in rest.iter_mut().enumerate() { + epoch.start_height = index as u64; + epoch.end_height = (index + 1) as u64; + } + last.start_height = 131; + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + // Do not fully bootstrap the chain, so that the UTXOs are not yet available + btc_regtest_controller.bootstrap_chain(99); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + + let timeout = Duration::from_secs(70); + let start_time = Instant::now(); + + loop { + // Check if the thread has panicked + if run_loop_thread.is_finished() { + match run_loop_thread.join() { + Ok(_) => { + // Thread completed without panicking + panic!("Miner should have panicked but it exited cleanly."); + } + Err(_) => { + // Thread panicked + info!("Thread has panicked!"); + break; + } + } + } + + // Check if 70 seconds have passed + assert!( + start_time.elapsed() < timeout, + "Miner should have panicked." + ); + + thread::sleep(Duration::from_millis(1000)); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); +} + +#[test] +#[ignore] +/// This test checks for the proper handling of the case where UTXOs are not +/// available on startup, but become available later, before the 1 minute +/// timeout. The miner thread should recover and continue mining. 
+fn utxo_check_on_startup_recover() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + println!("Nakamoto node started with config: {:?}", naka_conf); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); + let (last, rest) = epochs.split_last_mut().unwrap(); + for (index, epoch) in rest.iter_mut().enumerate() { + epoch.start_height = index as u64; + epoch.end_height = (index + 1) as u64; + } + last.start_height = 131; + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + // Do not fully bootstrap the chain, so that the UTXOs are not yet available + btc_regtest_controller.bootstrap_chain(99); + // btc_regtest_controller.bootstrap_chain(108); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + + // Sleep for 30s to allow the miner to start and reach the UTXO check loop + thread::sleep(Duration::from_secs(30)); + + btc_regtest_controller.bootstrap_chain(3); + + wait_for_runloop(&blocks_processed); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_thread.join().unwrap(); +} From ab46d76d4b29cd2066516e75b2b672d6d748799f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 31 Aug 2024 23:02:12 -0400 Subject: [PATCH 410/910] fix: add pause to ensure block is accepted --- .../stacks-node/src/tests/nakamoto_integrations.rs | 7 ------- testnet/stacks-node/src/tests/signer/v0.rs | 13 ++++--------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 680fa42843d..fb3e9076770 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -740,7 +740,6 @@ pub fn next_block_and_wait_for_commits( .as_ref() .ok_or("TEST-ERROR: Processed time wasn't set")?; if commits_sent <= commits_before[i] { - info!("NO COMMITS"); return Ok(false); } let commit_sent_time = commit_sent_time[i] @@ -748,28 +747,22 @@ pub fn next_block_and_wait_for_commits( .ok_or("TEST-ERROR: Processed time wasn't set")?; // try to ensure the commit was sent after the block was processed if commit_sent_time > block_processed_time { - info!("COMMIT NOT SENT AFTER BLOCK PROCESSED TIME"); continue; } // if two commits have been sent, one of them must have been after if commits_sent >= commits_before[i] + 2 { - info!("MORE THAN ENOUGH COMMITS"); continue; } // otherwise, just timeout if the commit was sent and its been long enough // for a new commit pass to 
have occurred if block_processed_time.elapsed() > Duration::from_secs(10) { - info!("TIMEOUT COMMIT"); continue; } - info!("CONDITIONS OF COMMIT CHECK NOT MET"); return Ok(false); } else { - info!("NO BLOCK PROCESSED IN COMMIT CHECK"); return Ok(false); } } - info!("ALL CONDITIONS MET IN COMMIT CHECK"); Ok(true) }) } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1ab18b93e12..e17f8e98355 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3330,8 +3330,8 @@ fn partial_tenure_fork() { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_before = blocks_proposed.load(Ordering::SeqCst); - info!("proposed_blocks: {proposed_before}, proposed_blocks2: {proposed_before_2}"); + + sleep_ms(1000); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3339,10 +3339,6 @@ fn partial_tenure_fork() { let mined_1 = blocks_mined1.load(Ordering::SeqCst); let mined_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_1 = blocks_proposed.load(Ordering::SeqCst); - info!( - "Fork initiated: {fork_initiated}, Mined 1 blocks: {mined_1}, Mined 2 blocks {mined_2}, Proposed blocks: {proposed_1}, Proposed blocks 2: {proposed_2}", - ); Ok((fork_initiated && proposed_2 > proposed_before_2) || mined_1 > mined_before_1 @@ -3431,8 +3427,8 @@ fn partial_tenure_fork() { } else { if miner_2_tenures < min_miner_2_tenures { assert_eq!(mined_2, mined_before_2 + inter_blocks_per_tenure + 1); - } else if miner_2_tenures == min_miner_2_tenures { - // If this is the forking tenure, miner 2 should have mined 0 blocks + } else { + // Miner 2 should have mined 0 blocks after the fork assert_eq!(mined_2, mined_before_2); } } @@ -3447,7 +3443,6 @@ fn partial_tenure_fork() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_2_height, ignore_block - 1); assert_eq!( peer_1_height, From 4e583e9dd99fe2684eb49f0748d85d9b211ec1cc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 1 Sep 2024 12:20:11 -0400 Subject: [PATCH 411/910] test: fix assertion and remove useless code --- testnet/stacks-node/src/tests/signer/v0.rs | 23 ++++++---------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e17f8e98355..2aa09238c54 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3444,10 +3444,12 @@ fn partial_tenure_fork() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; assert_eq!(peer_2_height, ignore_block - 1); - assert_eq!( - peer_1_height, - pre_nakamoto_peer_1_height - + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) + // The height may be higher than expected due to extra transactions waiting + // to be mined during the forking miner's tenure. 
+ assert!( + peer_1_height + >= pre_nakamoto_peer_1_height + + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) ); assert_eq!( btc_blocks_mined, @@ -3473,18 +3475,5 @@ fn partial_tenure_fork() { .unwrap(); assert_eq!(tip.stacks_block_height, ignore_block - 1); - let (chainstate, _) = StacksChainState::open( - false, - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - - let blocks = chainstate - .get_stacks_chain_tips_at_height(ignore_block) - .unwrap(); - info!("blocks: {:?}", blocks); - signer_test.shutdown(); } From fd189d57e723044e7c550c28969354884a56bfff Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 1 Sep 2024 12:47:49 -0400 Subject: [PATCH 412/910] chore: cleanup unused code --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2aa09238c54..7b35279863c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -37,7 +37,7 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; -use stacks::net::relay::fault_injection::{clear_ignore_block, set_ignore_block}; +use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; @@ -3263,7 +3263,6 @@ fn partial_tenure_fork() { &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_proposed = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); From 32cc4a48e0f66a94d633d98ea65c7748246255c2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 1 Sep 2024 16:23:47 -0400 Subject: [PATCH 413/910] test: adjust bitcoin block count to account for setup --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7b35279863c..235186e2dbb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3085,7 +3085,7 @@ fn multiple_miners_with_nakamoto_blocks() { let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); - let mut btc_blocks_mined = 0; + let mut btc_blocks_mined = 1; let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { From 568722e8138fac74bf31e99d005e96e8339d47a2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Sep 2024 10:22:56 -0400 Subject: [PATCH 414/910] fix: resolve errors after merge --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4657d00e525..a1398f7b2c5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3139,7 +3139,8 @@ fn 
multiple_miners_with_nakamoto_blocks() { false }) }, - &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3373,7 +3374,8 @@ fn partial_tenure_fork() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); }, - &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); From 48e23b94216c05259e1e2dd9000bed4617852e12 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Sep 2024 21:47:37 -0400 Subject: [PATCH 415/910] test: fix remaining errors in tests --- testnet/stacks-node/src/tests/signer/v0.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a1398f7b2c5..2ca6a12a8ec 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -278,13 +278,14 @@ impl SignerTest { self.run_until_epoch_3_boundary(); - let commits_submitted = self.running_nodes.commits_submitted.clone(); - let commits_before = commits_submitted.load(Ordering::SeqCst); - info!("Waiting 1 burnchain block for miner VRF key confirmation"); - // Wait one block to confirm the VRF register, wait until a block commit is submitted + // Wait until we see the first block of epoch 3.0. + // Note, we don't use `nakamoto_blocks_mined` counter, because there + // could be other miners mining blocks. + let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; + info!("Waiting for first Nakamoto block: {}", height_before + 1); next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) + let height = get_chain_info(&self.running_nodes.conf).stacks_tip_height; + Ok(height > height_before) }) .unwrap(); info!("Ready to mine Nakamoto blocks!"); @@ -3202,6 +3203,7 @@ fn multiple_miners_with_nakamoto_blocks() { let mut btc_blocks_mined = 1; let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; + let mut sender_nonce = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); @@ -3234,9 +3236,9 @@ fn multiple_miners_with_nakamoto_blocks() { let blocks_processed_before = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); // submit a tx so that the miner will mine an extra block - let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); wait_for(60, || { @@ -3300,7 +3302,7 @@ fn multiple_miners_with_nakamoto_blocks() { assert_eq!(peer_1_height, peer_2_height); assert_eq!( peer_1_height, - pre_nakamoto_peer_1_height + btc_blocks_mined * (inter_blocks_per_tenure + 1) + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); assert_eq!( btc_blocks_mined, From 05174209f9d99d4844237cacfaf094f58d795ab1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:39:04 -0400 
Subject: [PATCH 416/910] feat: add has_consensus_hash() function to sortition handle --- stackslib/src/chainstate/burn/db/sortdb.rs | 62 ++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 808cb73c1f0..942e6774bde 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2620,6 +2620,27 @@ impl<'a> SortitionHandleConn<'a> { serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); Ok(pox_addrs) } + + /// Is a consensus hash's sortition valid on the fork represented by this handle? + /// Return Ok(true) if so + /// Return Ok(false) if not (including if there is no sortition with this consensus hash) + /// Return Err(..) on DB error + pub fn has_consensus_hash(&self, consensus_hash: &ConsensusHash) -> Result { + let Some(sn) = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? else { + // no sortition with this consensus hash + return Ok(false); + }; + + let Some(expected_sortition_id) = + get_ancestor_sort_id(self, sn.block_height, &self.context.chain_tip)? + else { + // no ancestor at this sortition height relative to this chain tip + // (e.g. perhaps this consensus hash is in the "future" relative to this chain tip) + return Ok(false); + }; + + Ok(sn.sortition_id == expected_sortition_id) + } } // Connection methods @@ -10948,4 +10969,45 @@ pub mod tests { SORTITION_DB_VERSION )); } + + #[test] + fn test_has_consensus_hash() { + let first_burn_hash = BurnchainHeaderHash::from_hex( + "10000000000000000000000000000000000000000000000000000000000000ff", + ) + .unwrap(); + let mut db = SortitionDB::connect_test(0, &first_burn_hash).unwrap(); + + let last_snapshot = SortitionDB::get_first_block_snapshot(db.conn()).unwrap(); + + // fork 1: 0 <-- 1 <-- 2 <-- 3 <-- 4 + // \ + // fork 2: *---- 5 <-- 6 + + let all_snapshots = make_fork_run(&mut db, &last_snapshot, 5, 0); + let fork_snapshots = make_fork_run(&mut db, &all_snapshots[3], 2, 0x80); + + let tip = &all_snapshots[4]; + let tip_2 = &fork_snapshots[1]; + assert_ne!(tip, tip_2); + + let ih = db.index_handle(&tip.sortition_id); + for sn in all_snapshots.iter() { + assert!(ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + for sn in fork_snapshots.iter() { + assert!(!ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + + let ih = db.index_handle(&tip_2.sortition_id); + for sn in fork_snapshots.iter() { + assert!(ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + for sn in all_snapshots[0..4].iter() { + assert!(ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + assert!(!ih + .has_consensus_hash(&all_snapshots[4].consensus_hash) + .unwrap()); + } } From 44c820bd8c175753147cd84873d923fcd73510ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:39:20 -0400 Subject: [PATCH 417/910] feat: add a way to query the highest known block in a given tenure on any Stacks fork --- stackslib/src/chainstate/nakamoto/mod.rs | 25 +++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b007a3e7d67..78baa2578bc 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2615,7 +2615,7 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } - /// Get the highest block in the given tenure. 
+ /// Get the highest block in the given tenure on a given fork. /// Only works on Nakamoto blocks. /// TODO: unit test pub fn get_highest_block_header_in_tenure( @@ -2631,6 +2631,29 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } + /// Get the highest block in a given tenure (identified by its consensus hash). + /// Ties will be broken by timestamp. + /// + /// Used to verify that a signer-submitted block proposal builds atop the highest known block + /// in the given tenure, regardless of which fork it's on. + /// + /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same + /// tenure. + pub fn get_highest_known_block_header_in_tenure( + db: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + // see if we have a nakamoto block in this tenure + let qry = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC, timestamp DESC LIMIT 1"; + let args = params![consensus_hash]; + if let Some(header) = query_row(db, qry, args)? { + return Ok(Some(header)); + } + + // see if this is an epoch2 header. If it exists, then there will only be one. + Ok(StacksChainState::get_stacks_block_header_info_by_consensus_hash(db, consensus_hash)?) + } + /// Get the VRF proof for a Stacks block. /// For Nakamoto blocks, this is the VRF proof contained in the coinbase of the tenure-start /// block of the given tenure identified by the consensus hash. From 22c42ab075f6cad2645f5c1a7f401f1848d68d4b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:39:44 -0400 Subject: [PATCH 418/910] feat: add additional checks to the postblock endpoint to verify that the proposed block builds on a canonical tenure, and it builds on the highest known block in that tenure --- stackslib/src/net/api/postblock_proposal.rs | 146 +++++++++++++++++++- 1 file changed, 145 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6112ea0fae0..9c5ab712c3a 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -74,7 +74,9 @@ define_u8_enum![ValidateRejectCode { BadTransaction = 1, InvalidBlock = 2, ChainstateError = 3, - UnknownParent = 4 + UnknownParent = 4, + NonCanonicalTenure = 5, + NoSuchTenure = 6 }]; impl TryFrom for ValidateRejectCode { @@ -194,6 +196,136 @@ impl NakamotoBlockProposal { }) } + /// Check to see if a block builds atop the highest block in a given tenure. + /// That is: + /// - its parent must exist, and + /// - its parent must be as high as the highest block in the given tenure. + pub(crate) fn check_block_builds_on_highest_block_in_tenure( + chainstate: &StacksChainState, + tenure_id: &ConsensusHash, + parent_block_id: &StacksBlockId, + ) -> Result<(), BlockValidateRejectReason> { + let Some(highest_header) = NakamotoChainState::get_highest_known_block_header_in_tenure( + chainstate.db(), + tenure_id, + ) + .map_err(|e| BlockValidateRejectReason { + reason_code: ValidateRejectCode::ChainstateError, + reason: format!("Failed to query highest block in tenure ID: {:?}", &e), + })? 
+ else { + warn!( + "Rejected block proposal"; + "reason" => "Block is not a tenure-start block, and has an unrecognized tenure consensus hash", + "consensus_hash" => %tenure_id, + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::NoSuchTenure, + reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".into(), + }); + }; + let Some(parent_header) = + NakamotoChainState::get_block_header(chainstate.db(), parent_block_id).map_err( + |e| BlockValidateRejectReason { + reason_code: ValidateRejectCode::ChainstateError, + reason: format!("Failed to query block header by block ID: {:?}", &e), + }, + )? + else { + warn!( + "Rejected block proposal"; + "reason" => "Block has no parent", + "parent_block_id" => %parent_block_id + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::UnknownParent, + reason: "Block has no parent".into(), + }); + }; + if parent_header.anchored_header.height() != highest_header.anchored_header.height() { + warn!( + "Rejected block proposal"; + "reason" => "Block's parent is not the highest block in this tenure", + "consensus_hash" => %tenure_id, + "parent_header.height" => parent_header.anchored_header.height(), + "highest_header.height" => highest_header.anchored_header.height(), + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block is not higher than the highest block in its tenure".into(), + }); + } + Ok(()) + } + + /// Verify that the block we received builds upon a valid tenure. + /// Implemented as a static function to facilitate testing. + pub(crate) fn check_block_has_valid_tenure( + db_handle: &SortitionHandleConn, + tenure_id: &ConsensusHash, + ) -> Result<(), BlockValidateRejectReason> { + // Verify that the block's tenure is on the canonical sortition history + if !db_handle.has_consensus_hash(tenure_id)? { + warn!( + "Rejected block proposal"; + "reason" => "Block's tenure consensus hash is not on the canonical Bitcoin fork", + "consensus_hash" => %tenure_id, + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::NonCanonicalTenure, + reason: "Tenure consensus hash is not on the canonical Bitcoin fork".into(), + }); + } + Ok(()) + } + + /// Verify that the block we received builds on the highest block in its tenure. + /// * For tenure-start blocks, the parent must be as high as the highest block in the parent + /// block's tenure. + /// * For all other blocks, the parent must be as high as the highest block in the tenure. + /// + /// Implemented as a static function to facilitate testing + pub(crate) fn check_block_has_valid_parent( + chainstate: &StacksChainState, + block: &NakamotoBlock, + ) -> Result<(), BlockValidateRejectReason> { + let is_tenure_start = + block + .is_wellformed_tenure_start_block() + .map_err(|_| BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block is not well-formed".into(), + })?; + + if !is_tenure_start { + // this is a well-formed block that is not the start of a tenure, so it must build + // atop an existing block in its tenure. + Self::check_block_builds_on_highest_block_in_tenure( + chainstate, + &block.header.consensus_hash, + &block.header.parent_block_id, + )?; + } else { + // this is a tenure-start block, so it must build atop a parent which has the + // highest height in the *previous* tenure. + let parent_header = NakamotoChainState::get_block_header( + chainstate.db(), + &block.header.parent_block_id, + )? 
+ .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::UnknownParent, + reason: "No parent block".into(), + })?; + + Self::check_block_builds_on_highest_block_in_tenure( + chainstate, + &parent_header.consensus_hash, + &block.header.parent_block_id, + )?; + } + Ok(()) + } + /// Test this block proposal against the current chain state and /// either accept or reject the proposal /// @@ -232,6 +364,18 @@ impl NakamotoBlockProposal { let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); let mut db_handle = sortdb.index_handle(&sort_tip); + + // (For the signer) + // Verify that the block's tenure is on the canonical sortition history + Self::check_block_has_valid_tenure(&db_handle, &self.block.header.consensus_hash)?; + + // (For the signer) + // Verify that this block's parent is the highest such block we can build off of + Self::check_block_has_valid_parent(chainstate, &self.block)?; + + // get the burnchain tokens spent for this block. There must be a record of this (i.e. + // there must be a block-commit for this), or otherwise this block doesn't correspond to + // any burnchain chainstate. let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; if expected_burn_opt.is_none() { From cd8d8e841686f3e4ea49321b20f454988145c955 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:40:42 -0400 Subject: [PATCH 419/910] fix: get postblock proposal test to pass --- .../src/net/api/tests/postblock_proposal.rs | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 391afc949f1..4f553efd21e 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -229,16 +229,21 @@ fn test_try_make_response() { let tip = SortitionDB::get_canonical_burn_chain_tip(&rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( + rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); let mut block = { let chainstate = rpc_test.peer_1.chainstate(); - let parent_stacks_header = NakamotoChainState::get_block_header( - chainstate.db(), - &tip.get_canonical_stacks_block_id(), - ) - .unwrap() - .unwrap(); + let parent_stacks_header = + NakamotoChainState::get_block_header(chainstate.db(), &stacks_tip) + .unwrap() + .unwrap(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -262,7 +267,7 @@ fn test_try_make_response() { let addr = auth.origin().address_testnet(); let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); tx.chain_id = 0x80000000; - tx.auth.set_origin_nonce(34); + tx.auth.set_origin_nonce(36); tx.set_post_condition_mode(TransactionPostConditionMode::Allow); tx.set_tx_fee(300); let mut tx_signer = StacksTransactionSigner::new(&tx); @@ -271,8 +276,8 @@ fn test_try_make_response() { let mut builder = NakamotoBlockBuilder::new( &parent_stacks_header, - 
&tip.consensus_hash, - 25000, + &parent_stacks_header.consensus_hash, + 26000, None, None, 8, From 23a4d2ec482e5168663695080689dbcba913ef39 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:41:07 -0400 Subject: [PATCH 420/910] chore: remove wait_on_signers timeout option, and remove the integration test for verifying that it works --- testnet/stacks-node/src/config.rs | 9 -- testnet/stacks-node/src/tests/signer/mod.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 108 +------------------- testnet/stacks-node/src/tests/signer/v1.rs | 14 +-- 4 files changed, 11 insertions(+), 130 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e4751a10108..f1c37750566 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2363,8 +2363,6 @@ pub struct MinerConfig { /// When selecting the "nicest" tip, do not consider tips that are more than this many blocks /// behind the highest tip. pub max_reorg_depth: u64, - /// Amount of time while mining in nakamoto to wait for signers to respond to a proposed block - pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined @@ -2398,8 +2396,6 @@ impl Default for MinerConfig { txs_to_consider: MemPoolWalkTxTypes::all(), filter_origins: HashSet::new(), max_reorg_depth: 3, - // TODO: update to a sane value based on stackerdb benchmarking - wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, } @@ -2750,7 +2746,6 @@ pub struct MinerConfigFile { pub txs_to_consider: Option, pub filter_origins: Option, pub max_reorg_depth: Option, - pub wait_on_signers_ms: Option, pub pre_nakamoto_mock_signing: Option, pub min_time_between_blocks_ms: Option, } @@ -2857,10 +2852,6 @@ impl MinerConfigFile { max_reorg_depth: self .max_reorg_depth .unwrap_or(miner_default_config.max_reorg_depth), - wait_on_signers: self - .wait_on_signers_ms - .map(Duration::from_millis) - .unwrap_or(miner_default_config.wait_on_signers), pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 42407a1a76b..0b38a792346 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -114,15 +114,10 @@ pub struct SignerTest { } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { - fn new( - num_signers: usize, - initial_balances: Vec<(StacksAddress, u64)>, - wait_on_signers: Option, - ) -> Self { + fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { Self::new_with_config_modifications( num_signers, initial_balances, - wait_on_signers, |_| {}, |_| {}, None, @@ -136,7 +131,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, - wait_on_signers: Option, mut signer_config_modifier: F, mut node_config_modifier: G, btc_miner_pubkeys: Option>, @@ -167,8 +161,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> 
SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); @@ -546,7 +546,7 @@ fn miner_gather_signatures() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); @@ -609,7 +609,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(200); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -793,7 +793,6 @@ fn reloads_signer_set_in() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |_config| {}, |_| {}, None, @@ -917,7 +916,6 @@ fn forked_tenure_testing( let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; @@ -1229,7 +1227,6 @@ fn bitcoind_forking_test() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -1395,7 +1392,6 @@ fn multiple_miners() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1657,7 +1653,6 @@ fn miner_forking() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1914,7 +1909,6 @@ fn end_of_tenure() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(500)), ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); @@ -2060,96 +2054,6 @@ fn end_of_tenure() { signer_test.shutdown(); } -#[test] -#[ignore] -/// This test checks that the miner will retry when signature collection times out. 
-fn retry_on_timeout() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), - ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - - signer_test.boot_to_epoch_3(); - - signer_test.mine_nakamoto_block(Duration::from_secs(30)); - - // Stall block validation so the signers will not be able to sign. - TEST_VALIDATE_STALL.lock().unwrap().replace(true); - - let proposals_before = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - - // submit a tx so that the miner will mine a block - let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - submit_tx(&http_origin, &transfer_tx); - - info!("Submitted transfer tx and waiting for block proposal"); - loop { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - if blocks_proposed > proposals_before { - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - - info!("Block proposed, verifying that it is not processed"); - - // Wait 10 seconds to be sure that the timeout has occurred - std::thread::sleep(Duration::from_secs(10)); - assert_eq!( - signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst), - blocks_before - ); - - // Disable the stall and wait for the block to be processed on retry - info!("Disable the stall and wait for the block to be processed"); - TEST_VALIDATE_STALL.lock().unwrap().replace(false); - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - if blocks_mined > blocks_before { - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - - signer_test.shutdown(); -} - #[test] #[ignore] /// This test checks that the signers will broadcast a block once they receive enough signatures. 
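The deleted `retry_on_timeout` test polled atomic counters such as `nakamoto_blocks_proposed` and `nakamoto_blocks_mined` in hand-rolled `loop`/`sleep` blocks, while the surviving tests in this file lean on a `wait_for(timeout, closure)` helper for the same job. The sketch below is a minimal, self-contained version of that polling idiom; only the helper's (timeout-in-seconds, fallible closure) shape is taken from the calls visible in these diffs, and the body shown here is an assumption, not the repository's implementation.

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

/// Poll `check` every 100 ms until it returns Ok(true) or `timeout_secs` elapses.
/// Assumed shape only: the real helper lives in the stacks-node test utilities.
fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Err("timed out waiting for condition".into())
}

fn main() {
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    // Stand-in for a shared counter like `nakamoto_blocks_mined`.
    let blocks_mined = Arc::new(AtomicU64::new(0));
    let before = blocks_mined.load(Ordering::SeqCst);

    // Simulate a miner bumping the counter from another thread.
    let writer = Arc::clone(&blocks_mined);
    std::thread::spawn(move || {
        sleep(Duration::from_millis(300));
        writer.fetch_add(1, Ordering::SeqCst);
    });

    wait_for(5, || Ok(blocks_mined.load(Ordering::SeqCst) > before))
        .expect("counter never advanced");
}
```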
@@ -2173,7 +2077,6 @@ fn signers_broadcast_signed_blocks() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -2272,7 +2175,6 @@ fn empty_sortition() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2446,7 +2348,6 @@ fn mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), |_| {}, |node_config| { node_config.miner.pre_nakamoto_mock_signing = true; @@ -2644,7 +2545,6 @@ fn signer_set_rollover() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, initial_balances, - None, |_| {}, |naka_conf| { for toml in new_signer_configs.clone() { @@ -2875,7 +2775,6 @@ fn min_gap_between_blocks() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -2994,7 +2893,6 @@ fn duplicate_signers() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], - None, |_| {}, |_| {}, None, diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 6e9ed71f365..3d6c9342086 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -486,7 +486,7 @@ fn dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10, vec![], None); + let mut signer_test = SignerTest::new(10, vec![]); info!("Boot to epoch 3.0 reward calculation..."); boot_to_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -596,7 +596,7 @@ fn sign_request_rejected() { block2.header.tx_merkle_root = tx_merkle_root2; let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(10, vec![]); let _key = signer_test.boot_to_epoch_3(timeout); info!("------------------------- Test Sign -------------------------"); @@ -691,7 +691,7 @@ fn delayed_dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers, vec![], None); + let mut signer_test = SignerTest::new(num_signers, vec![]); boot_to_epoch_3_reward_set_calculation_boundary( &signer_test.running_nodes.conf, &signer_test.running_nodes.blocks_processed, @@ -884,7 +884,7 @@ fn block_proposal() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); let short_timeout = Duration::from_secs(30); @@ -945,7 +945,7 @@ fn mine_2_nakamoto_reward_cycles() { 
info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(5, vec![]); let timeout = Duration::from_secs(200); let first_dkg = signer_test.boot_to_epoch_3(timeout); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -1020,7 +1020,7 @@ fn filter_bad_transactions() { info!("------------------------- Test Setup -------------------------"); // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(5, vec![]); let timeout = Duration::from_secs(200); let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); let next_signers_dkg = signer_test @@ -1108,7 +1108,7 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Setup -------------------------"); let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(30); From 25e14a83d5b4a2245e429cff35f7894c843da229 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:41:33 -0400 Subject: [PATCH 421/910] fix: the miner never times out waiting for signers --- .../src/nakamoto_node/sign_coordinator.rs | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6810afbb6b9..d2c4f2b3900 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -15,7 +15,7 @@ use std::collections::BTreeMap; use std::sync::mpsc::Receiver; -use std::time::{Duration, Instant}; +use std::time::Duration; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; @@ -73,7 +73,6 @@ pub struct SignCoordinator { wsts_public_keys: PublicKeys, is_mainnet: bool, miners_session: StackerDBSession, - signing_round_timeout: Duration, signer_entries: HashMap, weight_threshold: u32, total_weight: u32, @@ -302,7 +301,6 @@ impl SignCoordinator { wsts_public_keys, is_mainnet, miners_session, - signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, @@ -324,7 +322,6 @@ impl SignCoordinator { wsts_public_keys, is_mainnet, miners_session, - signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, @@ -485,8 +482,7 @@ impl SignCoordinator { )); }; - let start_ts = Instant::now(); - while start_ts.elapsed() <= self.signing_round_timeout { + loop { let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { @@ -630,10 +626,6 @@ impl SignCoordinator { }; } } - - Err(NakamotoNodeError::SignerSignatureError( - "Timed out waiting for group signature".into(), - )) } /// Do we ignore signer signatures? 
@@ -736,8 +728,7 @@ impl SignCoordinator { "threshold" => self.weight_threshold, ); - let start_ts = Instant::now(); - while start_ts.elapsed() <= self.signing_round_timeout { + loop { // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold if let Ok(Some((stored_block, _sz))) = chain_state @@ -947,9 +938,5 @@ impl SignCoordinator { return Ok(gathered_signatures.values().cloned().collect()); } } - - Err(NakamotoNodeError::SignerSignatureError( - "Timed out waiting for group signature".into(), - )) } } From 49585ebea8cb2fc031a3cd029a45d09f7e4fdad0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Sep 2024 12:14:17 -0400 Subject: [PATCH 422/910] test: update `multiple_miners` assertions --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ca6a12a8ec..5d067a59643 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1563,7 +1563,7 @@ fn multiple_miners() { assert_eq!(peer_1_height, peer_2_height); assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); assert_eq!( - btc_blocks_mined, + btc_blocks_mined + 1, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); From 603bb630f85d9d23c917ac2188ce2e529aaf013d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Sep 2024 12:17:41 -0400 Subject: [PATCH 423/910] test: better update to `multiple_miners` --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5d067a59643..a0a5082b28a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1490,7 +1490,7 @@ fn multiple_miners() { let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); - let mut btc_blocks_mined = 0; + let mut btc_blocks_mined = 1; let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { @@ -1563,7 +1563,7 @@ fn multiple_miners() { assert_eq!(peer_1_height, peer_2_height); assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); assert_eq!( - btc_blocks_mined + 1, + btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); From af802a93a2dfb7ea7e7de7939ebec3b7f1c5c0df Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 4 Sep 2024 12:48:55 -0400 Subject: [PATCH 424/910] WIP: Add block state and aggregate signature rejections Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 132 ++++-- stacks-signer/src/chainstate.rs | 117 ++++-- stacks-signer/src/client/stackerdb.rs | 4 + stacks-signer/src/lib.rs | 2 +- stacks-signer/src/runloop.rs | 4 +- stacks-signer/src/signerdb.rs | 299 ++++++++++++-- stacks-signer/src/tests/chainstate.rs | 2 +- stacks-signer/src/v0/signer.rs | 387 +++++++++++++----- stacks-signer/src/v1/signer.rs | 4 +- .../src/tests/nakamoto_integrations.rs | 4 +- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 11 files changed, 739 insertions(+), 217 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 5f7b82a937e..b82ee3bab24 100644 --- a/libsigner/src/v0/messages.rs +++ 
b/libsigner/src/v0/messages.rs @@ -42,6 +42,7 @@ use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::util_lib::signed_structured_data::{ make_structured_data_domain, structured_data_message_hash, }; +use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use clarity::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksPrivateKey, StacksPublicKey, }; @@ -615,8 +616,8 @@ impl std::fmt::Display for BlockResponse { BlockResponse::Rejected(r) => { write!( f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}", - r.reason_code, r.reason, r.signer_signature_hash + "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}", + r.reason_code, r.reason, r.signer_signature_hash, r.signature ) } } @@ -629,9 +630,14 @@ impl BlockResponse { Self::Accepted((hash, sig)) } - /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code - pub fn rejected(hash: Sha512Trunc256Sum, reject_code: RejectCode) -> Self { - Self::Rejected(BlockRejection::new(hash, reject_code)) + /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code and sign it with the provided private key + pub fn rejected( + hash: Sha512Trunc256Sum, + reject_code: RejectCode, + private_key: &StacksPrivateKey, + mainnet: bool, + ) -> Self { + Self::Rejected(BlockRejection::new(hash, reject_code, private_key, mainnet)) } } @@ -677,16 +683,94 @@ pub struct BlockRejection { pub reason_code: RejectCode, /// The signer signature hash of the block that was rejected pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the rejection + pub signature: MessageSignature, + /// The chain id + pub chain_id: u32, } impl BlockRejection { /// Create a new BlockRejection for the provided block and reason code - pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self { - Self { + pub fn new( + signer_signature_hash: Sha512Trunc256Sum, + reason_code: RejectCode, + private_key: &StacksPrivateKey, + mainnet: bool, + ) -> Self { + let chain_id = if mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + let mut rejection = Self { reason: reason_code.to_string(), reason_code, signer_signature_hash, + signature: MessageSignature::empty(), + chain_id, + }; + rejection + .sign(private_key) + .expect("Failed to sign BlockRejection"); + rejection + } + + /// Create a new BlockRejection from a BlockValidateRejection + pub fn from_validate_rejection( + reject: BlockValidateReject, + private_key: &StacksPrivateKey, + mainnet: bool, + ) -> Self { + let chain_id = if mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + let mut rejection = Self { + reason: reject.reason, + reason_code: RejectCode::ValidationFailed(reject.reason_code), + signer_signature_hash: reject.signer_signature_hash, + chain_id, + signature: MessageSignature::empty(), + }; + rejection + .sign(private_key) + .expect("Failed to sign BlockRejection"); + rejection + } + + /// The signature hash for the block rejection + pub fn hash(&self) -> Sha256Sum { + let domain_tuple = make_structured_data_domain("block-rejection", "1.0.0", self.chain_id); + let data = Value::buff_from(self.signer_signature_hash.as_bytes().into()).unwrap(); + structured_data_message_hash(data, domain_tuple) + } + + /// Sign the block rejection and set the internal signature field + fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + let signature_hash = self.hash(); + self.signature = 
private_key.sign(signature_hash.as_bytes())?; + Ok(()) + } + + /// Verify the rejection's signature against the provided signer public key + pub fn verify(&self, public_key: &StacksPublicKey) -> Result { + if self.signature == MessageSignature::empty() { + return Ok(false); } + let signature_hash = self.hash(); + public_key + .verify(&signature_hash.0, &self.signature) + .map_err(|e| e.to_string()) + } + + /// Recover the public key from the rejection signature + pub fn recover_public_key(&self) -> Result { + if self.signature == MessageSignature::empty() { + return Err("No signature to recover public key from"); + } + let signature_hash = self.hash(); + StacksPublicKey::recover_to_pubkey(signature_hash.as_bytes(), &self.signature) } } @@ -695,6 +779,8 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.reason.as_bytes().to_vec())?; write_next(fd, &self.reason_code)?; write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.chain_id)?; + write_next(fd, &self.signature)?; Ok(()) } @@ -705,24 +791,18 @@ impl StacksMessageCodec for BlockRejection { })?; let reason_code = read_next::(fd)?; let signer_signature_hash = read_next::(fd)?; + let chain_id = read_next::(fd)?; + let signature = read_next::(fd)?; Ok(Self { reason, reason_code, signer_signature_hash, + chain_id, + signature, }) } } -impl From for BlockRejection { - fn from(reject: BlockValidateReject) -> Self { - Self { - reason: reject.reason, - reason_code: RejectCode::ValidationFailed(reject.reason_code), - signer_signature_hash: reject.signer_signature_hash, - } - } -} - impl StacksMessageCodec for RejectCode { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?; @@ -792,12 +872,6 @@ impl From for SignerMessage { } } -impl From for BlockResponse { - fn from(rejection: BlockValidateReject) -> Self { - Self::Rejected(rejection.into()) - } -} - #[cfg(test)] mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; @@ -851,14 +925,20 @@ mod test { let rejection = BlockRejection::new( Sha512Trunc256Sum([0u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + &StacksPrivateKey::new(), + thread_rng().next_u32() % 2 == 0, ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) .expect("Failed to deserialize BlockRejection"); assert_eq!(rejection, deserialized_rejection); - let rejection = - BlockRejection::new(Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues); + let rejection = BlockRejection::new( + Sha512Trunc256Sum([1u8; 32]), + RejectCode::ConnectivityIssues, + &StacksPrivateKey::new(), + thread_rng().next_u32() % 2 == 0, + ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) .expect("Failed to deserialize BlockRejection"); @@ -877,6 +957,8 @@ mod test { let response = BlockResponse::Rejected(BlockRejection::new( Sha512Trunc256Sum([1u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + &StacksPrivateKey::new(), + thread_rng().next_u32() % 2 == 0, )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index c35ceb67e03..adfea4900d5 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -280,42 +280,19 @@ impl SortitionsView { }; 
if let Some(tenure_change) = block.get_tenure_change_tx_payload() { - // in tenure changes, we need to check: - // (1) if the tenure change confirms the expected parent block (i.e., - // the last block we signed in the parent tenure) - // (2) if the parent tenure was a valid choice - let confirms_expected_parent = - Self::check_tenure_change_block_confirmation(tenure_change, block, signer_db)?; - if !confirms_expected_parent { - return Ok(false); - } - // now, we have to check if the parent tenure was a valid choice. - let is_valid_parent_tenure = Self::check_parent_tenure_choice( - proposed_by.state(), + if !self.validate_tenure_change_payload( + &proposed_by, + tenure_change, block, signer_db, client, - &self.config.first_proposal_burn_block_timing, - )?; - if !is_valid_parent_tenure { - return Ok(false); - } - let last_in_tenure = signer_db - .get_last_signed_block_in_tenure(&block.header.consensus_hash) - .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if let Some(last_in_tenure) = last_in_tenure { - warn!( - "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), - ); + )? { return Ok(false); } } else { // check if the new block confirms the last block in the current tenure let confirms_latest_in_tenure = - Self::confirms_known_blocks_in(block, &block.header.consensus_hash, signer_db)?; + Self::confirms_latest_block_in_same_tenure(block, signer_db)?; if !confirms_latest_in_tenure { return Ok(false); } @@ -453,32 +430,94 @@ impl SortitionsView { Ok(true) } - fn check_tenure_change_block_confirmation( + /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, signer_db: &SignerDb, ) -> Result { - // in tenure changes, we need to check: - // (1) if the tenure change confirms the expected parent block (i.e., - // the last block we signed in the parent tenure) - // (2) if the parent tenure was a valid choice - Self::confirms_known_blocks_in(block, &tenure_change.prev_tenure_consensus_hash, signer_db) + let Some(last_globally_accepted_block) = signer_db + .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))? 
+ else { + info!( + "Have no globally accepted blocks in the parent tenure, assuming block confirmation is correct"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "tenure" => %block.header.consensus_hash, + ); + return Ok(true); + }; + if block.header.chain_length > last_globally_accepted_block.block.header.chain_length { + Ok(true) + } else { + warn!( + "Miner's block proposal does not confirm as many blocks as we expect"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_chain_length" => block.header.chain_length, + "expected_at_least" => last_globally_accepted_block.block.header.chain_length + 1, + ); + Ok(false) + } + } + + /// in tenure changes, we need to check: + /// (1) if the tenure change confirms the expected parent block (i.e., + /// the last globally accepted block in the parent tenure) + /// (2) if the parent tenure was a valid choice + fn validate_tenure_change_payload( + &self, + proposed_by: &ProposedBy, + tenure_change: &TenureChangePayload, + block: &NakamotoBlock, + signer_db: &SignerDb, + client: &StacksClient, + ) -> Result { + // Ensure that the tenure change block confirms the expected parent block + let confirms_expected_parent = + Self::check_tenure_change_confirms_parent(tenure_change, block, signer_db)?; + if !confirms_expected_parent { + return Ok(false); + } + // now, we have to check if the parent tenure was a valid choice. + let is_valid_parent_tenure = Self::check_parent_tenure_choice( + proposed_by.state(), + block, + signer_db, + client, + &self.config.first_proposal_burn_block_timing, + )?; + if !is_valid_parent_tenure { + return Ok(false); + } + let last_in_tenure = signer_db + .get_last_globally_accepted_block(&block.header.consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + if let Some(last_in_tenure) = last_in_tenure { + warn!( + "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), + ); + return Ok(false); + } + Ok(true) } - fn confirms_known_blocks_in( + fn confirms_latest_block_in_same_tenure( block: &NakamotoBlock, - tenure: &ConsensusHash, signer_db: &SignerDb, ) -> Result { let Some(last_known_block) = signer_db - .get_last_signed_block_in_tenure(tenure) + .get_last_accepted_block(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))? 
else { info!( - "Have not signed off on any blocks in the parent tenure, assuming block confirmation is correct"; + "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "tenure" => %tenure, ); return Ok(true); }; diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index de77ccbd72d..f2b574ef4fb 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -234,7 +234,9 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; + use clarity::util::secp256k1::MessageSignature; use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; + use rand::{thread_rng, RngCore}; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; @@ -278,6 +280,8 @@ mod tests { reason: "Did not like it".into(), reason_code: RejectCode::RejectedInPriorRound, signer_signature_hash: block.header.signer_signature_hash(), + chain_id: thread_rng().next_u32(), + signature: MessageSignature::empty(), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index e16a70b6072..c61ae397312 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -80,7 +80,7 @@ pub trait Signer: Debug + Display { command: Option, ); /// Check if the signer is in the middle of processing blocks - fn has_pending_blocks(&self) -> bool; + fn has_unprocessed_blocks(&self) -> bool; } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 9e1083047b5..86d8458e30c 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -466,7 +466,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo std::cmp::Ordering::Equal => { // We are the next reward cycle, so check if we were registered and have any pending blocks to process match signer { - ConfiguredSigner::RegisteredSigner(signer) => !signer.has_pending_blocks(), + ConfiguredSigner::RegisteredSigner(signer) => { + !signer.has_unprocessed_blocks() + } _ => true, } } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 2d2e9cc22a7..98037d991a5 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::fmt::Display; use std::path::Path; use std::time::SystemTime; @@ -22,7 +23,7 @@ use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, }; -use clarity::types::chainstate::BurnchainHeaderHash; +use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; use rusqlite::{ @@ -34,7 +35,7 @@ use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMes use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{debug, error}; +use stacks_common::{debug, define_u8_enum, error}; use wsts::net::NonceRequest; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -113,6 +114,49 @@ impl ExtraBlockInfo { } } +define_u8_enum!( +/// Block state relative to the signer's view of the stacks blockchain +BlockState { + /// The block has not yet been processed by the signer + Unprocessed = 0, + /// The block is accepted by the signer but a threshold of signers has not yet signed it + LocallyAccepted = 1, + /// The block is rejected by the signer but a threshold of signers has not accepted/rejected it yet + LocallyRejected = 2, + /// A threshold number of signers have signed the block + GloballyAccepted = 3, + /// A threshold number of signers have rejected the block + GloballyRejected = 4 +}); + +impl TryFrom for BlockState { + type Error = String; + fn try_from(value: u8) -> Result { + let state = match value { + 0 => BlockState::Unprocessed, + 1 => BlockState::LocallyAccepted, + 2 => BlockState::LocallyRejected, + 3 => BlockState::GloballyAccepted, + 4 => BlockState::GloballyRejected, + _ => return Err("Invalid block state".into()), + }; + Ok(state) + } +} + +impl Display for BlockState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = match self { + BlockState::Unprocessed => "Unprocessed", + BlockState::LocallyAccepted => "LocallyAccepted", + BlockState::LocallyRejected => "LocallyRejected", + BlockState::GloballyAccepted => "GloballyAccepted", + BlockState::GloballyRejected => "GloballyRejected", + }; + write!(f, "{}", state) + } +} + /// Additional Info about a proposed block #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { @@ -134,6 +178,8 @@ pub struct BlockInfo { pub signed_self: Option, /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) pub signed_group: Option, + /// The block state relative to the signer's view of the stacks blockchain + pub state: BlockState, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, } @@ -151,6 +197,7 @@ impl From for BlockInfo { signed_self: None, signed_group: None, ext: ExtraBlockInfo::default(), + state: BlockState::Unprocessed, } } } @@ -163,18 +210,70 @@ impl BlockInfo { block_info } - /// Mark this block as valid, signed over, and record a timestamp in the block info if it wasn't + /// Mark this block as locally accepted, valid, signed over, and records a timestamp in the block info if it wasn't /// already set. 
- pub fn mark_signed_and_valid(&mut self) { + pub fn mark_locally_accepted(&mut self) -> Result<(), String> { self.valid = Some(true); self.signed_over = true; self.signed_self.get_or_insert(get_epoch_time_secs()); + self.move_to(BlockState::LocallyAccepted) + } + + /// Mark this block as globally accepted, valid, signed over, and records a timestamp in the block info if it wasn't + /// already set. + pub fn mark_globally_accepted(&mut self) -> Result<(), String> { + self.valid = Some(true); + self.signed_over = true; + self.signed_group.get_or_insert(get_epoch_time_secs()); + self.move_to(BlockState::GloballyAccepted) + } + + /// Mark the block as locally rejected and invalid + pub fn mark_locally_rejected(&mut self) -> Result<(), String> { + self.valid = Some(false); + self.move_to(BlockState::LocallyRejected) + } + + /// Mark the block as globally rejected and invalid + pub fn mark_globally_rejected(&mut self) -> Result<(), String> { + self.valid = Some(false); + self.move_to(BlockState::GloballyRejected) } /// Return the block's signer signature hash pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { self.block.header.signer_signature_hash() } + + /// Check if the block state transition is valid + fn check_state(&self, state: BlockState) -> bool { + let prev_state = &self.state; + match state { + BlockState::Unprocessed => { + matches!(prev_state, BlockState::Unprocessed) + } + BlockState::LocallyAccepted => { + matches!(prev_state, BlockState::Unprocessed) + } + BlockState::LocallyRejected => { + matches!(prev_state, BlockState::Unprocessed) + } + BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), + BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), + } + } + + /// Attempt to transition the block state + pub fn move_to(&mut self, state: BlockState) -> Result<(), String> { + if !self.check_state(state) { + return Err(format!( + "Invalid state transition from {} to {state}", + self.state + )); + } + self.state = state; + Ok(()) + } } /// This struct manages a SQLite database connection @@ -197,6 +296,19 @@ CREATE TABLE IF NOT EXISTS blocks ( PRIMARY KEY (reward_cycle, signer_signature_hash) ) STRICT"; +static CREATE_BLOCKS_TABLE_2: &str = " +CREATE TABLE IF NOT EXISTS blocks ( + reward_cycle INTEGER NOT NULL, + signer_signature_hash TEXT NOT NULL, + block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + broadcasted INTEGER, + stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, + PRIMARY KEY (reward_cycle, signer_signature_hash) +) STRICT"; + static CREATE_INDEXES_1: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); @@ -204,6 +316,14 @@ CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$. 
CREATE INDEX IF NOT EXISTS burn_blocks_height ON burn_blocks (block_height); "; +static CREATE_INDEXES_2: &str = r#" +CREATE INDEX IF NOT EXISTS block_signatures_on_signer_signature_hash ON block_signatures(signer_signature_hash); +"#; + +static CREATE_INDEXES_3: &str = r#" +CREATE INDEX IF NOT EXISTS block_rejection_signer_addrs_on_block_signature_hash ON block_rejection_signer_addrs(signer_signature_hash); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -235,18 +355,11 @@ static DROP_SCHEMA_1: &str = " DROP TABLE IF EXISTS blocks; DROP TABLE IF EXISTS db_config;"; -static CREATE_BLOCKS_TABLE_2: &str = " -CREATE TABLE IF NOT EXISTS blocks ( - reward_cycle INTEGER NOT NULL, - signer_signature_hash TEXT NOT NULL, - block_info TEXT NOT NULL, - consensus_hash TEXT NOT NULL, - signed_over INTEGER NOT NULL, - broadcasted INTEGER, - stacks_height INTEGER NOT NULL, - burn_block_height INTEGER NOT NULL, - PRIMARY KEY (reward_cycle, signer_signature_hash) -) STRICT"; +static DROP_SCHEMA_2: &str = " + DROP TABLE IF EXISTS burn_blocks; + DROP TABLE IF EXISTS signer_states; + DROP TABLE IF EXISTS blocks; + DROP TABLE IF EXISTS db_config;"; static CREATE_BLOCK_SIGNATURES_TABLE: &str = r#" CREATE TABLE IF NOT EXISTS block_signatures ( @@ -260,9 +373,17 @@ CREATE TABLE IF NOT EXISTS block_signatures ( PRIMARY KEY (signature) ) STRICT;"#; -static CREATE_INDEXES_2: &str = r#" -CREATE INDEX IF NOT EXISTS block_signatures_on_signer_signature_hash ON block_signatures(signer_signature_hash); -"#; +static CREATE_BLOCK_REJECTION_SIGNER_ADDRS_TABLE: &str = r#" +CREATE TABLE IF NOT EXISTS block_rejection_signer_addrs ( + -- The block sighash commits to all of the stacks and burnchain state as of its parent, + -- as well as the tenure itself so there's no need to include the reward cycle. Just + -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, + -- and stacks forks. + signer_signature_hash TEXT NOT NULL, + -- the signer address that rejected the block + signer_addr TEXT NOT NULL, + PRIMARY KEY (signer_addr) +) STRICT;"#; static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, @@ -286,9 +407,23 @@ static SCHEMA_2: &[&str] = &[ "INSERT INTO db_config (version) VALUES (2);", ]; +static SCHEMA_3: &[&str] = &[ + DROP_SCHEMA_2, + CREATE_DB_CONFIG, + CREATE_BURN_STATE_TABLE, + CREATE_BLOCKS_TABLE_2, + CREATE_SIGNER_STATE_TABLE, + CREATE_BLOCK_SIGNATURES_TABLE, + CREATE_BLOCK_REJECTION_SIGNER_ADDRS_TABLE, + CREATE_INDEXES_1, + CREATE_INDEXES_2, + CREATE_INDEXES_3, + "INSERT INTO db_config (version) VALUES (3);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 2; + pub const SCHEMA_VERSION: u32 = 3; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -346,6 +481,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 2 to schema 3 + fn schema_3_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 3 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_3.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). 
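The schema machinery above follows a common versioned-migration pattern: the current version is stored in `db_config`, each `SCHEMA_N` is a batch of statements ending in `INSERT INTO db_config (version) VALUES (N)`, and `create_or_migrate` loops, applying one step per pass, until the stored version reaches `SCHEMA_VERSION` (now 3). Note that `DROP_SCHEMA_2` makes this particular step destructive: the tables are dropped and rebuilt rather than altered in place. Below is a minimal sketch of the same loop against an in-memory database using `rusqlite` (the driver this file already imports); the tables and statements are placeholders, not the signer's real schema.

```rust
// Assumes a rusqlite dependency, e.g. rusqlite = { version = "0.31", features = ["bundled"] }.
use rusqlite::{Connection, Result};

const TARGET_VERSION: u32 = 3;

/// Read the stored schema version, treating a missing db_config table as version 0.
fn get_schema_version(conn: &Connection) -> Result<u32> {
    let exists: bool = conn.query_row(
        "SELECT EXISTS(SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = 'db_config')",
        [],
        |row| row.get(0),
    )?;
    if !exists {
        return Ok(0);
    }
    conn.query_row("SELECT MAX(version) FROM db_config", [], |row| row.get(0))
}

/// Apply one numbered migration per pass until the target version is reached.
fn create_or_migrate(conn: &mut Connection) -> Result<()> {
    loop {
        let tx = conn.transaction()?;
        match get_schema_version(&tx)? {
            0 => tx.execute_batch(
                "CREATE TABLE db_config (version INTEGER NOT NULL);
                 INSERT INTO db_config (version) VALUES (1);",
            )?,
            1 => tx.execute_batch(
                "CREATE TABLE blocks (signer_signature_hash TEXT PRIMARY KEY, block_info TEXT NOT NULL);
                 INSERT INTO db_config (version) VALUES (2);",
            )?,
            // Like the schema 3 migration above, a step may drop and rebuild tables.
            2 => tx.execute_batch(
                "DROP TABLE blocks;
                 CREATE TABLE blocks (signer_signature_hash TEXT PRIMARY KEY, block_info TEXT NOT NULL, state TEXT NOT NULL);
                 INSERT INTO db_config (version) VALUES (3);",
            )?,
            TARGET_VERSION => {
                tx.commit()?;
                return Ok(());
            }
            x => panic!("database schema {x} is newer than this binary supports"),
        }
        tx.commit()?;
    }
}

fn main() -> Result<()> {
    let mut conn = Connection::open_in_memory()?;
    create_or_migrate(&mut conn)?;
    assert_eq!(get_schema_version(&conn)?, TARGET_VERSION);
    Ok(())
}
```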
@@ -356,7 +505,8 @@ impl SignerDb {
                 match version {
                     0 => Self::schema_1_migration(&sql_tx)?,
                     1 => Self::schema_2_migration(&sql_tx)?,
-                    2 => break,
+                    2 => Self::schema_3_migration(&sql_tx)?,
+                    3 => break,
                     x => return Err(DBError::Other(format!(
                         "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}",
                         Self::SCHEMA_VERSION,
@@ -438,6 +588,34 @@ impl SignerDb {
         try_deserialize(result)
     }

+    /// Return the last accepted block in a tenure (identified by its consensus hash).
+    pub fn get_last_accepted_block(
+        &self,
+        tenure: &ConsensusHash,
+    ) -> Result<Option<BlockInfo>, DBError> {
+        let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1";
+        let args = params![
+            tenure,
+            &BlockState::GloballyAccepted.to_string(),
+            &BlockState::LocallyAccepted.to_string()
+        ];
+        let result: Option<String> = query_row(&self.db, query, args)?;
+
+        try_deserialize(result)
+    }
+
+    /// Return the last globally accepted block in a tenure (identified by its consensus hash).
+    pub fn get_last_globally_accepted_block(
+        &self,
+        tenure: &ConsensusHash,
+    ) -> Result<Option<BlockInfo>, DBError> {
+        let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC LIMIT 1";
+        let args = params![tenure, &BlockState::GloballyAccepted.to_string()];
+        let result: Option<String> = query_row(&self.db, query, args)?;
+
+        try_deserialize(result)
+    }
+
     /// Insert or replace a burn block into the database
     pub fn insert_burn_block(
         &mut self,
@@ -491,7 +669,6 @@ impl SignerDb {
             .as_ref()
             .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" });
         let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?;
-
         debug!("Inserting block_info.";
             "reward_cycle" => %block_info.reward_cycle,
             "burn_block_height" => %block_info.burn_block_height,
@@ -516,11 +693,17 @@ impl SignerDb {
         Ok(())
     }

-    /// Determine if there are any pending blocks that have not yet been processed by checking the block_info.valid field
-    pub fn has_pending_blocks(&self, reward_cycle: u64) -> Result<bool, DBError> {
-        let query = "SELECT block_info FROM blocks WHERE reward_cycle = ? AND json_extract(block_info, '$.valid') IS NULL LIMIT 1";
-        let result: Option<String> =
-            query_row(&self.db, query, params!(&u64_to_sql(reward_cycle)?))?;
+    /// Determine if there are any unprocessed blocks
+    pub fn has_unprocessed_blocks(&self, reward_cycle: u64) -> Result<bool, DBError> {
+        let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND json_extract(block_info, '$.state') = ?2 LIMIT 1";
+        let result: Option<String> = query_row(
+            &self.db,
+            query,
+            params!(
+                &u64_to_sql(reward_cycle)?,
+                &BlockState::Unprocessed.to_string()
+            ),
+        )?;

         Ok(result.is_some())
     }
@@ -559,6 +742,34 @@ impl SignerDb {
             .collect()
     }

+    /// Record an observed block rejection signature
+    pub fn add_block_rejection_signer_addr(
+        &self,
+        block_sighash: &Sha512Trunc256Sum,
+        addr: &StacksAddress,
+    ) -> Result<(), DBError> {
+        let qry = "INSERT OR REPLACE INTO block_rejection_signer_addrs (signer_signature_hash, signer_addr) VALUES (?1, ?2);";
+        let args = params![block_sighash, addr.to_string(),];
+
+        debug!("Inserting block rejection.";
+            "block_sighash" => %block_sighash,
+            "signer_address" => %addr);
+
+        self.db.execute(qry, args)?;
+        Ok(())
+    }
+
+    /// Get all signer addresses that rejected the block
+    pub fn get_block_rejection_signer_addrs(
+        &self,
+        block_sighash: &Sha512Trunc256Sum,
+    ) -> Result<Vec<StacksAddress>, DBError> {
+        let qry =
+            "SELECT signer_addr FROM block_rejection_signer_addrs WHERE signer_signature_hash = ?1";
+        let args = params![block_sighash];
+        query_rows(&self.db, qry, args)
+    }
+
     /// Mark a block as having been broadcasted
     pub fn set_block_broadcasted(
         &self,
@@ -592,6 +803,18 @@ impl SignerDb {
         }
         Ok(u64::try_from(broadcasted).ok())
     }
+
+    /// Get the current state of a given block in the database
+    pub fn get_block_state(
+        &self,
+        reward_cycle: u64,
+        block_sighash: &Sha512Trunc256Sum,
+    ) -> Result<Option<BlockState>, DBError> {
+        let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1";
+        let args = params![&u64_to_sql(reward_cycle)?, block_sighash];
+        let state: Option<String> = query_row(&self.db, qry, args)?;
+        try_deserialize(state)
+    }
 }

 fn try_deserialize<T>(s: Option<String>) -> Result<Option<T>, DBError>
@@ -759,7 +982,9 @@ mod tests {
             .unwrap()
             .is_none());

-        block_info.mark_signed_and_valid();
+        block_info
+            .mark_locally_accepted()
+            .expect("Failed to mark block as locally accepted");
         db.insert_block(&block_info).unwrap();

         let fetched_info = db
@@ -824,7 +1049,7 @@ mod tests {
     }

     #[test]
-    fn test_has_pending_blocks() {
+    fn test_has_unprocessed_blocks() {
         let db_path = tmp_db_path();
         let mut db = SignerDb::new(db_path).expect("Failed to create signer db");
         let (mut block_info_1, _block_proposal) = create_block_override(|b| {
@@ -841,21 +1066,27 @@ mod tests {
         db.insert_block(&block_info_2)
             .expect("Unable to insert block into db");

-        assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap());
+        assert!(db
+            .has_unprocessed_blocks(block_info_1.reward_cycle)
+            .unwrap());

-        block_info_1.valid = Some(true);
+        block_info_1.state = BlockState::LocallyRejected;
         db.insert_block(&block_info_1)
             .expect("Unable to update block in db");

-        assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap());
+        assert!(db
+            .has_unprocessed_blocks(block_info_1.reward_cycle)
+            .unwrap());

-        block_info_2.valid = Some(true);
+        block_info_2.state = BlockState::LocallyAccepted;
         db.insert_block(&block_info_2)
             .expect("Unable to update block in db");

-        assert!(!db.has_pending_blocks(block_info_1.reward_cycle).unwrap());
+        assert!(!db
+
.has_unprocessed_blocks(block_info_1.reward_cycle) + .unwrap()); } #[test] diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d0c7f1d9f3f..1e12eeee5a8 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -229,7 +229,7 @@ fn reorg_timing_testing( reward_cycle: 1, }; let mut block_info_1 = BlockInfo::from(block_proposal_1); - block_info_1.mark_signed_and_valid(); + block_info_1.mark_locally_accepted().unwrap(); signer_db.insert_block(&block_info_1).unwrap(); let sortition_time = SystemTime::UNIX_EPOCH diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f51..3f99860ae29 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -16,14 +16,17 @@ use std::collections::{BTreeMap, HashMap}; use std::fmt::Debug; use std::sync::mpsc::Sender; -use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; -use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::net::api::postblock_proposal::{ + BlockValidateOk, BlockValidateReject, BlockValidateResponse, +}; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ - BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, + SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; @@ -37,7 +40,7 @@ use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; -use crate::signerdb::{BlockInfo, SignerDb}; +use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; /// The stacks signer registered for the reward cycle @@ -128,10 +131,7 @@ impl SignerTrait for Signer { let SignerMessage::BlockResponse(block_response) = message else { continue; }; - let BlockResponse::Accepted((block_hash, signature)) = block_response else { - continue; - }; - self.handle_block_signature(stacks_client, block_hash, signature); + self.handle_block_response(stacks_client, block_response); } } SignerEvent::MinerMessages(messages, miner_pubkey) => { @@ -217,9 +217,9 @@ impl SignerTrait for Signer { } } - fn has_pending_blocks(&self) -> bool { + fn has_unprocessed_blocks(&self) -> bool { self.signer_db - .has_pending_blocks(self.reward_cycle) + .has_unprocessed_blocks(self.reward_cycle) .unwrap_or_else(|e| { error!("{self}: Failed to check for pending blocks: {e:?}",); // Assume we have pending blocks to prevent premature cleanup @@ -300,6 +300,8 @@ impl Signer { BlockResponse::rejected( block_info.signer_signature_hash(), RejectCode::RejectedInPriorRound, + &self.private_key, + self.mainnet, ) }; Some(response) @@ -389,6 +391,8 @@ impl Signer { Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), RejectCode::ConnectivityIssues, + &self.private_key, + self.mainnet, )) } // Block proposal is bad @@ -401,6 +405,8 @@ impl Signer { Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), RejectCode::SortitionViewMismatch, + &self.private_key, + 
self.mainnet,
                 ))
             }
             // Block proposal passed check, still don't know if valid
@@ -415,6 +421,8 @@ impl Signer {
             Some(BlockResponse::rejected(
                 block_proposal.block.header.signer_signature_hash(),
                 RejectCode::NoSortitionView,
+                &self.private_key,
+                self.mainnet,
             ))
         };

@@ -448,6 +456,104 @@ impl Signer {
             .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB"));
     }

+    /// Handle block response messages from a signer
+    fn handle_block_response(
+        &mut self,
+        stacks_client: &StacksClient,
+        block_response: &BlockResponse,
+    ) {
+        match block_response {
+            BlockResponse::Accepted((block_hash, signature)) => {
+                self.handle_block_signature(stacks_client, block_hash, signature);
+            }
+            BlockResponse::Rejected(block_rejection) => {
+                self.handle_block_rejection(block_rejection);
+            }
+        }
+    }
+    /// Handle the block validate ok response. Returns our block response if we have one
+    fn handle_block_validate_ok(
+        &mut self,
+        stacks_client: &StacksClient,
+        block_validate_ok: &BlockValidateOk,
+    ) -> Option<BlockResponse> {
+        crate::monitoring::increment_block_validation_responses(true);
+        let signer_signature_hash = block_validate_ok.signer_signature_hash;
+        // For mutability reasons, we need to take the block_info out of the map and add it back after processing
+        let mut block_info = match self
+            .signer_db
+            .block_lookup(self.reward_cycle, &signer_signature_hash)
+        {
+            Ok(Some(block_info)) => block_info,
+            Ok(None) => {
+                // We have not seen this block before. Why are we getting a response for it?
+                debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring...");
+                return None;
+            }
+            Err(e) => {
+                error!("{self}: Failed to lookup block in signer db: {e:?}",);
+                return None;
+            }
+        };
+        if let Err(e) = block_info.mark_locally_accepted() {
+            warn!("{self}: Failed to mark block as locally accepted: {e:?}",);
+            return None;
+        }
+        let signature = self
+            .private_key
+            .sign(&signer_signature_hash.0)
+            .expect("Failed to sign block");
+
+        self.signer_db
+            .insert_block(&block_info)
+            .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB"));
+        // have to save the signature _after_ the block info
+        self.handle_block_signature(
+            stacks_client,
+            &block_info.signer_signature_hash(),
+            &signature,
+        );
+        Some(BlockResponse::accepted(signer_signature_hash, signature))
+    }
+
+    /// Handle the block validate reject response. Returns our block response if we have one
+    fn handle_block_validate_reject(
+        &mut self,
+        block_validate_reject: &BlockValidateReject,
+    ) -> Option<BlockResponse> {
+        crate::monitoring::increment_block_validation_responses(false);
+        let signer_signature_hash = block_validate_reject.signer_signature_hash;
+        let mut block_info = match self
+            .signer_db
+            .block_lookup(self.reward_cycle, &signer_signature_hash)
+        {
+            Ok(Some(block_info)) => block_info,
+            Ok(None) => {
+                // We have not seen this block before. Why are we getting a response for it?
+                debug!("{self}: Received a block validate response for a block we have not seen before.
Ignoring..."); + return None; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}"); + return None; + } + }; + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + return None; + } + let block_rejection = BlockRejection::from_validate_rejection( + block_validate_reject.clone(), + &self.private_key, + self.mainnet, + ); + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + self.handle_block_rejection(&block_rejection); + Some(BlockResponse::Rejected(block_rejection)) + } + /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response( &mut self, @@ -455,68 +561,20 @@ impl Signer { block_validate_response: &BlockValidateResponse, ) { info!("{self}: Received a block validate response: {block_validate_response:?}"); - let (response, block_info, signature_opt) = match block_validate_response { + let block_response = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { - crate::monitoring::increment_block_validation_responses(true); - let signer_signature_hash = block_validate_ok.signer_signature_hash; - // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return; - } - }; - block_info.mark_signed_and_valid(); - let signature = self - .private_key - .sign(&signer_signature_hash.0) - .expect("Failed to sign block"); - - ( - BlockResponse::accepted(signer_signature_hash, signature), - block_info, - Some(signature.clone()), - ) + self.handle_block_validate_ok(stacks_client, block_validate_ok) } BlockValidateResponse::Reject(block_validate_reject) => { - crate::monitoring::increment_block_validation_responses(false); - let signer_signature_hash = block_validate_reject.signer_signature_hash; - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. 
Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return; - } - }; - block_info.valid = Some(false); - ( - BlockResponse::from(block_validate_reject.clone()), - block_info, - None, - ) + self.handle_block_validate_reject(block_validate_reject) } }; + let Some(response) = block_response else { + return; + }; // Submit a proposal response to the .signers contract for miners info!( "{self}: Broadcasting a block response to stacks node: {response:?}"; - "signer_sighash" => %block_info.signer_signature_hash(), ); match self .stackerdb @@ -530,18 +588,6 @@ impl Signer { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } } - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - - if let Some(signature) = signature_opt { - // have to save the signature _after_ the block info - self.handle_block_signature( - stacks_client, - &block_info.signer_signature_hash(), - &signature, - ); - } } /// Compute the signing weight, given a list of signatures @@ -567,6 +613,99 @@ impl Signer { .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) } + /// Handle an observed rejection from another signer + fn handle_block_rejection(&mut self, rejection: &BlockRejection) { + debug!("{self}: Received a block-reject signature: {rejection:?}"); + + let block_hash = &rejection.signer_signature_hash; + let signature = &rejection.signature; + + let mut block_info = match self.signer_db.block_lookup(self.reward_cycle, block_hash) { + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block rejection for a block that is already marked as {}. Ignoring...", block_info.state); + return; + } + block_info + } + Ok(None) => { + debug!("{self}: Received block rejection for a block we have not seen before. Ignoring..."); + return; + } + Err(e) => { + warn!("{self}: Failed to load block state: {e:?}",); + return; + } + }; + + // recover public key + let Ok(public_key) = rejection.recover_public_key() else { + debug!("{self}: Received block rejection with an unrecovarable signature. Will not store."; + "block_hash" => %block_hash, + "signature" => %signature + ); + return; + }; + + let signer_address = StacksAddress::p2pkh(self.mainnet, &public_key); + + // authenticate the signature -- it must be signed by one of the stacking set + let is_valid_sig = self + .signer_addresses + .iter() + .find(|addr| { + // it only matters that the address hash bytes match + signer_address.bytes == addr.bytes + }) + .is_some(); + + if !is_valid_sig { + debug!("{self}: Receive block rejection with an invalid signature. Will not store."; + "block_hash" => %block_hash, + "signature" => %signature + ); + return; + } + + // signature is valid! store it + if let Err(e) = self + .signer_db + .add_block_rejection_signer_addr(block_hash, &signer_address) + { + warn!("{self}: Failed to save block rejection signature: {e:?}",); + } + + // do we have enough signatures to mark a block a globally rejected? + // i.e. is (set-size) - (threshold) + 1 reached. 
+        let rejection_addrs = match self.signer_db.get_block_rejection_signer_addrs(block_hash) {
+            Ok(addrs) => addrs,
+            Err(e) => {
+                warn!("{self}: Failed to load block rejection addresses: {e:?}.",);
+                return;
+            }
+        };
+        let total_reject_weight = self.compute_signature_signing_weight(rejection_addrs.iter());
+        let total_weight = self.compute_signature_total_weight();
+
+        let min_weight = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)
+            .unwrap_or_else(|_| {
+                panic!("{self}: Failed to compute threshold weight for {total_weight}")
+            });
+        if total_reject_weight.saturating_add(min_weight) <= total_weight {
+            // Not enough rejection signatures to make a decision
+            return;
+        }
+        debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}");
+        if let Err(e) = block_info.mark_globally_rejected() {
+            warn!("{self}: Failed to mark block as globally rejected: {e:?}",);
+        }
+        if let Err(e) = self.signer_db.insert_block(&block_info) {
+            warn!("{self}: Failed to update block state: {e:?}",);
+        }
+    }
+
     /// Handle an observed signature from another signer
     fn handle_block_signature(
         &mut self,
@@ -574,26 +713,27 @@ impl Signer {
         block_hash: &Sha512Trunc256Sum,
         signature: &MessageSignature,
     ) {
-        if !self.broadcast_signed_blocks {
-            debug!("{self}: Will ignore block-accept signature, since configured not to broadcast signed blocks");
-            return;
-        }
-
         debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})");

-        // have we broadcasted before?
-        if let Some(ts) = self
+        // Have we already processed this block?
+        match self
             .signer_db
-            .get_block_broadcasted(self.reward_cycle, block_hash)
-            .unwrap_or_else(|_| {
-                panic!("{self}: failed to determine if block {block_hash} was broadcasted")
-            })
+            .get_block_state(self.reward_cycle, block_hash)
         {
-            debug!(
-                "{self}: have already broadcasted block {} at {}, so will not re-attempt",
-                block_hash, ts
-            );
-            return;
+            Ok(Some(state)) => {
+                if state == BlockState::GloballyAccepted || state == BlockState::GloballyRejected {
+                    debug!("{self}: Received block signature for a block that is already marked as {}. Ignoring...", state);
+                    return;
+                }
+            }
+            Ok(None) => {
+                debug!("{self}: Received block signature for a block we have not seen before. Ignoring...");
+                return;
+            }
+            Err(e) => {
+                warn!("{self}: Failed to load block state: {e:?}",);
+                return;
+            }
         }

         // recover public key
@@ -611,7 +751,7 @@ impl Signer {
             .signer_addresses
             .iter()
             .find(|addr| {
-                let stacker_address = StacksAddress::p2pkh(true, &public_key);
+                let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key);

                 // it only matters that the address hash bytes match
                 stacker_address.bytes == addr.bytes
@@ -676,9 +816,11 @@ impl Signer {
             warn!("{self}: No such block {block_hash}");
             return;
         };
-
-        // record time at which we reached the threshold
-        block_info.signed_group = Some(get_epoch_time_secs());
+        // move block to globally accepted state. If this is not possible, we have a bug in our block handling logic.
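+        // Note: mark_globally_accepted() also records the signed_group timestamp
+        // via get_or_insert, replacing the manual assignment removed above.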
+        if let Err(e) = block_info.mark_globally_accepted() {
+            // Do not abort as we should still try to store the block signature threshold
+            warn!("{self}: Failed to mark block as globally accepted: {e:?}");
+        }
         let _ = self.signer_db.insert_block(&block_info).map_err(|e| {
             warn!(
                 "Failed to set group threshold signature timestamp for {}: {:?}",
@@ -687,6 +829,25 @@ impl Signer {
             e
         });

+        if self.broadcast_signed_blocks {
+            self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs);
+        } else {
+            debug!(
+                "{self}: Not broadcasting signed block {block_hash} since broadcast_signed_blocks is false";
+                "stacks_block_id" => %block_info.block.block_id(),
+                "parent_block_id" => %block_info.block.header.parent_block_id,
+                "burnchain_consensus_hash" => %block_info.block.header.consensus_hash
+            );
+        }
+    }
+
+    fn broadcast_signed_block(
+        &self,
+        stacks_client: &StacksClient,
+        mut block: NakamotoBlock,
+        addrs_to_sigs: &HashMap<StacksAddress, MessageSignature>,
+    ) {
+        let block_hash = block.header.signer_signature_hash();
         // collect signatures for the block
         let signatures: Vec<_> = self
             .signer_addresses
@@ -694,30 +855,30 @@ impl Signer {
             .filter_map(|addr| addrs_to_sigs.get(addr).cloned())
             .collect();

-        let mut block = block_info.block;
         block.header.signer_signature = signatures;

         debug!(
             "{self}: Broadcasting Stacks block {} to node",
             &block.block_id()
         );
-        let broadcasted = stacks_client
-            .post_block(&block)
-            .map_err(|e| {
-                warn!(
-                    "{self}: Failed to post block {block_hash} (id {}): {e:?}",
-                    &block.block_id()
-                );
-                e
-            })
-            .is_ok();
-
-        if broadcasted {
-            self.signer_db
-                .set_block_broadcasted(self.reward_cycle, block_hash, get_epoch_time_secs())
-                .unwrap_or_else(|_| {
-                    panic!("{self}: failed to determine if block {block_hash} was broadcasted")
-                });
+        if let Err(e) = stacks_client.post_block(&block) {
+            warn!(
+                "{self}: Failed to post block {block_hash}: {e:?}";
+                "stacks_block_id" => %block.block_id(),
+                "parent_block_id" => %block.header.parent_block_id,
+                "burnchain_consensus_hash" => %block.header.consensus_hash
+            );
+            return;
+        }
+
+        if let Err(e) = self.signer_db.set_block_broadcasted(
+            self.reward_cycle,
+            &block_hash,
+            get_epoch_time_secs(),
+        ) {
+            warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}");
         }
     }
diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs
index fca9282ec53..08ccde5a920 100644
--- a/stacks-signer/src/v1/signer.rs
+++ b/stacks-signer/src/v1/signer.rs
@@ -273,9 +273,9 @@ impl SignerTrait for Signer {
         self.process_next_command(stacks_client, current_reward_cycle);
     }

-    fn has_pending_blocks(&self) -> bool {
+    fn has_unprocessed_blocks(&self) -> bool {
         self.signer_db
-            .has_pending_blocks(self.reward_cycle)
+            .has_unprocessed_blocks(self.reward_cycle)
             .unwrap_or_else(|e| {
                 error!("{self}: Failed to check if there are pending blocks: {e:?}");
                 // Assume there are pending blocks to prevent premature cleanup
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 9f30ea2908d..97b612747c7 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -89,7 +89,7 @@ use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum};
 use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey};
 use stacks_common::util::{get_epoch_time_secs, sleep_ms};
 use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView};
-use
stacks_signer::signerdb::{BlockInfo, ExtraBlockInfo, SignerDb}; +use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; @@ -5498,6 +5498,7 @@ fn signer_chainstate() { signed_self: None, signed_group: None, ext: ExtraBlockInfo::None, + state: BlockState::Unprocessed, }) .unwrap(); @@ -5575,6 +5576,7 @@ fn signer_chainstate() { signed_self: None, signed_group: None, ext: ExtraBlockInfo::None, + state: BlockState::Unprocessed, }) .unwrap(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5a0c294329b..1a5baefef98 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -501,6 +501,7 @@ fn block_proposal_rejection() { reason: _reason, reason_code, signer_signature_hash, + .. })) = message { if signer_signature_hash == block_signer_signature_hash_1 { From 9eef4066cce42414a0d0690de4da23027b8292ba Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:49:59 -0400 Subject: [PATCH 425/910] chore: remove spurious deadlock condition arising from needlessly opening a transaction whenever we open the sortition DB --- stackslib/src/chainstate/burn/db/sortdb.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 15a3bf56416..b538ae17fab 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3456,6 +3456,14 @@ impl SortitionDB { SortitionDB::apply_schema_9(&tx.deref(), epochs)?; tx.commit()?; } else if version == expected_version { + // this transaction is almost never needed + let validated_epochs = StacksEpoch::validate_epochs(epochs); + let existing_epochs = Self::get_stacks_epochs(self.conn())?; + if existing_epochs == validated_epochs { + return Ok(()); + } + + // epochs are out of date let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; tx.commit()?; From 33ec49af9906a97a7c820da92b84bd05f84a179b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 4 Sep 2024 13:45:14 -0400 Subject: [PATCH 426/910] Fix miners to not accept multiple messages from the same signer for the same block Signed-off-by: Jacinta Ferrant --- .../src/nakamoto_node/sign_coordinator.rs | 176 ++++++++++-------- 1 file changed, 96 insertions(+), 80 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index d2c4f2b3900..8104d2ebd24 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -722,6 +722,7 @@ impl SignCoordinator { let mut total_weight_signed: u32 = 0; let mut total_reject_weight: u32 = 0; + let mut responded_signers = HashSet::new(); let mut gathered_signatures = BTreeMap::new(); info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; @@ -800,24 +801,108 @@ impl SignCoordinator { ); for (message, slot_id) in messages.into_iter().zip(slot_ids) { - let (response_hash, signature) = match message { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse 
signer public key".into(), + )); + }; + + if responded_signers.contains(&signer_pubkey) { + debug!( + "Signer {slot_id} already responded for block {}. Ignoring {message:?}.", block.header.signer_signature_hash(); + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + continue; + } + + match message { SignerMessageV0::BlockResponse(BlockResponse::Accepted(( response_hash, signature, - ))) => (response_hash, signature), - SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); + ))) => { + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != response_hash { + warn!( + "Processed signature for a different block. Will try to continue."; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "response_hash" => %response_hash, + "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id, + "response_hash" => %response_hash + ); + continue; + } + debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!("Got invalid signature from a signer. Ignoring."); + continue; }; - if rejected_data.signer_signature_hash - != block.header.signer_signature_hash() - { - debug!("Received rejected block response for a block besides my own. Ignoring."); + if !valid_sig { + warn!( + "Processed signature but didn't validate over the expected block. Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + + if Self::fault_injection_ignore_signatures() { + warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); continue; } + info!("SignCoordinator: Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + gathered_signatures.insert(slot_id, signature); + responded_signers.insert(signer_pubkey); + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let rejected_pubkey = match rejected_data.recover_public_key() { + Ok(rejected_pubkey) => { + if rejected_pubkey != signer_pubkey { + warn!("Recovered public key from rejected data does not match signer's public key. Ignoring."); + continue; + } + rejected_pubkey + } + Err(e) => { + warn!("Failed to recover public key from rejected data: {e:?}. 
Ignoring."); + continue; + } + }; + responded_signers.insert(rejected_pubkey); debug!( "Signer {} rejected our block {}/{}", slot_id, @@ -858,75 +943,6 @@ impl SignCoordinator { continue; } }; - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != response_hash { - warn!( - "Processed signature for a different block. Will try to continue."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "response_hash" => %response_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash - ); - continue; - } - debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); - }; - let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) - else { - return Err(NakamotoNodeError::SignerSignatureError( - "Failed to parse signer public key".into(), - )); - }; - let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) else { - warn!("Got invalid signature from a signer. Ignoring."); - continue; - }; - if !valid_sig { - warn!( - "Processed signature but didn't validate over the expected block. Ignoring"; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } - - if Self::fault_injection_ignore_signatures() { - warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - info!("SignCoordinator: Signature Added to block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - gathered_signatures.insert(slot_id, signature); } // After gathering all signatures, return them if we've hit the threshold From afdaaf6b04d72b1d3c58f42bf7644b975ef1ded1 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:47:07 -0700 Subject: [PATCH 427/910] Add deadlock fix addtion to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2add5b99f65..5302fc60c26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Fix block proposal rejection test (#5084) - Mock signing revamp (#5070) - Multi miner fixes jude (#5040) +- Remove spurious deadlock condition whenever the sortition DB is opened ## [2.5.0.0.6] From cd8d5d4c5969b2591431c490f13bc1f1b09ed6e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 14:54:35 -0400 Subject: [PATCH 428/910] feat: add 
/v3/tenures/tip/{:consensus_hash} for getting the highest block header in a tenure
---
 stackslib/src/chainstate/stacks/db/mod.rs |   2 +-
 stackslib/src/net/api/gettenuretip.rs     | 184 ++++++++++++++++++++++
 stackslib/src/net/api/mod.rs              |   2 +
 3 files changed, 187 insertions(+), 1 deletion(-)
 create mode 100644 stackslib/src/net/api/gettenuretip.rs

diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs
index e8b5c7bb41b..ed3158c7612 100644
--- a/stackslib/src/chainstate/stacks/db/mod.rs
+++ b/stackslib/src/chainstate/stacks/db/mod.rs
@@ -160,7 +160,7 @@ pub struct MinerPaymentSchedule {
     pub vtxindex: u32,
 }

-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 pub enum StacksBlockHeaderTypes {
     Epoch2(StacksBlockHeader),
     Nakamoto(NakamotoBlockHeader),
diff --git a/stackslib/src/net/api/gettenuretip.rs b/stackslib/src/net/api/gettenuretip.rs
new file mode 100644
index 00000000000..328aafda4d8
--- /dev/null
+++ b/stackslib/src/net/api/gettenuretip.rs
@@ -0,0 +1,184 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
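+//
+// Illustrative usage (an assumption, not part of this patch): against a node
+// whose RPC interface listens on localhost:20443, the new endpoint can be
+// queried with:
+//   curl http://localhost:20443/v3/tenures/tip/<consensus_hash>
+// where <consensus_hash> is the 40-character hex consensus hash of the tenure.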
+
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::{fs, io};
+
+use regex::{Captures, Regex};
+use serde::de::Error as de_Error;
+use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN};
+use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId};
+use stacks_common::types::net::PeerHost;
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use {serde, serde_json};
+
+use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn};
+use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState};
+use crate::chainstate::stacks::Error as ChainError;
+use crate::net::api::getblock_v3::NakamotoBlockStream;
+use crate::net::http::{
+    parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType,
+    HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse,
+    HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion,
+};
+use crate::net::httpcore::{
+    request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest,
+    StacksHttpResponse,
+};
+use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS};
+use crate::util_lib::db::{DBConn, Error as DBError};
+
+#[derive(Clone)]
+pub struct RPCNakamotoTenureTipRequestHandler {
+    consensus_hash: Option<ConsensusHash>,
+}
+
+impl RPCNakamotoTenureTipRequestHandler {
+    pub fn new() -> Self {
+        Self {
+            consensus_hash: None,
+        }
+    }
+}
+
+/// Decode the HTTP request
+impl HttpRequest for RPCNakamotoTenureTipRequestHandler {
+    fn verb(&self) -> &'static str {
+        "GET"
+    }
+
+    fn path_regex(&self) -> Regex {
+        Regex::new(r#"^/v3/tenures/tip/(?P<consensus_hash>[0-9a-f]{40})$"#).unwrap()
+    }
+
+    fn metrics_identifier(&self) -> &str {
+        "/v3/tenures/tip/:consensus_hash"
+    }
+
+    /// Try to decode this request.
+    /// There's nothing to load here, so just make sure the request is well-formed.
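+    /// A well-formed request has an empty body and a 40-character hex consensus
+    /// hash as its only path capture.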
+    fn try_parse_request(
+        &mut self,
+        preamble: &HttpRequestPreamble,
+        captures: &Captures,
+        query: Option<&str>,
+        _body: &[u8],
+    ) -> Result<HttpRequestContents, Error> {
+        if preamble.get_content_length() != 0 {
+            return Err(Error::DecodeError(
+                "Invalid Http request: expected 0-length body".to_string(),
+            ));
+        }
+        let consensus_hash = request::get_consensus_hash(captures, "consensus_hash")?;
+        self.consensus_hash = Some(consensus_hash);
+        Ok(HttpRequestContents::new().query_string(query))
+    }
+}
+
+impl RPCRequestHandler for RPCNakamotoTenureTipRequestHandler {
+    /// Reset internal state
+    fn restart(&mut self) {
+        self.consensus_hash = None;
+    }
+
+    /// Make the response
+    fn try_handle_request(
+        &mut self,
+        preamble: HttpRequestPreamble,
+        _contents: HttpRequestContents,
+        node: &mut StacksNodeState,
+    ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> {
+        let consensus_hash = self
+            .consensus_hash
+            .take()
+            .ok_or(NetError::SendError("`consensus_hash` not set".into()))?;
+
+        let tenure_tip_resp = node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| {
+            let header_info = match NakamotoChainState::get_highest_known_block_header_in_tenure(chainstate.db(), &consensus_hash) {
+                Ok(Some(header)) => header,
+                Ok(None) => {
+                    let msg = format!(
+                        "No blocks in tenure {}",
+                        &consensus_hash
+                    );
+                    debug!("{}", &msg);
+                    return Err(StacksHttpResponse::new_error(
+                        &preamble,
+                        &HttpNotFound::new(msg),
+                    ));
+                }
+                Err(e) => {
+                    let msg = format!(
+                        "Failed to query tenure blocks by consensus '{}': {:?}",
+                        consensus_hash, &e
+                    );
+                    error!("{}", &msg);
+                    return Err(StacksHttpResponse::new_error(
+                        &preamble,
+                        &HttpServerError::new(msg),
+                    ));
+                }
+            };
+            Ok(header_info.anchored_header)
+        });
+
+        let tenure_tip = match tenure_tip_resp {
+            Ok(tenure_tip) => tenure_tip,
+            Err(response) => {
+                return response.try_into_contents().map_err(NetError::from);
+            }
+        };
+
+        let preamble = HttpResponsePreamble::ok_json(&preamble);
+        let body = HttpResponseContents::try_from_json(&tenure_tip)?;
+        Ok((preamble, body))
+    }
+}
+
+/// Decode the HTTP response
+impl HttpResponse for RPCNakamotoTenureTipRequestHandler {
+    fn try_parse_response(
+        &self,
+        preamble: &HttpResponsePreamble,
+        body: &[u8],
+    ) -> Result<HttpResponsePayload, Error> {
+        let tenure_tip: StacksBlockHeaderTypes = parse_json(preamble, body)?;
+        Ok(HttpResponsePayload::try_from_json(tenure_tip)?)
+    }
+}
+
+impl StacksHttpRequest {
+    /// Make a new get-tenure-tip request to this endpoint
+    pub fn new_get_tenure_tip(host: PeerHost, consensus_hash: &ConsensusHash) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "GET".into(),
+            format!("/v3/tenures/tip/{}", consensus_hash),
+            HttpRequestContents::new(),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    pub fn decode_tenure_tip(self) -> Result<StacksBlockHeaderTypes, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let tenure_tip: StacksBlockHeaderTypes = serde_json::from_value(response_json)
+            .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?;
+        Ok(tenure_tip)
+    }
+}
diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs
index 5bbc6281a24..0246ac31524 100644
--- a/stackslib/src/net/api/mod.rs
+++ b/stackslib/src/net/api/mod.rs
@@ -62,6 +62,7 @@ pub mod getstackers;
 pub mod getstxtransfercost;
 pub mod gettenure;
 pub mod gettenureinfo;
+pub mod gettenuretip;
 pub mod gettransaction_unconfirmed;
 pub mod liststackerdbreplicas;
 pub mod postblock;
@@ -120,6 +121,7 @@ impl StacksHttp {
         self.register_rpc_endpoint(getsortition::GetSortitionHandler::new());
         self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new());
         self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new());
+        self.register_rpc_endpoint(gettenuretip::RPCNakamotoTenureTipRequestHandler::new());
         self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default());
         self.register_rpc_endpoint(
             gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(),

From b675546d0a1fb93a8d41406cd14d017b61c5da4d Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Wed, 4 Sep 2024 15:49:56 -0400
Subject: [PATCH 429/910] test: fix `follower_bootup` integration test

---
 testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 0eaca052457..4839bee3be8 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -3111,6 +3111,7 @@ fn follower_bootup() {
     wait_for_first_naka_block_commit(60, &commits_submitted);

     let mut follower_conf = naka_conf.clone();
+    follower_conf.node.miner = false;
     follower_conf.events_observers.clear();
     follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir);
     follower_conf.node.seed = vec![0x01; 32];

From f5f6b99f30ca6b576778c499817b197eb4c9f386 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Wed, 4 Sep 2024 16:03:51 -0400
Subject: [PATCH 430/910] test: fix mistake on last update to `multiple_miners`
 test

---
 testnet/stacks-node/src/tests/signer/v0.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index a0a5082b28a..4d6ba5b8069 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -1561,7 +1561,10 @@ fn multiple_miners() {
     let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height;
     info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height);
     assert_eq!(peer_1_height, peer_2_height);
-    assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined);
+    assert_eq!(
+        peer_1_height,
+        pre_nakamoto_peer_1_height + btc_blocks_mined - 1
+    );
     assert_eq!(
         btc_blocks_mined,
         u64::try_from(miner_1_tenures + miner_2_tenures).unwrap()

From 0ec5688fcb2e92745badadff4b93eab24d9c816f Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Wed, 4 Sep 2024 16:55:52 -0400
Subject: [PATCH 431/910] WIP: broken check_proposal reorg timing test

Signed-off-by: Jacinta Ferrant

---
 stacks-signer/src/chainstate.rs               | 79 +++++++++++----
 stacks-signer/src/client/mod.rs               |  7 ++
 stacks-signer/src/client/stacks_client.rs     | 59 ++++++++++-
 stacks-signer/src/tests/chainstate.rs         | 97 ++++++++++++++-----
 stacks-signer/src/v0/signer.rs                |  3 +-
 .../src/tests/nakamoto_integrations.rs        | 94 +++++++++++++++---
 6 files changed, 280 insertions(+), 59 deletions(-)

diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs
index adfea4900d5..b7a8272040a 100644
--- a/stacks-signer/src/chainstate.rs
+++ b/stacks-signer/src/chainstate.rs
@@ -27,7 +27,7 @@ use stacks_common::{info, warn};

 use crate::client::{ClientError, StacksClient};
 use crate::config::SignerConfig;
-use crate::signerdb::SignerDb;
+use crate::signerdb::{BlockState, SignerDb};

 #[derive(thiserror::Error, Debug)]
 /// Error type for the signer chainstate module
@@ -185,9 +185,10 @@ impl SortitionsView {
     pub fn check_proposal(
         &mut self,
         client: &StacksClient,
-        signer_db: &SignerDb,
+        signer_db: &mut SignerDb,
         block: &NakamotoBlock,
         block_pk: &StacksPublicKey,
+        reward_cycle: u64,
     ) -> Result<bool, SignerChainstateError> {
         if self
             .cur_sortition
@@ -284,6 +285,7 @@ impl SortitionsView {
                 &proposed_by,
                 tenure_change,
                 block,
+                reward_cycle,
                 signer_db,
                 client,
             )? {
@@ -434,21 +436,56 @@ impl SortitionsView {
     fn check_tenure_change_confirms_parent(
         tenure_change: &TenureChangePayload,
         block: &NakamotoBlock,
-        signer_db: &SignerDb,
+        reward_cycle: u64,
+        signer_db: &mut SignerDb,
+        client: &StacksClient,
     ) -> Result<bool, SignerChainstateError> {
-        let Some(last_globally_accepted_block) = signer_db
+        // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last globally accepted block in the parent tenure.
+        let last_globally_accepted_block = signer_db
             .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash)
-            .map_err(|e| ClientError::InvalidResponse(e.to_string()))?
-        else {
-            info!(
-                "Have no globally accepted blocks in the parent tenure, assuming block confirmation is correct";
-                "proposed_block_consensus_hash" => %block.header.consensus_hash,
-                "proposed_block_signer_sighash" => %block.header.signer_signature_hash(),
-                "tenure" => %block.header.consensus_hash,
-            );
-            return Ok(true);
+            .map_err(|e| ClientError::InvalidResponse(e.to_string()))?;
+
+        if let Some(global_info) = last_globally_accepted_block {
+            if block.header.chain_length <= global_info.block.header.chain_length {
+                warn!(
+                    "Miner's block proposal does not confirm as many blocks as we expect";
+                    "proposed_block_consensus_hash" => %block.header.consensus_hash,
+                    "proposed_block_signer_sighash" => %block.header.signer_signature_hash(),
+                    "proposed_chain_length" => block.header.chain_length,
+                    "expected_at_least" => global_info.block.header.chain_length + 1,
+                );
+                return Ok(false);
+            }
+        }
+
+        let tip = match client.get_tenure_tip(&tenure_change.prev_tenure_consensus_hash) {
+            Ok(tip) => tip,
+            Err(e) => {
+                warn!(
+                    "Miner block proposal contains a tenure change, but failed to fetch the tenure tip for the parent tenure: {e:?}. Considering proposal invalid.";
Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %tenure_change.prev_tenure_consensus_hash, + ); + return Ok(false); + } }; - if block.header.chain_length > last_globally_accepted_block.block.header.chain_length { + if let Some(nakamoto_tip) = tip.as_stacks_nakamoto() { + // If we have seen this block already, make sure its state is updated to globally accepted + if let Ok(Some(mut block_info)) = + signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) + { + if block_info.state != BlockState::GloballyAccepted { + if let Err(e) = block_info.mark_globally_accepted() { + warn!("Failed to update block info in db: {e}"); + } else if let Err(e) = signer_db.insert_block(&block_info) { + warn!("Failed to update block info in db: {e}"); + } + } + } + } + let tip_height = tip.height(); + if block.header.chain_length > tip_height { Ok(true) } else { warn!( @@ -456,7 +493,7 @@ impl SortitionsView { "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, - "expected_at_least" => last_globally_accepted_block.block.header.chain_length + 1, + "expected_at_least" => tip_height + 1, ); Ok(false) } @@ -471,12 +508,18 @@ impl SortitionsView { proposed_by: &ProposedBy, tenure_change: &TenureChangePayload, block: &NakamotoBlock, - signer_db: &SignerDb, + reward_cycle: u64, + signer_db: &mut SignerDb, client: &StacksClient, ) -> Result { // Ensure that the tenure change block confirms the expected parent block - let confirms_expected_parent = - Self::check_tenure_change_confirms_parent(tenure_change, block, signer_db)?; + let confirms_expected_parent = Self::check_tenure_change_confirms_parent( + tenure_change, + block, + reward_cycle, + signer_db, + client, + )?; if !confirms_expected_parent { return Ok(false); } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 32951d7990e..b32f465b11d 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -123,6 +123,7 @@ pub(crate) mod tests { use std::net::{SocketAddr, TcpListener}; use blockstack_lib::chainstate::stacks::boot::POX_4_NAME; + use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::{ @@ -597,4 +598,10 @@ pub(crate) mod tests { let clarity_value = ClarityValue::UInt(threshold as u128); build_read_only_response(&clarity_value) } + + pub fn build_get_tenure_tip_response(header_types: &StacksBlockHeaderTypes) -> String { + let response_json = + serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); + format!("HTTP/1.1 200 OK\n\n{response_json}") + } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914bd..05c3b0f1564 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -21,6 +21,7 @@ use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; +use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ StacksTransaction, 
     TransactionContractCall, TransactionPayload, TransactionPostConditionMode,
@@ -139,6 +140,28 @@ impl StacksClient {
         &self.stacks_address
     }

+    /// Get the stacks tip header of the tenure given its consensus hash
+    pub fn get_tenure_tip(
+        &self,
+        consensus_hash: &ConsensusHash,
+    ) -> Result<StacksBlockHeaderTypes, ClientError> {
+        let send_request = || {
+            self.stacks_node_client
+                .get(self.tenure_tip_path(consensus_hash))
+                .send()
+                .map_err(|e| {
+                    warn!("Signer failed to request the tenure tip"; "err" => ?e);
+                    e
+                })
+        };
+        let response = send_request()?;
+        if !response.status().is_success() {
+            return Err(ClientError::RequestFailure(response.status()));
+        }
+        let tenure_tip = response.json()?;
+        Ok(tenure_tip)
+    }
+
     /// Retrieve the signer slots stored within the stackerdb contract
     pub fn get_stackerdb_signer_slots(
         &self,
@@ -826,6 +849,10 @@ impl StacksClient {
         format!("{}/v2/fees/transaction", self.http_origin)
     }

+    fn tenure_tip_path(&self, consensus_hash: &ConsensusHash) -> String {
+        format!("{}/v3/tenures/tip/{}", self.http_origin, consensus_hash)
+    }
+
     /// Helper function to create a stacks transaction for a modifying contract call
     #[allow(clippy::too_many_arguments)]
     pub fn build_unsigned_contract_call_transaction(
@@ -893,12 +920,16 @@ mod tests {
     use blockstack_lib::chainstate::stacks::boot::{
         NakamotoSignerEntry, PoxStartCycleInfo, RewardSet,
     };
+    use clarity::types::chainstate::{StacksBlockId, TrieHash};
+    use clarity::util::hash::Sha512Trunc256Sum;
+    use clarity::util::secp256k1::MessageSignature;
     use clarity::vm::types::{
         ListData, ListTypeData, ResponseData, SequenceData, TupleData, TupleTypeSignature,
         TypeSignature,
     };
     use rand::thread_rng;
     use rand_core::RngCore;
+    use stacks_common::bitvec::BitVec;
     use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER};
     use wsts::curve::scalar::Scalar;

@@ -907,8 +938,9 @@ mod tests {
         build_account_nonce_response, build_get_approved_aggregate_key_response,
         build_get_last_round_response, build_get_medium_estimated_fee_ustx_response,
         build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response,
-        build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response,
-        build_read_only_response, write_response, MockServerClient,
+        build_get_tenure_tip_response, build_get_vote_for_aggregate_key_response,
+        build_get_weight_threshold_response, build_read_only_response, write_response,
+        MockServerClient,
     };

     #[test]
@@ -1542,4 +1574,27 @@ mod tests {
         write_response(mock.server, response.as_bytes());
         assert_eq!(h.join().unwrap().unwrap(), estimate);
     }
+
+    #[test]
+    fn get_tenure_tip_should_succeed() {
+        let mock = MockServerClient::new();
+        let consensus_hash = ConsensusHash([15; 20]);
+        let header = StacksBlockHeaderTypes::Nakamoto(NakamotoBlockHeader {
+            version: 1,
+            chain_length: 10,
+            burn_spent: 10,
+            consensus_hash: ConsensusHash([15; 20]),
+            parent_block_id: StacksBlockId([0; 32]),
+            tx_merkle_root: Sha512Trunc256Sum([0; 32]),
+            state_index_root: TrieHash([0; 32]),
+            timestamp: 3,
+            miner_signature: MessageSignature::empty(),
+            signer_signature: vec![],
+            pox_treatment: BitVec::ones(1).unwrap(),
+        });
+        let response = build_get_tenure_tip_response(&header);
+        let h = spawn(move || mock.client.get_tenure_tip(&consensus_hash));
+        write_response(mock.server, response.as_bytes());
+        assert_eq!(h.join().unwrap().unwrap(), header);
+    }
 }
diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs
index
 1e12eeee5a8..41f493ed57e 100644
--- a/stacks-signer/src/tests/chainstate.rs
+++ b/stacks-signer/src/tests/chainstate.rs
@@ -18,6 +18,7 @@ use std::net::{Ipv4Addr, SocketAddrV4};
 use std::time::{Duration, SystemTime};

 use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader};
+use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes;
 use blockstack_lib::chainstate::stacks::{
     CoinbasePayload, SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction,
     TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionAuth,
@@ -124,33 +125,45 @@ fn setup_test_environment(

 #[test]
 fn check_proposal_units() {
-    let (stacks_client, signer_db, block_pk, mut view, block) =
+    let (stacks_client, mut signer_db, block_pk, mut view, block) =
         setup_test_environment("check_proposal_units");

     assert!(!view
-        .check_proposal(&stacks_client, &signer_db, &block, &block_pk,)
+        .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1)
         .unwrap());

     view.last_sortition = None;

     assert!(!view
-        .check_proposal(&stacks_client, &signer_db, &block, &block_pk,)
+        .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1)
         .unwrap());
 }

 #[test]
 fn check_proposal_miner_pkh_mismatch() {
-    let (stacks_client, signer_db, _block_pk, mut view, mut block) =
+    let (stacks_client, mut signer_db, _block_pk, mut view, mut block) =
         setup_test_environment("miner_pkh_mismatch");
     block.header.consensus_hash = view.cur_sortition.consensus_hash;
     let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3]));
     assert!(!view
-        .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk)
+        .check_proposal(
+            &stacks_client,
+            &mut signer_db,
+            &block,
+            &different_block_pk,
+            1
+        )
         .unwrap());

     block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash;
     assert!(!view
-        .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk)
+        .check_proposal(
+            &stacks_client,
+            &mut signer_db,
+            &block,
+            &different_block_pk,
+            1
+        )
         .unwrap());
 }

@@ -158,6 +171,7 @@ fn reorg_timing_testing(
     test_name: &str,
     first_proposal_burn_block_timing_secs: u64,
     sortition_timing_secs: u64,
+    check_tip: bool,
 ) -> Result<bool, SignerChainstateError> {
     let (_stacks_client, mut signer_db, block_pk, mut view, mut block) =
         setup_test_environment(test_name);
@@ -228,6 +242,7 @@ fn reorg_timing_testing(
         burn_height: 2,
         reward_cycle: 1,
     };
+    let mut header_clone = block_proposal_1.block.header.clone();
     let mut block_info_1 = BlockInfo::from(block_proposal_1);
     block_info_1.mark_locally_accepted().unwrap();
     signer_db.insert_block(&block_info_1).unwrap();
@@ -238,13 +253,27 @@ fn reorg_timing_testing(
         .insert_burn_block(&view.cur_sortition.burn_block_hash, 3, &sortition_time)
         .unwrap();

-    let MockServerClient { server, client, ..
} = MockServerClient::new(); - let h = std::thread::spawn(move || view.check_proposal(&client, &signer_db, &block, &block_pk)); + let MockServerClient { + mut server, + client, + config, + } = MockServerClient::new(); + let h = std::thread::spawn(move || { + view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1) + }); crate::client::tests::write_response( server, format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), ); + if check_tip { + server = crate::client::tests::mock_server_from_config(&config); + header_clone.chain_length -= 1; + let response = crate::client::tests::build_get_tenure_tip_response( + &StacksBlockHeaderTypes::Nakamoto(header_clone), + ); + crate::client::tests::write_response(server, response.as_bytes()); + } let result = h.join().unwrap(); info!("Result: {result:?}"); @@ -253,32 +282,32 @@ fn reorg_timing_testing( #[test] fn check_proposal_reorg_timing_bad() { - let result = reorg_timing_testing("reorg_timing_bad", 30, 31); + let result = reorg_timing_testing("reorg_timing_bad", 30, 31, false); assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); } #[test] fn check_proposal_reorg_timing_ok() { - let result = reorg_timing_testing("reorg_timing_okay", 30, 30); + let result = reorg_timing_testing("reorg_timing_okay", 30, 30, true); assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); } #[test] fn check_proposal_invalid_status() { - let (stacks_client, signer_db, block_pk, mut view, mut block) = + let (stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -289,7 +318,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which may be a subset) assert!(view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); } @@ -328,7 +357,7 @@ fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { #[test] fn check_proposal_tenure_extend_invalid_conditions() { - let (stacks_client, signer_db, block_pk, mut view, mut block) = + let (stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment("tenure_extend"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let mut extend_payload = make_tenure_change_payload(); @@ -338,7 +367,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs 
= vec![tx]; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -348,7 +377,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); } @@ -370,21 +399,45 @@ fn check_block_proposal_timeout() { .unwrap(); assert!(view - .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &curr_sortition_block, + &block_pk, + 1 + ) .unwrap()); assert!(!view - .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &last_sortition_block, + &block_pk, + 1 + ) .unwrap()); // Sleep a bit to time out the block proposal std::thread::sleep(Duration::from_secs(5)); assert!(!view - .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &curr_sortition_block, + &block_pk, + 1 + ) .unwrap()); assert!(view - .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &last_sortition_block, + &block_pk, + 1 + ) .unwrap()); } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 3f99860ae29..c71a2ff6373 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -377,9 +377,10 @@ impl Signer { let block_response = if let Some(sortition_state) = sortition_state { match sortition_state.check_proposal( stacks_client, - &self.signer_db, + &mut self.signer_db, &block_proposal.block, miner_pubkey, + self.reward_cycle, ) { // Error validating block Err(e) => { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 97b612747c7..1b3bf397ab3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5427,6 +5427,13 @@ fn signer_chainstate() { ) .unwrap(); + let reward_cycle = burnchain + .block_height_to_reward_cycle( + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height, + ) + .unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), @@ -5441,7 +5448,13 @@ fn signer_chainstate() { last_tenures_proposals { let valid = sortitions_view - .check_proposal(&signer_client, &signer_db, prior_tenure_first, miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + prior_tenure_first, + miner_pk, + reward_cycle, + ) .unwrap(); assert!( !valid, @@ -5449,7 +5462,13 @@ fn signer_chainstate() { ); for block in prior_tenure_interims.iter() { let valid = sortitions_view - .check_proposal(&signer_client, &signer_db, block, miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + block, + miner_pk, + reward_cycle, + ) .unwrap(); assert!( !valid, @@ -5472,20 +5491,26 @@ fn signer_chainstate() { thread::sleep(Duration::from_secs(1)); }; + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = 
burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); let valid = sortitions_view - .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) + .check_proposal( + &signer_client, + &mut signer_db, + &proposal.0, + &proposal.1, + reward_cycle, + ) .unwrap(); assert!( valid, "Nakamoto integration test produced invalid block proposal" ); - let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height; - let reward_cycle = burnchain - .block_height_to_reward_cycle(burn_block_height) - .unwrap(); signer_db .insert_block(&BlockInfo { block: proposal.0.clone(), @@ -5531,9 +5556,10 @@ fn signer_chainstate() { let valid = sortitions_view .check_proposal( &signer_client, - &signer_db, + &mut signer_db, &proposal_interim.0, &proposal_interim.1, + reward_cycle, ) .unwrap(); @@ -5548,14 +5574,21 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, - &signer_db, + &mut signer_db, &proposal_interim.0, &proposal_interim.1, + reward_cycle, ) .unwrap(); @@ -5618,10 +5651,21 @@ fn signer_chainstate() { block_proposal_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); - + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); @@ -5672,7 +5716,13 @@ fn signer_chainstate() { assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); @@ -5729,7 +5779,13 @@ fn signer_chainstate() { assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); @@ -5788,7 +5844,13 @@ fn signer_chainstate() { assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); From 29d42a577417e23a96b1c6df97d9508e435708c7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 17:53:36 -0400 Subject: [PATCH 432/910] fix: serde requires an owned string for decoding a hex-encoded BitVec. 
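Some context on the fix below: serde can only hand out a borrowed `&str` when the deserializer is able to lend a slice of its input (for example, `serde_json::from_str` or `from_slice` on a string with no escape sequences). A reader-backed source cannot lend, so asking for `&str` fails at runtime with "invalid type: string, expected a borrowed string", while an owned `String` works with every source. Below is a minimal standalone reproduction of the failure mode, assuming only the `serde` (with derive) and `serde_json` crates; the `HexWrapper` type and `hex_field` helper are illustrative stand-ins, not part of stacks-core:

```rust
use serde::{Deserialize, Deserializer};

#[derive(Deserialize, Debug)]
struct HexWrapper {
    #[serde(deserialize_with = "hex_field")]
    bits: String,
}

// The buggy pattern this patch removes: request a borrowed &str.
fn hex_field<'de, D: Deserializer<'de>>(d: D) -> Result<String, D::Error> {
    let hex: &str = Deserialize::deserialize(d)?;
    Ok(hex.to_string())
}

fn main() {
    let json = br#"{"bits":"deadbeef"}"#;
    // Borrowing from an in-memory slice succeeds...
    assert!(serde_json::from_slice::<HexWrapper>(json).is_ok());
    // ...but a reader cannot lend out &str, so this fails at runtime with
    // "invalid type: string, expected a borrowed string".
    assert!(serde_json::from_reader::<_, HexWrapper>(&json[..]).is_err());
}
```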
Also, add serde round-trip test --- stacks-common/src/bitvec.rs | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 792532e135f..6602f62e5c3 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -100,8 +100,8 @@ impl<const MAX_SIZE: u16> Serialize for BitVec<MAX_SIZE> { impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec<MAX_SIZE> { fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { - let hex: &str = Deserialize::deserialize(deserializer)?; - let bytes = hex_bytes(hex).map_err(serde::de::Error::custom)?; + let hex: String = Deserialize::deserialize(deserializer)?; + let bytes = hex_bytes(hex.as_str()).map_err(serde::de::Error::custom)?; Self::consensus_deserialize(&mut bytes.as_slice()).map_err(serde::de::Error::custom) } } @@ -412,4 +412,21 @@ mod test { check_ok_vector(i.as_slice()); } } + + #[test] + fn test_serde() { + let mut bitvec_zero_10 = BitVec::<10>::zeros(10).unwrap(); + bitvec_zero_10.set(0, true).unwrap(); + bitvec_zero_10.set(5, true).unwrap(); + bitvec_zero_10.set(3, true).unwrap(); + assert_eq!( + bitvec_zero_10.binary_str(), + "1001010000", + "Binary string should be 1001010000" + ); + + let serde_bitvec_json = serde_json::to_string(&bitvec_zero_10).unwrap(); + let serde_bitvec: BitVec<10> = serde_json::from_str(&serde_bitvec_json).unwrap(); + assert_eq!(serde_bitvec, bitvec_zero_10); + } } From 39614674b1d72cea2aae67acad3d68d0bb6c9c2b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 17:54:48 -0400 Subject: [PATCH 433/910] fix: pub(crate) for testing --- stackslib/src/net/api/gettenuretip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/gettenuretip.rs b/stackslib/src/net/api/gettenuretip.rs index 328aafda4d8..188fe0dc51b 100644 --- a/stackslib/src/net/api/gettenuretip.rs +++ b/stackslib/src/net/api/gettenuretip.rs @@ -43,7 +43,7 @@ use crate::util_lib::db::{DBConn, Error as DBError}; #[derive(Clone)] pub struct RPCNakamotoTenureTipRequestHandler { - consensus_hash: Option<ConsensusHash>, + pub(crate) consensus_hash: Option<ConsensusHash>, } impl RPCNakamotoTenureTipRequestHandler { From 42bce18c9109f60256e120791f29299d36c28aac Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 17:55:19 -0400 Subject: [PATCH 434/910] chore: unit tests for /v3/tenures/tip/:consensus_hash --- stackslib/src/net/api/tests/gettenuretip.rs | 142 ++++++++++++++++++++ stackslib/src/net/api/tests/mod.rs | 1 + 2 files changed, 143 insertions(+) create mode 100644 stackslib/src/net/api/tests/gettenuretip.rs diff --git a/stackslib/src/net/api/tests/gettenuretip.rs b/stackslib/src/net/api/tests/gettenuretip.rs new file mode 100644 index 00000000000..15ca3fcb615 --- /dev/null +++ b/stackslib/src/net/api/tests/gettenuretip.rs @@ -0,0 +1,142 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program.
If not, see <http://www.gnu.org/licenses/>. + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use serde_json; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::net::api::tests::TestRPC; +use crate::net::api::{gettenuretip, *}; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_get_tenure_tip(addr.into(), &ConsensusHash([0x01; 20])); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + + let mut handler = gettenuretip::RPCNakamotoTenureTipRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + assert_eq!(handler.consensus_hash, Some(ConsensusHash([0x01; 20]))); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + + let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing, non-empty Nakamoto tenure + let request = StacksHttpRequest::new_get_tenure_tip(addr.clone().into(), &consensus_hash); + requests.push(request); + + // query existing epoch2 tenure + let all_sortitions = rpc_test.peer_1.sortdb().get_all_snapshots().unwrap(); + assert!(all_sortitions.len() > 30); + assert!(all_sortitions[30].sortition); + let epoch2_consensus_hash = all_sortitions[30].consensus_hash.clone(); + + let request = + StacksHttpRequest::new_get_tenure_tip(addr.clone().into(), &epoch2_consensus_hash); + requests.push(request); + + // query non-existent tenure + let request = + StacksHttpRequest::new_get_tenure_tip(addr.clone().into(), &ConsensusHash([0x01; 20])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the Nakamoto tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_tenure_tip().unwrap(); + assert_eq!( + resp.as_stacks_nakamoto().unwrap().consensus_hash, + consensus_hash + ); + assert_eq!( + resp.as_stacks_nakamoto().unwrap().block_id(), + nakamoto_chain_tip + ); + + // got an epoch2 block + let response = responses.remove(0); + debug!( + "Response:\n{}\n",
std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_tenure_tip().unwrap(); + let block_header = resp.as_stacks_epoch2().unwrap(); + assert_eq!( + block_header.block_hash(), + all_sortitions[30].winning_stacks_block_hash + ); + + // got a failure + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index b02bb53bb80..ded0360555a 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -79,6 +79,7 @@ mod getstackerdbmetadata; mod getstxtransfercost; mod gettenure; mod gettenureinfo; +mod gettenuretip; mod gettransaction_unconfirmed; mod liststackerdbreplicas; mod postblock; From f34c74144cf1089239de21571a4073d170813895 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 18:03:45 -0400 Subject: [PATCH 435/910] fix: fix block proposal integration test --- .../src/tests/nakamoto_integrations.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9f30ea2908d..dd5f84516e1 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2501,13 +2501,23 @@ fn block_proposal_api_endpoint() { ), ("Must wait", sign(&proposal), HTTP_TOO_MANY, None), ( - "Corrupted (bit flipped after signing)", + "Non-canonical or absent tenure", (|| { let mut sp = sign(&proposal); sp.block.header.consensus_hash.0[3] ^= 0x07; sp })(), HTTP_ACCEPTED, + Some(Err(ValidateRejectCode::NonCanonicalTenure)), + ), + ( + "Corrupted (bit flipped after signing)", + (|| { + let mut sp = sign(&proposal); + sp.block.header.timestamp ^= 0x07; + sp + })(), + HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), ( @@ -2624,6 +2634,10 @@ fn block_proposal_api_endpoint() { .iter() .zip(proposal_responses.iter()) { + info!( + "Received response {:?}, expecting {:?}", + &response, &expected_response + ); match expected_response { Ok(_) => { assert!(matches!(response, BlockValidateResponse::Ok(_))); From 3047b5f10f9aa1e0ea0464433956ff6455574122 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 18:10:48 -0400 Subject: [PATCH 436/910] chore: address PR feedback --- libsigner/src/v0/messages.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index b82ee3bab24..d5f5fe63c45 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -926,7 +926,7 @@ mod test { Sha512Trunc256Sum([0u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), - thread_rng().next_u32() % 2 == 0, + thread_rng().gen_bool(0.5), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::<BlockRejection, _>(&mut &serialized_rejection[..]) @@ -937,7 +937,7 @@ mod test { Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues, &StacksPrivateKey::new(), - thread_rng().next_u32() % 2 == 0, + thread_rng().gen_bool(0.5), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::<BlockRejection, _>(&mut &serialized_rejection[..]) @@ -958,7 +958,7 @@ mod test { Sha512Trunc256Sum([1u8; 32]),
RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), - thread_rng().next_u32() % 2 == 0, + thread_rng().gen_bool(0.5), )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::<SignerMessage, _>(&mut &serialized_response[..]) From 5487b6594001adcecb849d6152ea4894bca0c892 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 21:12:43 -0400 Subject: [PATCH 437/910] chore: fix get_block_state() and add a unit test --- stacks-signer/src/chainstate.rs | 10 +++++++++ stacks-signer/src/signerdb.rs | 36 +++++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index b7a8272040a..bbbe741d8b0 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -433,6 +433,12 @@ impl SortitionsView { } /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + /// It checks the local DB first, and if the block is not present in the local DB, it asks the + /// Stacks node for the highest processed block header in the given tenure (and then caches it + /// in the DB). + /// + /// The rationale here is that the signer DB can be out-of-sync with the node. For example, + /// the signer may have been added to an already-running node. fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, @@ -446,6 +452,10 @@ impl SortitionsView { .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; if let Some(global_info) = last_globally_accepted_block { + // N.B. this block might not be the last globally accepted block across the network; + // it's just the highest one in this tenure that we know about. If this given block is + // no higher than it, then it's definitely no higher than the last globally accepted + // block across the network, so we can do an early rejection here.
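To make that lookup order concrete, the flow the comments above describe amounts to the sketch below. This is an illustrative outline only: the helper name `last_block_in_tenure`, the simplified signatures, and the `cache_tenure_tip` call are hypothetical stand-ins for the `SignerDb` bookkeeping this patch performs inline.

```rust
// Illustrative sketch only; `last_block_in_tenure` and `cache_tenure_tip`
// are hypothetical names, and signatures are simplified.
fn last_block_in_tenure(
    signer_db: &mut SignerDb,
    client: &StacksClient,
    tenure: &ConsensusHash,
) -> Result<StacksBlockHeaderTypes, ClientError> {
    // 1. Prefer the signer's local view of the tenure...
    if let Some(local) = signer_db
        .get_last_globally_accepted_block(tenure)
        .map_err(|e| ClientError::InvalidResponse(e.to_string()))?
    {
        return Ok(StacksBlockHeaderTypes::Nakamoto(local.block.header));
    }
    // 2. ...fall back to the node's /v3/tenures/tip/:consensus_hash endpoint...
    let fetched = client.get_tenure_tip(tenure)?;
    // 3. ...and cache the answer so later proposals in this tenure hit the DB.
    cache_tenure_tip(signer_db, tenure, &fetched)?;
    Ok(fetched)
}
```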
if block.header.chain_length <= global_info.block.header.chain_length { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 98037d991a5..e5a40ff9fb1 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -157,6 +157,21 @@ impl Display for BlockState { } } +impl TryFrom<&str> for BlockState { + type Error = String; + fn try_from(value: &str) -> Result<Self, Self::Error> { + let state = match value { + "Unprocessed" => BlockState::Unprocessed, + "LocallyAccepted" => BlockState::LocallyAccepted, + "LocallyRejected" => BlockState::LocallyRejected, + "GloballyAccepted" => BlockState::GloballyAccepted, + "GloballyRejected" => BlockState::GloballyRejected, + _ => return Err("Unparsable block state".into()), + }; + Ok(state) + } +} + /// Additional Info about a proposed block #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { @@ -812,8 +827,13 @@ impl SignerDb { ) -> Result<Option<BlockState>, DBError> { let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1"; let args = params![&u64_to_sql(reward_cycle)?, block_sighash]; - let state: Option<String> = query_row(&self.db, qry, args)?; - try_deserialize(state) + let state_opt: Option<String> = query_row(&self.db, qry, args)?; + let Some(state) = state_opt else { + return Ok(None); + }; + Ok(Some( + BlockState::try_from(state.as_str()).map_err(|_| DBError::Corruption)?, + )) } } @@ -907,11 +927,23 @@ mod tests { ) .unwrap(); assert!(block_info.is_none()); + + // test getting the block state + let block_state = db + .get_block_state( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) + .unwrap() + .expect("Unable to get block state from db"); + + assert_eq!(block_state, BlockInfo::from(block_proposal.clone()).state); } #[test] fn test_basic_signer_db() { let db_path = tmp_db_path(); + eprintln!("db path is {}", &db_path.display()); test_basic_signer_db_with_path(db_path) } From 5988c65af8ebd92d1770687f1edfb9712f34969d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 10:40:06 -0400 Subject: [PATCH 438/910] Add a log to block miner thread stopping Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb79c6abc72..8ff9ca44d17 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -250,6 +250,10 @@ impl BlockMinerThread { globals: &Globals, prior_miner: JoinHandle<Result<(), NakamotoNodeError>>, ) -> Result<(), NakamotoNodeError> { + debug!( + "Stopping prior miner thread ID {:?}", + prior_miner.thread().id() + ); globals.block_miner(); let prior_miner_result = prior_miner .join() From 9afe92e5478a21d2b7506d91dc1d0a8421c0bde0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:56:09 -0400 Subject: [PATCH 439/910] chore: documentation --- stacks-signer/src/chainstate.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index bbbe741d8b0..a017adf44fd 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -481,7 +481,8 @@ impl SortitionsView { } }; if let Some(nakamoto_tip) = tip.as_stacks_nakamoto() { - //
If we have seen this block already, make sure its state is updated to globally accepted. + // Otherwise, don't worry about it. if let Ok(Some(mut block_info)) = signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) { From 9e5e389a060ab50b99bfc430fa1c1d1fde6391d7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:56:27 -0400 Subject: [PATCH 440/910] chore: test_debug --> debug --- stackslib/src/net/relay.rs | 2 ++ stackslib/src/net/server.rs | 10 ++++++---- stackslib/src/net/stackerdb/sync.rs | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 1b08f5cd35d..dde4e9bbd89 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1678,6 +1678,8 @@ impl Relayer { ); accepted_blocks.push(nakamoto_block); } else { + // TODO: this shouldn't be a warning if it's only because we + // already have the block warn!( "Rejected Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index a26fa2f7b41..3849b9b0580 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -560,14 +560,14 @@ impl HttpPeer { let mut msgs = vec![]; for event_id in &poll_state.ready { if !self.sockets.contains_key(&event_id) { - test_debug!("Rogue socket event {}", event_id); + debug!("Rogue socket event {}", event_id); to_remove.push(*event_id); continue; } let client_sock_opt = self.sockets.get_mut(&event_id); if client_sock_opt.is_none() { - test_debug!("No such socket event {}", event_id); + debug!("No such socket event {}", event_id); to_remove.push(*event_id); continue; } @@ -576,7 +576,7 @@ impl HttpPeer { match self.peers.get_mut(event_id) { Some(ref mut convo) => { // activity on a http socket - test_debug!("Process HTTP data from {:?}", convo); + debug!("Process HTTP data from {:?}", convo); match HttpPeer::process_http_conversation( node_state, *event_id, @@ -585,11 +585,13 @@ impl HttpPeer { ) { Ok((alive, mut new_msgs)) => { if !alive { + debug!("HTTP convo {:?} is no longer alive", &convo); to_remove.push(*event_id); } msgs.append(&mut new_msgs); } - Err(_e) => { + Err(e) => { + debug!("Failed to process HTTP convo {:?}: {:?}", &convo, &e); to_remove.push(*event_id); continue; } diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 32d7a7e37ed..fa94c5be557 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -808,7 +808,7 @@ impl StackerDBSync { ); let chunks_req = self.make_getchunkinv(&network.get_chain_view().rc_consensus_hash); if let Err(e) = self.comms.neighbor_send(network, &naddr, chunks_req) { - info!( + debug!( "{:?}: failed to send StackerDBGetChunkInv to {:?}: {:?}", network.get_local_peer(), &naddr, From 8f2b2e78da9803ec4a782f713dade80092fdd2f9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:57:07 -0400 Subject: [PATCH 441/910] chore: raise initiative on miner failure, in case it's due to a new sortition being processed --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb79c6abc72..1e415d87c71 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -344,6 +344,10 @@ impl BlockMinerThread { } Err(e) => { warn!("Failed to mine 
block: {e:?}"); + + // try again, in case a new sortition is pending + self.globals + .raise_initiative(format!("MiningFailure: {:?}", &e)); return Err(NakamotoNodeError::MiningFailure( ChainstateError::MinerAborted, )); From e1d7f6e73bf4af06110c23032c0c1628bd4e0e56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:57:30 -0400 Subject: [PATCH 442/910] chore: log joined miner thread error --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 47016565876..b48d93db448 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -817,7 +817,14 @@ impl RelayerThread { let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}",)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) - .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) + .spawn(move || { + if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) { + info!("Miner thread failed: {:?}", &e); + Err(e) + } else { + Ok(()) + } + }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); NakamotoNodeError::SpawnError(e) From bbdfc66eaca677b094d4726ca4719796f14f3c54 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:57:39 -0400 Subject: [PATCH 443/910] fix: abort signer waiting if the tenure changes, but only after a timeout has passed --- .../src/nakamoto_node/sign_coordinator.rs | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 8104d2ebd24..6c54e50af84 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -15,7 +15,7 @@ use std::collections::BTreeMap; use std::sync::mpsc::Receiver; -use std::time::Duration; +use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; @@ -76,6 +76,7 @@ pub struct SignCoordinator { signer_entries: HashMap, weight_threshold: u32, total_weight: u32, + config: Config, pub next_signer_bitvec: BitVec<4000>, } @@ -305,6 +306,7 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, + config: config.clone(), }; return Ok(sign_coordinator); } @@ -326,6 +328,7 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, + config: config.clone(), }) } @@ -642,6 +645,19 @@ impl SignCoordinator { false } + /// Check if the tenure needs to change + fn check_burn_tip_changed(sortdb: &SortitionDB, consensus_hash: &ConsensusHash) -> bool { + let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != *consensus_hash { + info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); + true + } else { + false + } + } + /// Start gathering signatures for a Nakamoto block. 
/// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond @@ -729,6 +745,8 @@ impl SignCoordinator { "threshold" => self.weight_threshold, ); + let mut new_burn_tip_ts = None; + loop { // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold @@ -749,6 +767,18 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } + if new_burn_tip_ts.is_none() { + if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { + new_burn_tip_ts = Some(Instant::now()); + } + } + if let Some(ref new_burn_tip_ts) = new_burn_tip_ts.as_ref() { + if new_burn_tip_ts.elapsed() >= self.config.miner.wait_on_interim_blocks { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + } + // one of two things can happen: // * we get enough signatures from stackerdb from the signers, OR // * we see our block get processed in our chainstate (meaning, the signers broadcasted From 7f0a1f3640dcc4086ee4365070f8dfde0c24aac0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 11:58:37 -0400 Subject: [PATCH 444/910] fix: fix failing follower bootup test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 18e3f6b1f33..a61713bd0fd 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3483,6 +3483,7 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; + follower_conf.node.miner = false; let mut rng = rand::thread_rng(); let mut buf = [0u8; 8]; From 343dc3162117a847f059675f77ddaa37b2aeaab2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 13:02:58 -0400 Subject: [PATCH 445/910] Fix check_proposal_reorg_ok test Signed-off-by: Jacinta Ferrant --- stacks-signer/src/tests/chainstate.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 41f493ed57e..d8252a2c20e 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -171,7 +171,6 @@ fn reorg_timing_testing( test_name: &str, first_proposal_burn_block_timing_secs: u64, sortition_timing_secs: u64, - check_tip: bool, ) -> Result { let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment(test_name); @@ -261,19 +260,17 @@ fn reorg_timing_testing( let h = std::thread::spawn(move || { view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1) }); + header_clone.chain_length -= 1; + let response = crate::client::tests::build_get_tenure_tip_response( + &StacksBlockHeaderTypes::Nakamoto(header_clone), + ); + crate::client::tests::write_response(server, response.as_bytes()); + server = crate::client::tests::mock_server_from_config(&config); crate::client::tests::write_response( server, format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), ); - if check_tip { - server = crate::client::tests::mock_server_from_config(&config); - header_clone.chain_length -= 1; - let response = 
crate::client::tests::build_get_tenure_tip_response( - &StacksBlockHeaderTypes::Nakamoto(header_clone), - ); - crate::client::tests::write_response(server, response.as_bytes()); - } let result = h.join().unwrap(); info!("Result: {result:?}"); @@ -282,13 +279,13 @@ fn reorg_timing_testing( #[test] fn check_proposal_reorg_timing_bad() { - let result = reorg_timing_testing("reorg_timing_bad", 30, 31, false); + let result = reorg_timing_testing("reorg_timing_bad", 30, 31); assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); } #[test] fn check_proposal_reorg_timing_ok() { - let result = reorg_timing_testing("reorg_timing_okay", 30, 30, true); + let result = reorg_timing_testing("reorg_timing_okay", 30, 30); assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); } From 5833a8f6961ca5766d54859c6c6a4b30fd23045e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 14:46:11 -0400 Subject: [PATCH 446/910] add allow_reorg_locally_accepted_block_if_globally_rejected_succeeds integration tests Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + libsigner/src/v0/messages.rs | 14 +- stacks-signer/Cargo.toml | 3 +- stacks-signer/src/v0/signer.rs | 38 +++++- testnet/stacks-node/Cargo.toml | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 148 +++++++++++++++++++++ 6 files changed, 201 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 14bd7ef023c..e550b2b8575 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -100,6 +100,7 @@ jobs: - tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks - tests::signer::v0::duplicate_signers + - tests::signer::v0::allow_reorg_locally_accepted_block_if_globally_rejected_succeeds - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index d5f5fe63c45..ae565207a73 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -526,7 +526,9 @@ RejectCodeTypePrefix { /// The block was rejected due to no sortition view NoSortitionView = 3, /// The block was rejected due to a mismatch with expected sortition view - SortitionViewMismatch = 4 + SortitionViewMismatch = 4, + /// The block was rejected due to a testing directive + TestingDirective = 5 }); impl TryFrom for RejectCodeTypePrefix { @@ -546,6 +548,7 @@ impl From<&RejectCode> for RejectCodeTypePrefix { RejectCode::RejectedInPriorRound => RejectCodeTypePrefix::RejectedInPriorRound, RejectCode::NoSortitionView => RejectCodeTypePrefix::NoSortitionView, RejectCode::SortitionViewMismatch => RejectCodeTypePrefix::SortitionViewMismatch, + RejectCode::TestingDirective => RejectCodeTypePrefix::TestingDirective, } } } @@ -563,6 +566,8 @@ pub enum RejectCode { RejectedInPriorRound, /// The block was rejected due to a mismatch with expected sortition view SortitionViewMismatch, + /// The block was rejected due to a testing directive + TestingDirective, } define_u8_enum!( @@ -812,7 +817,8 @@ impl StacksMessageCodec for RejectCode { RejectCode::ConnectivityIssues | RejectCode::RejectedInPriorRound | RejectCode::NoSortitionView - | 
RejectCode::SortitionViewMismatch => { + | RejectCode::SortitionViewMismatch + | RejectCode::TestingDirective => { // No additional data to serialize / deserialize } }; @@ -835,6 +841,7 @@ impl StacksMessageCodec for RejectCode { RejectCodeTypePrefix::RejectedInPriorRound => RejectCode::RejectedInPriorRound, RejectCodeTypePrefix::NoSortitionView => RejectCode::NoSortitionView, RejectCodeTypePrefix::SortitionViewMismatch => RejectCode::SortitionViewMismatch, + RejectCodeTypePrefix::TestingDirective => RejectCode::TestingDirective, }; Ok(code) } @@ -862,6 +869,9 @@ impl std::fmt::Display for RejectCode { "The block was rejected due to a mismatch with expected sortition view." ) } + RejectCode::TestingDirective => { + write!(f, "The block was rejected due to a testing directive.") + } } } } diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 1d1af6da783..64e3cd5ca9e 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -62,4 +62,5 @@ version = "0.24.3" features = ["serde", "recovery"] [features] -monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] \ No newline at end of file +monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] +testing = [] \ No newline at end of file diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index c71a2ff6373..c3cf204dd4c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -43,6 +43,12 @@ use crate::runloop::{RunLoopCommand, SignerResult}; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; +#[cfg(any(test, feature = "testing"))] +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< + Option<Vec<stacks_common::types::chainstate::StacksPublicKey>>, +> = std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -324,6 +330,7 @@ impl Signer { ); return; } + // TODO: should add a check to ignore an old burn block height if we know it's outdated. Would require us to store the burn block height we last saw on the side. // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); @@ -427,9 +434,38 @@ impl Signer { )) }; + #[cfg(any(test, feature = "testing"))] + let block_response = match &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() { + Some(public_keys) => { + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private( + &self.private_key, + ), + ) { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + )) + } else { + None + } + } + None => block_response, + }; + if let Some(block_response) = block_response { // We know proposal is invalid.
Send rejection message, do not do further validation - block_info.valid = Some(false); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 5128f17f03a..19165db0a82 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -45,7 +45,7 @@ reqwest = { version = "0.11", default-features = false, features = ["blocking", clarity = { path = "../../clarity", features = ["default", "testing"]} stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] } -stacks-signer = { path = "../../stacks-signer" } +stacks-signer = { path = "../../stacks-signer", features = ["testing"] } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1a5baefef98..3ada35b0df9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -51,6 +51,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; +use stacks_signer::v0::signer::TEST_REJECT_ALL_BLOCK_PROPOSAL; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -2957,3 +2958,150 @@ fn duplicate_signers() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt +/// by the miner to reorg their prior signed block. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by >30% of the signers. +/// The miner then attempts to mine N+1', and all signers accept the block. +/// +/// Test Assertion: +/// All signers sign all blocks successfully. +/// The chain advances 2 full reward cycles. 
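A note on the ">30%" figure in the test plan above: a Nakamoto block needs signatures from at least 70% of the total signer weight, so with five equally weighted signers (as configured here), the two rejections selected in the test body below via `take(num_signers / 2)` already make global acceptance impossible. A quick check of that arithmetic, assuming equal weights:

```rust
// Sanity check of the rejection math, assuming five equally weighted
// signers and the 70% approval threshold for Nakamoto blocks.
fn main() {
    let num_signers: u32 = 5;
    let rejecting = num_signers / 2; // 2 signers, as selected in the test
    let accepting = num_signers - rejecting; // the other 3
    // 3 of 5 is 60%, short of the 70% needed for global acceptance.
    assert!(accepting * 100 < num_signers * 70);
    println!("{rejecting} rejections are enough to block N+1");
}
```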
+fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let long_timeout = Duration::from_secs(200); + let short_timeout = Duration::from_secs(20); + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + sender_nonce += 1; + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make half of the signers reject the block proposal by the miner to ensure it's marked globally rejected + let rejecting_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .take(num_signers / 2) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in Tenure A to mine block N+1"); + let start_time = Instant::now(); + let mut rejected_hash = None; + let blocks_before = mined_blocks.load(Ordering::SeqCst); + loop { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + if let Some(rejected_hash) = &rejected_hash { + if rejection.signer_signature_hash != *rejected_hash { + return None; + } + } else { + rejected_hash = Some(rejection.signer_signature_hash); + } + if rejecting_signers.contains(&rejected_pubkey) + && rejection.reason_code == RejectCode::TestingDirective + { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::<Vec<_>>(); + if block_rejections.len() == rejecting_signers.len() { + break; + } + assert!(
start_time.elapsed() < long_timeout, + "FAIL: Test timed out while waiting for block proposal rejections", + ); + } + + assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); + + info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + let blocks_after = mined_blocks.load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before + 1); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1 + ); +} From 7d6902f5d8f874a5a74a725e740f73e124877f6b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 14:47:21 -0400 Subject: [PATCH 447/910] Fix test description Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3ada35b0df9..f8bca0da973 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2973,8 +2973,7 @@ fn duplicate_signers() { /// The miner then attempts to mine N+1', and all signers accept the block. /// /// Test Assertion: -/// All signers sign all blocks successfully. -/// The chain advances 2 full reward cycles. +/// Stacks tip advances to N+1' fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From e570031f1f05115553d2d6ecbcc52d2e732fcecd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 5 Sep 2024 14:50:18 -0400 Subject: [PATCH 448/910] test: fix flaky behavior in `miner_forking` --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4d6ba5b8069..a6f975566fa 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1641,6 +1641,7 @@ fn miner_forking() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + let first_proposal_burn_block_timing = 1; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -1673,7 +1674,8 @@ fn miner_forking() { // we're deliberately stalling proposals: don't punish this in this test! 
signer_config.block_proposal_timeout = Duration::from_secs(240); // make sure that we don't allow forking due to burn block timing - signer_config.first_proposal_burn_block_timing = Duration::from_secs(1); + signer_config.first_proposal_burn_block_timing = + Duration::from_secs(first_proposal_burn_block_timing); }, |config| { let localhost = "127.0.0.1"; @@ -1804,8 +1806,8 @@ fn miner_forking() { }) .unwrap(); - // sleep for 1 second to prevent the block timing from allowing a fork by the signer set - thread::sleep(Duration::from_secs(1)); + // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); (sort_tip, true) }; @@ -1885,7 +1887,7 @@ fn miner_forking() { assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_blocks_count).unwrap(), + u64::try_from(nakamoto_blocks_count).unwrap() - 1, // subtract 1 for the first Nakamoto block "There should be no forks in this test" ); From dff1fb286c620099f399c8d97ea3a50d048ef26a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 5 Sep 2024 14:58:54 -0400 Subject: [PATCH 449/910] test: fix `signer_set_rollover` test --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7c7412a6e4f..b931441230f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2790,7 +2790,7 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle) + .nakamoto_first_block_of_cycle(next_reward_cycle) .saturating_add(1); info!("---- Mining to next reward set calculation -----"); From baee54e38b68adb9bc9f8f98452b5a968e2db8c7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 15:44:46 -0400 Subject: [PATCH 450/910] chore: fix potential deadlock condition by avoiding a transaction when opening the chainstate --- stackslib/src/chainstate/stacks/db/mod.rs | 123 +++++++++++++--------- 1 file changed, 75 insertions(+), 48 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index ed3158c7612..23dc79a763b 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1038,13 +1038,17 @@ impl StacksChainState { Ok(config.expect("BUG: no db_config installed")) } - fn apply_schema_migrations<'a>( - tx: &DBTx<'a>, + /// Do we need a schema migration? + /// Return Ok(true) if so + /// Return Ok(false) if not + /// Return Err(..) 
on DB errors, or if this DB is not consistent with `mainnet` or `chain_id` + fn need_schema_migrations( + conn: &Connection, mainnet: bool, - chain_id: u32, - ) -> Result<(), Error> { - let mut db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + chain_id: u32 + ) -> Result<bool, Error> { + let db_config = + StacksChainState::load_db_config(conn).expect("CORRUPTION: no db_config found"); if db_config.mainnet != mainnet { error!( @@ -1062,55 +1066,68 @@ impl StacksChainState { return Err(Error::InvalidChainstateDB); } - if db_config.version != CHAINSTATE_VERSION { - while db_config.version != CHAINSTATE_VERSION { - match db_config.version.as_str() { - "1" => { - // migrate to 2 - info!("Migrating chainstate schema from version 1 to 2"); - for cmd in CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } - } - "2" => { - // migrate to 3 - info!("Migrating chainstate schema from version 2 to 3"); - for cmd in CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + Ok(db_config.version != CHAINSTATE_VERSION) + } + + fn apply_schema_migrations<'a>( + tx: &DBTx<'a>, + mainnet: bool, + chain_id: u32, + ) -> Result<(), Error> { + if !Self::need_schema_migrations(tx, mainnet, chain_id)? { + return Ok(()); + } + + let mut db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + + while db_config.version != CHAINSTATE_VERSION { + match db_config.version.as_str() { + "1" => { + // migrate to 2 + info!("Migrating chainstate schema from version 1 to 2"); + for cmd in CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; + } - } - "3" => { - // migrate to nakamoto 1 - info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { - tx.execute_batch(cmd)?; - } + } + "2" => { + // migrate to 3 + info!("Migrating chainstate schema from version 2 to 3"); + for cmd in CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } - "4" => { - // migrate to nakamoto 2 - info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } + } + "3" => { + // migrate to nakamoto 1 + info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { + tx.execute_batch(cmd)?; } - "5" => { - // migrate to nakamoto 3 - info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + } + "4" => { + // migrate to nakamoto 2 + info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - _ => { - error!( - "Invalid chain state database: expected version = {}, got {}", - CHAINSTATE_VERSION, db_config.version - ); - return Err(Error::InvalidChainstateDB); + } + "5" => { + // migrate to nakamoto 3 + info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } } - db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + _ => { + error!( + "Invalid chain state database: expected version = {}, got {}", + CHAINSTATE_VERSION, db_config.version + ); + return Err(Error::InvalidChainstateDB); + } } + db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); } Ok(()) } @@
-1134,6 +1151,11 @@ impl StacksChainState { StacksChainState::instantiate_db(mainnet, chain_id, index_path, true) } else { let mut marf = StacksChainState::open_index(index_path)?; + if !Self::need_schema_migrations(marf.sqlite_conn(), mainnet, chain_id)? { + return Ok(marf); + } + + // need a migration let tx = marf.storage_tx()?; StacksChainState::apply_schema_migrations(&tx, mainnet, chain_id)?; StacksChainState::add_indexes(&tx)?; @@ -1155,6 +1177,11 @@ impl StacksChainState { StacksChainState::instantiate_db(mainnet, chain_id, index_path, false) } else { let mut marf = StacksChainState::open_index(index_path)?; + + // do we need to apply a schema change? + let db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + let tx = marf.storage_tx()?; StacksChainState::add_indexes(&tx)?; tx.commit()?; From db88eb8daa40e989b5b76336701c92b9bb28cef8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 15:46:41 -0400 Subject: [PATCH 451/910] chore: fmt --- stackslib/src/chainstate/stacks/db/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 23dc79a763b..49ea557652c 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1045,7 +1045,7 @@ impl StacksChainState { fn need_schema_migrations( conn: &Connection, mainnet: bool, - chain_id: u32 + chain_id: u32, ) -> Result { let db_config = StacksChainState::load_db_config(conn).expect("CORRUPTION: no db_config found"); @@ -1106,7 +1106,9 @@ impl StacksChainState { } "4" => { // migrate to nakamoto 2 - info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + info!( + "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" + ); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { tx.execute_batch(cmd)?; } From 3fb981fa8985d5c967570f4e07c84cec8c109376 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 5 Sep 2024 16:39:19 -0400 Subject: [PATCH 452/910] chore: Remove unnecessary function `slice_partialeq()` --- stacks-common/src/util/mod.rs | 13 ------------- stackslib/src/chainstate/stacks/index/node.rs | 13 +++++-------- stackslib/src/chainstate/stacks/index/proofs.rs | 9 ++++----- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 8575fee283d..13ab79dcb30 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -85,19 +85,6 @@ impl error::Error for HexError { } } -/// PartialEq helper method for slices of arbitrary length. 
-pub fn slice_partialeq(s1: &[T], s2: &[T]) -> bool { - if s1.len() != s2.len() { - return false; - } - for i in 0..s1.len() { - if s1[i] != s2[i] { - return false; - } - } - true -} - pub mod db_common { use std::{thread, time}; diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 109dbaa8fc2..19e8aa327f0 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -27,7 +27,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; use stacks_common::util::hash::to_hex; -use stacks_common::util::slice_partialeq; use crate::chainstate::stacks::index::bits::{ get_path_byte_len, get_ptrs_byte_len, path_from_bytes, ptrs_from_bytes, write_path_to_bytes, @@ -597,7 +596,7 @@ impl TrieCursor { impl PartialEq for TrieLeaf { fn eq(&self, other: &TrieLeaf) -> bool { - self.path == other.path && slice_partialeq(self.data.as_bytes(), other.data.as_bytes()) + self.path == other.path && self.data.as_bytes() == other.data.as_bytes() } } @@ -730,9 +729,7 @@ impl fmt::Debug for TrieNode48 { impl PartialEq for TrieNode48 { fn eq(&self, other: &TrieNode48) -> bool { - self.path == other.path - && slice_partialeq(&self.ptrs, &other.ptrs) - && slice_partialeq(&self.indexes, &other.indexes) + self.path == other.path && self.ptrs == other.ptrs && self.indexes == other.indexes } } @@ -755,8 +752,8 @@ impl TrieNode48 { } TrieNode48 { path: node16.path.clone(), - indexes: indexes, - ptrs: ptrs, + indexes, + ptrs, } } } @@ -781,7 +778,7 @@ impl fmt::Debug for TrieNode256 { impl PartialEq for TrieNode256 { fn eq(&self, other: &TrieNode256) -> bool { - self.path == other.path && slice_partialeq(&self.ptrs, &other.ptrs) + self.path == other.path && self.ptrs == other.ptrs } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 9348a8b4f98..815def9c910 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -28,7 +28,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; use stacks_common::util::hash::to_hex; -use stacks_common::util::slice_partialeq; use crate::chainstate::stacks::index::bits::{ get_leaf_hash, get_node_hash, read_root_hash, write_path_to_bytes, @@ -118,19 +117,19 @@ impl PartialEq for TrieMerkleProofType { ( TrieMerkleProofType::Node4((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node4((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Node16((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node16((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Node48((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node48((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Node256((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node256((ref other_chr, ref 
other_node, ref other_hashes)),
-            ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes),
+            ) => chr == other_chr && node == other_node && hashes == other_hashes,
             (
                 TrieMerkleProofType::Leaf((ref chr, ref node)),
                 TrieMerkleProofType::Leaf((ref other_chr, ref other_node)),

From 7217594ec82c76a425c46271d5cb15bd3b53d3f1 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 5 Sep 2024 18:16:09 -0400
Subject: [PATCH 453/910] Add locally_rejected_blocks_overriden_by_global_acceptance
 test

Signed-off-by: Jacinta Ferrant
---
 .github/workflows/bitcoin-tests.yml        |   1 +
 testnet/stacks-node/src/tests/signer/v0.rs | 213 ++++++++++++++++++++-
 2 files changed, 213 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index e550b2b8575..2269416940f 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -101,6 +101,7 @@ jobs:
           - tests::signer::v0::min_gap_between_blocks
           - tests::signer::v0::duplicate_signers
           - tests::signer::v0::allow_reorg_locally_accepted_block_if_globally_rejected_succeeds
+          - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance
           - tests::nakamoto_integrations::stack_stx_burn_op_integration_test
           - tests::nakamoto_integrations::check_block_heights
           - tests::nakamoto_integrations::clarity_burn_state
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index f8bca0da973..edd4073f4f4 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -3035,7 +3035,7 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
     let transfer_tx =
         make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
     let tx = submit_tx(&http_origin, &transfer_tx);
-    info!("Submitted tx {tx} in Tenure A to mine block N+1");
+    info!("Submitted tx {tx} to mine block N+1");
     let start_time = Instant::now();
     let mut rejected_hash = None;
     let blocks_before = mined_blocks.load(Ordering::SeqCst);
@@ -3104,3 +3104,214 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
         info_before.stacks_tip_height + 1
     );
 }
+
+#[test]
+#[ignore]
+/// Test that signers that reject a block locally, but which was accepted globally, will accept
+/// a subsequent block built on top of the accepted block.
+///
+/// Test Setup:
+/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
+/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
+///
+/// Test Execution:
+/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers.
+/// The miner then attempts to mine N+2, and all signers accept the block.
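+/// (With five signers, `num_signers * 3 / 10` rounds down to a single rejecting signer, so the
+/// remaining four signatures still clear the ~70% signing-weight threshold and the block is
+/// accepted globally despite the local rejection.)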
+///
+/// Test Assertion:
+/// Stacks tip advances to N+2
+fn locally_rejected_blocks_overriden_by_global_acceptance() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let nmb_txs = 3;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let long_timeout = Duration::from_secs(200);
+    let short_timeout = Duration::from_secs(30);
+    signer_test.boot_to_epoch_3();
+    info!("------------------------- Test Mine Nakamoto Block N -------------------------");
+    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    let start_time = Instant::now();
+    // submit a tx so that the miner will mine a stacks block
+    let mut sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to mine block N");
+    while mined_blocks.load(Ordering::SeqCst) <= blocks_before {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+
+    sender_nonce += 1;
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+    let nmb_signatures = signer_test
+        .stacks_client
+        .get_tenure_tip(&info_after.stacks_tip_consensus_hash)
+        .expect("Failed to get tip")
+        .as_stacks_nakamoto()
+        .expect("Not a Nakamoto block")
+        .signer_signature
+        .len();
+    assert_eq!(nmb_signatures, num_signers);
+
+    info!("------------------------- Mine Nakamoto Block N+1 -------------------------");
+    // Make less than 30% of the signers reject the block to ensure it is marked globally accepted
+    let rejecting_signers: Vec<_> = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .take(num_signers * 3 / 10)
+        .collect();
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(rejecting_signers.clone());
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+    info!("Submitted tx {tx} to mine block N+1");
+    let start_time = Instant::now();
+    let mut rejected_hash = None;
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    while mined_blocks.load(Ordering::SeqCst) <= blocks_before {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+    loop {
+        let stackerdb_events = test_observer::get_stackerdb_chunks();
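+        // Deserialize every pushed StackerDB chunk, keeping only rejections of the current
+        // proposal that came from the designated rejecting signers.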
+        let block_rejections = stackerdb_events
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+            .filter_map(|chunk| {
+                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                    .expect("Failed to deserialize SignerMessage");
+                match message {
+                    SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => {
+                        let rejected_pubkey = rejection
+                            .recover_public_key()
+                            .expect("Failed to recover public key from rejection");
+                        if let Some(rejected_hash) = &rejected_hash {
+                            if rejection.signer_signature_hash != *rejected_hash {
+                                return None;
+                            }
+                        } else {
+                            rejected_hash = Some(rejection.signer_signature_hash);
+                        }
+                        if rejecting_signers.contains(&rejected_pubkey)
+                            && rejection.reason_code == RejectCode::TestingDirective
+                        {
+                            Some(rejection)
+                        } else {
+                            None
+                        }
+                    }
+                    _ => None,
+                }
+            })
+            .collect::<Vec<_>>();
+        if block_rejections.len() == rejecting_signers.len() {
+            break;
+        }
+        assert!(
+            start_time.elapsed() < long_timeout,
+            "FAIL: Test timed out while waiting for block proposal rejections",
+        );
+    }
+    // Assert the block was mined
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(blocks_before + 1, mined_blocks.load(Ordering::SeqCst));
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+    let nmb_signatures = signer_test
+        .stacks_client
+        .get_tenure_tip(&info_after.stacks_tip_consensus_hash)
+        .expect("Failed to get tip")
+        .as_stacks_nakamoto()
+        .expect("Not a Nakamoto block")
+        .signer_signature
+        .len();
+    assert_eq!(nmb_signatures, num_signers - rejecting_signers.len());
+
+    info!("------------------------- Test Mine Nakamoto Block N+2' -------------------------");
+    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
+
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to mine block N+2");
+    while mined_blocks.load(Ordering::SeqCst) <= blocks_before {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+    let blocks_after = mined_blocks.load(Ordering::SeqCst);
+    assert_eq!(blocks_after, blocks_before + 1);
+
+    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height,
+    );
+    let nmb_signatures = signer_test
+        .stacks_client
+        .get_tenure_tip(&info_after.stacks_tip_consensus_hash)
+        .expect("Failed to get tip")
+        .as_stacks_nakamoto()
+        .expect("Not a Nakamoto block")
+        .signer_signature
+        .len();
+    assert_eq!(nmb_signatures, num_signers);
+}

From cfd2c37e71fdb431372c400e3e000b5191dd28b5 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 5 Sep 2024 18:22:26 -0400
Subject: [PATCH 454/910] Do not store blocks that fail the initial checks

Signed-off-by: Jacinta Ferrant
---
 stacks-signer/src/v0/signer.rs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index c3cf204dd4c..597ba191989 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -480,17 +480,18 @@ impl Signer {
                 Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"),
             }
         } else {
-            // We don't know if proposal is valid, submit to stacks-node for further checks
+            // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally.
+            // Do not store invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown.
             stacks_client
                 .submit_block_for_validation(block_info.block.clone())
                 .unwrap_or_else(|e| {
                     warn!("{self}: Failed to submit block for validation: {e:?}");
                 });
-        }
-        self.signer_db
-            .insert_block(&block_info)
-            .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB"));
+            self.signer_db
+                .insert_block(&block_info)
+                .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB"));
+        }
     }

     /// Handle block response messages from a signer

From 2be05f9a0aa66ad72950c2407b76ae02cf054a23 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 5 Sep 2024 20:40:16 -0400
Subject: [PATCH 455/910] Fix broken build from prior db change commit

Signed-off-by: Jacinta Ferrant
---
 stackslib/src/chainstate/stacks/db/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs
index 49ea557652c..a942ec7fd15 100644
--- a/stackslib/src/chainstate/stacks/db/mod.rs
+++ b/stackslib/src/chainstate/stacks/db/mod.rs
@@ -1181,8 +1181,8 @@ impl StacksChainState {
             let mut marf = StacksChainState::open_index(index_path)?;

             // do we need to apply a schema change?
-            let db_config =
-                StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found");
+            let db_config = StacksChainState::load_db_config(marf.sqlite_conn())
+                .expect("CORRUPTION: no db_config found");

             let tx = marf.storage_tx()?;
             StacksChainState::add_indexes(&tx)?;

From 1a1b0764fa5541d4434f32fa8215762bdefaf7cb Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 5 Sep 2024 21:46:35 -0400
Subject: [PATCH 456/910] chore: remove .mined_blocks and replace it with
 .last_mined_block, and use the highest tenure block from the chainstate to
 deduce where we should build (since it's not possible anymore to keep
 .mined_blocks coherent with the node's chainstate)

---
 .../stacks-node/src/nakamoto_node/miner.rs | 76 +++++++++----------
 1 file changed, 38 insertions(+), 38 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 6f0f6f88ac0..ba32122b6da 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -138,8 +138,8 @@ pub struct BlockMinerThread {
     keychain: Keychain,
     /// burnchain configuration
     burnchain: Burnchain,
-    /// Set of blocks that we have mined
-    mined_blocks: Vec<NakamotoBlock>,
+    /// Last block mined
+    last_block_mined: Option<NakamotoBlock>,
     /// Copy of the node's registered VRF key
     registered_key: RegisteredKey,
     /// Burnchain block snapshot which elected this miner
@@ -172,7 +172,7 @@ impl BlockMinerThread {
             globals: rt.globals.clone(),
             keychain: rt.keychain.clone(),
             burnchain: rt.burnchain.clone(),
-            mined_blocks: vec![],
+            last_block_mined: None,
             registered_key,
             burn_election_block,
             burn_block,
@@ -402,7 +402,7 @@ impl BlockMinerThread {
             // update mined-block counters and mined-tenure counters
             self.globals.counters.bump_naka_mined_blocks();
-            if self.mined_blocks.is_empty() {
+            if self.last_block_mined.is_none() {
                 // this is the first block of the tenure, bump tenure counter
                 self.globals.counters.bump_naka_mined_tenures();
             }
@@ -411,8 +411,7 @@ impl BlockMinerThread {
             Self::fault_injection_block_announce_stall(&new_block);
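             // Let the chains coordinator know about the new block before it is recorded below
             // as the last block mined in this tenure.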
self.globals.coord().announce_new_stacks_block(); - // store mined block - self.mined_blocks.push(new_block); + self.last_block_mined = Some(new_block); } let Ok(sort_db) = SortitionDB::open( @@ -913,32 +912,42 @@ impl BlockMinerThread { burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, ) -> Result { + // load up stacks chain tip + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { + error!("Failed to load canonical Stacks tip: {:?}", &e); + NakamotoNodeError::ParentNotFound + })?; + + let stacks_tip_block_id = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + let tenure_tip_opt = NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_tip_block_id, + &self.burn_election_block.consensus_hash, + ) + .map_err(|e| { + error!( + "Could not query header info for tenure tip {} off of {}: {:?}", + &self.burn_election_block.consensus_hash, &stacks_tip_block_id, &e + ); + NakamotoNodeError::ParentNotFound + })?; + // The nakamoto miner must always build off of a chain tip that is the highest of: // 1. The highest block in the miner's current tenure // 2. The highest block in the current tenure's parent tenure + // // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. - let stacks_tip_header = if let Some(block) = self.mined_blocks.last() { - test_debug!( - "Stacks block parent ID is last mined block {}", - &block.block_id() + let stacks_tip_header = if let Some(tenure_tip) = tenure_tip_opt { + debug!( + "Stacks block parent ID is last block in tenure ID {}", + &tenure_tip.consensus_hash ); - let stacks_block_id = block.block_id(); - NakamotoChainState::get_block_header(chain_state.db(), &stacks_block_id) - .map_err(|e| { - error!( - "Could not query header info for last-mined block ID {}: {:?}", - &stacks_block_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("No header for parent tenure ID {}", &stacks_block_id); - NakamotoNodeError::ParentNotFound - })? + tenure_tip } else { - // no mined blocks yet - test_debug!( - "Stacks block parent ID is last block in parent tenure ID {}", + // This tenure is empty on the canonical fork, so mine the first tenure block. + debug!( + "Stacks block parent ID is last block in parent tenure tipped by {}", &self.parent_tenure_id ); @@ -957,18 +966,9 @@ impl BlockMinerThread { NakamotoNodeError::ParentNotFound })?; - // NOTE: this is the soon-to-be parent's block ID, since it's the tip we mine on top - // of. We're only interested in performing queries relative to the canonical tip. 
-            let (stacks_tip_ch, stacks_tip_bh) =
-                SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| {
-                    error!("Failed to load canonical Stacks tip: {:?}", &e);
-                    NakamotoNodeError::ParentNotFound
-                })?;
-
-            let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh);
             let header_opt = NakamotoChainState::get_highest_block_header_in_tenure(
                 &mut chain_state.index_conn(),
-                &stacks_tip,
+                &stacks_tip_block_id,
                 &parent_tenure_header.consensus_hash,
             )
             .map_err(|e| {
@@ -1004,7 +1004,7 @@ impl BlockMinerThread {
             }
         };

-        test_debug!(
+        debug!(
            "Miner: stacks tip parent header is {} {:?}",
            &stacks_tip_header.index_block_hash(),
            &stacks_tip_header
@@ -1132,7 +1132,7 @@ impl BlockMinerThread {
             .make_vrf_proof()
             .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?;

-        if self.mined_blocks.is_empty() && parent_block_info.parent_tenure.is_none() {
+        if self.last_block_mined.is_none() && parent_block_info.parent_tenure.is_none() {
             warn!("Miner should be starting a new tenure, but failed to load parent tenure info");
             return Err(NakamotoNodeError::ParentNotFound);
         };

From a19af95d20b368960429570079a4d385f767affc Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 5 Sep 2024 22:45:29 -0400
Subject: [PATCH 457/910] Add reorg_locally_accepted_blocks_across_tenures_succeeds
 integration test

Signed-off-by: Jacinta Ferrant
---
 .github/workflows/bitcoin-tests.yml         |   3 +-
 stacks-signer/src/v0/signer.rs              |  24 +-
 stackslib/src/chainstate/stacks/miner.rs    |   4 +-
 testnet/stacks-node/src/event_dispatcher.rs |   2 +-
 testnet/stacks-node/src/tests/signer/v0.rs  | 271 +++++++++++++++++++-
 5 files changed, 295 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index 2269416940f..762563871ce 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -100,8 +100,9 @@ jobs:
           - tests::signer::v0::signers_broadcast_signed_blocks
           - tests::signer::v0::min_gap_between_blocks
           - tests::signer::v0::duplicate_signers
-          - tests::signer::v0::allow_reorg_locally_accepted_block_if_globally_rejected_succeeds
+          - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection
           - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance
+          - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds
           - tests::nakamoto_integrations::stack_stx_burn_op_integration_test
           - tests::nakamoto_integrations::check_block_heights
           - tests::nakamoto_integrations::clarity_burn_state
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 597ba191989..4da514d1d5e 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -49,6 +49,12 @@ pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex<
     Option<Vec<StacksPublicKey>>,
 > = std::sync::Mutex::new(None);

+#[cfg(any(test, feature = "testing"))]
+/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list
+pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex<
+    Option<Vec<StacksPublicKey>>,
+> = std::sync::Mutex::new(None);
+
 /// The stacks signer registered for the reward cycle
 #[derive(Debug)]
 pub struct Signer {
@@ -148,6 +154,23 @@ impl SignerTrait<SignerMessage> for Signer {
         for message in messages {
             match message {
                 SignerMessage::BlockProposal(block_proposal) => {
+                    #[cfg(any(test, feature = "testing"))]
+                    if let Some(public_keys) =
+                        &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap()
+                    {
+                        if public_keys.contains(
+                            &stacks_common::types::chainstate::StacksPublicKey::from_private(
+ &self.private_key, + ), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + continue; + } + } self.handle_block_proposal( stacks_client, sortition_state, @@ -442,7 +465,6 @@ impl Signer { &self.private_key, ), ) { - // Do an extra check just so we don't log EVERY time. warn!("{self}: Rejecting block proposal automatically due to testing directive"; "block_id" => %block_proposal.block.block_id(), "height" => block_proposal.block.header.chain_length, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f0e4c96307f..0195385d3b0 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -321,7 +321,7 @@ pub struct TransactionSuccessEvent { } /// Represents an event for a failed transaction. Something went wrong when processing this transaction. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct TransactionErrorEvent { #[serde(deserialize_with = "hex_deserialize", serialize_with = "hex_serialize")] pub txid: Txid, @@ -378,7 +378,7 @@ pub enum TransactionResult { /// This struct is used to transmit data about transaction results through either the `mined_block` /// or `mined_microblock` event. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum TransactionEvent { /// Transaction has already succeeded. Success(TransactionSuccessEvent), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 34e42501ace..7ad55a994b4 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -142,7 +142,7 @@ pub struct MinedMicroblockEvent { pub anchor_block: BlockHeaderHash, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct MinedNakamotoBlockEvent { pub target_burn_height: u64, pub parent_block_id: String, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index edd4073f4f4..6a6a0867c97 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -51,7 +51,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::TEST_REJECT_ALL_BLOCK_PROPOSAL; +use stacks_signer::v0::signer::{TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_REJECT_ALL_BLOCK_PROPOSAL}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -2962,7 +2962,8 @@ fn duplicate_signers() { #[test] #[ignore] /// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt -/// by the miner to reorg their prior signed block. +/// by the miner essentially reorg their prior locally accepted/signed block, i.e. the globally rejected block overrides +/// their local view. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. 
@@ -2974,7 +2975,7 @@ fn duplicate_signers() {
 ///
 /// Test Assertion:
 /// Stacks tip advances to N+1'
-fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
+fn locally_accepted_blocks_overriden_by_global_rejection() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -3002,6 +3003,7 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
     signer_test.boot_to_epoch_3();
     info!("------------------------- Test Mine Nakamoto Block N -------------------------");
+    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
     let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
     let blocks_before = mined_blocks.load(Ordering::SeqCst);
     let start_time = Instant::now();
@@ -3019,6 +3021,14 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
         thread::sleep(Duration::from_secs(1));
     }
     sender_nonce += 1;
+    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
     info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------");
     // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected
@@ -3039,6 +3049,7 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
     let start_time = Instant::now();
     let mut rejected_hash = None;
     let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
     loop {
         let stackerdb_events = test_observer::get_stackerdb_chunks();
         let block_rejections = stackerdb_events
@@ -3081,6 +3092,12 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
     }
     assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst));
+    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    assert_eq!(info_before, info_after);
+    // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1 = nakamoto_blocks.last().unwrap();
+    assert_ne!(block_n_1, block_n);
     info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------");
     let info_before = signer_test.stacks_client.get_peer_info().unwrap();
@@ -3103,6 +3120,22 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() {
         info_after.stacks_tip_height,
         info_before.stacks_tip_height + 1
     );
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N+1'
+    let start_time = Instant::now();
+    while test_observer::get_mined_nakamoto_blocks().last().unwrap() == block_n_1 {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1_prime = nakamoto_blocks.last().unwrap();
+    assert_eq!(
+        info_after.stacks_tip.to_string(),
+        block_n_1_prime.block_hash
+    );
+    assert_ne!(block_n_1_prime, block_n_1);
 }

 #[test]
 #[ignore]
 /// Test that signers that reject a block locally, but which was accepted globally, will accept
 /// a subsequent block built on top of the accepted block.
 ///
 /// Test Setup:
 /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
 /// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
 ///
 /// Test Execution:
 /// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers.
 /// The miner then attempts to mine N+2, and all signers accept the block.
 ///
 /// Test Assertion:
 /// Stacks tip advances to N+2
@@ -3187,6 +3220,11 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() {
         .len();
     assert_eq!(nmb_signatures, num_signers);
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
+
     info!("------------------------- Mine Nakamoto Block N+1 -------------------------");
     // Make less than 30% of the signers reject the block to ensure it is marked globally accepted
     let rejecting_signers: Vec<_> = signer_test
@@ -3278,7 +3316,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() {
         .len();
     assert_eq!(nmb_signatures, num_signers - rejecting_signers.len());

-    info!("------------------------- Test Mine Nakamoto Block N+2' -------------------------");
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N+1
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1 = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash);
+    assert_ne!(block_n_1, block_n);
+
+    info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------");
     let info_before = signer_test.stacks_client.get_peer_info().unwrap();
     let blocks_before = mined_blocks.load(Ordering::SeqCst);
     TEST_REJECT_ALL_BLOCK_PROPOSAL
@@ -3314,4 +3358,223 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() {
         .signer_signature
         .len();
     assert_eq!(nmb_signatures, num_signers);
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N+2
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_2 = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash);
+    assert_ne!(block_n_2, block_n_1);
+}
+
+#[test]
+#[ignore]
+/// Test that signers that have accepted a locally signed block N+1 built in tenure A can sign a block proposed during a
+/// new tenure B built upon the last globally accepted block N, i.e. a reorg can occur at a tenure boundary.
+///
+/// Test Setup:
+/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
+/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
+///
+/// Test Execution:
+/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers
+/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers accept.
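+/// (Block N+1 was only ever locally accepted: with three of the five signers ignoring it, it
+/// could not reach the ~70% signing-weight needed for global acceptance, so replacing it with
+/// N+1' at the tenure boundary does not conflict with any globally accepted block.)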
+///
+/// Test Assertion:
+/// Stacks tip advances to N+1'
+fn reorg_locally_accepted_blocks_across_tenures_succeeds() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let nmb_txs = 2;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let short_timeout = Duration::from_secs(30);
+    signer_test.boot_to_epoch_3();
+    info!("------------------------- Test Mine Nakamoto Block N in Tenure A -------------------------");
+    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    let start_time = Instant::now();
+    // submit a tx so that the miner will mine a stacks block
+    let mut sender_nonce = 0;
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to mine block N");
+    while mined_blocks.load(Ordering::SeqCst) <= blocks_before {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+
+    sender_nonce += 1;
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
+
+    info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------");
+    // Make more than 70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected
+    let ignoring_signers: Vec<_> = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .take(num_signers * 7 / 10)
+        .collect();
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS
+        .lock()
+        .unwrap()
+        .replace(ignoring_signers.clone());
+    // Clear the stackerdb chunks
+    test_observer::clear();
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to attempt to mine block N+1");
+    let start_time = Instant::now();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    let mut last_hash = None;
+    loop {
+        let ignored_signers = test_observer::get_stackerdb_chunks()
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+            .filter_map(|chunk| {
+                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                    .expect("Failed to deserialize SignerMessage");
+                match message {
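+                    // Only count acceptances for the most recently proposed block, checking each
+                    // signature against the ignoring signers' public keys.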
+                    SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => {
+                        // Only care about the last proposed block
+                        if let Some(h) = &last_hash {
+                            if h != &hash {
+                                return None;
+                            }
+                        } else {
+                            last_hash = Some(hash);
+                        }
+                        ignoring_signers
+                            .iter()
+                            .find(|key| key.verify(hash.bits(), &signature).is_ok())
+                    }
+                    _ => None,
+                }
+            })
+            .collect::<Vec<_>>();
+        if ignored_signers.len() + ignoring_signers.len() == num_signers {
+            break;
+        }
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block proposal acceptance",
+        );
+        sleep_ms(1000);
+    }
+    let blocks_after = mined_blocks.load(Ordering::SeqCst);
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(blocks_after, blocks_before);
+    assert_eq!(info_after, info_before);
+    // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1 = nakamoto_blocks.last().unwrap();
+    assert_ne!(block_n_1, block_n);
+    assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash);
+
+    info!(
+        "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------"
+    );
+    let commits_submitted = signer_test.running_nodes.commits_submitted.clone();
+    let commits_before = commits_submitted.load(Ordering::SeqCst);
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let commits_count = commits_submitted.load(Ordering::SeqCst);
+            Ok(commits_count > commits_before)
+        },
+    )
+    .unwrap();
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
+    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    let start_time = Instant::now();
+    // submit a tx so that the miner will mine a stacks block
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to mine block N+1'");
+    while mined_blocks.load(Ordering::SeqCst) <= blocks_before {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+    let nmb_signatures = signer_test
+        .stacks_client
+        .get_tenure_tip(&info_after.stacks_tip_consensus_hash)
+        .expect("Failed to get tip")
+        .as_stacks_nakamoto()
+        .expect("Not a Nakamoto block")
+        .signer_signature
+        .len();
+    assert_eq!(nmb_signatures, num_signers);
+
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N+1'
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1_prime = nakamoto_blocks.last().unwrap();
+    assert_eq!(
+        info_after.stacks_tip.to_string(),
+        block_n_1_prime.block_hash
+    );
+    assert_ne!(block_n_1_prime, block_n);
+}

From 2cee593ccaa212b74ec7fcb3bf835ad9d3a7e906 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Fri, 6 Sep 2024 00:39:46 -0400
Subject: [PATCH 458/910] fix: correct logic in `validate_timestamp` and fix
 integration test

---
 .../stacks-node/src/nakamoto_node/miner.rs |  10 +-
testnet/stacks-node/src/tests/signer/v0.rs | 103 +++++++++--------- 2 files changed, 60 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cd811a9346d..6cb7a0ce659 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,7 +45,6 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; -use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -1068,9 +1067,12 @@ impl BlockMinerThread { ); NakamotoNodeError::ParentNotFound })?; - let current_timestamp = get_epoch_time_secs(); - let time_since_parent_ms = - current_timestamp.saturating_sub(stacks_parent_header.burn_header_timestamp) * 1000; + let current_timestamp = x.header.timestamp; + let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() { + Some(naka_header) => naka_header.timestamp, + None => stacks_parent_header.burn_header_timestamp, + }; + let time_since_parent_ms = current_timestamp.saturating_sub(parent_timestamp) * 1000; if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; "current_timestamp" => current_timestamp, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a6f975566fa..45ab322d019 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2895,68 +2895,73 @@ fn min_gap_between_blocks() { signer_test.boot_to_epoch_3(); - let proposals_before = signer_test + let blocks_before = signer_test .running_nodes - .nakamoto_blocks_proposed + .nakamoto_blocks_mined .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); + info!("Ensure that the first Nakamoto block is mined after the gap is exceeded"); + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + assert_eq!(blocks.len(), 1); + let first_block = blocks.last().unwrap(); + let blocks = test_observer::get_blocks(); + let parent = blocks + .iter() + .find(|b| b.get("block_height").unwrap() == first_block.stacks_block_height - 1) + .unwrap(); + let first_block_time = first_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let parent_block_time = parent.get("burn_block_time").unwrap().as_u64().unwrap(); + assert!( + Duration::from_secs(first_block_time - parent_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "First block proposed before gap was exceeded: {}s - {}s > {}ms", + first_block_time, + parent_block_time, + time_between_blocks_ms + ); - // submit a tx so that the miner will mine a block + // Submit a tx so that the miner will mine a block let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - info!("Submitted transfer tx and waiting for block proposal. 
Ensure it does not arrive before the gap is exceeded"); - let start_time = Instant::now(); - loop { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - if blocks_proposed > proposals_before { - assert!( - start_time.elapsed().as_millis() >= time_between_blocks_ms.into(), - "Block proposed before gap was exceeded" - ); - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - - debug!("Ensure that the block is mined after the gap is exceeded"); - - let start = Instant::now(); - let duration = 30; - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - loop { - let blocks_mined = signer_test + info!("Submitted transfer tx and waiting for block to be processed. Ensure it does not arrive before the gap is exceeded"); + wait_for(60, || { + let blocks_processed = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); + Ok(blocks_processed > blocks_before) + }) + .unwrap(); - let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && info.stacks_tip_height == info_before.stacks_tip_height + 1 - { - break; - } - - debug!( - "blocks_mined: {},{}, stacks_tip_height: {},{}", - blocks_mined, blocks_before, info_before.stacks_tip_height, info.stacks_tip_height - ); - - std::thread::sleep(Duration::from_millis(100)); - assert!( - start.elapsed() < Duration::from_secs(duration), - "Block not mined within timeout" - ); - } + // Verify that the second Nakamoto block is mined after the gap is exceeded + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + let last_block = blocks.last().unwrap(); + let last_block_time = last_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let penultimate_block = blocks.get(blocks.len() - 2).unwrap(); + let penultimate_block_time = penultimate_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + assert!( + Duration::from_secs(last_block_time - penultimate_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "Block proposed before gap was exceeded: {}s - {}s > {}ms", + last_block_time, + penultimate_block_time, + time_between_blocks_ms + ); signer_test.shutdown(); } From 025cc0d4e84501589ed90518e7fb7fe8b0020ec9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 01:00:00 -0400 Subject: [PATCH 459/910] fix: ignore rejections for other blocks in sign coordinator --- .../stacks-node/src/nakamoto_node/sign_coordinator.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6c54e50af84..beece7f99e7 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -919,6 +919,17 @@ impl SignCoordinator { responded_signers.insert(signer_pubkey); } SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != rejected_data.signer_signature_hash { + warn!( + "Processed rejection for a different block. 
Will try to continue."; + "block_signer_signature_hash" => %block_sighash, + "rejected_data.signer_signature_hash" => %rejected_data.signer_signature_hash, + "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id, + ); + continue; + } let rejected_pubkey = match rejected_data.recover_public_key() { Ok(rejected_pubkey) => { if rejected_pubkey != signer_pubkey { From 557a4ca43eb42132baaf800c5b849604cb93b5e8 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 12:22:23 -0400 Subject: [PATCH 460/910] fix: update `/v2/stacker_set/` to `/v3/stacker_set/` Matches latest node release, 2.5.0.0.7. --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b6337364dbd..554a8361bde 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -792,7 +792,7 @@ impl StacksClient { } fn reward_set_path(&self, reward_cycle: u64) -> String { - format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) + format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } fn fees_transaction_path(&self) -> String { From 66ee29632c2fff4373b3a3791608f27ad17b754a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 13:07:44 -0400 Subject: [PATCH 461/910] fix: `/v2/block_proposal` -> `/v3/block_proposal` --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 554a8361bde..ebc13980716 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -767,7 +767,7 @@ impl StacksClient { } fn block_proposal_path(&self) -> String { - format!("{}/v2/block_proposal", self.http_origin) + format!("{}/v3/block_proposal", self.http_origin) } fn sortition_info_path(&self) -> String { From 1f74c451af5a680b2d9a2be0611c214791fc595f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 13:17:22 -0400 Subject: [PATCH 462/910] docs: update changelog --- stacks-signer/CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index dabe0b346a6..aa2b87deb7e 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,16 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [2.5.0.0.5.3] + +### Added + +### Changed + +- Update node endpoints to match stacks-core release 2.5.0.0.7 + - `/v2/block_proposal` -> `/v3/block_proposal` + - `/v2/stacker_set` -> `/v3/stacker_set` + ## [2.5.0.0.5.2] ### Added From 47c562a9d586a325a28629b5b9ede0203d0be8c5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 22:20:25 -0400 Subject: [PATCH 463/910] fix: make a stackerdb shrink if its signer list becomes smaller than that in the DB --- stackslib/src/net/stackerdb/db.rs | 11 +++ stackslib/src/net/stackerdb/mod.rs | 8 ++- stackslib/src/net/stackerdb/tests/db.rs | 96 +++++++++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 1dab3f40523..2b735668ac4 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -293,6 +293,15 @@ impl<'a> StackerDBTx<'a> { Ok(()) } + /// Shrink a StackerDB. Remove all slots at and beyond a particular slot ID. 
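+    /// Called from `reconfigure_stackerdb` so that chunk data for slots dropped by a
+    /// reconfiguration is deleted rather than lingering forever (the bug tracked in #5142).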
+ fn shrink_stackerdb(&self, stackerdb_id: i64, first_slot_id: u32) -> Result<(), net_error> { + let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1 AND slot_id >= ?2"; + let args = params![&stackerdb_id, &first_slot_id]; + let mut stmt = self.sql_tx.prepare(&qry)?; + stmt.execute(args)?; + Ok(()) + } + /// Update a database's storage slots, e.g. from new configuration state in its smart contract. /// Chunk data for slots that no longer exist will be dropped. /// Newly-created slots will be instantiated with empty data. @@ -343,6 +352,8 @@ impl<'a> StackerDBTx<'a> { stmt.execute(args)?; } } + debug!("Shrink {} to {} slots", smart_contract, total_slots_read); + self.shrink_stackerdb(stackerdb_id, total_slots_read)?; Ok(()) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index d310998a194..ea01b1b22a9 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -341,8 +341,14 @@ impl StackerDBs { &e ); } - } else if new_config != stackerdb_config && new_config.signers.len() > 0 { + } else if (new_config != stackerdb_config && new_config.signers.len() > 0) + || (new_config == stackerdb_config + && new_config.signers.len() + != self.get_slot_versions(&stackerdb_contract_id)?.len()) + { // only reconfigure if the config has changed + // (that second check on the length is needed in case the node is a victim of + // #5142, which was a bug whereby a stackerdb could never shrink) if let Err(e) = self.reconfigure_stackerdb(&stackerdb_contract_id, &new_config) { warn!( "Failed to create or reconfigure StackerDB {stackerdb_contract_id}: DB error {:?}", diff --git a/stackslib/src/net/stackerdb/tests/db.rs b/stackslib/src/net/stackerdb/tests/db.rs index 7371b6b9c5d..9bcf8005295 100644 --- a/stackslib/src/net/stackerdb/tests/db.rs +++ b/stackslib/src/net/stackerdb/tests/db.rs @@ -20,6 +20,7 @@ use std::path::Path; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use libstackerdb::SlotMetadata; +use rusqlite::params; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; @@ -649,6 +650,16 @@ fn test_reconfigure_stackerdb() { initial_metadata.push((slot_metadata, chunk_data)); } + tx.commit().unwrap(); + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let reconfigured_pks = vec![ // first five slots are unchanged @@ -722,6 +733,91 @@ fn test_reconfigure_stackerdb() { assert_eq!(chunk.len(), 0); } } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + // reconfigure with fewer slots + let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let reconfigured_pks = vec![ + // first five slots are unchanged + pks[0], pks[1], pks[2], pks[3], pks[4], + // next five slots are different, so their contents will be dropped and versions and write + // timestamps reset + new_pks[0], new_pks[1], 
new_pks[2], new_pks[3], + new_pks[4], + // slots 10-15 will disappear + ]; + let reconfigured_addrs: Vec<_> = reconfigured_pks + .iter() + .map(|pk| { + StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap() + }) + .collect(); + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); + + // reconfigure + tx.reconfigure_stackerdb( + &sc, + &reconfigured_addrs + .clone() + .into_iter() + .map(|addr| (addr, 1)) + .collect::>(), + ) + .unwrap(); + + tx.commit().unwrap(); + + for (i, pk) in new_pks.iter().enumerate() { + if i < 5 { + // first five are unchanged + let chunk_data = StackerDBChunkData { + slot_id: i as u32, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![i as u8; 128], + }; + + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + + assert_eq!(initial_metadata[i].0, slot_metadata); + assert_eq!(initial_metadata[i].1.data, chunk); + } else if i < 10 { + // next five are wiped + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata.slot_id, i as u32); + assert_eq!(slot_metadata.slot_version, 0); + assert_eq!(slot_metadata.data_hash, Sha512Trunc256Sum([0x00; 32])); + assert_eq!(slot_metadata.signature, MessageSignature::empty()); + + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + assert_eq!(chunk.len(), 0); + } else { + // final five are gone + let slot_metadata_opt = db.get_slot_metadata(&sc, i as u32).unwrap(); + assert!(slot_metadata_opt.is_none()); + } + } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } } // TODO: max chunk size From ed3486c25c06024a9378b2d6c2a8cae09bae5e65 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 6 Sep 2024 14:26:16 -0400 Subject: [PATCH 464/910] Add miner_recovers_when_broadcast_block_delay_across_tenures_occurs Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/client/mod.rs | 1 - stacks-signer/src/client/stacks_client.rs | 28 +- stacks-signer/src/config.rs | 5 - stacks-signer/src/runloop.rs | 1 - stacks-signer/src/signerdb.rs | 45 ++- stacks-signer/src/tests/chainstate.rs | 2 +- stacks-signer/src/v0/signer.rs | 82 +++-- testnet/stacks-node/src/tests/signer/v0.rs | 330 +++++++++++++++++++-- 9 files changed, 417 insertions(+), 78 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 762563871ce..e38c8625520 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -103,6 +103,7 @@ jobs: - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds + - tests::signer::v0::miner_recovers_when_broadcast_block_delay_across_tenures_occurs - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 
b32f465b11d..5ce87062747 100644
--- a/stacks-signer/src/client/mod.rs
+++ b/stacks-signer/src/client/mod.rs
@@ -571,7 +571,6 @@ pub(crate) mod tests {
             db_path: config.db_path.clone(),
             first_proposal_burn_block_timing: config.first_proposal_burn_block_timing,
             block_proposal_timeout: config.block_proposal_timeout,
-            broadcast_signed_blocks: true,
         }
     }
 
diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs
index 05c3b0f1564..85fa7fd34b4 100644
--- a/stacks-signer/src/client/stacks_client.rs
+++ b/stacks-signer/src/client/stacks_client.rs
@@ -706,17 +706,23 @@ impl StacksClient {
     /// Returns `true` if the block was accepted or `false` if the block
     /// was rejected.
     pub fn post_block(&self, block: &NakamotoBlock) -> Result<bool, ClientError> {
-        let response = self
-            .stacks_node_client
-            .post(format!(
-                "{}{}?broadcast=1",
-                self.http_origin,
-                postblock_v3::PATH
-            ))
-            .header("Content-Type", "application/octet-stream")
-            .header(AUTHORIZATION, self.auth_password.clone())
-            .body(block.serialize_to_vec())
-            .send()?;
+        let send_request = || {
+            self.stacks_node_client
+                .post(format!(
+                    "{}{}?broadcast=1",
+                    self.http_origin,
+                    postblock_v3::PATH
+                ))
+                .header("Content-Type", "application/octet-stream")
+                .header(AUTHORIZATION, self.auth_password.clone())
+                .body(block.serialize_to_vec())
+                .send()
+                .map_err(|e| {
+                    debug!("Failed to submit block to the Stacks node: {e:?}");
+                    backoff::Error::transient(e)
+                })
+        };
+        let response = retry_with_exponential_backoff(send_request)?;
         if !response.status().is_success() {
             return Err(ClientError::RequestFailure(response.status()));
         }
diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs
index 037e8af7730..66cf5a5f7d5 100644
--- a/stacks-signer/src/config.rs
+++ b/stacks-signer/src/config.rs
@@ -157,8 +157,6 @@ pub struct SignerConfig {
     pub first_proposal_burn_block_timing: Duration,
     /// How much time to wait for a miner to propose a block following a sortition
     pub block_proposal_timeout: Duration,
-    /// Broadcast a block to the node if we gather enough signatures from other signers
-    pub broadcast_signed_blocks: bool,
 }
 
 /// The parsed configuration for the signer
@@ -203,8 +201,6 @@ pub struct GlobalConfig {
     pub first_proposal_burn_block_timing: Duration,
     /// How much time to wait for a miner to propose a block following a sortition
     pub block_proposal_timeout: Duration,
-    /// Broadcast a block to the node if we gather enough signatures from other signers
-    pub broadcast_signed_blocks: bool,
 }
 
 /// Internal struct for loading up the config file
@@ -361,7 +357,6 @@ impl TryFrom<RawConfigFile> for GlobalConfig {
             metrics_endpoint,
             first_proposal_burn_block_timing,
             block_proposal_timeout,
-            broadcast_signed_blocks: true,
         })
     }
 }
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index 86d8458e30c..cb29221ba96 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -335,7 +335,6 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLo
             max_tx_fee_ustx: self.config.max_tx_fee_ustx,
             db_path: self.config.db_path.clone(),
             block_proposal_timeout: self.config.block_proposal_timeout,
-            broadcast_signed_blocks: self.config.broadcast_signed_blocks,
         }))
     }
 
diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index e5a40ff9fb1..6f5b6c6e061 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -225,16 +225,20 @@ impl BlockInfo {
         block_info
     }
 
-    /// Mark this block as locally accepted, valid, signed over, and records a timestamp in the block info
if it wasn't + /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. - pub fn mark_locally_accepted(&mut self) -> Result<(), String> { + pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { self.valid = Some(true); self.signed_over = true; - self.signed_self.get_or_insert(get_epoch_time_secs()); + if group_signed { + self.signed_group.get_or_insert(get_epoch_time_secs()); + } else { + self.signed_self.get_or_insert(get_epoch_time_secs()); + } self.move_to(BlockState::LocallyAccepted) } - /// Mark this block as globally accepted, valid, signed over, and records a timestamp in the block info if it wasn't + /// Mark this block as valid, signed over, and records a group timestamp in the block info if it wasn't /// already set. pub fn mark_globally_accepted(&mut self) -> Result<(), String> { self.valid = Some(true); @@ -785,15 +789,20 @@ impl SignerDb { query_rows(&self.db, qry, args) } - /// Mark a block as having been broadcasted + /// Mark a block as having been broadcasted and therefore GloballyAccepted pub fn set_block_broadcasted( &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; - let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; + let args = params![ + u64_to_sql(ts)?, + BlockState::GloballyAccepted.to_string(), + u64_to_sql(reward_cycle)?, + block_sighash + ]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -1015,7 +1024,7 @@ mod tests { .is_none()); block_info - .mark_locally_accepted() + .mark_locally_accepted(false) .expect("Failed to mark block as locally accepted"); db.insert_block(&block_info).unwrap(); @@ -1175,12 +1184,32 @@ mod tests { ) .unwrap() .is_none()); + assert_eq!( + db.block_lookup( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash() + ) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, + BlockState::Unprocessed + ); db.set_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash(), 12345, ) .unwrap(); + assert_eq!( + db.block_lookup( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash() + ) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, + BlockState::GloballyAccepted + ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d8252a2c20e..a13ab24a59d 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -243,7 +243,7 @@ fn reorg_timing_testing( }; let mut header_clone = block_proposal_1.block.header.clone(); let mut block_info_1 = BlockInfo::from(block_proposal_1); - block_info_1.mark_locally_accepted().unwrap(); + block_info_1.mark_locally_accepted(false).unwrap(); signer_db.insert_block(&block_info_1).unwrap(); let sortition_time = SystemTime::UNIX_EPOCH diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 4da514d1d5e..639ace66d25 100644 --- a/stacks-signer/src/v0/signer.rs +++ 
b/stacks-signer/src/v0/signer.rs
@@ -55,6 +55,14 @@
 pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex<
     Option<Vec<StacksPublicKey>>,
 > = std::sync::Mutex::new(None);
+#[cfg(any(test, feature = "testing"))]
+/// Pause the block broadcast
+pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
+
+#[cfg(any(test, feature = "testing"))]
+/// Skip broadcasting the block to the network
+pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None);
+
 /// The stacks signer registered for the reward cycle
 #[derive(Debug)]
 pub struct Signer {
@@ -78,8 +86,6 @@ pub struct Signer {
     pub signer_db: SignerDb,
     /// Configuration for proposal evaluation
     pub proposal_config: ProposalEvalConfig,
-    /// Whether or not to broadcast signed blocks if we gather all signatures
-    pub broadcast_signed_blocks: bool,
 }
 
 impl std::fmt::Display for Signer {
@@ -179,13 +185,23 @@ impl SignerTrait<SignerMessage> for Signer {
                     );
                 }
                 SignerMessage::BlockPushed(b) => {
-                    let block_push_result = stacks_client.post_block(b);
+                    // This will infinitely loop until the block is acknowledged by the node
                     info!(
                         "{self}: Got block pushed message";
                         "block_id" => %b.block_id(),
                         "signer_sighash" => %b.header.signer_signature_hash(),
-                        "push_result" => ?block_push_result,
                     );
+                    loop {
+                        match stacks_client.post_block(b) {
+                            Ok(block_push_result) => {
+                                debug!("{self}: Block pushed to stacks node: {block_push_result:?}");
+                                break;
+                            }
+                            Err(e) => {
+                                warn!("{self}: Failed to push block to stacks node: {e}. Retrying...");
+                            }
+                        };
+                    }
                 }
                 SignerMessage::MockProposal(mock_proposal) => {
                     let epoch = match stacks_client.get_node_epoch() {
@@ -306,7 +322,6 @@ impl From<SignerConfig> for Signer {
             reward_cycle: signer_config.reward_cycle,
             signer_db,
             proposal_config,
-            broadcast_signed_blocks: signer_config.broadcast_signed_blocks,
         }
     }
 }
@@ -555,7 +570,7 @@ impl Signer {
                 return None;
             }
         };
-        if let Err(e) = block_info.mark_locally_accepted() {
+        if let Err(e) = block_info.mark_locally_accepted(false) {
            warn!("{self}: Failed to mark block as locally accepted: {e:?}",);
            return None;
         }
@@ -876,10 +891,11 @@ impl Signer {
            warn!("{self}: No such block {block_hash}");
            return;
        };
-        // move block to globally accepted state. If this is not possible, we have a bug in our block handling logic.
-        if let Err(e) = block_info.mark_globally_accepted() {
+        // move block to LOCALLY accepted state.
+        // We only mark this GLOBALLY accepted if we manage to broadcast it...
+        if let Err(e) = block_info.mark_locally_accepted(true) {
             // Do not abort as we should still try to store the block signature threshold
-            warn!("{self}: Failed to mark block as globally accepted: {e:?}");
+            warn!("{self}: Failed to mark block as locally accepted: {e:?}");
         }
         let _ = self.signer_db.insert_block(&block_info).map_err(|e| {
             warn!(
@@ -888,17 +904,24 @@ impl Signer {
             );
             e
         });
-
-        if self.broadcast_signed_blocks {
-            self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs);
-        } else {
-            debug!(
-                "{self}: Not broadcasting signed block {block_hash} since broadcast_signed_blocks is false";
-                "stacks_block_id" => %block_info.block.block_id(),
-                "parent_block_id" => %block_info.block.header.parent_block_id,
-                "burnchain_consensus_hash" => %block_info.block.header.consensus_hash
-            );
+        #[cfg(any(test, feature = "testing"))]
+        {
+            if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) {
+                // Do an extra check just so we don't log EVERY time.
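+                // (the guard lets us emit the stall warning once, then busy-wait below
+                // until the TEST_PAUSE_BLOCK_BROADCAST directive is cleared)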
+ warn!("Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } } + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); } fn broadcast_signed_block( @@ -918,11 +941,30 @@ impl Signer { block.header.signer_signature_hash(); block.header.signer_signature = signatures; + #[cfg(any(test, feature = "testing"))] + { + if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self.signer_db.set_block_broadcasted( + self.reward_cycle, + &block_hash, + get_epoch_time_secs(), + ) { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return; + } + } debug!( "{self}: Broadcasting Stacks block {} to node", &block.block_id() ); - if let Err(e) = stacks_client.post_block(&block) { warn!( "{self}: Failed to post block {block_hash}: {e:?}"; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6a6a0867c97..4c83913fccd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -51,7 +51,10 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_REJECT_ALL_BLOCK_PROPOSAL}; +use stacks_signer::v0::signer::{ + TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, + TEST_SKIP_BLOCK_BROADCAST, +}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -923,7 +926,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - config.broadcast_signed_blocks = false; + TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); }, |_| {}, None, @@ -3042,12 +3045,12 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(rejecting_signers.clone()); + test_observer::clear(); let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1"); let start_time = Instant::now(); - let mut rejected_hash = None; let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); loop { @@ -3063,13 +3066,6 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let rejected_pubkey = rejection .recover_public_key() .expect("Failed to recover public key from rejection"); - if let Some(rejected_hash) = &rejected_hash { - if rejection.signer_signature_hash != *rejected_hash { - return None; - } - } else { - rejected_hash = 
Some(rejection.signer_signature_hash); - } if rejecting_signers.contains(&rejected_pubkey) && rejection.reason_code == RejectCode::TestingDirective { @@ -3237,13 +3233,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .lock() .unwrap() .replace(rejecting_signers.clone()); + test_observer::clear(); let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N+1"); let start_time = Instant::now(); - let mut rejected_hash = None; let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client @@ -3269,13 +3265,6 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let rejected_pubkey = rejection .recover_public_key() .expect("Failed to recover public key from rejection"); - if let Some(rejected_hash) = &rejected_hash { - if rejection.signer_signature_hash != *rejected_hash { - return None; - } - } else { - rejected_hash = Some(rejection.signer_signature_hash); - } if rejecting_signers.contains(&rejected_pubkey) && rejection.reason_code == RejectCode::TestingDirective { @@ -3405,7 +3394,8 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(30); signer_test.boot_to_epoch_3(); - info!("------------------------- Test Mine Nakamoto Block N in Tenure B -------------------------"); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -3465,7 +3455,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let mut last_hash = None; loop { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() @@ -3475,14 +3464,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to deserialize SignerMessage"); match message { SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - // Only care about the last proposed block - if let Some(h) = &last_hash { - if h != &hash { - return None; - } - } else { - last_hash = Some(hash); - } ignoring_signers .iter() .find(|key| key.verify(hash.bits(), &signature).is_ok()) @@ -3513,9 +3494,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); - info!( - "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" - ); + info!("------------------------- Starting Tenure B -------------------------"); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -3527,6 +3506,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( + "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" + ); TEST_IGNORE_ALL_BLOCK_PROPOSALS .lock() .unwrap() @@ -3578,3 +3560,289 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { ); assert_ne!(block_n_1_prime, block_n); } + +#[test] +#[ignore] +/// Test that 
when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure +/// before it receives these signatures, the miner can recover in the following tenure. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. +/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The +/// miner will propose an invalid block N+1' which all signers reject. The broadcast delay is removed and the miner +/// proposes a new block N+2 which all signers accept. +/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(30); + signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + sender_nonce += 1; + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being + // broadcasted to the miner so it can end its tenure before block confirmation obtained + // Clear the stackerdb chunks + info!("Forcing miner to ignore block responses for block N+1"); + 
TEST_IGNORE_SIGNERS.lock().unwrap().replace(true);
+    info!("Delaying signer block N+1 broadcasting to the miner");
+    TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true);
+    test_observer::clear();
+    let transfer_tx =
+        make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} in to attempt to mine block N+1");
+    let start_time = Instant::now();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    let mut block = None;
+    loop {
+        if block.is_none() {
+            block = test_observer::get_stackerdb_chunks()
+                .into_iter()
+                .flat_map(|chunk| chunk.modified_slots)
+                .find_map(|chunk| {
+                    let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                        .expect("Failed to deserialize SignerMessage");
+                    match message {
+                        SignerMessage::BlockProposal(proposal) => {
+                            if proposal.block.header.consensus_hash
+                                == info_before.stacks_tip_consensus_hash
+                            {
+                                Some(proposal.block)
+                            } else {
+                                None
+                            }
+                        }
+                        _ => None,
+                    }
+                });
+        }
+        if let Some(block) = &block {
+            let signatures = test_observer::get_stackerdb_chunks()
+                .into_iter()
+                .flat_map(|chunk| chunk.modified_slots)
+                .filter_map(|chunk| {
+                    let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                        .expect("Failed to deserialize SignerMessage");
+                    match message {
+                        SignerMessage::BlockResponse(BlockResponse::Accepted((
+                            hash,
+                            signature,
+                        ))) => {
+                            if block.header.signer_signature_hash() == hash {
+                                Some(signature)
+                            } else {
+                                None
+                            }
+                        }
+                        _ => None,
+                    }
+                })
+                .collect::<Vec<_>>();
+            if signatures.len() == num_signers {
+                break;
+            }
+        }
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for signers signatures for first block proposal",
+        );
+        sleep_ms(1000);
+    }
+    let block = block.unwrap();
+
+    let blocks_after = mined_blocks.load(Ordering::SeqCst);
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(blocks_after, blocks_before);
+    assert_eq!(info_after, info_before);
+    // Ensure that the block was not yet broadcasted to the miner so the stacks tip has NOT advanced to N+1
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_same = nakamoto_blocks.last().unwrap();
+    assert_ne!(block_n_same, block_n);
+    assert_ne!(info_after.stacks_tip.to_string(), block_n_same.block_hash);
+
+    info!("------------------------- Starting Tenure B -------------------------");
+    let commits_submitted = signer_test.running_nodes.commits_submitted.clone();
+    let commits_before = commits_submitted.load(Ordering::SeqCst);
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let commits_count = commits_submitted.load(Ordering::SeqCst);
+            Ok(commits_count > commits_before)
+        },
+    )
+    .unwrap();
+
+    info!(
+        "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------"
+    );
+    // Wait for the miner to propose a new invalid block N+1'
+    let start_time = Instant::now();
+    let mut rejected_block = None;
+    while rejected_block.is_none() {
+        rejected_block = test_observer::get_stackerdb_chunks()
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+            .find_map(|chunk| {
+                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                    .expect("Failed to deserialize SignerMessage");
+                match message {
+                    SignerMessage::BlockProposal(proposal) => {
+                        if proposal.block.header.consensus_hash != block.header.consensus_hash {
+                            assert!(
+                                proposal.block.header.chain_length == block.header.chain_length
+                            );
+                            Some(proposal.block)
+                        } else {
+                            None
+                        }
+                    }
+                    _ => None,
+                }
+            });
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for N+1' block proposal",
+        );
+    }
+
+    info!("Allowing miner to accept block responses again. ");
+    TEST_IGNORE_SIGNERS.lock().unwrap().replace(false);
+    info!("Allowing signers to broadcast block N+1 to the miner");
+    TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false);
+
+    // Assert the N+1' block was rejected
+    let rejected_block = rejected_block.unwrap();
+    loop {
+        let stackerdb_events = test_observer::get_stackerdb_chunks();
+        let block_rejections = stackerdb_events
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+            .filter_map(|chunk| {
+                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                    .expect("Failed to deserialize SignerMessage");
+                match message {
+                    SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => {
+                        if rejection.signer_signature_hash
+                            == rejected_block.header.signer_signature_hash()
+                        {
+                            Some(rejection)
+                        } else {
+                            None
+                        }
+                    }
+                    _ => None,
+                }
+            })
+            .collect::<Vec<_>>();
+        if block_rejections.len() == num_signers {
+            break;
+        }
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block proposal rejections",
+        );
+    }
+
+    info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------");
+    while mined_blocks.load(Ordering::SeqCst) <= blocks_before + 2 {
+        assert!(
+            start_time.elapsed() < short_timeout,
+            "FAIL: Test timed out while waiting for block production",
+        );
+        thread::sleep(Duration::from_secs(1));
+    }
+
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(
+        info_before.stacks_tip_height + 2,
+        info_after.stacks_tip_height
+    );
+    let nmb_signatures = signer_test
+        .stacks_client
+        .get_tenure_tip(&info_after.stacks_tip_consensus_hash)
+        .expect("Failed to get tip")
+        .as_stacks_nakamoto()
+        .expect("Not a Nakamoto block")
+        .signer_signature
+        .len();
+    assert_eq!(nmb_signatures, num_signers);
+
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N+2
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_2 = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash);
+    assert_ne!(block_n_2, block_n);
+}

From 55d2f32e06a0d8b7a7334ac2137c4c4317a3561a Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 5 Sep 2024 22:20:25 -0400
Subject: [PATCH 465/910] fix: make a stackerdb shrink if its signer list
 becomes smaller than that in the DB

---
 stackslib/src/net/stackerdb/db.rs       | 11 +++
 stackslib/src/net/stackerdb/mod.rs      |  8 ++-
 stackslib/src/net/stackerdb/tests/db.rs | 96 +++++++++++++++++++++++++
 3 files changed, 114 insertions(+), 1 deletion(-)

diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs
index 1dab3f40523..2b735668ac4 100644
--- a/stackslib/src/net/stackerdb/db.rs
+++ b/stackslib/src/net/stackerdb/db.rs
@@ -293,6 +293,15 @@ impl<'a> StackerDBTx<'a> {
         Ok(())
     }
 
+    /// Shrink a StackerDB. Remove all slots at and beyond a particular slot ID.
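+    /// Called from the slot-update path with the new total slot count, so a single
+    /// DELETE drops every chunk row whose slot ID lies past the end of the new
+    /// configuration.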
+ fn shrink_stackerdb(&self, stackerdb_id: i64, first_slot_id: u32) -> Result<(), net_error> { + let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1 AND slot_id >= ?2"; + let args = params![&stackerdb_id, &first_slot_id]; + let mut stmt = self.sql_tx.prepare(&qry)?; + stmt.execute(args)?; + Ok(()) + } + /// Update a database's storage slots, e.g. from new configuration state in its smart contract. /// Chunk data for slots that no longer exist will be dropped. /// Newly-created slots will be instantiated with empty data. @@ -343,6 +352,8 @@ impl<'a> StackerDBTx<'a> { stmt.execute(args)?; } } + debug!("Shrink {} to {} slots", smart_contract, total_slots_read); + self.shrink_stackerdb(stackerdb_id, total_slots_read)?; Ok(()) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index a2de124793c..b022746d6ac 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -341,8 +341,14 @@ impl StackerDBs { &e ); } - } else if new_config != stackerdb_config && new_config.signers.len() > 0 { + } else if (new_config != stackerdb_config && new_config.signers.len() > 0) + || (new_config == stackerdb_config + && new_config.signers.len() + != self.get_slot_versions(&stackerdb_contract_id)?.len()) + { // only reconfigure if the config has changed + // (that second check on the length is needed in case the node is a victim of + // #5142, which was a bug whereby a stackerdb could never shrink) if let Err(e) = self.reconfigure_stackerdb(&stackerdb_contract_id, &new_config) { warn!( "Failed to create or reconfigure StackerDB {stackerdb_contract_id}: DB error {:?}", diff --git a/stackslib/src/net/stackerdb/tests/db.rs b/stackslib/src/net/stackerdb/tests/db.rs index 7371b6b9c5d..9bcf8005295 100644 --- a/stackslib/src/net/stackerdb/tests/db.rs +++ b/stackslib/src/net/stackerdb/tests/db.rs @@ -20,6 +20,7 @@ use std::path::Path; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use libstackerdb::SlotMetadata; +use rusqlite::params; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; @@ -649,6 +650,16 @@ fn test_reconfigure_stackerdb() { initial_metadata.push((slot_metadata, chunk_data)); } + tx.commit().unwrap(); + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let reconfigured_pks = vec![ // first five slots are unchanged @@ -722,6 +733,91 @@ fn test_reconfigure_stackerdb() { assert_eq!(chunk.len(), 0); } } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + // reconfigure with fewer slots + let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let reconfigured_pks = vec![ + // first five slots are unchanged + pks[0], pks[1], pks[2], pks[3], pks[4], + // next five slots are different, so their contents will be dropped and versions and write + // timestamps reset + new_pks[0], new_pks[1], 
new_pks[2], new_pks[3], + new_pks[4], + // slots 10-15 will disappear + ]; + let reconfigured_addrs: Vec<_> = reconfigured_pks + .iter() + .map(|pk| { + StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap() + }) + .collect(); + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); + + // reconfigure + tx.reconfigure_stackerdb( + &sc, + &reconfigured_addrs + .clone() + .into_iter() + .map(|addr| (addr, 1)) + .collect::>(), + ) + .unwrap(); + + tx.commit().unwrap(); + + for (i, pk) in new_pks.iter().enumerate() { + if i < 5 { + // first five are unchanged + let chunk_data = StackerDBChunkData { + slot_id: i as u32, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![i as u8; 128], + }; + + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + + assert_eq!(initial_metadata[i].0, slot_metadata); + assert_eq!(initial_metadata[i].1.data, chunk); + } else if i < 10 { + // next five are wiped + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata.slot_id, i as u32); + assert_eq!(slot_metadata.slot_version, 0); + assert_eq!(slot_metadata.data_hash, Sha512Trunc256Sum([0x00; 32])); + assert_eq!(slot_metadata.signature, MessageSignature::empty()); + + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + assert_eq!(chunk.len(), 0); + } else { + // final five are gone + let slot_metadata_opt = db.get_slot_metadata(&sc, i as u32).unwrap(); + assert!(slot_metadata_opt.is_none()); + } + } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } } // TODO: max chunk size From bb40d1a141be23bd60e315e18d01e4b883449c8a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 6 Sep 2024 17:30:46 -0400 Subject: [PATCH 466/910] fix: get miner_recovers_when_broadcast_block_delay_across_tenures_occurs to pass --- testnet/stacks-node/src/tests/signer/v0.rs | 63 +++++++++++++++++++--- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4c83913fccd..1c1f4117edf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3594,7 +3594,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 2; + let nmb_txs = 3; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, @@ -3603,6 +3603,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(30); signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3612,12 +3613,36 @@ fn 
miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .get_peer_info() .expect("Failed to get peer info"); let start_time = Instant::now(); + + // wait until we get a sortition. + // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + loop { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + + sleep_ms(10_000); + + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + if tip.sortition { + break; + } + } + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); + + // a tenure has begun, so wait until we mine a block while mined_blocks.load(Ordering::SeqCst) <= blocks_before { assert!( start_time.elapsed() < short_timeout, @@ -3649,16 +3674,20 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Delaying signer block N+1 broadcasting to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to attempt to mine block N+1"); - let start_time = Instant::now(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); + + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let start_time = Instant::now(); let mut block = None; loop { if block.is_none() { @@ -3813,8 +3842,27 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { ); } + // Induce block N+2 to get mined + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+2"); + info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before + 2 { + loop { + // N.B. 
have to use /v2/info because mined_blocks only increments if the miner's signing + // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + if info_before.stacks_tip_height + 2 <= info.stacks_tip_height { + break; + } + assert!( start_time.elapsed() < short_timeout, "FAIL: Test timed out while waiting for block production", @@ -3826,6 +3874,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + assert_eq!( info_before.stacks_tip_height + 2, info_after.stacks_tip_height From 17aa1c42c38f3da4c148f9eedd595766ad786433 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 7 Sep 2024 23:16:35 -0400 Subject: [PATCH 467/910] chore: a NetworkResult has data if it has an uploaded Nakamoto block --- stackslib/src/net/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index af391e03e88..2c9c9473b48 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1571,7 +1571,9 @@ impl NetworkResult { } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 + self.nakamoto_blocks.len() > 0 + || self.pushed_nakamoto_blocks.len() > 0 + || uploaded_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { From 835e39bdee9d392564fcd8b81cd67a25fad64493 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 7 Sep 2024 23:38:38 -0400 Subject: [PATCH 468/910] fix: compiler error (forgot self.) --- stackslib/src/net/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2c9c9473b48..7f8dea93291 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1573,7 +1573,7 @@ impl NetworkResult { pub fn has_nakamoto_blocks(&self) -> bool { self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 - || uploaded_nakamoto_blocks.len() > 0 + || self.uploaded_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { From 37e27266f8fa8c81554077ea56a99b51a028bdae Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 9 Sep 2024 18:44:12 +0300 Subject: [PATCH 469/910] added test for fast blocks when epoch 3 is deployed --- .../src/tests/nakamoto_integrations.rs | 393 +++++++++++++++++- 1 file changed, 391 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4839bee3be8..c57b59dce61 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -100,8 +100,8 @@ use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, - get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, - wait_for_runloop, + get_pox_info, next_block_and_wait, next_block_and_wait_with_timeout, + run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -928,6 +928,158 @@ pub fn boot_to_epoch_3( info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } +/// +/// * 
`stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +pub fn boot_to_epoch_3_flash_blocks( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + self_signing: &mut Option<&mut TestSigners>, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + assert_eq!(stacker_sks.len(), signer_sks.len()); + + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let current_height = btc_regtest_controller.get_headers_height(); + info!( + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(btc_regtest_controller, &blocks_processed); + + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } + // stack enough to activate pox-4 + + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + + for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &signer_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 12_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(signer_sk); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(12), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + + let prepare_phase_start = btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_phase_start( + btc_regtest_controller.get_burnchain().first_block_height, + reward_cycle, + ); + + // Run until the prepare phase + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + prepare_phase_start, + &naka_conf, + ); + + // We need to vote on the aggregate public key if this test is self signing + if let Some(signers) = self_signing { + // Get the aggregate key + let aggregate_key = 
signers.clone().generate_aggregate_key(reward_cycle + 1); + let aggregate_public_key = + clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"); + let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + // Vote on the aggregate public key + for signer_sk in signer_sks_unique.values() { + let signer_index = + get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) + .unwrap(); + let voting_tx = tests::make_contract_call( + signer_sk, + 0, + 300, + &StacksAddress::burn_address(false), + SIGNERS_VOTING_NAME, + SIGNERS_VOTING_FUNCTION_NAME, + &[ + clarity::vm::Value::UInt(u128::try_from(signer_index).unwrap()), + aggregate_public_key.clone(), + clarity::vm::Value::UInt(0), + clarity::vm::Value::UInt(reward_cycle as u128 + 1), + ], + ); + submit_tx(&http_origin, &voting_tx); + } + } + + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3.start_height - 2, + &naka_conf, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); +} + fn get_signer_index( stacker_set: &GetStackersResponse, signer_key: &Secp256k1PublicKey, @@ -1519,6 +1671,243 @@ fn simple_neon_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, +/// having flash blocks when epoch updates and expects everything to work normally, +/// then switches to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration_with_flash_blocks_on_epoch_3() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3_flash_blocks( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + // mine 3 blocks which should be the ones for setting up the miner + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + // query for prometheus metrics + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + assert!(res.contains(&expected_result)); + } + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| 
tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + test_observer::contains_burn_block_range(220..=bhh).unwrap(); + + // make sure prometheus returns an updated height + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); + assert!(res.contains(&expected_result)); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. From 95196236927a7182fe7159ce32a3d072792029e4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:40:51 -0400 Subject: [PATCH 470/910] chore: react early to /shutdown --- libsigner/src/events.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 6dbc10110a8..4fb6d7a5070 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -314,6 +314,9 @@ impl EventReceiver for SignerEventReceiver { process_proposal_response(request) } else if request.url() == "/new_burn_block" { process_new_burn_block_event(request) + } else if request.url() == "/shutdown" { + event_receiver.stop_signal.store(true, Ordering::SeqCst); + return Err(EventError::Terminated); } else { let url = request.url().to_string(); // `/new_block` is expected, but not specifically handled. do not log. From 6788f487f11eeace4e1ecd86e7ea3af008e48e83 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:05 -0400 Subject: [PATCH 471/910] chore: move doc warnings to top of docstring --- stackslib/src/chainstate/nakamoto/mod.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 78baa2578bc..cb515a860c9 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2558,8 +2558,10 @@ impl NakamotoChainState { Ok(result.is_some()) } + /// DO NOT CALL IN CONSENSUS CODE, such as during Stacks block processing + /// (including during Clarity VM evaluation). This function returns the latest data + /// known to the node, which may not have been at the time of original block assembly. /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) - /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_block_header( chainstate_conn: &Connection, sortdb: &SortitionDB, @@ -2631,14 +2633,14 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } + /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same + /// tenure. + /// /// Get the highest block in a given tenure (identified by its consensus hash). 
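/// ("Highest" means the greatest chain length among the blocks this node knows for the tenure.)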
/// Ties will be broken by timestamp. /// /// Used to verify that a signer-submitted block proposal builds atop the highest known block /// in the given tenure, regardless of which fork it's on. - /// - /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same - /// tenure. pub fn get_highest_known_block_header_in_tenure( db: &Connection, consensus_hash: &ConsensusHash, @@ -4258,10 +4260,10 @@ impl NakamotoChainState { Ok(Some(slot_id_range)) } + /// DO NOT USE IN MAINNET /// Boot code instantiation for the aggregate public key. /// TODO: This should be removed once it's possible for stackers to vote on the aggregate /// public key - /// DO NOT USE IN MAINNET pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: &Point) { let agg_pub_key = to_hex(&apk.compress().data); let contract_content = format!( From f98422c44e58135a0c99c9d7be99fc77edd5eb5c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:17 -0400 Subject: [PATCH 472/910] chore: copyright refinement --- stackslib/src/net/api/gettenuretip.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/net/api/gettenuretip.rs b/stackslib/src/net/api/gettenuretip.rs index 188fe0dc51b..5bed2a6cc28 100644 --- a/stackslib/src/net/api/gettenuretip.rs +++ b/stackslib/src/net/api/gettenuretip.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by From 1820319c480363eba4480b4d537868334c53cee9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:32 -0400 Subject: [PATCH 473/910] feat: plumb through should_keep_running so the signcoordinator will exit on ctrl+c --- .../stacks-node/src/nakamoto_node/miner.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ba32122b6da..3fa5ff93334 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -544,14 +544,17 @@ impl BlockMinerThread { return Ok((reward_set, Vec::new())); } - let mut coordinator = - SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( - |e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - }, - )?; + let mut coordinator = SignCoordinator::new( + &reward_set, + miner_privkey_as_scalar, + &self.config, + self.globals.should_keep_running.clone(), + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! 
{e:?}" + )) + })?; let mut chain_state = neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { From e5af00151347ce59a5726ee2aaad31549e0d191f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:53 -0400 Subject: [PATCH 474/910] feat: react to globals.should_keep_running becoming true, so the node will cleanly shut down if it's in the middle of mining a block --- .../src/nakamoto_node/sign_coordinator.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index beece7f99e7..5e7fe24e0e9 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -14,7 +14,9 @@ // along with this program. If not, see . use std::collections::BTreeMap; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Receiver; +use std::sync::Arc; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; @@ -77,6 +79,7 @@ pub struct SignCoordinator { weight_threshold: u32, total_weight: u32, config: Config, + keep_running: Arc, pub next_signer_bitvec: BitVec<4000>, } @@ -209,6 +212,7 @@ impl SignCoordinator { reward_set: &RewardSet, message_key: Scalar, config: &Config, + keep_running: Arc, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -307,6 +311,7 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, config: config.clone(), + keep_running, }; return Ok(sign_coordinator); } @@ -329,6 +334,7 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, config: config.clone(), + keep_running, }) } @@ -795,6 +801,12 @@ impl SignCoordinator { } }; + // was the node asked to stop? + if !self.keep_running.load(Ordering::SeqCst) { + info!("SignerCoordinator: received node exit request. Aborting"); + return Err(NakamotoNodeError::ChannelClosed); + } + // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); From 648aa5bd9bbd71d90161a3a1c8c3d8a2f6fbf5d1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:42:18 -0400 Subject: [PATCH 475/910] feat: integration test for retrying a block if signers reject it --- testnet/stacks-node/src/tests/signer/v0.rs | 137 +++++++++++++++++++++ 1 file changed, 137 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1c1f4117edf..89d77811921 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2059,6 +2059,143 @@ fn end_of_tenure() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test checks that the miner will retry when enough signers reject the block. 
+fn retry_on_rejection() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let short_timeout = Duration::from_secs(30); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * 3)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + // wait until we get a sortition. + // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + loop { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + + sleep_ms(10_000); + + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + if tip.sortition { + break; + } + } + + // mine a nakamoto block + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine the first Nakamoto block"); + + // a tenure has begun, so wait until we mine a block + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + // make all signers reject the block + let rejecting_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .take(num_signers) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + info!("Block proposed, verifying that it is not processed"); + // Wait 10 seconds to be sure that the timeout has occurred + std::thread::sleep(Duration::from_secs(10)); + assert_eq!( + signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst), + blocks_before + ); + + // resume signing + info!("Disable unconditional rejection and wait for the block to be processed"); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + 
.unwrap() + .replace(vec![]); + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if blocks_mined > blocks_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks that the signers will broadcast a block once they receive enough signatures. From d2bc6a1a8cd0f01d9d3fc34e86b25e907208a14a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:42:52 -0400 Subject: [PATCH 476/910] chore: add retry_on_rejection integration test --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e38c8625520..6c3aca0e140 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -100,6 +100,7 @@ jobs: - tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks - tests::signer::v0::duplicate_signers + - tests::signer::v0::retry_on_rejection - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds From 46dd56879c047cf6f799a8870ab6f8d1a5ca0dec Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:50:59 -0700 Subject: [PATCH 477/910] update hardcoded v2 references to v3 --- docs/rpc-endpoints.md | 2 +- docs/rpc/openapi.yaml | 4 ++-- stackslib/src/net/api/getstackers.rs | 6 +++--- stackslib/src/net/api/postblock_proposal.rs | 6 +++--- stackslib/src/net/api/poststackerdbchunk.rs | 2 +- stackslib/src/net/api/tests/postblock_proposal.rs | 8 ++++---- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 +++--- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 8 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6163f27b753..eea916a7812 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -427,7 +427,7 @@ Determine whether a given trait is implemented within the specified contract (ei See OpenAPI [spec](./rpc/openapi.yaml) for details. -### POST /v2/block_proposal +### POST /v3/block_proposal Used by miner to validate a proposed Stacks block using JSON encoding. diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index f33e0dca732..d12e800c32e 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -568,7 +568,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). - /v2/block_proposal: + /v3/block_proposal: post: summary: Validate a proposed Stacks block tags: @@ -600,7 +600,7 @@ paths: example: $ref: ./api/core-node/post-block-proposal-req.example.json - /v2/stacker_set/{cycle_number}: + /v3/stacker_set/{cycle_number}: get: summary: Fetch the stacker and signer set information for a given cycle. 
tags: diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4546b66fc93..c6291096d82 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -96,11 +96,11 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/stacker_set/:cycle_num" + "/v3/stacker_set/:cycle_num" } /// Try to decode this request. @@ -211,7 +211,7 @@ impl StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v2/stacker_set/{cycle_num}"), + format!("/v3/stacker_set/{cycle_num}"), HttpRequestContents::new().for_tip(tip_req), ) .expect("FATAL: failed to construct request from infallible data") diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6c1d5526b5d..043c3165652 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -163,7 +163,7 @@ impl From> for BlockValidateRespons } } -/// Represents a block proposed to the `v2/block_proposal` endpoint for validation +/// Represents a block proposed to the `v3/block_proposal` endpoint for validation #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NakamotoBlockProposal { /// Proposed block @@ -431,11 +431,11 @@ impl HttpRequest for RPCBlockProposalRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/block_proposal$"#).unwrap() + Regex::new(r#"^/v3/block_proposal$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal" + "/v3/block_proposal" } /// Try to decode this request. diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs index b3c94206025..388f35ccee0 100644 --- a/stackslib/src/net/api/poststackerdbchunk.rs +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -83,7 +83,7 @@ impl HttpRequest for RPCPostStackerDBChunkRequestHandler { } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal/:principal/:contract_name/chunks" + "/v3/block_proposal/:principal/:contract_name/chunks" } /// Try to decode this request. 
diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 6ab465a683c..391afc949f1 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -69,7 +69,7 @@ fn test_try_parse_request() { let mut request = StacksHttpRequest::new_for_peer( addr.into(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -320,7 +320,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -340,7 +340,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -360,7 +360,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 13de8a350c8..56e8b513bc0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -231,7 +231,7 @@ impl TestSigningChannel { pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{http_origin}/v2/stacker_set/{cycle}"); + let path = format!("{http_origin}/v3/stacker_set/{cycle}"); let res = client .get(&path) .send() @@ -2234,7 +2234,7 @@ fn correct_burn_outs() { run_loop_thread.join().unwrap(); } -/// Test `/v2/block_proposal` API endpoint +/// Test `/v3/block_proposal` API endpoint /// /// This endpoint allows miners to propose Nakamoto blocks to a node, /// and test if they would be accepted or rejected @@ -2471,7 +2471,7 @@ fn block_proposal_api_endpoint() { .expect("Failed to build `reqwest::Client`"); // Build URL let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{http_origin}/v2/block_proposal"); + let path = format!("{http_origin}/v3/block_proposal"); let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ac6a3ea978c..f4340df3471 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1394,7 +1394,7 @@ pub fn get_contract_src( pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/stacker_set/{}", http_origin, reward_cycle); + let path = format!("{}/v3/stacker_set/{}", http_origin, reward_cycle); let res = 
client.get(&path).send().unwrap(); info!("Got stacker_set response {:?}", &res); From 109d3cb5986ad065825082012932872463899f0d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:30:54 -0700 Subject: [PATCH 478/910] fix: unused imports Added a cfg to warn about unused imports in `net/mod.rs`, which exposed a few unused imports that I've removed. I've also updated other parts of the code that had imports that were only used for testing. In those cases, I've updated the import to only be used in `cfg(test)`. --- contrib/tools/relay-server/src/http.rs | 2 ++ stacks-common/Cargo.toml | 1 + stackslib/src/net/connection.rs | 2 -- stackslib/src/net/mod.rs | 10 ++++++---- .../src/burnchains/bitcoin_regtest_controller.rs | 7 ++++--- .../stacks-node/src/nakamoto_node/sign_coordinator.rs | 7 ++++++- 6 files changed, 19 insertions(+), 10 deletions(-) diff --git a/contrib/tools/relay-server/src/http.rs b/contrib/tools/relay-server/src/http.rs index c84f833beec..f7bd1e6f896 100644 --- a/contrib/tools/relay-server/src/http.rs +++ b/contrib/tools/relay-server/src/http.rs @@ -8,7 +8,9 @@ use crate::to_io_result::ToIoResult; pub struct Request { pub method: String, pub url: String, + #[allow(dead_code)] pub protocol: String, + #[allow(dead_code)] pub headers: HashMap, pub content: Vec, } diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index d5bfeb44e9e..75692d83c6f 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -78,6 +78,7 @@ testing = ["canonical"] serde = [] bech32_std = [] bech32_strict = [] +strason = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 78c15e08330..878be15d60e 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -25,8 +25,6 @@ use std::{io, net}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; -use mio; -use mio::net as mio_net; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::to_hex; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3ba4292f1c2..865fcd89ba7 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -14,8 +14,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::borrow::Borrow; -use std::collections::{HashMap, HashSet}; +#[warn(unused_imports)] +use std::collections::HashMap; +#[cfg(any(test, feature = "testing"))] +use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::io::prelude::*; use std::io::{Read, Write}; @@ -110,7 +112,7 @@ pub mod atlas; /// Other functionality includes (but is not limited to): /// * set up & tear down of sessions /// * dealing with and responding to invalid messages -/// * rate limiting messages +/// * rate limiting messages pub mod chat; /// Implements serialization and deserialization for `StacksMessage` types. /// Also has functionality to sign, verify, and ensure well-formedness of messages. @@ -118,7 +120,7 @@ pub mod codec; pub mod connection; pub mod db; /// Implements `DNSResolver`, a simple DNS resolver state machine. Also implements `DNSClient`, -/// which serves as an API for `DNSResolver`. +/// which serves as an API for `DNSResolver`. 
pub mod dns; pub mod download; pub mod http; diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 145e73a3897..12210d230e3 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -74,10 +74,11 @@ use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; +use crate::config::BurnchainConfig; +#[cfg(test)] use crate::config::{ - BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, - OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, - OP_TX_VOTE_AGG_ESTIM_SIZE, + OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; /// The number of bitcoin blocks that can have diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6810afbb6b9..af73ae26db4 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -28,7 +28,9 @@ use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoC use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; +use stacks::chainstate::stacks::Error as ChainstateError; +#[cfg(any(test, feature = "testing"))] +use stacks::chainstate::stacks::ThresholdSignature; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; @@ -39,6 +41,7 @@ use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use wsts::common::PolyCommitment; +#[cfg(any(test, feature = "testing"))] use wsts::curve::ecdsa; use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; @@ -70,6 +73,7 @@ pub struct SignCoordinator { coordinator: FireCoordinator, receiver: Option>, message_key: Scalar, + #[cfg(any(test, feature = "testing"))] wsts_public_keys: PublicKeys, is_mainnet: bool, miners_session: StackerDBSession, @@ -321,6 +325,7 @@ impl SignCoordinator { coordinator, message_key, receiver: Some(receiver), + #[cfg(any(test, feature = "testing"))] wsts_public_keys, is_mainnet, miners_session, From 2dce84a9f3df144a3659c43e14a1bb9e9c99a735 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:33:48 -0700 Subject: [PATCH 479/910] fix: only update `schema_version` if lower Previously, because `apply_schema_8` is called after things like `apply_schema_9`, this would override the schema version to be 8. Also removed some trailing whitespace. 
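[Annotation] A standalone sketch of the version guard this commit introduces, for readers outside the codebase. The `db_config` table and the `INSERT OR REPLACE` statement mirror the diff, but the in-memory database and setup below are invented for illustration — this is the pattern, not the node's actual migration code.

use rusqlite::{params, Connection};

fn get_schema_version(conn: &Connection) -> u32 {
    // The sortition DB stores the version as TEXT; parse defensively.
    conn.query_row("SELECT version FROM db_config", [], |row| {
        row.get::<_, String>(0)
    })
    .map(|v| v.parse::<u32>().unwrap_or(0))
    .unwrap_or(0)
}

fn apply_schema_8(conn: &mut Connection) -> rusqlite::Result<()> {
    let schema_version = get_schema_version(conn);

    // ... the schema-8 work itself still runs unconditionally ...

    // Record "8" only when migrating upward; without this guard, running
    // this step after apply_schema_9 would roll the version back to 8.
    if schema_version < 8 {
        let tx = conn.transaction()?;
        tx.execute(
            "INSERT OR REPLACE INTO db_config (version) VALUES (?1)",
            params!["8"],
        )?;
        tx.commit()?;
    }
    Ok(())
}

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE db_config (version TEXT NOT NULL)", [])?;
    conn.execute("INSERT INTO db_config (version) VALUES ('9')", [])?;
    apply_schema_8(&mut conn)?;
    assert_eq!(get_schema_version(&conn), 9); // not clobbered back to 8
    Ok(())
}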
--- stackslib/src/chainstate/burn/db/sortdb.rs | 29 ++++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3cf13a8a55e..90cf60ace18 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -574,7 +574,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ block_height INTEGER NOT NULL, burn_header_hash TEXT NOT NULL, sortition_id TEXT NOT NULL, - + consensus_hash TEXT NOT NULL, public_key TEXT NOT NULL, memo TEXT, @@ -619,7 +619,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ stacked_ustx TEXT NOT NULL, num_cycles INTEGER NOT NULL, - -- The primary key here is (txid, burn_header_hash) because + -- The primary key here is (txid, burn_header_hash) because -- this transaction will be accepted regardless of which sortition -- history it is in. PRIMARY KEY(txid,burn_header_hash) @@ -636,7 +636,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ transfered_ustx TEXT NOT NULL, memo TEXT NOT NULL, - -- The primary key here is (txid, burn_header_hash) because + -- The primary key here is (txid, burn_header_hash) because -- this transaction will be accepted regardless of which sortition -- history it is in. PRIMARY KEY(txid,burn_header_hash) @@ -2261,7 +2261,7 @@ impl<'a> SortitionHandleConn<'a> { /// Get a block commit by txid. In the event of a burnchain fork, this may not be unique. /// this function simply returns one of those block commits: only use data that is - /// immutable across burnchain/pox forks, e.g., parent block ptr, + /// immutable across burnchain/pox forks, e.g., parent block ptr, pub fn get_block_commit_by_txid( &self, sort_id: &SortitionId, @@ -3352,6 +3352,11 @@ impl SortitionDB { ) -> Result<(), db_error> { let canonical_tip = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; + let schema_version = SortitionDB::get_schema_version(self.conn())? + .unwrap_or("0".to_string()) + .parse::() + .unwrap_or(0); + // port over `stacks_chain_tips` table info!("Instantiating `stacks_chain_tips` table..."); self.apply_schema_8_stacks_chain_tips(&canonical_tip)?; @@ -3365,12 +3370,14 @@ impl SortitionDB { info!("No migrator implementation given; `preprocessed_reward_sets` will not be prepopulated"); } - let tx = self.tx_begin()?; - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["8"], - )?; - tx.commit()?; + if schema_version < 8 { + let tx = self.tx_begin()?; + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["8"], + )?; + tx.commit()?; + } Ok(()) } @@ -5017,7 +5024,7 @@ impl SortitionDB { conn: &Connection, sortition: &SortitionId, ) -> Result, db_error> { - let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 + let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 AND txid = ( SELECT winning_block_txid FROM snapshots WHERE sortition_id = ?2 LIMIT 1) LIMIT 1"; let args = params![sortition, sortition]; From 20e0a042fc2cc9d92a9da7aa2aea7555533b713b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:35:11 -0700 Subject: [PATCH 480/910] fix: update log when validating pox treatment I've updated the log line to be a `warn`. More investigation needed on whether we should add explicit validation here to the length of the BitVec. 
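[Annotation] A toy model of the rule the upgraded warning describes, with a plain boolean slice standing in for the header's BitVec; the function name and setup are invented for illustration.

fn pox_bit_for_reward_entry(block_bitvec: &[bool], reward_set_index: usize) -> bool {
    // Entries past the end of an undersized bitvec implicitly read as 1
    // (reward the address). This commit raises the log level for that case
    // but does not yet reject short bitvecs outright.
    block_bitvec.get(reward_set_index).copied().unwrap_or(true)
}

fn main() {
    let block_bitvec = vec![true, false]; // covers 2 of 4 reward entries
    let treatment: Vec<bool> = (0..4)
        .map(|i| pox_bit_for_reward_entry(&block_bitvec, i))
        .collect();
    assert_eq!(treatment, vec![true, false, true, true]);
}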
--- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 28ba89d59da..ee6ac12aee8 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3573,7 +3573,7 @@ impl NakamotoChainState { .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; let bitvec_value = block_bitvec.get(ix) .unwrap_or_else(|| { - info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); + warn!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); true }); Ok(bitvec_value) From 32cd617f6abeffd621ff0f5e53590b62608de95d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:37:45 -0700 Subject: [PATCH 481/910] fix: use consistent error handling when writing to SignerDB Previously, there were some cases where we'd `panic`, and some where we'd just handle the error with a log. This updates the logic to always panic. --- stacks-signer/src/v0/signer.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f51..828996c5603 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -194,12 +194,13 @@ impl SignerTrait for Signer { self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) { - warn!( + error!( "Failed to write burn block event to signerdb"; "err" => ?e, "burn_header_hash" => %burn_header_hash, "burn_height" => burn_height ); + panic!("{self} Failed to write burn block event to signerdb: {e}"); } *sortition_state = None; } @@ -679,13 +680,13 @@ impl Signer { // record time at which we reached the threshold block_info.signed_group = Some(get_epoch_time_secs()); - let _ = self.signer_db.insert_block(&block_info).map_err(|e| { - warn!( + if let Err(e) = self.signer_db.insert_block(&block_info) { + error!( "Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e ); - e - }); + panic!("{self} Failed to write block to signerdb: {e}"); + }; // collect signatures for the block let signatures: Vec<_> = self From 84c9ed4c69f98e201e32694cb7f493ecd1d6e742 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:39:04 -0700 Subject: [PATCH 482/910] fix: typo in `NAKAMOTO_TENURES_SCHEMA_1` --- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 81380cc93d0..059da96b7a4 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -132,7 +132,7 @@ pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" burn_view_consensus_hash TEXT NOT NULL, -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). 
-- this is equal to the `cause` field in a TenureChange - cause INETGER NOT NULL, + cause INTEGER NOT NULL, -- block hash of start-tenure block block_hash TEXT NOT NULL, -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) From 48195afa44a39870d9b9f65f793b8f815f3f5894 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:39:58 -0700 Subject: [PATCH 483/910] Fix: allow block timestamps exactly 15 seconds in the future Previously, we'd only allow timestamps _less than_ 15 seconds away, but our docs state that the timestamp can be _no more than_ 15 seconds away. --- stackslib/src/net/api/postblock_proposal.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6112ea0fae0..19b556604f4 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -283,7 +283,7 @@ impl NakamotoBlockProposal { }); } } - if self.block.header.timestamp > get_epoch_time_secs() + 15 { + if self.block.header.timestamp >= get_epoch_time_secs() + 15 { warn!( "Rejected block proposal"; "reason" => "Block timestamp is too far into the future", From 944184900d5fda18478618830e7e5258a0a47bae Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:41:02 -0700 Subject: [PATCH 484/910] fix: update processed_time when replacing block --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 931a00777bd..1f49bccaf35 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -593,8 +593,15 @@ impl<'a> NakamotoStagingBlocksTx<'a> { signing_weight: u32, obtain_method: NakamotoBlockObtainMethod, ) -> Result<(), ChainstateError> { - self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3 WHERE consensus_hash = ?4 AND block_hash = ?5", - params![&block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), &block.header.consensus_hash, &block.header.block_hash()])?; + self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3, processed_time = ?4 WHERE consensus_hash = ?5 AND block_hash = ?6", + params![ + &block.serialize_to_vec(), + &signing_weight, + &obtain_method.to_string(), + u64_to_sql(get_epoch_time_secs())?, + &block.header.consensus_hash, + &block.header.block_hash(), + ])?; Ok(()) } } From 61be12e4c38b899548b19a6c1e119c1babb46a77 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:42:32 -0700 Subject: [PATCH 485/910] fix: return correct highest schema version In `get_nakamoto_staging_blocks_db_version`, in the case of an error, the function would default to version 1, which could cause an overwrite of tables. This updates the function to return the correct highest version, which I've also moved to a constant. 
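[Annotation] A reduced sketch of this failure-mode fix. The constant name follows the diff, but the query and error handling are simplified for illustration and do not reproduce the node's actual function.

use rusqlite::Connection;

pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 2;

fn staging_db_version(conn: &Connection) -> u32 {
    match conn.query_row("SELECT MAX(version) FROM db_version", [], |row| row.get(0)) {
        Ok(version) => version,
        Err(e) => {
            // Failing "high" is the conservative direction: the old default
            // of 1 told callers the schema-2 migration still had to run,
            // which re-created tables that already held data.
            eprintln!("Failed to read staging DB version ({e:?}); assuming latest");
            NAKAMOTO_STAGING_DB_SCHEMA_LATEST
        }
    }
}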
--- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 1f49bccaf35..b83deebac0f 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -88,7 +88,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ -- block data, including its header data BLOB NOT NULL, - + PRIMARY KEY(block_hash,consensus_hash) );"#, r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#, @@ -136,7 +136,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ -- block data, including its header data BLOB NOT NULL, - + PRIMARY KEY(block_hash,consensus_hash) );"#, r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#, @@ -149,6 +149,8 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ r#"INSERT INTO db_version (version) VALUES (2)"#, ]; +pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 2; + pub struct NakamotoStagingBlocksConn(rusqlite::Connection); impl Deref for NakamotoStagingBlocksConn { @@ -527,7 +529,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { processed_time, obtain_method, signing_weight, - + data ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", params![ @@ -671,7 +673,7 @@ impl StacksChainState { Ok(x) => x, Err(e) => { debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); - return Ok(1); + return Ok(NAKAMOTO_STAGING_DB_SCHEMA_LATEST); } }; From a0ad6864d50c46fbd0e3d65eb5798a1c7cbfd741 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:43:25 -0700 Subject: [PATCH 486/910] fix: return error if unable to announce stacks block In `net::relay`, there are many cases where we return an error if unable to announce a new Stacks block, but there was one case where the error was ignored. This updates that case to also return an error. --- stackslib/src/net/relay.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index d022148b3ae..35627d9dd4e 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2049,9 +2049,11 @@ impl Relayer { } } if !http_uploaded_blocks.is_empty() { - coord_comms.inspect(|comm| { - comm.announce_new_stacks_block(); - }); + if let Some(comm) = coord_comms { + if !comm.announce_new_stacks_block() { + return Err(net_error::CoordinatorClosed); + } + }; } accepted_nakamoto_blocks_and_relayers.extend(pushed_blocks_and_relayers); From e882ec371e131fa5cd2c54e771dde9a2b74f4528 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:44:07 -0700 Subject: [PATCH 487/910] fix: log error when failing to push a block This adds more verbose and explicit logs when the signer is unable to post a block when handling a `BlockPushed` event. 
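[Annotation] The shape of the added check, reduced to a standalone sketch — `post_block` here is a stand-in for the signer's stacks-client call, not the real API.

fn post_block(block_id: &str) -> Result<String, String> {
    Err(format!("connection refused while posting {block_id}"))
}

fn handle_block_pushed(block_id: &str) {
    let block_push_result = post_block(block_id);
    // Match on a reference so the failure is logged without consuming the
    // Result; the status line below can still include it.
    if let Err(e) = &block_push_result {
        eprintln!("WARN: failed to post block {block_id}: {e:?}");
    }
    println!("INFO: got block pushed message; block_id={block_id} push_result={block_push_result:?}");
}

fn main() {
    handle_block_pushed("0x00ff");
}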
--- stacks-signer/src/v0/signer.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 828996c5603..fd5bbab4003 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -151,6 +151,13 @@ impl SignerTrait for Signer { } SignerMessage::BlockPushed(b) => { let block_push_result = stacks_client.post_block(b); + if let Err(ref e) = &block_push_result { + warn!( + "{self}: Failed to post block {} (id {}): {e:?}", + &b.header.signer_signature_hash(), + &b.block_id() + ); + }; info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), From ca22422c42a2182f166805390636e3e17e19cb3d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:45:20 -0700 Subject: [PATCH 488/910] fix: typo in SORTITION_DB_SCHEMA_4 --- stackslib/src/chainstate/burn/db/sortdb.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 90cf60ace18..f545c53c8c3 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -687,11 +687,11 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ delegated_ustx TEXT NOT NULL, until_burn_height INTEGER, - PRIMARY KEY(txid,burn_header_Hash) + PRIMARY KEY(txid,burn_header_hash) );"#, r#" CREATE TABLE ast_rule_heights ( - ast_rule_id INTEGER PRIMAR KEY NOT NULL, + ast_rule_id INTEGER PRIMARY KEY NOT NULL, block_height INTEGER NOT NULL );"#, ]; From cdf4727c1bcae8c721972a2907a8f8e61d36ee77 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:46:22 -0700 Subject: [PATCH 489/910] fix: more description warning log in NakamotoBlockBuilder Previously, two different error cases had the same log message. This updates one of them to have a more description message about when the error occurred. 
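[Annotation] An invented illustration of why the duplicated wording mattered: two failure sites sharing one message are indistinguishable when triaging logs, so the fix gives this site its own wording plus the inputs it was using. All names below are hypothetical.

fn coinbase_height_of(parent_block_id: &str) -> Result<u64, String> {
    Err(format!("no ancestor data for {parent_block_id}"))
}

fn build_block(parent_block_id: &str) -> Result<u64, String> {
    coinbase_height_of(parent_block_id).map_err(|e| {
        // Previously this site reused the reward-set lookup's message;
        // now it names its own query and carries the relevant context.
        eprintln!(
            "WARN: cannot process Nakamoto block: could not find height at which \
             the PoX reward set was calculated; err={e:?} stacks_tip={parent_block_id}"
        );
        "NoSuchBlockError".to_string()
    })
}

fn main() {
    let _ = build_block("0xabcd");
}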
--- stackslib/src/chainstate/nakamoto/miner.rs | 4 ++-- testnet/stacks-node/src/nakamoto_node/miner.rs | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 66aa6cc1d98..70298db74c4 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -108,7 +108,7 @@ impl NakamotoTenureInfo { } pub struct NakamotoBlockBuilder { - /// If there's a parent (i.e., not a genesis), this is Some(parent_header) + /// If there's a parent (i.e., not a genesis), this is Some(parent_header) parent_header: Option, /// Signed coinbase tx, if starting a new tenure coinbase_tx: Option, @@ -280,7 +280,7 @@ impl NakamotoBlockBuilder { &self.header.parent_block_id, ).map_err(|e| { warn!( - "Cannot process Nakamoto block: could not load reward set that elected the block"; + "Cannot process Nakamoto block: could not retrieve coinbase POX height of the elected block"; "err" => ?e, ); Error::NoSuchBlockError diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cd811a9346d..1669ee9484e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1250,7 +1250,11 @@ impl BlockMinerThread { &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); + debug!("Miner: Extending tenure"; + "burn_view_consensus_hash" => %burn_view_consensus_hash, + "parent_block_id" => %parent_block_id, + "num_blocks_so_far" => num_blocks_so_far, + ); payload = payload.extend( *burn_view_consensus_hash, parent_block_id, From a4166be18b21f113e99f19baae3a4fa9e5f9fc5f Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 10 Sep 2024 16:04:05 +0300 Subject: [PATCH 490/910] add test for no fee tx anchored directly and failure to submit it to mempool --- .../stacks/tests/block_construction.rs | 219 ++++++++++++++++++ 1 file changed, 219 insertions(+) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 41942078403..36997105356 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -1536,6 +1536,225 @@ fn test_build_anchored_blocks_skip_too_expensive() { } } +#[test] +fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { + let privk = StacksPrivateKey::from_hex( + "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", + ) + .unwrap(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + + let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let chainstate_path = peer.chainstate_path.clone(); + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, 
stacks_block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let coinbase_tx = make_coinbase(miner, 0); + + // Create a zero-fee transaction + let zero_fee_tx = make_user_stacks_transfer( + &privk, + 0, + 0, // Set fee to 0 + &recipient.to_account_principal(), + 1000, + ); + + let result = mempool.submit( + chainstate, + sortdb, + &parent_consensus_hash, + &parent_header_hash, + &zero_fee_tx, + None, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch20, + ); + + match result { + Ok(_) => panic!("Expected FeeTooLow error but transaction was accepted"), + Err(e) => match e { + MemPoolRejection::FeeTooLow(actual, required) => { + assert_eq!(actual, 0); + assert_eq!(required, 180); + } + _ => panic!("Unexpected error: {:?}", e), + }, + }; + + let anchored_block = StacksBlockBuilder::build_anchored_block( + chainstate, + &sortdb.index_handle_at_tip(), + &mut mempool, + &parent_tip, + tip.total_burn, + vrf_proof, + Hash160([0 as u8; 20]), + &coinbase_tx, + BlockBuilderSettings::max_value(), + None, + &burnchain, + ) + .unwrap(); + + (anchored_block.0, vec![]) + }, + ); + + peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + // Check that the block contains only coinbase transactions (coinbase) + assert_eq!(stacks_block.txs.len(), 1); +} + +#[test] +fn test_build_anchored_blocks_zero_fee_transaction() { + let privk = StacksPrivateKey::from_hex( + "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", + ) + .unwrap(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + + let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let chainstate_path = peer.chainstate_path.clone(); + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, stacks_block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + 
&tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let coinbase_tx = make_coinbase(miner, 0); + + // Create a zero-fee transaction + let zero_fee_tx = make_user_stacks_transfer( + &privk, + 0, + 0, // Set fee to 0 + &recipient.to_account_principal(), + 1000, + ); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof, + tip.total_burn, + Hash160([0 as u8; 20]), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_handle_at_tip(), + vec![coinbase_tx, zero_fee_tx], + ) + .unwrap(); + + (anchored_block.0, vec![]) + }, + ); + + peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + // Check that the block contains 2 transactions (coinbase + zero-fee transaction) + assert_eq!(stacks_block.txs.len(), 2); + + // Verify that the zero-fee transaction is in the block + let zero_fee_tx = &stacks_block.txs[1]; + assert_eq!(zero_fee_tx.get_tx_fee(), 0); +} + #[test] fn test_build_anchored_blocks_multiple_chaintips() { let mut privks = vec![]; From f1435430325b8b9e9f394bd0946b90211191af0e Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 10 Sep 2024 17:46:59 +0300 Subject: [PATCH 491/910] better naming flash blocks functions and add remove waiting for blocks --- .../src/tests/nakamoto_integrations.rs | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c57b59dce61..17f068f4744 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -928,10 +928,13 @@ pub fn boot_to_epoch_3( info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } +/// Boot the chain to just before the Epoch 3.0 boundary to allow for flash blocks +/// This function is similar to `boot_to_epoch_3`, but it stops at epoch 3 start height - 2, +/// allowing for flash blocks to occur when the epoch changes. 
/// -/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order -/// for pox-4 to activate -pub fn boot_to_epoch_3_flash_blocks( +/// * `stacker_sks` - private keys for sending large `stack-stx` transactions to activate pox-4 +/// * `signer_sks` - corresponding signer keys for the stackers +pub fn boot_to_pre_epoch_3_boundary( naka_conf: &Config, blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], @@ -1077,7 +1080,7 @@ pub fn boot_to_epoch_3_flash_blocks( &naka_conf, ); - info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); + info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); } fn get_signer_index( @@ -1737,7 +1740,7 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); - boot_to_epoch_3_flash_blocks( + boot_to_pre_epoch_3_boundary( &naka_conf, &blocks_processed, &[stacker_sk], @@ -1746,10 +1749,10 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &mut btc_regtest_controller, ); - // mine 3 blocks which should be the ones for setting up the miner - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + // Mine 3 Bitcoin blocks quickly without waiting for Stacks blocks to be processed + for _ in 0..3 { + btc_regtest_controller.build_next_block(1); + } info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); From e56cef4b7c66914d96704076cd6571c47b9c508c Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:49:25 -0700 Subject: [PATCH 492/910] fix: update timestamp validation Keep it as "max 15 seconds", and update the documentation comments. --- clarity/src/vm/docs/mod.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 6 +++--- stackslib/src/net/api/postblock_proposal.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 6bf577b680a..65b08e3102a 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1831,7 +1831,7 @@ and block times are accurate only to within two hours. See [BIP113](https://gith For a block mined after epoch 3.0, this timestamp comes from the Stacks block header. **Note**: this is the time, according to the miner, when the mining of this block started, but is not guaranteed to be accurate. This time will be validated by the signers to be: - Greater than the timestamp of the previous block - - Less than 15 seconds into the future (according to their own local clocks) + - At most 15 seconds into the future (according to their own local clocks) ", example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860301) (get-stacks-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ee6ac12aee8..6e775fba560 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -613,7 +613,7 @@ pub struct NakamotoBlockHeader { /// A Unix time timestamp of when this block was mined, according to the miner. 
/// For the signers to consider a block valid, this timestamp must be: /// * Greater than the timestamp of its parent block - /// * Less than 15 seconds into the future + /// * At most 15 seconds into the future pub timestamp: u64, /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, @@ -1877,7 +1877,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? @@ -1889,7 +1889,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 19b556604f4..35410b280bb 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -266,7 +266,7 @@ impl NakamotoBlockProposal { // Validate the block's timestamp. It must be: // - Greater than the parent block's timestamp - // - Less than 15 seconds into the future + // - At most 15 seconds into the future if let StacksBlockHeaderTypes::Nakamoto(parent_nakamoto_header) = &parent_stacks_header.anchored_header { @@ -283,7 +283,7 @@ impl NakamotoBlockProposal { }); } } - if self.block.header.timestamp >= get_epoch_time_secs() + 15 { + if self.block.header.timestamp > get_epoch_time_secs() + 15 { warn!( "Rejected block proposal"; "reason" => "Block timestamp is too far into the future", From d40a3e541500f9af546e666a421774cab837c619 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:49:40 -0700 Subject: [PATCH 493/910] fix: better warn message --- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 70298db74c4..c4ac2b428c9 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -280,7 +280,7 @@ impl NakamotoBlockBuilder { &self.header.parent_block_id, ).map_err(|e| { warn!( - "Cannot process Nakamoto block: could not retrieve coinbase POX height of the elected block"; + "Cannot process Nakamoto block: could not find height at which the PoX reward set was calculated"; "err" => ?e, ); Error::NoSuchBlockError From 30e50d5d44d3021ef9072f4183c8966bd38b181c Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:51:08 -0700 Subject: [PATCH 494/910] fix: more idiomatic error handling when inserting block --- stacks-signer/src/v0/signer.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fd5bbab4003..926c69fc339 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -197,18 +197,17 @@ 
impl SignerTrait for Signer { received_time, } => { info!("{self}: Received a new burn block event for block height {burn_height}"); - if let Err(e) = - self.signer_db - .insert_burn_block(burn_header_hash, *burn_height, received_time) - { - error!( - "Failed to write burn block event to signerdb"; - "err" => ?e, - "burn_header_hash" => %burn_header_hash, - "burn_height" => burn_height - ); - panic!("{self} Failed to write burn block event to signerdb: {e}"); - } + self.signer_db + .insert_burn_block(burn_header_hash, *burn_height, received_time) + .unwrap_or_else(|e| { + error!( + "Failed to write burn block event to signerdb"; + "err" => ?e, + "burn_header_hash" => %burn_header_hash, + "burn_height" => burn_height + ); + panic!("{self} Failed to write burn block event to signerdb: {e}"); + }); *sortition_state = None; } } From ab7e18ff1261b8272ed2a60386bc04f707428ec9 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:53:48 -0700 Subject: [PATCH 495/910] fix: add more key/val pairs to error log --- stackslib/src/chainstate/nakamoto/miner.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index c4ac2b428c9..1d267b047f9 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -282,6 +282,9 @@ impl NakamotoBlockBuilder { warn!( "Cannot process Nakamoto block: could not find height at which the PoX reward set was calculated"; "err" => ?e, + "stacks_tip" => %self.header.parent_block_id, + "elected_height" => elected_height, + "elected_cycle" => elected_in_cycle ); Error::NoSuchBlockError })?; From 1eb20c88edba39a3820675d1794a851304b772f7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 10 Sep 2024 14:00:43 -0400 Subject: [PATCH 496/910] fix: disregard tx count if the stacks tip has changed Fixes #5157 --- testnet/stacks-node/src/neon_node.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d627e081b20..153eb9361ee 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1715,6 +1715,10 @@ impl BlockMinerThread { info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); + // Since the chain tip has changed, we should try to mine a new block, even + // if it has less transactions than the previous block we mined, since that + // previous block would now be a reorg. 
+ max_txs = 0; } else { info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); From 47436e7ffed5dadff50739cd5ffbdcd962f2cb84 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 3 Sep 2024 17:17:35 -0400 Subject: [PATCH 497/910] fix: Improve logging in rusqlite busy handler, and fail eventually if probable deadlock detected --- stackslib/src/util_lib/db.rs | 54 ++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index f54a9c97ec7..be09de25560 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -655,31 +655,44 @@ impl<'a, C: Clone, T: MarfTrieId> DerefMut for IndexDBTx<'a, C, T> { } } +/// Called by `rusqlite` if we are waiting too long on a database lock pub fn tx_busy_handler(run_count: i32) -> bool { - let mut sleep_count = 2; - if run_count > 0 { - sleep_count = 2u64.saturating_pow(run_count as u32); + const TIMEOUT: Duration = Duration::from_secs(60); + const AVG_SLEEP_TIME_MS: u64 = 100; + + // First, check if this is taking unreasonably long. If so, it's probably a deadlock + let run_count = run_count.unsigned_abs(); + let approx_time_elapsed = + Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); + if approx_time_elapsed > TIMEOUT { + error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + "run_count" => run_count, + "backtrace" => ?Backtrace::capture() + ); + return false; } - sleep_count = sleep_count.saturating_add(thread_rng().gen::() % sleep_count); - if sleep_count > 100 { - let jitter = thread_rng().gen::() % 20; - sleep_count = 100 - jitter; + let mut sleep_time_ms = 2u64.saturating_pow(run_count); + + sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); + + if sleep_time_ms > AVG_SLEEP_TIME_MS { + let bound = 10; + let jitter = thread_rng().gen_range(0..bound * 2); + sleep_time_ms = (AVG_SLEEP_TIME_MS - bound) + jitter; } - debug!( - "Database is locked; sleeping {}ms and trying again", - &sleep_count; - "backtrace" => ?{ - if run_count > 10 && run_count % 10 == 0 { - Some(Backtrace::capture()) - } else { - None - } - }, - ); + let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again"); + if run_count > 10 && run_count % 10 == 0 { + warn!("{msg}"; + "run_count" => run_count, + "backtrace" => ?Backtrace::capture() + ); + } else { + debug!("{msg}"); + } - sleep_ms(sleep_count); + sleep_ms(sleep_time_ms); true } @@ -696,8 +709,7 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Erro /// Sames as `tx_begin_immediate` except that it returns a rusqlite error. 
 pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result<Transaction<'a>, sqlite_error> {
     conn.busy_handler(Some(tx_busy_handler))?;
-    let tx = Transaction::new(conn, TransactionBehavior::Immediate)?;
-    Ok(tx)
+    Transaction::new(conn, TransactionBehavior::Immediate)
 }
 
 #[cfg(feature = "profile-sqlite")]

From 3b24bd34c15f00ae0549a69717d5e2624ea816e0 Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Wed, 4 Sep 2024 13:24:07 -0400
Subject: [PATCH 498/910] chore: Address PR comment from Brice

---
 stackslib/src/util_lib/db.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs
index be09de25560..4728b83e73c 100644
--- a/stackslib/src/util_lib/db.rs
+++ b/stackslib/src/util_lib/db.rs
@@ -677,9 +677,9 @@ pub fn tx_busy_handler(run_count: i32) -> bool {
     sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms));
 
     if sleep_time_ms > AVG_SLEEP_TIME_MS {
-        let bound = 10;
-        let jitter = thread_rng().gen_range(0..bound * 2);
-        sleep_time_ms = (AVG_SLEEP_TIME_MS - bound) + jitter;
+        let jitter = 10;
+        sleep_time_ms =
+            thread_rng().gen_range((AVG_SLEEP_TIME_MS - jitter)..(AVG_SLEEP_TIME_MS + jitter));
     }
 
     let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again");

From 8dcfadf8c205c720a3b16086c195c9c2617bcf4e Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Thu, 5 Sep 2024 08:56:27 -0400
Subject: [PATCH 499/910] chore: Update the `tx_busy_handler()` in stacks-common also

---
 clarity/src/vm/database/sqlite.rs |  2 +-
 stacks-common/src/util/db.rs      | 64 +++++++++++++++++++++++++++++++
 stacks-common/src/util/mod.rs     | 27 +------------
 stackslib/src/util_lib/db.rs      | 38 +-----------------
 4 files changed, 67 insertions(+), 64 deletions(-)
 create mode 100644 stacks-common/src/util/db.rs

diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs
index dc3ad4f5bde..7d2af59eb50 100644
--- a/clarity/src/vm/database/sqlite.rs
+++ b/clarity/src/vm/database/sqlite.rs
@@ -21,7 +21,7 @@ use rusqlite::{
 };
 use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId};
 use stacks_common::types::sqlite::NO_PARAMS;
-use stacks_common::util::db_common::tx_busy_handler;
+use stacks_common::util::db::tx_busy_handler;
 use stacks_common::util::hash::Sha512Trunc256Sum;
 
 use super::clarity_store::{make_contract_hash_key, ContractCommitment};
diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs
new file mode 100644
index 00000000000..29ebdd0bb0c
--- /dev/null
+++ b/stacks-common/src/util/db.rs
@@ -0,0 +1,64 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use std::backtrace::Backtrace;
+use std::time::Duration;
+
+use rand::{thread_rng, Rng};
+
+use crate::util::sleep_ms;
+
+/// Called by `rusqlite` if we are waiting too long on a database lock
+/// If called too many times, will fail to avoid deadlocks
+pub fn tx_busy_handler(run_count: i32) -> bool {
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const AVG_SLEEP_TIME_MS: u64 = 100;
+
+    // First, check if this is taking unreasonably long. If so, it's probably a deadlock
+    let run_count = run_count.unsigned_abs();
+    let approx_time_elapsed =
+        Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count)));
+    if approx_time_elapsed > TIMEOUT {
+        error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs();
+            "run_count" => run_count,
+            "backtrace" => ?Backtrace::capture()
+        );
+        return false;
+    }
+
+    let mut sleep_time_ms = 2u64.saturating_pow(run_count);
+
+    sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms));
+
+    if sleep_time_ms > AVG_SLEEP_TIME_MS {
+        let jitter = 10;
+        sleep_time_ms =
+            thread_rng().gen_range((AVG_SLEEP_TIME_MS - jitter)..(AVG_SLEEP_TIME_MS + jitter));
+    }
+
+    let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again");
+    if run_count > 10 && run_count % 10 == 0 {
+        warn!("{msg}";
+            "run_count" => run_count,
+            "backtrace" => ?Backtrace::capture()
+        );
+    } else {
+        debug!("{msg}");
+    }
+
+    sleep_ms(sleep_time_ms);
+    true
+}
diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs
index 13ab79dcb30..a9dfc478061 100644
--- a/stacks-common/src/util/mod.rs
+++ b/stacks-common/src/util/mod.rs
@@ -19,6 +19,7 @@ pub mod log;
 #[macro_use]
 pub mod macros;
 pub mod chunked_encoding;
+pub mod db;
 pub mod hash;
 pub mod pair;
 pub mod pipe;
@@ -85,32 +86,6 @@ impl error::Error for HexError {
     }
 }
 
-pub mod db_common {
-    use std::{thread, time};
-
-    use rand::{thread_rng, Rng};
-
-    pub fn tx_busy_handler(run_count: i32) -> bool {
-        let mut sleep_count = 10;
-        if run_count > 0 {
-            sleep_count = 2u64.saturating_pow(run_count as u32);
-        }
-        sleep_count = sleep_count.saturating_add(thread_rng().gen::<u64>() % sleep_count);
-
-        if sleep_count > 5000 {
-            sleep_count = 5000;
-        }
-
-        debug!(
-            "Database is locked; sleeping {}ms and trying again",
-            &sleep_count
-        );
-
-        thread::sleep(time::Duration::from_millis(sleep_count));
-        true
-    }
-}
-
 /// Write any `serde_json` object directly to a file
 pub fn serialize_json_to_file<J, P>(json: &J, path: P) -> Result<(), std::io::Error>
 where
diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs
index 4728b83e73c..a0496d3bfc6 100644
--- a/stackslib/src/util_lib/db.rs
+++ b/stackslib/src/util_lib/db.rs
@@ -657,43 +657,7 @@ impl<'a, C: Clone, T: MarfTrieId> DerefMut for IndexDBTx<'a, C, T> {
 
 /// Called by `rusqlite` if we are waiting too long on a database lock
 pub fn tx_busy_handler(run_count: i32) -> bool {
-    const TIMEOUT: Duration = Duration::from_secs(60);
-    const AVG_SLEEP_TIME_MS: u64 = 100;
-
-    // First, check if this is taking unreasonably long. If so, it's probably a deadlock
-    let run_count = run_count.unsigned_abs();
-    let approx_time_elapsed =
-        Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count)));
-    if approx_time_elapsed > TIMEOUT {
-        error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock.
Giving up", approx_time_elapsed.as_secs(); - "run_count" => run_count, - "backtrace" => ?Backtrace::capture() - ); - return false; - } - - let mut sleep_time_ms = 2u64.saturating_pow(run_count); - - sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); - - if sleep_time_ms > AVG_SLEEP_TIME_MS { - let jitter = 10; - sleep_time_ms = - thread_rng().gen_range((AVG_SLEEP_TIME_MS - jitter)..(AVG_SLEEP_TIME_MS + jitter)); - } - - let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again"); - if run_count > 10 && run_count % 10 == 0 { - warn!("{msg}"; - "run_count" => run_count, - "backtrace" => ?Backtrace::capture() - ); - } else { - debug!("{msg}"); - } - - sleep_ms(sleep_time_ms); - true + stacks_common::util::db::tx_busy_handler(run_count) } /// Begin an immediate-mode transaction, and handle busy errors with exponential backoff. From 79455d4949902fc26f89a16400fb8fc27599834d Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:12:20 +0200 Subject: [PATCH 500/910] Delete unused `open-api` action --- .github/actions/open-api/Dockerfile.open-api-validate | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 .github/actions/open-api/Dockerfile.open-api-validate diff --git a/.github/actions/open-api/Dockerfile.open-api-validate b/.github/actions/open-api/Dockerfile.open-api-validate deleted file mode 100644 index 4ff6187be0b..00000000000 --- a/.github/actions/open-api/Dockerfile.open-api-validate +++ /dev/null @@ -1,10 +0,0 @@ -FROM node:lts-alpine as build - -WORKDIR /src - -COPY . . - -RUN npx redoc-cli@0.10.3 bundle -o /build/open-api-docs.html ./docs/rpc/openapi.yaml - -FROM scratch AS export-stage -COPY --from=build /build/open-api-docs.html / From f0b118d6b22ccf70feaf4ef8a5e56bbee917ff7a Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:13:25 +0200 Subject: [PATCH 501/910] Remove non-existent `burn_ops` endpoint --- .../get-burn-ops-peg-in.example.json | 14 -------- .../get-burn-ops-peg-out-fulfill.example.json | 15 -------- .../get-burn-ops-peg-out-request.example.json | 16 --------- docs/rpc/openapi.yaml | 36 ------------------- 4 files changed, 81 deletions(-) delete mode 100644 docs/rpc/api/core-node/get-burn-ops-peg-in.example.json delete mode 100644 docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json delete mode 100644 docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json diff --git a/docs/rpc/api/core-node/get-burn-ops-peg-in.example.json b/docs/rpc/api/core-node/get-burn-ops-peg-in.example.json deleted file mode 100644 index 5302a3b6242..00000000000 --- a/docs/rpc/api/core-node/get-burn-ops-peg-in.example.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "peg_in": [ - { - "amount": 1337, - "block_height": 218, - "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4", - "memo": "", - "peg_wallet_address": "tb1pqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqkgkkf5", - "recipient": "S0000000000000000000002AA028H.awesome_contract", - "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771", - "vtxindex": 2 - } - ], -} diff --git a/docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json b/docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json deleted file mode 100644 index 45fca8a3291..00000000000 --- a/docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "peg_out_fulfill": [ - { - "chain_tip": 
"0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e", - "amount": 1337, - "recipient": "1BixGeiRyKT7NTkJAHpWuP197KXUNqhCU9", - "request_ref": "e81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157772", - "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4", - "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771", - "block_height": 218, - "vtxindex": 2, - "memo": "00010203" - } - ] -} diff --git a/docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json b/docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json deleted file mode 100644 index 0e6efa958b5..00000000000 --- a/docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "peg_out_request": [ - { - "amount": 1337, - "recipient": "1BixGeiRyKT7NTkJAHpWuP197KXUNqhCU9", - "block_height": 218, - "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4", - "peg_wallet_address": "tb1qqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvps3f3cyq", - "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771", - "vtxindex": 2, - "signature": "0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d", - "fulfillment_fee": 0, - "memo": "00010203" - } - ], -} diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index f33e0dca732..741556da8e6 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -41,42 +41,6 @@ paths: example: $ref: ./api/transaction/post-core-node-transactions-error.example.json - /v2/burn_ops/{burn_height}/{op_type}: - get: - summary: Get burn operations - description: Get all burn operations of type `op_type` successfully read at `burn_height`. Valid `op_type`s are `peg_in`, `peg_out_request` and `peg_out_fulfill`. 
- tags: - - Info - operationId: get_burn_ops - parameters: - - name: burn_height - in: path - required: true - description: height of the burnchain (Bitcoin) - schema: - type: integer - - name: op_type - in: path - required: true - description: name of the burnchain operation type - schema: - type: string - responses: - 200: - description: Burn operations list - content: - application/json: - examples: - peg_in: - value: - $ref: ./api/core-node/get-burn-ops-peg-in.example.json - peg_out_request: - value: - $ref: ./api/core-node/get-burn-ops-peg-out-request.example.json - peg_out_fulfill: - value: - $ref: ./api/core-node/get-burn-ops-peg-out-fulfill.example.json - /v2/contracts/interface/{contract_address}/{contract_name}: get: summary: Get contract interface From 7128cc1abe1e4adc37e9ae6870e0fe1ce1605788 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:13:59 +0200 Subject: [PATCH 502/910] Fix wrong required parameters --- .../core-node/post-fee-transaction-response.schema.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/rpc/api/core-node/post-fee-transaction-response.schema.json b/docs/rpc/api/core-node/post-fee-transaction-response.schema.json index 8a085913499..af84276b0b4 100644 --- a/docs/rpc/api/core-node/post-fee-transaction-response.schema.json +++ b/docs/rpc/api/core-node/post-fee-transaction-response.schema.json @@ -4,7 +4,12 @@ "title": "TransactionFeeEstimateResponse", "type": "object", "additionalProperties": false, - "required": ["estimated_cost", "estimated_cost_scalar", "estimated_fee_rates", "estimated_fees"], + "required": [ + "estimated_cost", + "estimated_cost_scalar", + "cost_scalar_change_by_byte", + "estimations" + ], "properties": { "estimated_cost_scalar": { "type": "integer" From 42566a1591fa85b087f6c7551b529234d8b0b35e Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:14:12 +0200 Subject: [PATCH 503/910] Fix comma leading to expected additional params --- docs/rpc/api/trait/get-is-trait-implemented.schema.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rpc/api/trait/get-is-trait-implemented.schema.json b/docs/rpc/api/trait/get-is-trait-implemented.schema.json index a8b1b65faf3..30cb3fa4862 100644 --- a/docs/rpc/api/trait/get-is-trait-implemented.schema.json +++ b/docs/rpc/api/trait/get-is-trait-implemented.schema.json @@ -8,6 +8,6 @@ "properties": { "is_implemented": { "type": "boolean" - }, + } } } From 4b275639aa10ff6f908e3059b798ddf62873ad3c Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:14:48 +0200 Subject: [PATCH 504/910] Bump `openapi` version and fix license --- docs/rpc/openapi.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 741556da8e6..2a269117ec7 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -1,13 +1,14 @@ -openapi: 3.0.2 +openapi: 3.1.0 servers: - url: http://localhost:20443 description: Local info: - title: Stacks 2.0+ RPC API + title: Stacks 3.0+ RPC API version: '1.0.0' description: | This is the documentation for the `stacks-node` RPC interface. 
- license: CC-0 + license: + name: CC-0 paths: /v2/transactions: From b5e1222344b2b2352c8e1ff4f82ff1b7ced58ca8 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:15:42 +0200 Subject: [PATCH 505/910] HTTP status codes MUST be enclosed in quotes https://spec.openapis.org/oas/latest.html#patterned-fields-0 --- docs/rpc/openapi.yaml | 56 +++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 2a269117ec7..abcc92e9823 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -26,14 +26,14 @@ paths: format: binary example: binary format of 00000000010400bed38c2aadffa348931bcb542880ff79d607afec000000000000000000000000000000c800012b0b1fff6cccd0974966dcd665835838f0985be508e1322e09fb3d751eca132c492bda720f9ef1768d14fdabed6127560ba52d5e3ac470dcb60b784e97dc88c9030200000000000516df0ba3e79792be7be5e50a370289accfc8c9e032000000000000303974657374206d656d6f00000000000000000000000000000000000000000000000000 responses: - 200: - description: Transaction id of successful post of a raw tx to the node's mempool + "200": + description: Transaction ID of successful post of a raw tx to the node's mempool content: text/plain: schema: type: string example: '"e161978626f216b2141b156ade10501207ae535fa365a13ef5d7a7c9310a09f2"' - 400: + "400": description: Rejections result in a 400 error content: application/json: @@ -50,7 +50,7 @@ paths: - Smart Contracts operationId: get_contract_interface responses: - 200: + "200": description: Contract interface content: application/json: @@ -90,7 +90,7 @@ paths: In the response, `data` is the hex serialization of the map response. Note that map responses are Clarity option types, for non-existent values, this is a serialized none, and for all other responses, it is a serialized (some ...) object. responses: - 200: + "200": description: Success content: application/json: @@ -98,7 +98,7 @@ paths: $ref: ./api/core-node/get-contract-data-map-entry.schema.json example: $ref: ./api/core-node/get-contract-data-map-entry.example.json - 400: + "400": description: Failed loading data map parameters: - name: contract_address @@ -147,7 +147,7 @@ paths: operationId: get_contract_source description: Returns the Clarity source code of a given contract, along with the block height it was published in, and the MARF proof for the data responses: - 200: + "200": description: Success content: application/json: @@ -192,7 +192,7 @@ paths: The smart contract and function are specified using the URL path. The arguments and the simulated tx-sender are supplied via the POST body in the following JSON format: responses: - 200: + "200": description: Success content: application/json: @@ -273,7 +273,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). responses: - 200: + "200": description: Success content: application/json: @@ -363,7 +363,7 @@ paths: example: $ref: ./api/core-node/post-fee-transaction.example.json responses: - 200: + "200": description: Estimated fees for the transaction content: application/json: @@ -380,7 +380,7 @@ paths: operationId: get_fee_transfer description: Get an estimated fee rate for STX transfer transactions. 
This a a fee rate / byte, and is returned as a JSON integer responses: - 200: + "200": description: Success content: application/json: @@ -397,7 +397,7 @@ paths: - Info operationId: get_core_api_info responses: - 200: + "200": description: Success content: application/json: @@ -414,7 +414,7 @@ paths: - Info operationId: get_pox_info responses: - 200: + "200": description: Success content: application/json: @@ -438,7 +438,7 @@ paths: - Smart Contracts operationId: get_is_trait_implemented responses: - 200: + "200": description: Success content: application/json: @@ -484,7 +484,7 @@ paths: description: | The Stacks chain tip to query from. If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). - If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). + If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: post: @@ -497,7 +497,7 @@ paths: In the response, `data` is the hex serialization of the constant value. responses: - 200: + "200": description: Success content: application/json: @@ -505,7 +505,7 @@ paths: $ref: ./api/core-node/get-constant-val.schema.json example: $ref: ./api/core-node/get-constant-val.example.json - 400: + "400": description: Failed to retrieve constant value from contract parameters: - name: contract_address @@ -544,15 +544,15 @@ paths: **This API endpoint requires a basic Authorization header.** responses: - 202: + "202": description: Block proposal has been accepted for processing. The result will be returned via the event observer. content: application/json: example: $ref: ./api/core-node/post-block-proposal-response.example.json - 403: + "403": description: Request not over loopback interface - 429: + "429": description: There is an ongoing proposal validation being processed, the new request cannot be accepted until the prior request has been processed. content: @@ -583,13 +583,13 @@ paths: schema: type: integer responses: - 200: + "200": description: Information for the given reward cycle content: application/json: example: $ref: ./api/core-node/get_stacker_set.example.json - 400: + "400": description: Could not fetch the given reward set content: application/json: @@ -598,7 +598,7 @@ paths: /v3/blocks/{block_id}: get: - summary: Fetch a Nakamoto block + summary: Fetch a Nakamoto block tags: - Blocks operationId: get_block_v3 @@ -612,14 +612,14 @@ paths: schema: type: string responses: - 200: + "200": description: The raw SIP-003-encoded block will be returned. content: application/octet-stream: schema: type: string format: binary - 404: + "404": description: The block could not be found content: application/text-plain: {} @@ -633,13 +633,13 @@ paths: description: Fetch metadata about the ongoing Nakamoto tenure. This information is sufficient to obtain and authenticate the highest complete tenure, as well as obtain new tenure blocks. responses: - 200: + "200": description: Metadata about the ongoing tenure content: application/json: example: $ref: ./api/core-node/get_tenure_info.json - + /v3/tenures/{block_id}: get: summary: Fetch a sequence of Nakamoto blocks in a tenure @@ -649,7 +649,7 @@ paths: description: Fetch a sequence of Nakamoto blocks in a tenure. The blocks will be served in order from highest to lowest. The blocks will be encoded in their SIP-003 wire format, and concatenated together. 
responses: - 200: + "200": description: SIP-003-encoded Nakamoto blocks, concatenated together content: application/octet-stream: From a9b0afe6d3d1bca96f177b1fd5fa7b62dc4293f1 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:21:06 +0200 Subject: [PATCH 506/910] Wrap examples in `value` --- .../api/contract/post-call-read-only-fn-fail.example.json | 6 ++++-- .../contract/post-call-read-only-fn-success.example.json | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json b/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json index 5680a225ca0..90170858878 100644 --- a/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json +++ b/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json @@ -1,4 +1,6 @@ { - "okay": false, - "cause": "Unchecked(PublicFunctionNotReadOnly(..." + "value": { + "okay": false, + "cause": "Unchecked(PublicFunctionNotReadOnly(..." + } } diff --git a/docs/rpc/api/contract/post-call-read-only-fn-success.example.json b/docs/rpc/api/contract/post-call-read-only-fn-success.example.json index cc94dccd1dd..c2f5d845f18 100644 --- a/docs/rpc/api/contract/post-call-read-only-fn-success.example.json +++ b/docs/rpc/api/contract/post-call-read-only-fn-success.example.json @@ -1,4 +1,6 @@ { - "okay": true, - "result": "0x111..." + "value": { + "okay": true, + "result": "0x111..." + } } From 31b5360b69735b19666348eba6ae3ab683389cc3 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 17:45:35 +0200 Subject: [PATCH 507/910] Update docs for `/v3/block_proposal` --- docs/rpc/openapi.yaml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index abcc92e9823..6fc49859679 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -533,7 +533,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). - /v2/block_proposal: + /v3/block_proposal: post: summary: Validate a proposed Stacks block tags: @@ -545,16 +545,19 @@ paths: **This API endpoint requires a basic Authorization header.** responses: "202": - description: Block proposal has been accepted for processing. The result will be returned via the event observer. + description: Block proposal has been accepted for processing. + The result will be returned via the event observer. content: application/json: example: $ref: ./api/core-node/post-block-proposal-response.example.json - "403": - description: Request not over loopback interface + "400": + description: Endpoint not enabled. + "401": + description: Unauthorized. "429": - description: There is an ongoing proposal validation being processed, the new request cannot be accepted - until the prior request has been processed. + description: There is an ongoing proposal validation being processed, + the new request cannot be accepted until the prior request has been processed. 
         content:
           application/json:
             example:
              $ref: ./api/core-node/post-block-proposal-response.example.json

From 021ab583d2bbfde9e8e93de5b6674a7bc62d6091 Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Wed, 11 Sep 2024 12:31:31 -0400
Subject: [PATCH 508/910] chore: Add `LOCK_TABLE` to print out all DB locks if deadlock detected

---
 stacks-common/src/util/db.rs | 29 ++++++++++++++++++++++++-----
 stackslib/src/util_lib/db.rs | 10 ++++++++--
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs
index 29ebdd0bb0c..fff39a32978 100644
--- a/stacks-common/src/util/db.rs
+++ b/stacks-common/src/util/db.rs
@@ -15,16 +15,32 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use std::backtrace::Backtrace;
-use std::time::Duration;
+use std::sync::{LazyLock, Mutex};
+use std::thread;
+use std::time::{Duration, Instant};
 
+use hashbrown::HashMap;
 use rand::{thread_rng, Rng};
 
 use crate::util::sleep_ms;
 
+/// Keep track of DB locks, for deadlock debugging
+/// - **key:** `rusqlite::Connection` debug print
+/// - **value:** Lock holder (thread name + timestamp)
+///
+/// This uses a `Mutex` inside of `LazyLock` because:
+/// - Using `Mutex` alone, it can't be statically initialized because `HashMap::new()` isn't `const`
+/// - Using `LazyLock` alone doesn't allow interior mutability
+pub static LOCK_TABLE: LazyLock<Mutex<HashMap<String, String>>> =
+    LazyLock::new(|| Mutex::new(HashMap::new()));
+/// Generate timestamps for use in `LOCK_TABLE`
+/// `Instant` is preferable to `SystemTime` because it uses `CLOCK_MONOTONIC` and is not affected by NTP adjustments
+pub static LOCK_TABLE_TIMER: LazyLock<Instant> = LazyLock::new(Instant::now);
+
 /// Called by `rusqlite` if we are waiting too long on a database lock
-/// If called too many times, will fail to avoid deadlocks
+/// If called too many times, will assume a deadlock and panic
 pub fn tx_busy_handler(run_count: i32) -> bool {
-    const TIMEOUT: Duration = Duration::from_secs(60);
+    const TIMEOUT: Duration = Duration::from_secs(300);
     const AVG_SLEEP_TIME_MS: u64 = 100;
 
     // First, check if this is taking unreasonably long. If so, it's probably a deadlock
     let run_count = run_count.unsigned_abs();
     let approx_time_elapsed =
         Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count)));
     if approx_time_elapsed > TIMEOUT {
-        error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs();
+        error!("Deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs();
             "run_count" => run_count,
             "backtrace" => ?Backtrace::capture()
         );
-        return false;
+        for (k, v) in LOCK_TABLE.lock().unwrap().iter() {
+            error!("Database '{k}' last locked by {v}");
+        }
+        panic!("Deadlock in thread {:?}", thread::current().name());
     }
 
     let mut sleep_time_ms = 2u64.saturating_pow(run_count);
diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs
index a0496d3bfc6..70850d372cc 100644
--- a/stackslib/src/util_lib/db.rs
+++ b/stackslib/src/util_lib/db.rs
@@ -18,7 +18,7 @@ use std::backtrace::Backtrace;
 use std::io::Error as IOError;
 use std::ops::{Deref, DerefMut};
 use std::path::{Path, PathBuf};
-use std::time::Duration;
+use std::time::{Duration, SystemTime};
 use std::{error, fmt, fs, io};
 
 use clarity::vm::types::QualifiedContractIdentifier;
@@ -32,6 +32,7 @@ use serde_json::Error as serde_error;
 use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash};
 use stacks_common::types::sqlite::NO_PARAMS;
 use stacks_common::types::Address;
+use stacks_common::util::db::{LOCK_TABLE, LOCK_TABLE_TIMER};
 use stacks_common::util::hash::to_hex;
 use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
 use stacks_common::util::sleep_ms;
@@ -673,7 +674,12 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result<DBTx<'a>, Error> {
 /// Sames as `tx_begin_immediate` except that it returns a rusqlite error.
 pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result<Transaction<'a>, sqlite_error> {
     conn.busy_handler(Some(tx_busy_handler))?;
-    Transaction::new(conn, TransactionBehavior::Immediate)
+    let tx = Transaction::new(conn, TransactionBehavior::Immediate)?;
+    let time = LOCK_TABLE_TIMER.elapsed().as_millis();
+    let k = format!("{:?}", tx.deref());
+    let v = format!("{:?}@{time}", std::thread::current().name());
+    LOCK_TABLE.lock().unwrap().insert(k, v);
+    Ok(tx)
 }
 
 #[cfg(feature = "profile-sqlite")]

From 67d0f308edd39a7270ea327c92c510fe59fd27ff Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Wed, 11 Sep 2024 12:45:16 -0400
Subject: [PATCH 509/910] refactor: Move logic for updating `LOCK_TABLE` into stacks-common

---
 stacks-common/src/util/db.rs | 14 ++++++++++++--
 stackslib/src/util_lib/db.rs |  7 ++-----
 2 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs
index fff39a32978..257a98aab9d 100644
--- a/stacks-common/src/util/db.rs
+++ b/stacks-common/src/util/db.rs
@@ -21,6 +21,7 @@ use std::time::{Duration, Instant};
 
 use hashbrown::HashMap;
 use rand::{thread_rng, Rng};
+use rusqlite::Connection;
 
 use crate::util::sleep_ms;
 
@@ -31,11 +32,20 @@ use crate::util::sleep_ms;
 /// This uses a `Mutex` inside of `LazyLock` because:
 /// - Using `Mutex` alone, it can't be statically initialized because `HashMap::new()` isn't `const`
 /// - Using `LazyLock` alone doesn't allow interior mutability
-pub static LOCK_TABLE: LazyLock<Mutex<HashMap<String, String>>> =
+static LOCK_TABLE: LazyLock<Mutex<HashMap<String, String>>> =
     LazyLock::new(|| Mutex::new(HashMap::new()));
 /// Generate timestamps for use in `LOCK_TABLE`
 /// `Instant` is preferable to `SystemTime` because it uses `CLOCK_MONOTONIC` and is not affected by NTP adjustments
-pub static LOCK_TABLE_TIMER: LazyLock<Instant> = LazyLock::new(Instant::now);
+static LOCK_TABLE_TIMER: LazyLock<Instant> = LazyLock::new(Instant::now);
+
+/// Call when using an operation which locks a database
+/// Updates `LOCK_TABLE`
+pub fn update_lock_table(conn: &Connection) {
+    let timestamp = LOCK_TABLE_TIMER.elapsed().as_millis();
+    let k = format!("{conn:?}");
+    let v = format!("{:?}@{timestamp}", std::thread::current().name());
+    LOCK_TABLE.lock().unwrap().insert(k, v);
+}
 
 /// Called by `rusqlite` if we are waiting too long on a database lock
 /// If called too many times, will assume a deadlock and panic
diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs
index 70850d372cc..53f597daa28 100644
--- a/stackslib/src/util_lib/db.rs
+++ b/stackslib/src/util_lib/db.rs
@@ -32,7 +32,7 @@ use serde_json::Error as serde_error;
 use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash};
 use stacks_common::types::sqlite::NO_PARAMS;
 use stacks_common::types::Address;
-use stacks_common::util::db::{LOCK_TABLE, LOCK_TABLE_TIMER};
+use stacks_common::util::db::update_lock_table;
 use stacks_common::util::hash::to_hex;
 use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey};
 use stacks_common::util::sleep_ms;
@@ -675,10 +675,7 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result<DBTx<'a>, Error> {
 pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result<Transaction<'a>, sqlite_error> {
     conn.busy_handler(Some(tx_busy_handler))?;
     let tx = Transaction::new(conn, TransactionBehavior::Immediate)?;
-    let time = LOCK_TABLE_TIMER.elapsed().as_millis();
-    let k = format!("{:?}", tx.deref());
-    let v = format!("{:?}@{time}", std::thread::current().name());
-    LOCK_TABLE.lock().unwrap().insert(k, v);
+    update_lock_table(tx.deref());
     Ok(tx)
 }

From 0d44ca4bc4d262f9d06cbbddd7edcdb10d272e55 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Wed, 11 Sep 2024 08:40:44 -0400
Subject: [PATCH 510/910] feat: use `timeout` param from burnchain config

---
 .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 2 +-
 testnet/stacks-node/src/config.rs                            | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
index 145e73a3897..2593c790021 100644
--- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
+++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
@@ -2803,7 +2803,7 @@ impl BitcoinRPCRequest {
     fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult<serde_json::Value> {
         let request = BitcoinRPCRequest::build_rpc_request(&config, &payload);
 
-        let timeout = Duration::from_secs(60);
+        let timeout = Duration::from_secs(u64::from(config.burnchain.timeout));
 
         let host = request.preamble().host.hostname();
         let port = request.preamble().host.port();
diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index d1b115d9cf5..73ebf231769 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -1416,6 +1416,7 @@ pub struct BurnchainConfig {
     pub rpc_ssl: bool,
     pub username: Option<String>,
     pub password: Option<String>,
+    /// Timeout, in seconds, for communication with bitcoind
     pub timeout: u32,
     pub magic_bytes: MagicBytes,
     pub local_mining_public_key: Option<String>,
@@ -1457,7 +1458,7 @@ impl BurnchainConfig {
             rpc_ssl: false,
             username: None,
             password: None,
-            timeout: 300,
+            timeout: 5,
             magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(),
             local_mining_public_key: None,
             process_exit_at_block_height: None,
@@ -1551,6 +1552,7 @@ pub struct BurnchainConfigFile {
     pub rpc_ssl: Option<bool>,
     pub username: Option<String>,
     pub password: Option<String>,
+    /// Timeout, in seconds, for communication with bitcoind
    pub timeout: Option<u32>,
     pub magic_bytes: Option<String>,
     pub local_mining_public_key: Option<String>,

From 292cd8902286c3b9224d31c2050cff2de7049774 Mon Sep 17 00:00:00
2001 From: Jeff Bencin Date: Wed, 11 Sep 2024 12:50:04 -0400 Subject: [PATCH 511/910] chore: Add comment --- stacks-common/src/util/db.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 257a98aab9d..89fe4677c73 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -42,8 +42,9 @@ static LOCK_TABLE_TIMER: LazyLock = LazyLock::new(Instant::now); /// Updates `LOCK_TABLE` pub fn update_lock_table(conn: &Connection) { let timestamp = LOCK_TABLE_TIMER.elapsed().as_millis(); + // The debug format for `Connection` includes the path let k = format!("{conn:?}"); - let v = format!("{:?}@{timestamp}", std::thread::current().name()); + let v = format!("{:?}@{timestamp}", thread::current().name()); LOCK_TABLE.lock().unwrap().insert(k, v); } From f9f23fb3e7a41f0a147ab0ff91a00c3e91f30e1f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 11 Sep 2024 10:39:17 -0700 Subject: [PATCH 512/910] Check if we are the sortition winner before attempting to mock sign Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + .../src/nakamoto_node/sign_coordinator.rs | 13 +- testnet/stacks-node/src/neon_node.rs | 79 +++--- testnet/stacks-node/src/tests/signer/v0.rs | 255 +++++++++++++++--- 4 files changed, 277 insertions(+), 71 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6241c521e15..e9f3b7735f5 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -94,6 +94,7 @@ jobs: - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::multiple_miners_mock_sign_epoch_25 - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6810afbb6b9..40d3e969370 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -406,13 +406,14 @@ impl SignCoordinator { match miners_session.put_chunk(&chunk) { Ok(ack) => { - debug!("Wrote message to stackerdb: {ack:?}"); - Ok(()) - } - Err(e) => { - warn!("Failed to write message to stackerdb {e:?}"); - Err("Failed to write message to stackerdb".into()) + if ack.accepted { + debug!("Wrote message to stackerdb: {ack:?}"); + Ok(()) + } else { + Err(format!("{ack:?}")) + } } + Err(e) => Err(format!("{e:?}")), } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d627e081b20..acec391ba02 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2340,17 +2340,12 @@ impl BlockMinerThread { } /// Read any mock signatures from stackerdb and respond to them - pub fn send_mock_miner_messages(&mut self) -> Result<(), ChainstateError> { - let miner_config = self.config.get_miner_config(); - if !miner_config.pre_nakamoto_mock_signing { - debug!("Pre-Nakamoto mock signing is disabled"); - return Ok(()); - } - + pub fn send_mock_miner_messages(&mut self) -> Result<(), String> { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), 
self.burn_block.block_height)? + let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height) + .map_err(|e| e.to_string())? .expect("FATAL: no epoch defined") .epoch_id; if epoch_id != StacksEpochId::Epoch25 { @@ -2360,6 +2355,12 @@ impl BlockMinerThread { return Ok(()); } + let miner_config = self.config.get_miner_config(); + if !miner_config.pre_nakamoto_mock_signing { + debug!("Pre-Nakamoto mock signing is disabled"); + return Ok(()); + } + let mining_key = miner_config .mining_key .expect("Cannot mock sign without mining key"); @@ -2374,25 +2375,31 @@ impl BlockMinerThread { } // find out which slot we're in. If we are not the latest sortition winner, we should not be sending anymore messages anyway - let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; - let (_, miners_info) = - NakamotoChainState::make_miners_stackerdb_config(&burn_db, &self.burn_block)?; - let idx = miners_info.get_latest_winner_index(); - let sortitions = miners_info.get_sortitions(); - let election_sortition = *sortitions - .get(idx as usize) - .expect("FATAL: latest winner index out of bounds"); - - let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); - let mut miners_stackerdb = - StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - + let ih = burn_db.index_handle(&self.burn_block.sortition_id); + let last_winner_snapshot = ih + .get_last_snapshot_with_sortition(self.burn_block.block_height) + .map_err(|e| e.to_string())?; + + if last_winner_snapshot.miner_pk_hash + != Some(Hash160::from_node_public_key( + &StacksPublicKey::from_private(&mining_key), + )) + { + return Ok(()); + } + let election_sortition = last_winner_snapshot.consensus_hash; let mock_proposal = MockProposal::new(peer_info, self.config.burnchain.chain_id, &mining_key); info!("Sending mock proposal to stackerdb: {mock_proposal:?}"); - if let Err(e) = SignCoordinator::send_miners_message( + let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false) + .map_err(|e| e.to_string())?; + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + + SignCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2402,15 +2409,17 @@ impl BlockMinerThread { self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, - ) { - warn!("Failed to send mock proposal to stackerdb: {:?}", &e); - return Ok(()); - } + ) + .map_err(|e| { + warn!("Failed to write mock proposal to stackerdb."); + e + })?; // Retrieve any MockSignatures from stackerdb info!("Waiting for mock signatures..."); - let mock_signatures = - self.wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10))?; + let mock_signatures = self + .wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10)) + .map_err(|e| e.to_string())?; let mock_block = MockBlock { mock_proposal, @@ -2418,8 +2427,8 @@ impl BlockMinerThread { }; info!("Sending mock block to stackerdb: {mock_block:?}"); - if let Err(e) = SignCoordinator::send_miners_message( - &miner_config.mining_key.expect("BUG: no mining key"), + SignCoordinator::send_miners_message( + &mining_key, &burn_db, &self.burn_block, &stackerdbs, @@ -2428,9 +2437,11 @@ impl BlockMinerThread { self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, - ) { - warn!("Failed to send mock block to stackerdb: {:?}", &e); - } + ) + 
.map_err(|e| { + warn!("Failed to write mock block to stackerdb."); + e + })?; Ok(()) } @@ -3795,7 +3806,7 @@ impl RelayerThread { } let Some(mut miner_thread_state) = - self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) + self.create_block_miner(registered_key, last_burn_block.clone(), issue_timestamp_ms) else { return false; }; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b931441230f..6cfc4b0399f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -39,7 +39,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; +use stacks::util::hash::{Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -63,8 +63,9 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, - wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_mine_commit, + setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -163,26 +164,17 @@ impl SignerTest { .get_burnchain() .pox_constants .reward_cycle_length as u64; - let prepare_phase_len = self - .running_nodes - .conf - .get_burnchain() - .pox_constants - .prepare_length as u64; let epoch_25_reward_cycle_boundary = epoch_25_start_height.saturating_sub(epoch_25_start_height % reward_cycle_len); - let epoch_25_reward_set_calculation_boundary = epoch_25_reward_cycle_boundary - .saturating_sub(prepare_phase_len) - .wrapping_add(reward_cycle_len) - .wrapping_add(1); - let next_reward_cycle_boundary = epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + let target_height = next_reward_cycle_boundary - 1; + info!("Advancing to burn block height {target_height}...",); run_until_burnchain_height( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, - epoch_25_reward_set_calculation_boundary, + target_height, &self.running_nodes.conf, ); debug!("Waiting for signer set calculation."); @@ -210,6 +202,7 @@ impl SignerTest { debug!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state debug!("Waiting for signers to initialize."); + info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); next_block_and_wait( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, @@ -217,14 +210,6 @@ impl SignerTest { self.wait_for_registered(30); debug!("Signers initialized"); - info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); - run_until_burnchain_height( - &mut self.running_nodes.btc_regtest_controller, - &self.running_nodes.blocks_processed, - next_reward_cycle_boundary, - &self.running_nodes.conf, - ); - let current_burn_block_height = self 
.running_nodes .btc_regtest_controller @@ -1492,9 +1477,10 @@ fn multiple_miners() { let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { - if btc_blocks_mined > max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } + assert!( + max_nakamoto_tenures >= btc_blocks_mined, + "Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting" + ); let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); @@ -1811,14 +1797,13 @@ fn miner_forking() { // (a) its the first nakamoto tenure // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) let mut expects_miner_2_to_be_valid = true; - + let max_sortitions = 20; // due to the random nature of mining sortitions, the way this test is structured // is that keeps track of two scenarios that we want to cover, and once enough sortitions // have been produced to cover those scenarios, it stops and checks the results at the end. while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { - if sortitions_seen.len() >= 20 { - panic!("Produced 20 sortitions, but didn't cover the test scenarios, aborting"); - } + let nmb_sortitions_seen = sortitions_seen.len(); + assert!(max_sortitions >= nmb_sortitions_seen, "Produced {nmb_sortitions_seen} sortitions, but didn't cover the test scenarios, aborting"); let (sortition_data, had_tenure) = run_sortition(); sortitions_seen.push((sortition_data.clone(), had_tenure)); @@ -2537,7 +2522,6 @@ fn mock_sign_epoch_25() { }; if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height { - assert_eq!(mock_block.mock_signatures.len(), num_signers); mock_block .mock_signatures .iter() @@ -2567,6 +2551,215 @@ fn mock_sign_epoch_25() { } } +#[test] +#[ignore] +fn multiple_miners_mock_sign_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + 
config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + config.miner.pre_nakamoto_mock_signing = true; + let epochs = config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Reached Epoch 2.5 Reward Cycle-------------------------"); + + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_keys = signer_test.get_signer_public_keys(reward_cycle); + let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + + // Only advance to 
the boundary as the epoch 2.5 miner will be shut down at this point. + while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_boundary + { + let mut mock_block_mesage = None; + let mock_poll_time = Instant::now(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + while mock_block_mesage.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() + { + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockBlock(mock_block) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_mesage = Some(mock_block); + break; + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); + } + } +} + #[test] #[ignore] /// This test asserts that signer set rollover works as expected. 
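
Patches 497 through 511 above iterate on a single idea: `tx_busy_handler` estimates its total wait as `run_count` × 100ms (the long-run average sleep), grows the sleep exponentially until it overshoots that average, then hovers around it with ±10ms of jitter, and once the estimate exceeds the timeout it dumps `LOCK_TABLE` and panics rather than spinning forever. The following standalone sketch illustrates just that backoff arithmetic; the `next_sleep_ms` helper, its deterministic `jitter` argument (standing in for `thread_rng()`), and the `main` driver are illustrative assumptions, not code from the patches:

```
// Sketch of the backoff schedule converged on by patches 497-508.
// Assumption: `jitter` is passed in instead of drawn from an RNG.
use std::time::Duration;

const TIMEOUT: Duration = Duration::from_secs(300);
const AVG_SLEEP_TIME_MS: u64 = 100;

/// Some(sleep_ms) if the caller should retry; None once the estimated
/// wait exceeds TIMEOUT (the point where the real handler panics).
fn next_sleep_ms(run_count: u32, jitter: u64) -> Option<u64> {
    let approx_elapsed =
        Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count)));
    if approx_elapsed > TIMEOUT {
        return None; // probable deadlock
    }
    // Exponential growth with additive jitter...
    let mut sleep_ms = 2u64.saturating_pow(run_count).saturating_add(jitter);
    // ...clamped to AVG_SLEEP_TIME_MS +/- 10ms once it overshoots, which is
    // what keeps the run_count-based elapsed-time estimate honest.
    if sleep_ms > AVG_SLEEP_TIME_MS {
        sleep_ms = (AVG_SLEEP_TIME_MS - 10) + (jitter % 20);
    }
    Some(sleep_ms)
}

fn main() {
    // With jitter = 3 the schedule runs 4, 5, 7, 11, 19, 35, 67, 93, 93, ...
    for run_count in 0..9 {
        println!("retry {run_count}: {:?}", next_sleep_ms(run_count, 3));
    }
}
```

The design choice worth noting is that the timeout is only an estimate: the handler never reads a clock, it infers elapsed time from how many times rusqlite has invoked it, which is why pinning the average sleep near `AVG_SLEEP_TIME_MS` matters.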
From b7e29b073e4b1e45471cb9f3af1bf0c4d4b37f87 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 11 Sep 2024 11:05:19 -0700 Subject: [PATCH 513/910] Fix build issues during cherry pick failure Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6cfc4b0399f..9b04206975e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -39,7 +39,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{Hash160, MerkleHashFunc}; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -63,9 +63,8 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_mine_commit, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -2629,7 +2628,8 @@ fn multiple_miners_mock_sign_epoch_25() { false }) }, - &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); From 8462f901d4cbd258e99b5176e8f23d3e320077f7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 11 Sep 2024 11:10:17 -0700 Subject: [PATCH 514/910] fix: revert change to update processed time --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index b83deebac0f..be904395c25 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -595,7 +595,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { signing_weight: u32, obtain_method: NakamotoBlockObtainMethod, ) -> Result<(), ChainstateError> { - self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3, processed_time = ?4 WHERE consensus_hash = ?5 AND block_hash = ?6", + self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3 WHERE consensus_hash = ?4 AND block_hash = ?5", params![ &block.serialize_to_vec(), &signing_weight, From 2ffa044f91014e6707e6125ff99ddd651ab25d94 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 11 Sep 2024 12:08:01 -0700 Subject: [PATCH 515/910] fix: apply schema 8 migration before 9 --- stackslib/src/chainstate/burn/db/sortdb.rs | 30 ++++++++++------------ 1 file changed, 13 insertions(+), 17 deletions(-) diff --git 
a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index f545c53c8c3..eb156cbb961 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2862,7 +2862,7 @@ impl SortitionDB { sql_pragma(self.conn(), "journal_mode", &"WAL")?; sql_pragma(self.conn(), "foreign_keys", &true)?; - let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; // create first (sentinel) snapshot debug!("Make first snapshot"); @@ -2888,6 +2888,12 @@ impl SortitionDB { SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; SortitionDB::apply_schema_8_tables(&db_tx, epochs_ref)?; + // `apply_schema_8_migration` creates new transactions, so + // commit this first. + db_tx.commit()?; + // NOTE: we don't need to provide a migrator here because we're not migrating + self.apply_schema_8_migration(None)?; + let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2906,9 +2912,6 @@ impl SortitionDB { db_tx.commit()?; - // NOTE: we don't need to provide a migrator here because we're not migrating - self.apply_schema_8_migration(None)?; - self.add_indexes()?; debug!("Instantiated SortDB"); @@ -3352,11 +3355,6 @@ impl SortitionDB { ) -> Result<(), db_error> { let canonical_tip = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; - let schema_version = SortitionDB::get_schema_version(self.conn())? - .unwrap_or("0".to_string()) - .parse::<u64>() - .unwrap_or(0); - // port over `stacks_chain_tips` table info!("Instantiating `stacks_chain_tips` table..."); self.apply_schema_8_stacks_chain_tips(&canonical_tip)?; @@ -3370,14 +3368,12 @@ impl SortitionDB { info!("No migrator implementation given; `preprocessed_reward_sets` will not be prepopulated"); } - if schema_version < 8 { - let tx = self.tx_begin()?; - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["8"], - )?; - tx.commit()?; - } + let tx = self.tx_begin()?; + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["8"], + )?; + tx.commit()?; Ok(()) } From 5574d118e835482cb628b164501c5c21e85ee254 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 11 Sep 2024 12:10:34 -0700 Subject: [PATCH 516/910] fix: add mutant skip to `make_tenure_start_info` --- testnet/stacks-node/src/nakamoto_node/miner.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 1669ee9484e..3de7d2e5120 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1206,6 +1206,7 @@ impl BlockMinerThread { Ok(block) } + #[cfg_attr(test, mutants::skip)] /// Create the tenure start info for the block we're going to build fn make_tenure_start_info( &self, From 120348ac38175e549c901ab6d0184293e9ead089 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 15:43:07 -0400 Subject: [PATCH 517/910] chore: avoid warning from duplicate block-commit This change ended up being a lot larger than I'd hoped, but during a recent investigation into some networking failures when communicating with the bitcoin node, these warnings became annoying. The goal here is simply to reduce false-positive warning logs. 
The problematic case shows up like this in the logs before this change: ``` INFO [1725975417.732025] [testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs:1634] [miner-block-http://127.0.0.1:35738] Abort attempt to re-submit identical LeaderBlockCommit WARN [1725975417.732029] [testnet/stacks-node/src/neon_node.rs:2759] [miner-block-http://127.0.0.1:35738] Relayer: Failed to submit Bitcoin transaction ``` --- .../burnchains/bitcoin_regtest_controller.rs | 143 ++++++++++-------- .../src/burnchains/mocknet_controller.rs | 4 +- testnet/stacks-node/src/burnchains/mod.rs | 14 +- .../stacks-node/src/nakamoto_node/relayer.rs | 10 +- testnet/stacks-node/src/neon_node.rs | 27 ++-- testnet/stacks-node/src/run_loop/neon.rs | 2 +- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 10 +- .../src/tests/nakamoto_integrations.rs | 12 +- .../src/tests/neon_integrations.rs | 20 +-- 10 files changed, 135 insertions(+), 109 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 145e73a3897..19904872531 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -845,7 +845,7 @@ impl BitcoinRegtestController { payload: LeaderKeyRegisterOp, signer: &mut BurnchainOpSigner, _attempt: u64, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); // reload the config to find satoshis_per_byte changes @@ -890,7 +890,7 @@ impl BitcoinRegtestController { &mut utxos, signer, true, // key register op requires change output to exist - )?; + ); increment_btc_ops_sent_counter(); @@ -899,7 +899,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(not(test))] @@ -909,7 +909,7 @@ impl BitcoinRegtestController { _payload: TransferStxOp, _signer: &mut BurnchainOpSigner, _utxo: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -920,7 +920,7 @@ impl BitcoinRegtestController { _payload: DelegateStxOp, _signer: &mut BurnchainOpSigner, _utxo: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -931,7 +931,7 @@ impl BitcoinRegtestController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, utxo: Option, - ) -> Option { + ) -> Result { let transaction = match operation { BlockstackOperationType::LeaderBlockCommit(_) | BlockstackOperationType::LeaderKeyRegister(_) @@ -950,11 +950,7 @@ impl BitcoinRegtestController { let ser_transaction = SerializedTx::new(transaction.clone()); - if self.send_transaction(ser_transaction).is_some() { - Some(transaction) - } else { - None - } + self.send_transaction(ser_transaction).map(|_| transaction) } #[cfg(test)] @@ -970,7 +966,7 @@ impl BitcoinRegtestController { payload: TransferStxOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_TRANSFER_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { @@ -1000,7 +996,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1028,7 +1026,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); 
increment_btc_ops_sent_counter(); @@ -1037,7 +1035,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(test)] @@ -1053,7 +1051,7 @@ impl BitcoinRegtestController { payload: DelegateStxOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_DELEGATE_STACKS_ESTIM_SIZE; @@ -1084,7 +1082,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1112,7 +1112,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1121,7 +1121,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(test)] @@ -1132,7 +1132,7 @@ impl BitcoinRegtestController { payload: VoteForAggregateKeyOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_VOTE_AGG_ESTIM_SIZE; @@ -1163,7 +1163,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1187,7 +1189,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1196,7 +1198,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(not(test))] @@ -1207,7 +1209,7 @@ impl BitcoinRegtestController { _payload: VoteForAggregateKeyOp, _signer: &mut BurnchainOpSigner, _utxo_to_use: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -1217,7 +1219,7 @@ impl BitcoinRegtestController { _epoch_id: StacksEpochId, _payload: PreStxOp, _signer: &mut BurnchainOpSigner, - ) -> Option { + ) -> Result { unimplemented!() } @@ -1227,7 +1229,7 @@ impl BitcoinRegtestController { epoch_id: StacksEpochId, payload: PreStxOp, signer: &mut BurnchainOpSigner, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; @@ -1266,7 +1268,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1275,7 +1277,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg_attr(test, mutants::skip)] @@ -1286,7 +1288,7 @@ impl BitcoinRegtestController { _payload: StackStxOp, _signer: &mut BurnchainOpSigner, _utxo_to_use: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -1297,7 +1299,7 @@ impl BitcoinRegtestController { payload: StackStxOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_STACK_STX_ESTIM_SIZE; @@ -1328,7 +1330,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1354,7 +1358,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); 
increment_btc_ops_sent_counter(); @@ -1363,7 +1367,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } fn magic_bytes(&self) -> Vec { @@ -1389,9 +1393,14 @@ impl BitcoinRegtestController { utxos_to_exclude: Option, previous_fees: Option, previous_txids: &Vec, - ) -> Option { + ) -> Result { let _ = self.sortdb_mut(); - let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + let burn_chain_tip = self + .burnchain_db + .as_ref() + .ok_or(BurnchainControllerError::BurnchainError)? + .get_canonical_chain_tip() + .map_err(|_| BurnchainControllerError::BurnchainError)?; let estimated_fees = match previous_fees { Some(fees) => fees.fees_from_previous_tx(&payload, &self.config), None => LeaderBlockCommitFees::estimated_fees_from_payload(&payload, &self.config), @@ -1419,7 +1428,7 @@ impl BitcoinRegtestController { mut estimated_fees: LeaderBlockCommitFees, previous_txids: &Vec, burnchain_block_height: u64, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let (mut tx, mut utxos) = self.prepare_tx( epoch_id, @@ -1467,7 +1476,7 @@ impl BitcoinRegtestController { &mut utxos, signer, true, // block commit op requires change output to exist - )?; + ); let serialized_tx = SerializedTx::new(tx.clone()); @@ -1500,7 +1509,7 @@ impl BitcoinRegtestController { increment_btc_ops_sent_counter(); - Some(tx) + Ok(tx) } fn build_leader_block_commit_tx( @@ -1509,7 +1518,7 @@ impl BitcoinRegtestController { payload: LeaderBlockCommitOp, signer: &mut BurnchainOpSigner, _attempt: u64, - ) -> Option { + ) -> Result { // Are we currently tracking an operation? if self.ongoing_block_commit.is_none() || !self.allow_rbf { // Good to go, let's build the transaction and send it. @@ -1560,7 +1569,9 @@ impl BitcoinRegtestController { // Did a re-org occur since we fetched our UTXOs, or are the UTXOs so stale that they should be abandoned? let mut traversal_depth = 0; - let mut burn_chain_tip = burnchain_db.get_canonical_chain_tip().ok()?; + let mut burn_chain_tip = burnchain_db + .get_canonical_chain_tip() + .map_err(|_| BurnchainControllerError::BurnchainError)?; let mut found_last_mined_at = false; while traversal_depth < UTXO_CACHE_STALENESS_LIMIT { if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh { @@ -1572,7 +1583,7 @@ impl BitcoinRegtestController { &burnchain_db.conn(), &burn_chain_tip.parent_block_hash, ) - .ok()?; + .map_err(|_| BurnchainControllerError::BurnchainError)?; burn_chain_tip = parent.header; traversal_depth += 1; @@ -1604,7 +1615,7 @@ impl BitcoinRegtestController { get_max_rbf(&self.config) ); self.ongoing_block_commit = Some(ongoing_op); - return None; + return Err(BurnchainControllerError::MaxFeeRateExceeded); } // An ongoing operation is in the mempool and we received a new block. 
The desired behaviour is the following: @@ -1619,7 +1630,7 @@ impl BitcoinRegtestController { if payload == ongoing_op.payload { info!("Abort attempt to re-submit identical LeaderBlockCommit"); self.ongoing_block_commit = Some(ongoing_op); - return None; + return Err(BurnchainControllerError::IdenticalOperation); } // Let's proceed and early return 2) i) @@ -1649,7 +1660,7 @@ impl BitcoinRegtestController { ) }; - if res.is_none() { + if res.is_ok() { self.ongoing_block_commit = Some(ongoing_op); } @@ -1688,7 +1699,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, block_height: u64, - ) -> Option<(Transaction, UTXOSet)> { + ) -> Result<(Transaction, UTXOSet), BurnchainControllerError> { let utxos = if let Some(utxos) = utxos_to_include { // in RBF, you have to consume the same UTXOs utxos @@ -1710,7 +1721,7 @@ impl BitcoinRegtestController { &addr2str(&addr), epoch_id ); - return None; + return Err(BurnchainControllerError::NoUtxos); } }; utxos @@ -1724,7 +1735,7 @@ impl BitcoinRegtestController { lock_time: 0, }; - Some((transaction, utxos)) + Ok((transaction, utxos)) } fn finalize_tx( @@ -1738,7 +1749,7 @@ impl BitcoinRegtestController { utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, force_change_output: bool, - ) -> Option<()> { + ) { // spend UTXOs in order by confirmations. Spend the least-confirmed UTXO first, and in the // event of a tie, spend the smallest-value UTXO first. utxos_set.utxos.sort_by(|u1, u2| { @@ -1788,7 +1799,6 @@ impl BitcoinRegtestController { force_change_output, ); signer.dispose(); - Some(()) } /// Sign and serialize a tx, consuming the UTXOs in utxo_set and spending total_to_spend @@ -1918,22 +1928,21 @@ impl BitcoinRegtestController { /// Send a serialized tx to the Bitcoin node. Return Some(txid) on successful send; None on /// failure. 
- pub fn send_transaction(&self, transaction: SerializedTx) -> Option { - debug!("Send raw transaction: {}", transaction.to_hex()); - let result = BitcoinRPCRequest::send_raw_transaction(&self.config, transaction.to_hex()); - match result { - Ok(_) => { - debug!("Sent transaction {}", &transaction.txid); - Some(transaction.txid()) - } - Err(e) => { - error!( - "Bitcoin RPC failure: transaction submission failed - {:?}", - e - ); - None - } - } + pub fn send_transaction( + &self, + transaction: SerializedTx, + ) -> Result { + debug!("Sending raw transaction: {}", transaction.to_hex()); + + BitcoinRPCRequest::send_raw_transaction(&self.config, transaction.to_hex()) + .map(|_| { + debug!("Transaction {} sent successfully", &transaction.txid()); + transaction.txid() + }) + .map_err(|e| { + error!("Bitcoin RPC error: transaction submission failed - {:?}", e); + BurnchainControllerError::TransactionSubmissionFailed + }) } /// wait until the ChainsCoordinator has processed sortitions up to @@ -2066,7 +2075,7 @@ impl BitcoinRegtestController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, attempt: u64, - ) -> Option { + ) -> Result { let transaction = match operation { BlockstackOperationType::LeaderBlockCommit(payload) => { self.build_leader_block_commit_tx(epoch_id, payload, op_signer, attempt) @@ -2263,7 +2272,7 @@ impl BurnchainController for BitcoinRegtestController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, attempt: u64, - ) -> Option { + ) -> Result { let transaction = self.make_operation_tx(epoch_id, operation, op_signer, attempt)?; self.send_transaction(transaction) } diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 6bb958e0708..d518f5bdea9 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -168,10 +168,10 @@ impl BurnchainController for MocknetController { operation: BlockstackOperationType, _op_signer: &mut BurnchainOpSigner, _attempt: u64, - ) -> Option { + ) -> Result { let txid = operation.txid(); self.queued_operations.push_back(operation); - Some(txid) + Ok(txid) } fn sync( diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 5506cf6231f..d6706a0e1c8 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -19,6 +19,12 @@ use super::operations::BurnchainOpSigner; pub enum Error { CoordinatorClosed, IndexerError(burnchains::Error), + BurnchainError, + MaxFeeRateExceeded, + IdenticalOperation, + NoUtxos, + TransactionSubmissionFailed, + SerializerError, } impl fmt::Display for Error { @@ -26,6 +32,12 @@ impl fmt::Display for Error { match self { Error::CoordinatorClosed => write!(f, "ChainsCoordinator closed"), Error::IndexerError(ref e) => write!(f, "Indexer error: {:?}", e), + Error::BurnchainError => write!(f, "Burnchain error"), + Error::MaxFeeRateExceeded => write!(f, "Max fee rate exceeded"), + Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), + Error::NoUtxos => write!(f, "No UTXOs available"), + Error::TransactionSubmissionFailed => write!(f, "Transaction submission failed"), + Error::SerializerError => write!(f, "Serializer error"), } } } @@ -45,7 +57,7 @@ pub trait BurnchainController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, attempt: u64, - ) -> Option; + ) -> Result; fn sync(&mut self, 
target_block_height_opt: Option) -> Result<(BurnchainTip, u64), Error>; fn sortdb_ref(&self) -> &SortitionDB; fn sortdb_mut(&mut self) -> &mut SortitionDB; diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 435305472a9..9ae219659c5 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -530,9 +530,9 @@ impl RelayerThread { let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh); let mut op_signer = self.keychain.generate_op_signer(); - if let Some(txid) = - self.bitcoin_controller - .submit_operation(cur_epoch, op, &mut op_signer, 1) + if let Ok(txid) = self + .bitcoin_controller + .submit_operation(cur_epoch, op, &mut op_signer, 1) { // advance key registration state self.last_vrf_key_burn_height = Some(burn_block.block_height); @@ -1048,8 +1048,8 @@ impl RelayerThread { &mut op_signer, 1, ) - .ok_or_else(|| { - warn!("Failed to submit block-commit bitcoin transaction"); + .map_err(|e| { + warn!("Failed to submit block-commit bitcoin transaction: {}", e); NakamotoNodeError::BurnchainSubmissionFailed })?; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d627e081b20..b7e2843ece2 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -213,7 +213,7 @@ use super::{BurnchainController, Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::{ addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, }; -use crate::burnchains::make_bitcoin_indexer; +use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError}; use crate::chain_data::MinerStats; use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; @@ -2753,16 +2753,21 @@ impl BlockMinerThread { } = self.config.get_node_config(false); let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); - if res.is_none() { - self.failed_to_submit_last_attempt = true; - if !mock_mining { - warn!("Relayer: Failed to submit Bitcoin transaction"); - return None; + self.failed_to_submit_last_attempt = match res { + Ok(_) => false, + Err(BurnchainControllerError::IdenticalOperation) => { + info!("Relayer: Block-commit already submitted"); + true } - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - } else { - self.failed_to_submit_last_attempt = false; - } + Err(_) if mock_mining => { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + true + } + Err(e) => { + warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); + true + } + }; let assembled_block = AssembledAnchorBlock { parent_consensus_hash: parent_block_info.parent_consensus_hash, @@ -3620,7 +3625,7 @@ impl RelayerThread { ); let mut one_off_signer = self.keychain.generate_op_signer(); - if let Some(txid) = + if let Ok(txid) = self.bitcoin_controller .submit_operation(cur_epoch, op, &mut one_off_signer, 1) { diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 36777c4912b..b1fa0ff53bb 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -497,7 +497,7 @@ impl RunLoop { return burnchain_error::ShutdownInitiated; } } - Error::IndexerError(_) => {} + _ => {} } error!("Burnchain controller stopped: {}", e); panic!(); diff --git 
a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 68f37b4fb8e..2006abb05e5 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -629,7 +629,7 @@ fn transition_empty_blocks() { &mut op_signer, 1, ); - assert!(res.is_some(), "Failed to submit block-commit"); + assert!(res.is_ok(), "Failed to submit block-commit"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index bb168b28b97..2f74ffa7708 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -678,7 +678,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -713,7 +713,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Transfer operation should submit successfully" ); @@ -835,7 +835,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -866,7 +866,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Transfer operation should submit successfully" ); @@ -1946,7 +1946,7 @@ fn transition_empty_blocks() { let mut op_signer = keychain.generate_op_signer(); let res = bitcoin_controller.submit_operation(StacksEpochId::Epoch21, op, &mut op_signer, 1); - assert!(res.is_some(), "Failed to submit block-commit"); + assert!(res.is_ok(), "Failed to submit block-commit"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4839bee3be8..fe5d594221c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2876,7 +2876,7 @@ fn vote_for_aggregate_key_burn_op() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -2952,7 +2952,7 @@ fn vote_for_aggregate_key_burn_op() { &mut signer_burnop_signer, 1 ) - .is_some(), + .is_ok(), "Vote for aggregate key operation should submit successfully" ); @@ -3433,7 +3433,7 @@ fn stack_stx_burn_op_integration_test() { &mut miner_signer_1, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -3463,7 +3463,7 @@ fn stack_stx_burn_op_integration_test() { &mut miner_signer_2, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); @@ -3604,7 +3604,7 @@ fn stack_stx_burn_op_integration_test() { &mut signer_burnop_signer_1, 1 ) - .is_some(), + .is_ok(), "Stack STX operation should submit successfully" ); @@ -3631,7 +3631,7 @@ fn stack_stx_burn_op_integration_test() { &mut signer_burnop_signer_2, 1 ) - .is_some(), + .is_ok(), "Stack STX operation should submit successfully" ); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3dc9669a9ea..cd568bc4382 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1934,7 +1934,7 @@ fn stx_transfer_btc_integration_test() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit 
successfully" ); @@ -1964,7 +1964,7 @@ fn stx_transfer_btc_integration_test() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Transfer operation should submit successfully" ); // should be elected in the same block as the transfer, so balances should be unchanged. @@ -2215,7 +2215,7 @@ fn stx_delegate_btc_integration_test() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -2244,7 +2244,7 @@ fn stx_delegate_btc_integration_test() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Delegate operation should submit successfully" ); @@ -2507,7 +2507,7 @@ fn stack_stx_burn_op_test() { &mut miner_signer_1, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -2528,7 +2528,7 @@ fn stack_stx_burn_op_test() { &mut miner_signer_2, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); @@ -2614,7 +2614,7 @@ fn stack_stx_burn_op_test() { &mut spender_signer_1, 1 ) - .is_some(), + .is_ok(), "Stack STX operation with some signer key should submit successfully" ); @@ -2642,7 +2642,7 @@ fn stack_stx_burn_op_test() { &mut spender_signer_2, 1 ) - .is_some(), + .is_ok(), "Stack STX operation with no signer key should submit successfully" ); @@ -2949,7 +2949,7 @@ fn vote_for_aggregate_key_burn_op_test() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -3006,7 +3006,7 @@ fn vote_for_aggregate_key_burn_op_test() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Vote for aggregate key operation should submit successfully" ); From 3a0bc7ae6e838079879b8eb27720507c8b2872e5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 11 Sep 2024 16:23:27 -0400 Subject: [PATCH 518/910] chore: have block acceptance return a BlockAcceptResult instead of a bool --- .../chainstate/nakamoto/coordinator/tests.rs | 8 +- .../src/chainstate/nakamoto/tests/node.rs | 10 +-- stackslib/src/net/api/postblock.rs | 14 +-- stackslib/src/net/api/postblock_v3.rs | 18 ++-- stackslib/src/net/relay.rs | 86 +++++++++++++------ stackslib/src/net/rpc.rs | 3 +- stackslib/src/net/tests/mod.rs | 6 +- stackslib/src/net/tests/relay/epoch2x.rs | 24 +++++- 8 files changed, 110 insertions(+), 59 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 188e2e5a3e8..ddeea515735 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -65,7 +65,7 @@ use crate::chainstate::stacks::{ }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::net::tests::NakamotoBootPlan; @@ -338,8 +338,10 @@ fn replay_reward_cycle( None, NakamotoBlockObtainMethod::Pushed, ) - .unwrap_or(false); - if accepted { + .unwrap_or(BlockAcceptResponse::Rejected( + "encountered error on acceptance".into(), + )); + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index bd12072a01d..4377f74876d 100644 
--- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -70,7 +70,7 @@ use crate::chainstate::stacks::{ use crate::core::{BOOT_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER}; use crate::cost_estimates::metrics::UnitMetric; use crate::cost_estimates::UnitEstimator; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::test::{TestPeer, TestPeerConfig, *}; use crate::util_lib::boot::boot_code_addr; use crate::util_lib::db::Error as db_error; @@ -822,9 +822,9 @@ impl TestStacksNode { } } } else { - false + BlockAcceptResponse::Rejected("try_to_process is false".into()) }; - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); coord.handle_new_nakamoto_stacks_block().unwrap(); processed_blocks.push(block_to_store.clone()); @@ -1247,7 +1247,7 @@ impl<'a> TestPeer<'a> { None, NakamotoBlockObtainMethod::Pushed, )?; - if !accepted { + if !matches!(BlockAcceptResponse::Accepted, accepted) { return Ok(false); } let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); @@ -1491,7 +1491,7 @@ impl<'a> TestPeer<'a> { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {}", &block_id); self.coord.handle_new_nakamoto_stacks_block().unwrap(); diff --git a/stackslib/src/net/api/postblock.rs b/stackslib/src/net/api/postblock.rs index 46612a2f8da..4fc50244f9d 100644 --- a/stackslib/src/net/api/postblock.rs +++ b/stackslib/src/net/api/postblock.rs @@ -47,7 +47,7 @@ use crate::net::httpcore::{ StacksHttpRequest, StacksHttpResponse, }; use crate::net::p2p::PeerNetwork; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::{ Attachment, BlocksData, BlocksDatum, Error as NetError, StacksMessageType, StacksNodeState, }; @@ -177,16 +177,10 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { ) { Ok(accepted) => { debug!( - "{} Stacks block {}/{}", - if accepted { - "Accepted" - } else { - "Did not accept" - }, - &consensus_hash, - &block_hash, + "Received POSTed Stacks block {}/{}: {:?}", + &consensus_hash, &block_hash, &accepted ); - return Ok(accepted); + return Ok(BlockAcceptResponse::Accepted == accepted); } Err(e) => { let msg = format!( diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 39ff26087f7..602e307fd47 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -29,7 +29,7 @@ use crate::net::http::{ use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, }; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; pub static PATH: &'static str = "/v3/blocks/upload/"; @@ -179,10 +179,18 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { }); let data_resp = match response { - Ok(accepted) => StacksBlockAcceptedData { - accepted, - stacks_block_id: block.block_id(), - }, + Ok(accepted) => { + debug!( + "Received POSTed Nakamoto block {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &accepted + ); + StacksBlockAcceptedData { + accepted: matches!(accepted, BlockAcceptResponse::Accepted), + stacks_block_id: block.block_id(), + } + } Err(e) => { 
return e.try_into_contents().map_err(NetError::from); } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 1b08f5cd35d..123f78f422c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -517,6 +517,17 @@ pub struct AcceptedNakamotoBlocks { pub blocks: Vec, } +/// Block processed result +#[derive(Debug, Clone, PartialEq)] +pub enum BlockAcceptResponse { + /// block was accepted to the staging DB + Accepted, + /// we already had this block + AlreadyStored, + /// block was rejected for some reason + Rejected(String), +} + impl Relayer { pub fn new( handle: NetworkHandle, @@ -735,7 +746,7 @@ impl Relayer { consensus_hash: &ConsensusHash, block: &StacksBlock, download_time: u64, - ) -> Result { + ) -> Result { info!( "Handle incoming block {}/{}", consensus_hash, @@ -748,7 +759,9 @@ impl Relayer { if chainstate.fault_injection.hide_blocks && Self::fault_injection_is_block_hidden(&block.header, block_sn.block_height) { - return Ok(false); + return Ok(BlockAcceptResponse::Rejected( + "Fault injection: block is hidden".into(), + )); } // find the snapshot of the parent of this block @@ -758,7 +771,9 @@ impl Relayer { Some(sn) => sn, None => { // doesn't correspond to a PoX-valid sortition - return Ok(false); + return Ok(BlockAcceptResponse::Rejected( + "Block does not correspond to a known sortition".into(), + )); } }; @@ -790,7 +805,7 @@ impl Relayer { "sortition_height" => block_sn.block_height, "ast_rules" => ?ast_rules, ); - return Ok(false); + return Ok(BlockAcceptResponse::Rejected("Block is problematic".into())); } let res = chainstate.preprocess_anchored_block( @@ -806,8 +821,10 @@ impl Relayer { consensus_hash, &block.block_hash() ); + return Ok(BlockAcceptResponse::Accepted); + } else { + return Ok(BlockAcceptResponse::AlreadyStored); } - Ok(res) } /// Wrapper around inner_process_new_nakamoto_block @@ -820,7 +837,7 @@ impl Relayer { block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, - ) -> Result { + ) -> Result { Self::process_new_nakamoto_block_ext( burnchain, sortdb, @@ -856,7 +873,7 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, force_broadcast: bool, - ) -> Result { + ) -> Result { info!( "Handle incoming Nakamoto block {}/{} obtained via {}", &block.header.consensus_hash, @@ -882,13 +899,13 @@ impl Relayer { // it's possible that the signer sent this block to us, in which case, we should // broadcast it debug!( - "Already have Nakamoto block {}, but broadcasting anyway", + "Already have Nakamoto block {}, but treating a new anyway so we can broadcast it", &block.header.block_id() ); - return Ok(true); + return Ok(BlockAcceptResponse::Accepted); } else { debug!("Already have Nakamoto block {}", &block.header.block_id()); - return Ok(false); + return Ok(BlockAcceptResponse::AlreadyStored); } } @@ -931,7 +948,9 @@ impl Relayer { "burn_height" => block.header.chain_length, "sortition_height" => block_sn.block_height, ); - return Ok(false); + return Ok(BlockAcceptResponse::Rejected( + "Nakamoto block is problematic".into(), + )); } let accept_msg = format!( @@ -1006,11 +1025,11 @@ impl Relayer { return Err(chainstate_error::NetError(net_error::CoordinatorClosed)); } } + return Ok(BlockAcceptResponse::Accepted); } else { info!("{}", &reject_msg); + return Ok(BlockAcceptResponse::AlreadyStored); } - - Ok(accepted) } #[cfg_attr(test, mutants::skip)] @@ -1046,7 +1065,7 @@ impl Relayer { continue; } }; - if accept { + if 
BlockAcceptResponse::Accepted == accept { accepted.push(block); } } @@ -1163,8 +1182,8 @@ impl Relayer { block, *download_time, ) { - Ok(accepted) => { - if accepted { + Ok(accept_response) => { + if BlockAcceptResponse::Accepted == accept_response { debug!( "Accepted downloaded block {}/{}", consensus_hash, @@ -1173,9 +1192,10 @@ impl Relayer { new_blocks.insert((*consensus_hash).clone(), block.clone()); } else { debug!( - "Rejected downloaded block {}/{}", + "Rejected downloaded block {}/{}: {:?}", consensus_hash, - &block.block_hash() + &block.block_hash(), + &accept_response ); } } @@ -1302,8 +1322,8 @@ impl Relayer { block, 0, ) { - Ok(accepted) => { - if accepted { + Ok(accept_response) => { + if BlockAcceptResponse::Accepted == accept_response { debug!( "Accepted block {}/{} from {}", &consensus_hash, &bhh, &neighbor_key @@ -1311,8 +1331,8 @@ impl Relayer { new_blocks.insert(consensus_hash.clone(), block.clone()); } else { debug!( - "Rejected block {}/{} from {}", - &consensus_hash, &bhh, &neighbor_key + "Rejected block {}/{} from {}: {:?}", + &consensus_hash, &bhh, &neighbor_key, &accept_response ); } } @@ -1670,20 +1690,30 @@ impl Relayer { coord_comms, NakamotoBlockObtainMethod::Pushed, ) { - Ok(accepted) => { - if accepted { + Ok(accept_response) => match accept_response { + BlockAcceptResponse::Accepted => { debug!( "Accepted Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, neighbor_key ); accepted_blocks.push(nakamoto_block); - } else { - warn!( - "Rejected Nakamoto block {} ({}) from {}", + } + BlockAcceptResponse::AlreadyStored => { + debug!( + "Rejected Nakamoto block {} ({}) from {}: already stored", &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, ); } - } + BlockAcceptResponse::Rejected(msg) => { + warn!( + "Rejected Nakamoto block {} ({}) from {}: {:?}", + &block_id, + &nakamoto_block.header.consensus_hash, + &neighbor_key, + &msg + ); + } + }, Err(chainstate_error::InvalidStacksBlock(msg)) => { warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); bad_neighbors.push((*neighbor_key).clone()); diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 78b1ff096b2..3c1fec15c87 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -559,7 +559,8 @@ impl ConversationHttp { "processing_time_ms" => start_time.elapsed().as_millis(), "latency_ms" => latency, "conn_id" => self.conn_id, - "peer_addr" => &self.peer_addr); + "peer_addr" => &self.peer_addr, + "p2p_msg" => ?msg_opt); if let Some(msg) = msg_opt { ret.push(msg); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 4d7cdac375f..d8ee197f420 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -62,7 +62,7 @@ use crate::chainstate::stacks::{ }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::{StacksEpoch, StacksEpochExtension}; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::util_lib::boot::boot_code_id; @@ -256,7 +256,7 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {block_id} to other peer {}", i); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { @@ -293,7 +293,7 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); 
- if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!( "Accepted malleablized Nakamoto block {block_id} to other peer {}", i diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index b234460ddcc..23d1dd60a8e 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -3315,7 +3315,11 @@ fn test_block_pay_to_contract_gated_at_v210() { 123, ) { Ok(x) => { - assert!(x, "Failed to process valid pay-to-contract block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid pay-to-contract block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); @@ -3491,7 +3495,11 @@ fn test_block_versioned_smart_contract_gated_at_v210() { 123, ) { Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid versioned smart contract block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); @@ -3649,7 +3657,11 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { 123, ) { Ok(x) => { - assert!(x, "Did not accept valid block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Did not accept valid block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); @@ -3702,7 +3714,11 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { 123, ) { Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid versioned smart contract block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); From ca885d07e101fd7bbe8db0c4b6ca62cc23dc1033 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 16:22:02 -0400 Subject: [PATCH 519/910] chore: address review comments --- .../src/burnchains/bitcoin_regtest_controller.rs | 4 ++-- testnet/stacks-node/src/burnchains/mod.rs | 10 ++++++---- testnet/stacks-node/src/nakamoto_node.rs | 3 ++- testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 19904872531..631651c9070 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1721,7 +1721,7 @@ impl BitcoinRegtestController { &addr2str(&addr), epoch_id ); - return Err(BurnchainControllerError::NoUtxos); + return Err(BurnchainControllerError::NoUTXOs); } }; utxos @@ -1941,7 +1941,7 @@ impl BitcoinRegtestController { }) .map_err(|e| { error!("Bitcoin RPC error: transaction submission failed - {:?}", e); - BurnchainControllerError::TransactionSubmissionFailed + BurnchainControllerError::TransactionSubmissionFailed(format!("{:?}", e)) }) } diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index d6706a0e1c8..7cfde884ecb 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -22,8 +22,8 @@ pub enum Error { BurnchainError, MaxFeeRateExceeded, IdenticalOperation, - NoUtxos, - TransactionSubmissionFailed, + NoUTXOs, + TransactionSubmissionFailed(String), SerializerError, } @@ -35,8 +35,10 @@ impl fmt::Display for Error { Error::BurnchainError => write!(f, "Burnchain error"), Error::MaxFeeRateExceeded => write!(f, "Max fee rate 
exceeded"), Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), - Error::NoUtxos => write!(f, "No UTXOs available"), - Error::TransactionSubmissionFailed => write!(f, "Transaction submission failed"), + Error::NoUTXOs => write!(f, "No UTXOs available"), + Error::TransactionSubmissionFailed(e) => { + write!(f, "Transaction submission failed: {e}") + } Error::SerializerError => write!(f, "Serializer error"), } } diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 6e57b8023e8..7cda49e10d9 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -33,6 +33,7 @@ use stacks_common::types::StacksEpochId; use super::{Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::burnchains::Error as BurnchainsError; use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::run_loop::nakamoto::{Globals, RunLoop}; @@ -93,7 +94,7 @@ pub enum Error { /// Something unexpected happened (e.g., hash mismatches) UnexpectedChainState, /// A burnchain operation failed when submitting it to the burnchain - BurnchainSubmissionFailed, + BurnchainSubmissionFailed(BurnchainsError), /// A new parent has been discovered since mining started NewParentDiscovered, /// A failure occurred while constructing a VRF Proof diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 9ae219659c5..5b04eb6ff61 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1050,7 +1050,7 @@ impl RelayerThread { ) .map_err(|e| { warn!("Failed to submit block-commit bitcoin transaction: {}", e); - NakamotoNodeError::BurnchainSubmissionFailed + NakamotoNodeError::BurnchainSubmissionFailed(e) })?; info!( From d85c52e040ae704c41f948812d06d38d9a64be63 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 16:31:37 -0400 Subject: [PATCH 520/910] chore: propagate codec error --- .../src/burnchains/bitcoin_regtest_controller.rs | 8 ++++---- testnet/stacks-node/src/burnchains/mod.rs | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 631651c9070..c4718cc68a1 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -998,7 +998,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; @@ -1084,7 +1084,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; @@ -1165,7 +1165,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; @@ -1332,7 +1332,7 @@ impl 
BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 7cfde884ecb..0c9446304d4 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -10,6 +10,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::BlockstackOperationType; use stacks::chainstate::burn::BlockSnapshot; use stacks::core::{StacksEpoch, StacksEpochId}; +use stacks_common::codec::Error as CodecError; pub use self::bitcoin_regtest_controller::{make_bitcoin_indexer, BitcoinRegtestController}; pub use self::mocknet_controller::MocknetController; @@ -24,7 +25,7 @@ pub enum Error { IdenticalOperation, NoUTXOs, TransactionSubmissionFailed(String), - SerializerError, + SerializerError(CodecError), } impl fmt::Display for Error { @@ -39,7 +40,7 @@ impl fmt::Display for Error { Error::TransactionSubmissionFailed(e) => { write!(f, "Transaction submission failed: {e}") } - Error::SerializerError => write!(f, "Serializer error"), + Error::SerializerError(e) => write!(f, "Serializer error: {e}"), } } } From 1c13216f40e101695442d889d53b65984c6d8e9c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 16:33:03 -0400 Subject: [PATCH 521/910] chore: formatting --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 5b04eb6ff61..707237f7c67 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1049,7 +1049,7 @@ impl RelayerThread { 1, ) .map_err(|e| { - warn!("Failed to submit block-commit bitcoin transaction: {}", e); + warn!("Failed to submit block-commit bitcoin transaction: {e}"); NakamotoNodeError::BurnchainSubmissionFailed(e) })?; From eb52ad90d9603c7d6556a45856543807213baeab Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 12 Sep 2024 04:43:58 +0300 Subject: [PATCH 522/910] fix flash block integration test --- .../src/tests/nakamoto_integrations.rs | 80 ++++++++++--------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17f068f4744..6aca29b3156 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -100,8 +100,8 @@ use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, - get_pox_info, next_block_and_wait, next_block_and_wait_with_timeout, - run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, + get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, + wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -1749,13 +1749,43 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &mut btc_regtest_controller, ); - // Mine 3 Bitcoin blocks quickly without waiting for Stacks blocks to be 
processed - for _ in 0..3 { + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let block_height_before_mining = tip.block_height; + + // Mine 3 Bitcoin blocks rapidly without waiting for Stacks blocks to be processed. + // These blocks won't be considered "mined" until the next_block_and_wait call. + for _i in 0..3 { btc_regtest_controller.build_next_block(1); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Verify that the canonical burn chain tip hasn't advanced yet + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + assert_eq!(tip.block_height, block_height_before_mining); } info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + // Mine a new block and wait for it to be processed. + // This should update the canonical burn chain tip to include all 4 new blocks. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // Verify that the burn chain tip has advanced by 4 blocks + assert_eq!( + tip.block_height, + block_height_before_mining + 4, + "Burn chain tip should have advanced by 4 blocks" + ); + + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let (mut chainstate, _) = StacksChainState::open( @@ -1772,21 +1802,6 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { .unwrap() .stacks_block_height; - // query for prometheus metrics - #[cfg(feature = "monitoring_prom")] - { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); - } - info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); @@ -1883,24 +1898,17 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); - // Check that we aren't missing burn blocks + // Check that we have the expected burn blocks + // We expect to have blocks 220-230 and 234 onwards, with a gap for the flash blocks let bhh = u64::from(tip.burn_header_height); - test_observer::contains_burn_block_range(220..=bhh).unwrap(); - - // make sure prometheus returns an updated height - #[cfg(feature = "monitoring_prom")] - { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); - assert!(res.contains(&expected_result)); - } + test_observer::contains_burn_block_range(220..=230).unwrap(); + test_observer::contains_burn_block_range(234..=bhh).unwrap(); + // Verify that we're missing the expected flash blocks + assert!( + test_observer::contains_burn_block_range(231..=233).is_err(), + "Expected to be missing burn blocks 231-233 due to flash blocks" + ); + info!("Verified burn block ranges, including expected gap for flash blocks"); 
coord_channel .lock() From 4d3a200cb128a580426438b7dca9a930033dd5a4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 12 Sep 2024 11:48:51 -0500 Subject: [PATCH 523/910] test: assert block times increasing Co-authored-by: Brice Dobry --- .../src/tests/nakamoto_integrations.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5bd9ba87e71..4f09a7a1020 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2271,16 +2271,18 @@ fn correct_burn_outs() { "Blocks should be sorted by cycle number already" ); - let block_times: Vec = new_blocks_with_reward_set - .iter() - .filter_map(|block| block.get("block_time").and_then(|cn| cn.as_u64())) - .collect(); - // Assert that block_times are all greater than 0 - assert!(block_times.iter().all(|&t| t > 0)); - + let mut last_block_time = None; for block in new_blocks_with_reward_set.iter() { let cycle_number = block["cycle_number"].as_u64().unwrap(); let reward_set = block["reward_set"].as_object().unwrap(); + if let Some(block_time) = block["block_time"].as_u64() { + if let Some(last) = last_block_time { + assert!(block_time > last, "Block times should be increasing"); + } + last_block_time = Some(block_time); + } + let cycle_number = block["cycle_number"].as_u64().unwrap(); + let reward_set = block["reward_set"].as_object().unwrap(); if cycle_number < first_epoch_3_cycle { assert!( From df6eae3aef916b9a9a748726bf90b6bb42ef9765 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 12 Sep 2024 13:15:36 -0400 Subject: [PATCH 524/910] chore: add signer logs --- stacks-signer/src/chainstate.rs | 8 ++++++++ stacks-signer/src/v0/signer.rs | 1 + 2 files changed, 9 insertions(+) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index c35ceb67e03..49cf237a038 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -193,10 +193,18 @@ impl SortitionsView { .cur_sortition .is_timed_out(self.config.block_proposal_timeout, signer_db)? { + info!( + "Current miner timed out, marking as invalid."; + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } if let Some(last_sortition) = self.last_sortition.as_mut() { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { + info!( + "Last miner timed out, marking as invalid."; + "last_sortition_consensus_hash" => ?last_sortition.consensus_hash, + ); last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f51..82e6d78ea55 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -353,6 +353,7 @@ impl Signer { "{self}: received a block proposal for a new block. Submit block for validation. 
"; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), + "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); let mut block_info = BlockInfo::from(block_proposal.clone()); From 11c312eda80eac7023a0919a88487ab906ad84c0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 12 Sep 2024 13:15:48 -0400 Subject: [PATCH 525/910] chore: change default `nakamoto_attempt_time_ms` to 5s --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d1b115d9cf5..5ab9396ff4b 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2370,7 +2370,7 @@ impl Default for MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, - nakamoto_attempt_time_ms: 20_000, + nakamoto_attempt_time_ms: 5_000, probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, From bfdb6e42f9343f489680b158059ed9ecd1d2452e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 12 Sep 2024 12:53:32 -0500 Subject: [PATCH 526/910] fix: matches macro usage in block acceptance tests --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/tests/node.rs | 6 +++--- stackslib/src/net/api/postblock_v3.rs | 4 ++-- stackslib/src/net/relay.rs | 7 +++++++ stackslib/src/net/tests/mod.rs | 4 ++-- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index ddeea515735..469c72e777f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -341,7 +341,7 @@ fn replay_reward_cycle( .unwrap_or(BlockAcceptResponse::Rejected( "encountered error on acceptance".into(), )); - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 4377f74876d..aa00430f891 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -824,7 +824,7 @@ impl TestStacksNode { } else { BlockAcceptResponse::Rejected("try_to_process is false".into()) }; - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); coord.handle_new_nakamoto_stacks_block().unwrap(); processed_blocks.push(block_to_store.clone()); @@ -1247,7 +1247,7 @@ impl<'a> TestPeer<'a> { None, NakamotoBlockObtainMethod::Pushed, )?; - if !matches!(BlockAcceptResponse::Accepted, accepted) { + if !accepted.is_accepted() { return Ok(false); } let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); @@ -1491,7 +1491,7 @@ impl<'a> TestPeer<'a> { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {}", &block_id); self.coord.handle_new_nakamoto_stacks_block().unwrap(); diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 602e307fd47..9bd174d3220 100644 --- 
a/stackslib/src/net/api/postblock_v3.rs
+++ b/stackslib/src/net/api/postblock_v3.rs
@@ -29,7 +29,7 @@ use crate::net::http::{
 use crate::net::httpcore::{
     HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse,
 };
-use crate::net::relay::{BlockAcceptResponse, Relayer};
+use crate::net::relay::Relayer;
 use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState};
 
 pub static PATH: &'static str = "/v3/blocks/upload/";
@@ -187,7 +187,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler {
                     &accepted
                 );
                 StacksBlockAcceptedData {
-                    accepted: matches!(accepted, BlockAcceptResponse::Accepted),
+                    accepted: accepted.is_accepted(),
                     stacks_block_id: block.block_id(),
                 }
             }
diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs
index 123f78f422c..750651ec73a 100644
--- a/stackslib/src/net/relay.rs
+++ b/stackslib/src/net/relay.rs
@@ -528,6 +528,13 @@ pub enum BlockAcceptResponse {
     Rejected(String),
 }
 
+impl BlockAcceptResponse {
+    /// Does this response indicate that the block was accepted to the staging DB
+    pub fn is_accepted(&self) -> bool {
+        matches!(self, Self::Accepted)
+    }
+}
+
 impl Relayer {
     pub fn new(
         handle: NetworkHandle,
diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs
index d8ee197f420..09d150d8931 100644
--- a/stackslib/src/net/tests/mod.rs
+++ b/stackslib/src/net/tests/mod.rs
@@ -256,7 +256,7 @@ impl NakamotoBootPlan {
                     NakamotoBlockObtainMethod::Pushed,
                 )
                 .unwrap();
-                if matches!(BlockAcceptResponse::Accepted, accepted) {
+                if accepted.is_accepted() {
                     test_debug!("Accepted Nakamoto block {block_id} to other peer {}", i);
                     peer.coord.handle_new_nakamoto_stacks_block().unwrap();
                 } else {
@@ -293,7 +293,7 @@ impl NakamotoBootPlan {
                     NakamotoBlockObtainMethod::Pushed,
                 )
                 .unwrap();
-                if matches!(BlockAcceptResponse::Accepted, accepted) {
+                if accepted.is_accepted() {
                     test_debug!(
                         "Accepted malleablized Nakamoto block {block_id} to other peer {}",
                         i

From 16f127cb80a19d41cad75102e210f04b52b8e9a2 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Thu, 12 Sep 2024 14:21:12 -0400
Subject: [PATCH 527/910] chore: change default `burnchain.timeout` to 60s

This matches the hard-coded timeout that was previously in the code.
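For operators, a minimal sketch of what overriding this default looks like in
a node's TOML config (values are illustrative; per the subject line above,
`timeout` is expressed in seconds):

    [burnchain]
    # Seconds to wait on a bitcoind RPC call before giving up; the default
    # is now 60, matching the previously hard-coded value.
    timeout = 60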
---
 testnet/stacks-node/src/config.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index 5ab9396ff4b..611bd7cdaf1 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -1457,7 +1457,7 @@ impl BurnchainConfig {
             rpc_ssl: false,
             username: None,
             password: None,
-            timeout: 300,
+            timeout: 60,
             magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(),
             local_mining_public_key: None,
             process_exit_at_block_height: None,

From f065a189fce5b4320dc5292db5512b1769fc6d0f Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 12 Sep 2024 11:27:33 -0700
Subject: [PATCH 528/910] Add state machine fixes to allow moving from a state into the same state, and add tests

Signed-off-by: Jacinta Ferrant
---
 stacks-signer/src/client/stacks_client.rs |  8 +--
 stacks-signer/src/runloop.rs              |  8 +--
 stacks-signer/src/signerdb.rs             | 61 +++++++++++++++++++----
 stacks-signer/src/v0/signer.rs            | 32 +++++-------
 4 files changed, 70 insertions(+), 39 deletions(-)

diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs
index 85fa7fd34b4..5afec3f76e0 100644
--- a/stacks-signer/src/client/stacks_client.rs
+++ b/stacks-signer/src/client/stacks_client.rs
@@ -564,13 +564,13 @@ impl StacksClient {
                     warn!("Failed to parse the GetStackers error response: {e}");
                     backoff::Error::permanent(e.into())
                 })?;
-            if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE {
-                return Err(backoff::Error::transient(ClientError::NoSortitionOnChain));
+            if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE {
+                Err(backoff::Error::transient(ClientError::NoSortitionOnChain))
             } else {
                 warn!("Got error response ({status}): {}", error_data.err_msg);
-                return Err(backoff::Error::permanent(ClientError::RequestFailure(
+                Err(backoff::Error::permanent(ClientError::RequestFailure(
                     status,
-                )));
+                )))
             }
         };
         let stackers_response =
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index cb29221ba96..0ce706b3c42 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -430,10 +430,10 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLoop<Signer, T>
         if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) {
             self.refresh_signer_config(current_reward_cycle);
         }
-        if is_in_next_prepare_phase {
-            if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) {
-                self.refresh_signer_config(next_reward_cycle);
-            }
+        if is_in_next_prepare_phase
+            && !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle)
+        {
+            self.refresh_signer_config(next_reward_cycle);
         }
 
         self.cleanup_stale_signers(current_reward_cycle);
diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index 6f5b6c6e061..5e47f09193d 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -267,10 +267,11 @@ impl BlockInfo {
     /// Check if the block state transition is valid
     fn check_state(&self, state: BlockState) -> bool {
         let prev_state = &self.state;
+        if *prev_state == state {
+            return true;
+        }
         match state {
-            BlockState::Unprocessed => {
-                matches!(prev_state, BlockState::Unprocessed)
-            }
+            BlockState::Unprocessed => false,
             BlockState::LocallyAccepted => {
                 matches!(prev_state, BlockState::Unprocessed)
             }
@@ -687,7 +688,7 @@ impl SignerDb {
             .vote
             .as_ref()
             .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" });
-        let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?;
+        
let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, @@ -736,7 +737,7 @@ impl SignerDb { let qry = "INSERT OR REPLACE INTO block_signatures (signer_signature_hash, signature) VALUES (?1, ?2);"; let args = params![ block_sighash, - serde_json::to_string(signature).map_err(|e| DBError::SerializationError(e))? + serde_json::to_string(signature).map_err(DBError::SerializationError)? ]; debug!("Inserting block signature."; @@ -825,7 +826,7 @@ impl SignerDb { if broadcasted == 0 { return Ok(None); } - Ok(u64::try_from(broadcasted).ok()) + Ok(Some(broadcasted)) } /// Get the current state of a given block in the database @@ -1152,15 +1153,12 @@ mod tests { assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![]); db.add_block_signature(&block_id, &sig1).unwrap(); - assert_eq!( - db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone()] - ); + assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![sig1]); db.add_block_signature(&block_id, &sig2).unwrap(); assert_eq!( db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone(), sig2.clone()] + vec![sig1, sig2] ); } @@ -1223,4 +1221,45 @@ mod tests { 12345 ); } + #[test] + fn state_machine() { + let (mut block, _) = create_block(); + assert_eq!(block.state, BlockState::Unprocessed); + assert!(block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyAccepted).unwrap(); + assert_eq!(block.state, BlockState::LocallyAccepted); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::GloballyAccepted).unwrap(); + assert_eq!(block.state, BlockState::GloballyAccepted); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(!block.check_state(BlockState::LocallyAccepted)); + assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(!block.check_state(BlockState::GloballyRejected)); + + // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted + block.state = BlockState::LocallyRejected; + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(!block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::GloballyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(!block.check_state(BlockState::LocallyAccepted)); + assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(!block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 639ace66d25..d47f84d9fe3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -289,7 +289,7 @@ impl 
From<SignerConfig> for Signer {
             .signer_entries
             .signer_ids
             .iter()
-            .map(|(addr, id)| (*id, addr.clone()))
+            .map(|(addr, id)| (*id, *addr))
             .collect();
 
         let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect();
@@ -307,7 +307,7 @@ impl From<SignerConfig> for Signer {
                     signer_id, addr
                 );
             };
-            (addr.clone(), key_ids.len())
+            (*addr, key_ids.len())
         })
         .collect();
 
@@ -671,7 +671,7 @@ impl Signer {
         addrs: impl Iterator<Item = &StacksAddress>,
     ) -> u32 {
         let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| {
-            let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0);
+            let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0);
             signing_weight.saturating_add(*stacker_weight)
         });
         u32::try_from(signing_weight)
@@ -727,14 +727,10 @@ impl Signer {
         let signer_address = StacksAddress::p2pkh(self.mainnet, &public_key);
 
         // authenticate the signature -- it must be signed by one of the stacking set
-        let is_valid_sig = self
-            .signer_addresses
-            .iter()
-            .find(|addr| {
-                // it only matters that the address hash bytes match
-                signer_address.bytes == addr.bytes
-            })
-            .is_some();
+        let is_valid_sig = self.signer_addresses.iter().any(|addr| {
+            // it only matters that the address hash bytes match
+            signer_address.bytes == addr.bytes
+        });
 
         if !is_valid_sig {
             debug!("{self}: Receive block rejection with an invalid signature. Will not store.";
@@ -822,16 +818,12 @@ impl Signer {
         };
 
         // authenticate the signature -- it must be signed by one of the stacking set
-        let is_valid_sig = self
-            .signer_addresses
-            .iter()
-            .find(|addr| {
-                let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key);
-
-                // it only matters that the address hash bytes match
-                stacker_address.bytes == addr.bytes
-            })
-            .is_some();
+        let is_valid_sig = self.signer_addresses.iter().any(|addr| {
+            let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key);
+
+            // it only matters that the address hash bytes match
+            stacker_address.bytes == addr.bytes
+        });
 
         if !is_valid_sig {
             debug!("{self}: Receive invalid signature {signature}. Will not store.");

From 9883957ed105887efb228e9121dfe55241455e9f Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 12 Sep 2024 11:30:14 -0700
Subject: [PATCH 529/910] Do not modify self if we fail to move to the appropriate state

Signed-off-by: Jacinta Ferrant
---
 stacks-signer/src/signerdb.rs | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index 5e47f09193d..b9604726ceb 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -228,6 +228,7 @@ impl BlockInfo {
     /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't
     /// already set.
     pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> {
+        self.move_to(BlockState::LocallyAccepted)?;
         self.valid = Some(true);
         self.signed_over = true;
         if group_signed {
@@ -235,28 +236,31 @@ impl BlockInfo {
         } else {
             self.signed_self.get_or_insert(get_epoch_time_secs());
         }
-        self.move_to(BlockState::LocallyAccepted)
+        Ok(())
     }
 
     /// Mark this block as valid, signed over, and records a group timestamp in the block info if it wasn't
     /// already set.
pub fn mark_globally_accepted(&mut self) -> Result<(), String> {
+        self.move_to(BlockState::GloballyAccepted)?;
         self.valid = Some(true);
         self.signed_over = true;
         self.signed_group.get_or_insert(get_epoch_time_secs());
-        self.move_to(BlockState::GloballyAccepted)
+        Ok(())
     }
 
     /// Mark the block as locally rejected and invalid
     pub fn mark_locally_rejected(&mut self) -> Result<(), String> {
+        self.move_to(BlockState::LocallyRejected)?;
         self.valid = Some(false);
-        self.move_to(BlockState::LocallyRejected)
+        Ok(())
     }
 
     /// Mark the block as globally rejected and invalid
     pub fn mark_globally_rejected(&mut self) -> Result<(), String> {
+        self.move_to(BlockState::GloballyRejected)?;
         self.valid = Some(false);
-        self.move_to(BlockState::GloballyRejected)
+        Ok(())
     }
 
     /// Return the block's signer signature hash

From ba1ce0c7bcdc52836f54d960f8e9d777a4d6be01 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Thu, 12 Sep 2024 15:09:27 -0500
Subject: [PATCH 530/910] fix merge artifact

---
 stackslib/src/net/relay.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs
index e1498327c68..aebb2b7609f 100644
--- a/stackslib/src/net/relay.rs
+++ b/stackslib/src/net/relay.rs
@@ -934,7 +934,7 @@ impl Relayer {
         );
 
         if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) {
-            return Ok(false);
+            return Ok(BlockAcceptResponse::Rejected("Fault injection: ignoring block".into()))
         }
 
         // do we have this block? don't lock the DB needlessly if so.

From 80c069f3eab8858be0be8ff6c0436509db11ee5e Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Thu, 12 Sep 2024 15:18:39 -0500
Subject: [PATCH 531/910] cargo fmt

---
 stackslib/src/net/relay.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs
index aebb2b7609f..21699ad14c3 100644
--- a/stackslib/src/net/relay.rs
+++ b/stackslib/src/net/relay.rs
@@ -934,7 +934,9 @@ impl Relayer {
         );
 
         if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) {
-            return Ok(BlockAcceptResponse::Rejected("Fault injection: ignoring block".into()))
+            return Ok(BlockAcceptResponse::Rejected(
+                "Fault injection: ignoring block".into(),
+            ));
         }
 
         // do we have this block? don't lock the DB needlessly if so.

From d0bdfe3cb2c5bf90b043103d105fd54eba846986 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Thu, 12 Sep 2024 16:21:01 -0400
Subject: [PATCH 532/910] chore: change default `timeout` to 60s

This matches the actual timeout that was hard-coded before this change.
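As a rough sketch of how a per-request timeout like this is typically consumed
once parsed from config -- the helper below is purely illustrative and is not
the node's actual transport code (the name `build_rpc_client` and the use of
reqwest's blocking client, which the integration tests elsewhere in this
series also rely on, are assumptions):

    use std::time::Duration;

    // Illustrative only: bound each bitcoind RPC request by the configured
    // `[burnchain].timeout` value (in seconds); a slower request errors out
    // and is left to the caller's retry logic.
    fn build_rpc_client(timeout_secs: u64) -> reqwest::blocking::Client {
        reqwest::blocking::Client::builder()
            .timeout(Duration::from_secs(timeout_secs))
            .build()
            .expect("failed to build bitcoind RPC client")
    }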
--- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 73ebf231769..f2f366dedd8 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1458,7 +1458,7 @@ impl BurnchainConfig { rpc_ssl: false, username: None, password: None, - timeout: 5, + timeout: 60, magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), local_mining_public_key: None, process_exit_at_block_height: None, From b8f8b141255618b6755ca8f303726e1c52919a74 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 6 Sep 2024 11:17:10 -0400 Subject: [PATCH 533/910] chore: Add `signer_stats` table to chainstate DB --- stackslib/src/chainstate/nakamoto/mod.rs | 20 +++++ stackslib/src/chainstate/stacks/db/mod.rs | 95 +++++++++++++---------- 2 files changed, 72 insertions(+), 43 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4ffa76c1a72..e53c81f5dd9 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -268,6 +268,26 @@ lazy_static! { ADD COLUMN height_in_tenure; "#.into(), ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_4: [&'static str; 2] = [ + r#" + UPDATE db_config SET version = "7"; + "#, + // Add a `signer_stats` table to keep track of how many blocks have been signed by each signer + r#" + -- Table for signer stats + CREATE TABLE signer_stats ( + -- Signers public key + public_key TEXT NOT NULL, + -- Stacking rewards cycle ID + reward_cycle INTEGER NOT NULL, + -- Number of blocks signed during reward cycle + blocks_signed INTEGER DEFAULT 0 NOT NULL, + + PRIMARY KEY(public_key,reward_cycle) + ); + "#, + ]; } #[cfg(test)] diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index e8b5c7bb41b..198de0354e4 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -55,7 +55,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, - NAKAMOTO_CHAINSTATE_SCHEMA_3, + NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -679,7 +679,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "6"; +pub const CHAINSTATE_VERSION: &'static str = "7"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1062,55 +1062,64 @@ impl StacksChainState { return Err(Error::InvalidChainstateDB); } - if db_config.version != CHAINSTATE_VERSION { - while db_config.version != CHAINSTATE_VERSION { - match db_config.version.as_str() { - "1" => { - // migrate to 2 - info!("Migrating chainstate schema from version 1 to 2"); - for cmd in CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } + while db_config.version != CHAINSTATE_VERSION { + match db_config.version.as_str() { + "1" => { + // migrate to 2 + info!("Migrating chainstate schema from version 1 to 2"); + for cmd in CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - "2" => { - // migrate to 3 - info!("Migrating chainstate schema from version 2 to 3"); - for cmd in CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + } + 
"2" => { + // migrate to 3 + info!("Migrating chainstate schema from version 2 to 3"); + for cmd in CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } - "3" => { - // migrate to nakamoto 1 - info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { - tx.execute_batch(cmd)?; - } + } + "3" => { + // migrate to nakamoto 1 + info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { + tx.execute_batch(cmd)?; } - "4" => { - // migrate to nakamoto 2 - info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } + } + "4" => { + // migrate to nakamoto 2 + info!( + "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - "5" => { - // migrate to nakamoto 3 - info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + } + "5" => { + // migrate to nakamoto 3 + info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } - _ => { - error!( - "Invalid chain state database: expected version = {}, got {}", - CHAINSTATE_VERSION, db_config.version - ); - return Err(Error::InvalidChainstateDB); + } + "6" => { + // migrate to nakamoto 3 + info!( + "Migrating chainstate schema from version 6 to 7: adds signer_stats table" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_4.iter() { + tx.execute_batch(cmd)?; } } - db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + _ => { + error!( + "Invalid chain state database: expected version = {}, got {}", + CHAINSTATE_VERSION, db_config.version + ); + return Err(Error::InvalidChainstateDB); + } } + db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); } Ok(()) } From c454b35cabda0a6c1068e81e2e6fdcd8d3b830b0 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 6 Sep 2024 15:20:50 -0400 Subject: [PATCH 534/910] chore: Record block signers in `append_block()` --- stackslib/src/chainstate/nakamoto/mod.rs | 55 ++++++++++++++++-------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e53c81f5dd9..2450bac3c67 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -14,13 +14,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::HashMap; use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; -use clarity::types::PublicKey; -use clarity::util::secp256k1::{secp256k1_recover, Secp256k1PublicKey}; +use clarity::util::secp256k1::Secp256k1PublicKey; use clarity::vm::ast::ASTRules; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::{BurnStateDB, ClarityDatabase}; @@ -282,7 +281,7 @@ lazy_static! 
{ -- Stacking rewards cycle ID reward_cycle INTEGER NOT NULL, -- Number of blocks signed during reward cycle - blocks_signed INTEGER DEFAULT 0 NOT NULL, + blocks_signed INTEGER DEFAULT 1 NOT NULL, PRIMARY KEY(public_key,reward_cycle) ); @@ -3276,6 +3275,24 @@ impl NakamotoChainState { .map_err(ChainstateError::from) } + /// Keep track of how many blocks each signer is signing + fn record_block_signers( + tx: &mut ChainstateTx, + block: &NakamotoBlock, + reward_cycle: u64, + ) -> Result<(), ChainstateError> { + let signer_sighash = block.header.signer_signature_hash(); + for signer_signature in &block.header.signer_signature { + let signer_pubkey = + StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature) + .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?; + let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1"; + let args = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + tx.execute(sql, args)?; + } + Ok(()) + } + /// Begin block-processing and return all of the pre-processed state within a /// `SetupBlockResult`. /// @@ -4059,6 +4076,11 @@ impl NakamotoChainState { let new_block_id = new_tip.index_block_hash(); chainstate_tx.log_transactions_processed(&new_block_id, &tx_receipts); + let reward_cycle = pox_constants.block_height_to_reward_cycle( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ); + // store the reward set calculated during this block if it happened // NOTE: miner and proposal evaluation should not invoke this because // it depends on knowing the StacksBlockId. @@ -4067,19 +4089,12 @@ impl NakamotoChainState { if let Some(signer_calculation) = signer_set_calc { Self::write_reward_set(chainstate_tx, &new_block_id, &signer_calculation.reward_set)?; - let cycle_number = if let Some(cycle) = pox_constants.reward_cycle_of_prepare_phase( - first_block_height.into(), - chain_tip_burn_header_height.into(), - ) { - Some(cycle) - } else { - pox_constants - .block_height_to_reward_cycle( - first_block_height.into(), - chain_tip_burn_header_height.into(), - ) - .map(|cycle| cycle + 1) - }; + let cycle_number = pox_constants + .reward_cycle_of_prepare_phase( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ) + .or_else(|| reward_cycle.map(|cycle| cycle + 1)); if let Some(cycle) = cycle_number { reward_set_data = Some(RewardSetData::new( @@ -4089,6 +4104,12 @@ impl NakamotoChainState { } } + if let Some(reward_cycle) = reward_cycle { + Self::record_block_signers(chainstate_tx, block, reward_cycle)?; + } else { + warn!("No reward cycle found, skipping record_block_signers()"); + } + monitoring::set_last_block_transaction_count(u64::try_from(block.txs.len()).unwrap()); monitoring::set_last_execution_cost_observed(&block_execution_cost, &block_limit); From 32e74fb382c40b49fdec2b0b2477bdab41895599 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 6 Sep 2024 17:17:55 -0400 Subject: [PATCH 535/910] chore: Add `/v3/signer/` endpoint --- stackslib/src/net/api/getsigner.rs | 268 +++++++++++++++++++++++++++++ stackslib/src/net/api/mod.rs | 1 + 2 files changed, 269 insertions(+) create mode 100644 stackslib/src/net/api/getsigner.rs diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs new file mode 100644 index 00000000000..46a0e4229fe --- /dev/null +++ b/stackslib/src/net/api/getsigner.rs @@ -0,0 +1,268 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation 
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+use clarity::util::secp256k1::Secp256k1PublicKey;
+use regex::{Captures, Regex};
+use serde_json::json;
+use stacks_common::types::chainstate::StacksBlockId;
+use stacks_common::types::net::PeerHost;
+use stacks_common::util::hash::Sha256Sum;
+
+use crate::burnchains::Burnchain;
+use crate::chainstate::burn::db::sortdb::SortitionDB;
+use crate::chainstate::coordinator::OnChainRewardSetProvider;
+use crate::chainstate::stacks::boot::{
+    PoxVersions, RewardSet, POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME,
+};
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::chainstate::stacks::Error as ChainError;
+use crate::core::mempool::MemPoolDB;
+use crate::net::http::{
+    parse_json, Error, HttpBadRequest, HttpNotFound, HttpRequest, HttpRequestContents,
+    HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload,
+    HttpResponsePreamble, HttpServerError,
+};
+use crate::net::httpcore::{
+    HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp,
+    StacksHttpRequest, StacksHttpResponse,
+};
+use crate::net::p2p::PeerNetwork;
+use crate::net::{Error as NetError, StacksNodeState, TipRequest};
+use crate::util_lib::boot::boot_code_id;
+use crate::util_lib::db::Error as DBError;
+
+#[derive(Clone, Default)]
+pub struct GetSignerRequestHandler {
+    signer_pubkey: Option<Secp256k1PublicKey>,
+    reward_cycle: Option<u64>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct GetSignerResponse {
+    pub blocks_signed: u64,
+}
+
+pub enum GetSignerErrors {
+    NotAvailableYet(crate::chainstate::coordinator::Error),
+    Other(String),
+}
+
+impl GetSignerErrors {
+    pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again";
+    pub const OTHER_ERR_TYPE: &'static str = "other";
+
+    pub fn error_type_string(&self) -> &'static str {
+        match self {
+            Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE,
+            Self::Other(_) => Self::OTHER_ERR_TYPE,
+        }
+    }
+}
+
+impl From<&str> for GetSignerErrors {
+    fn from(value: &str) -> Self {
+        GetSignerErrors::Other(value.into())
+    }
+}
+
+impl std::fmt::Display for GetSignerErrors {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            GetSignerErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet. Err = {e:?}"),
+            GetSignerErrors::Other(msg) => write!(f, "{msg}")
+        }
+    }
+}
+
+/// Decode the HTTP request
+impl HttpRequest for GetSignerRequestHandler {
+    fn verb(&self) -> &'static str {
+        "GET"
+    }
+
+    fn path_regex(&self) -> Regex {
+        Regex::new(
+            r#"^/v3/stacker_set/(?P<signer_pubkey>[0-9a-f]{66})/(?P<cycle_num>[0-9]{1,10})$"#,
+        )
+        .unwrap()
+    }
+
+    fn metrics_identifier(&self) -> &str {
+        "/v3/signer/:signer_pubkey/:cycle_num"
+    }
+
+    /// Try to decode this request.
+    /// There's nothing to load here, so just make sure the request is well-formed.
+    fn try_parse_request(
+        &mut self,
+        preamble: &HttpRequestPreamble,
+        captures: &Captures,
+        query: Option<&str>,
+        _body: &[u8],
+    ) -> Result<HttpRequestContents, Error> {
+        if preamble.get_content_length() != 0 {
+            return Err(Error::DecodeError(
+                "Invalid Http request: expected 0-length body".into(),
+            ));
+        }
+
+        let Some(cycle_num_str) = captures.name("cycle_num") else {
+            return Err(Error::DecodeError(
+                "Missing in request path: `cycle_num`".into(),
+            ));
+        };
+        let Some(signer_pubkey_str) = captures.name("signer_pubkey") else {
+            return Err(Error::DecodeError(
+                "Missing in request path: `signer_pubkey`".into(),
+            ));
+        };
+
+        let cycle_num = u64::from_str_radix(cycle_num_str.into(), 10)
+            .map_err(|e| Error::DecodeError(format!("Failed to parse cycle number: {e}")))?;
+
+        let signer_pubkey = Secp256k1PublicKey::from_hex(signer_pubkey_str.into())
+            .map_err(|e| Error::DecodeError(format!("Failed to parse signer public key: {e}")))?;
+
+        self.signer_pubkey = Some(signer_pubkey);
+        self.reward_cycle = Some(cycle_num);
+
+        Ok(HttpRequestContents::new().query_string(query))
+    }
+}
+
+impl RPCRequestHandler for GetSignerRequestHandler {
+    /// Reset internal state
+    fn restart(&mut self) {
+        self.signer_pubkey = None;
+        self.reward_cycle = None;
+    }
+
+    /// Make the response
+    fn try_handle_request(
+        &mut self,
+        preamble: HttpRequestPreamble,
+        contents: HttpRequestContents,
+        node: &mut StacksNodeState,
+    ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> {
+        let tip = match node.load_stacks_chain_tip(&preamble, &contents) {
+            Ok(tip) => tip,
+            Err(error_resp) => {
+                return error_resp.try_into_contents().map_err(NetError::from);
+            }
+        };
+
+        let signer_pubkey = self
+            .signer_pubkey
+            .take()
+            .ok_or(NetError::SendError("Missing `signer_pubkey`".into()))?;
+
+        let reward_cycle = self
+            .reward_cycle
+            .take()
+            .ok_or(NetError::SendError("Missing `reward_cycle`".into()))?;
+
+        let result = node.with_node_state(|_network, _sortdb, _chainstate, _mempool, _rpc_args| {
+            // TODO
+            if true {
+                Ok(0u64)
+            } else {
+                Err("Something went wrong")
+            }
+        });
+
+        let response = match result {
+            Ok(response) => response,
+            Err(error) => {
+                return StacksHttpResponse::new_error(
+                    &preamble,
+                    &HttpNotFound::new(error.to_string()),
+                )
+                .try_into_contents()
+                .map_err(NetError::from);
+            }
+        };
+
+        let mut preamble = HttpResponsePreamble::ok_json(&preamble);
+        preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height()));
+        let body = HttpResponseContents::try_from_json(&response)?;
+        Ok((preamble, body))
+    }
+}
+
+impl HttpResponse for GetSignerRequestHandler {
+    fn try_parse_response(
+        &self,
+        preamble: &HttpResponsePreamble,
+        body: &[u8],
+    ) -> Result<HttpResponsePayload, Error> {
+        let response: GetSignerResponse = parse_json(preamble, body)?;
+        Ok(HttpResponsePayload::try_from_json(response)?)
+    }
+}
+
+impl StacksHttpRequest {
+    /// Make a new getsigner request to this endpoint
+    pub fn new_getsigner(
+        host: PeerHost,
+        signer_pubkey: &Secp256k1PublicKey,
+        cycle_num: u64,
+        tip_req: TipRequest,
+    ) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "GET".into(),
+            format!("/v3/signer/{}/{cycle_num}", signer_pubkey.to_hex()),
+            HttpRequestContents::new().for_tip(tip_req),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    pub fn decode_signer(self) -> Result<GetSignerResponse, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let response: GetSignerResponse = serde_json::from_value(response_json)
+            .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?;
+        Ok(response)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::GetSignerErrors;
+
+    #[test]
+    // Test the formatting and error type strings of GetSignerErrors
+    fn get_signer_errors() {
+        let not_available_err = GetSignerErrors::NotAvailableYet(
+            crate::chainstate::coordinator::Error::PoXNotProcessedYet,
+        );
+        let other_err = GetSignerErrors::Other("foo".into());
+
+        assert_eq!(
+            not_available_err.error_type_string(),
+            GetSignerErrors::NOT_AVAILABLE_ERR_TYPE
+        );
+        assert_eq!(
+            other_err.error_type_string(),
+            GetSignerErrors::OTHER_ERR_TYPE
+        );
+
+        assert!(not_available_err
+            .to_string()
+            .starts_with("Could not read reward set"));
+        assert_eq!(other_err.to_string(), "foo".to_string());
+    }
+}
diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs
index 5bbc6281a24..6eff1a1c53f 100644
--- a/stackslib/src/net/api/mod.rs
+++ b/stackslib/src/net/api/mod.rs
@@ -55,6 +55,7 @@ pub mod getmicroblocks_indexed;
 pub mod getmicroblocks_unconfirmed;
 pub mod getneighbors;
 pub mod getpoxinfo;
+pub mod getsigner;
 pub mod getsortition;
 pub mod getstackerdbchunk;
 pub mod getstackerdbmetadata;

From 66ee0e1c4cfbb9e9e8fbea08ac4b34fc8b2cb88e Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Mon, 9 Sep 2024 10:38:42 -0400
Subject: [PATCH 536/910] chore: Implement DB read for `/v3/signer/` endpoint

---
 stackslib/src/chainstate/nakamoto/mod.rs | 30 +++++++-
 stackslib/src/net/api/getsigner.rs       | 89 +++---------------------
 2 files changed, 39 insertions(+), 80 deletions(-)

diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs
index 2450bac3c67..aa3722505b2 100644
--- a/stackslib/src/chainstate/nakamoto/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/mod.rs
@@ -30,6 +30,7 @@ use lazy_static::{__Deref, lazy_static};
 use rusqlite::blob::Blob;
 use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput};
 use rusqlite::{params, Connection, OpenFlags, OptionalExtension};
+use sha2::digest::typenum::Integer;
 use sha2::{Digest as Sha2Digest, Sha512_256};
 use stacks_common::bitvec::BitVec;
 use stacks_common::codec::{
@@ -3287,12 +3288,37 @@ impl NakamotoChainState {
             StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature)
                 .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?;
             let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1";
-            let args = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle];
-            tx.execute(sql, args)?;
+            let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle];
+            tx.execute(sql, params)?;
         }
         Ok(())
     }
 
+    /// Fetch number of blocks signed for a given
signer and reward cycle
+    /// This is the data tracked by `record_block_signers()`
+    pub fn get_signer_block_count(
+        chainstate_db: &Connection,
+        signer_pubkey: &Secp256k1PublicKey,
+        reward_cycle: u64,
+    ) -> Result<Option<u64>, ChainstateError> {
+        let sql =
+            "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2";
+        let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle];
+        chainstate_db
+            .query_row(sql, params, |row| {
+                let value: String = row.get(2)?;
+                value.parse::<u64>().map_err(|e| {
+                    rusqlite::Error::FromSqlConversionFailure(
+                        size_of::<u64>(),
+                        rusqlite::types::Type::Integer,
+                        e.into(),
+                    )
+                })
+            })
+            .optional()
+            .map_err(ChainstateError::from)
+    }
+
     /// Begin block-processing and return all of the pre-processed state within a
     /// `SetupBlockResult`.
     ///
diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs
index 46a0e4229fe..7a9b418cc3c 100644
--- a/stackslib/src/net/api/getsigner.rs
+++ b/stackslib/src/net/api/getsigner.rs
@@ -22,6 +22,7 @@ use stacks_common::util::hash::Sha256Sum;
 use crate::burnchains::Burnchain;
 use crate::chainstate::burn::db::sortdb::SortitionDB;
 use crate::chainstate::coordinator::OnChainRewardSetProvider;
+use crate::chainstate::nakamoto::NakamotoChainState;
 use crate::chainstate::stacks::boot::{
     PoxVersions, RewardSet, POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME,
 };
@@ -53,38 +54,6 @@ pub struct GetSignerResponse {
     pub blocks_signed: u64,
 }
 
-pub enum GetSignerErrors {
-    NotAvailableYet(crate::chainstate::coordinator::Error),
-    Other(String),
-}
-
-impl GetSignerErrors {
-    pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again";
-    pub const OTHER_ERR_TYPE: &'static str = "other";
-
-    pub fn error_type_string(&self) -> &'static str {
-        match self {
-            Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE,
-            Self::Other(_) => Self::OTHER_ERR_TYPE,
-        }
-    }
-}
-
-impl From<&str> for GetSignerErrors {
-    fn from(value: &str) -> Self {
-        GetSignerErrors::Other(value.into())
-    }
-}
-
-impl std::fmt::Display for GetSignerErrors {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            GetSignerErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet.
Err = {e:?}"),
-            GetSignerErrors::Other(msg) => write!(f, "{msg}")
-        }
-    }
-}
-
 /// Decode the HTTP request
 impl HttpRequest for GetSignerRequestHandler {
     fn verb(&self) -> &'static str {
@@ -128,12 +97,12 @@ impl HttpRequest for GetSignerRequestHandler {
             ));
         };
 
-        let cycle_num = u64::from_str_radix(cycle_num_str.into(), 10)
-            .map_err(|e| Error::DecodeError(format!("Failed to parse cycle number: {e}")))?;
-
         let signer_pubkey = Secp256k1PublicKey::from_hex(signer_pubkey_str.into())
             .map_err(|e| Error::DecodeError(format!("Failed to parse signer public key: {e}")))?;
 
+        let cycle_num = u64::from_str_radix(cycle_num_str.into(), 10)
+            .map_err(|e| Error::DecodeError(format!("Failed to parse cycle number: {e}")))?;
+
         self.signer_pubkey = Some(signer_pubkey);
         self.reward_cycle = Some(cycle_num);
 
@@ -152,16 +121,9 @@ impl RPCRequestHandler for GetSignerRequestHandler {
     fn try_handle_request(
         &mut self,
         preamble: HttpRequestPreamble,
-        contents: HttpRequestContents,
+        _contents: HttpRequestContents,
         node: &mut StacksNodeState,
    ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> {
-        let tip = match node.load_stacks_chain_tip(&preamble, &contents) {
-            Ok(tip) => tip,
-            Err(error_resp) => {
-                return error_resp.try_into_contents().map_err(NetError::from);
-            }
-        };
-
         let signer_pubkey = self
             .signer_pubkey
             .take()
@@ -172,13 +134,12 @@ impl RPCRequestHandler for GetSignerRequestHandler {
             .take()
             .ok_or(NetError::SendError("Missing `reward_cycle`".into()))?;
 
-        let result = node.with_node_state(|_network, _sortdb, _chainstate, _mempool, _rpc_args| {
-            // TODO
-            if true {
-                Ok(0u64)
-            } else {
-                Err("Something went wrong")
-            }
+        let result = node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| {
+            NakamotoChainState::get_signer_block_count(
+                &chainstate.index_conn(),
+                &signer_pubkey,
+                reward_cycle,
+            )
         });
 
         let response = match result {
@@ -238,31 +199,3 @@ impl StacksHttpResponse {
         Ok(response)
     }
 }
-
-#[cfg(test)]
-mod test {
-    use super::GetSignerErrors;
-
-    #[test]
-    // Test the formatting and error type strings of GetSignerErrors
-    fn get_signer_errors() {
-        let not_available_err = GetSignerErrors::NotAvailableYet(
-            crate::chainstate::coordinator::Error::PoXNotProcessedYet,
-        );
-        let other_err = GetSignerErrors::Other("foo".into());
-
-        assert_eq!(
-            not_available_err.error_type_string(),
-            GetSignerErrors::NOT_AVAILABLE_ERR_TYPE
-        );
-        assert_eq!(
-            other_err.error_type_string(),
-            GetSignerErrors::OTHER_ERR_TYPE
-        );
-
-        assert!(not_available_err
-            .to_string()
-            .starts_with("Could not read reward set"));
-        assert_eq!(other_err.to_string(), "foo".to_string());
-    }
-}

From ec195f907992bc20c4a8da38580f7ff4bb405fea Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Mon, 9 Sep 2024 13:48:52 -0400
Subject: [PATCH 537/910] docs: Add `/v3/signer/`

---
 CHANGELOG.md                             |  1 +
 docs/rpc-endpoints.md                    |  5 ++++
 docs/rpc/openapi.yaml                    | 29 +++++++++++++++++++++++
 stackslib/src/chainstate/nakamoto/mod.rs |  3 ++-
 4 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index eb6061cb9d4..add9240af3f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
 - `get-stacks-block-info?` added
 - `get-tenure-info?` added
 - `get-block-info?` removed
+- Added `/v3/signer/` endpoint
 
 ## [2.5.0.0.5]
 
diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md
index eea916a7812..9f0e09fd20b 100644
--- a/docs/rpc-endpoints.md
+++ b/docs/rpc-endpoints.md
@@ -533,3 +533,8 @@ highest
sortition), `reward_cycle` identifies the reward cycle number of this
 tenure, `tip_block_id` identifies the highest-known block in this tenure, and
 `tip_height` identifies that block's height.
+
+### GET /v3/signer/[Signer Pubkey]/[Reward Cycle]
+
+Get number of blocks signed by signer during a given reward cycle
+
+Returns a non-negative integer
diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml
index 6fc49859679..9492fd785d5 100644
--- a/docs/rpc/openapi.yaml
+++ b/docs/rpc/openapi.yaml
@@ -675,3 +675,32 @@ paths:
           schema:
             type: string
 
+  /v3/signer/{signer}/{cycle_number}:
+    get:
+      summary: Get number of blocks signed by signer during a given reward cycle
+      tags:
+        - Blocks
+        - Signers
+      operationId: get_signer
+      description: Get number of blocks signed by signer during a given reward cycle
+      parameters:
+        - name: signer
+          in: path
+          required: true
+          description: Hex-encoded compressed Secp256k1 public key of signer
+          schema:
+            type: string
+        - name: cycle_number
+          in: path
+          required: true
+          description: Reward cycle number
+          schema:
+            type: integer
+      responses:
+        200:
+          description: Number of blocks signed
+          content:
+            text/plain:
+              schema:
+                type: integer
+                example: 7
\ No newline at end of file
diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs
index aa3722505b2..ff38a476912 100644
--- a/stackslib/src/chainstate/nakamoto/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/mod.rs
@@ -3300,7 +3300,7 @@ impl NakamotoChainState {
         chainstate_db: &Connection,
         signer_pubkey: &Secp256k1PublicKey,
         reward_cycle: u64,
-    ) -> Result<Option<u64>, ChainstateError> {
+    ) -> Result<u64, ChainstateError> {
         let sql =
             "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2";
         let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle];
@@ -3316,6 +3316,7 @@ impl NakamotoChainState {
             })
         })
         .optional()
+        .map(Option::unwrap_or_default) // It's fine to map `NONE` to `0`, because it's impossible to have `Some(0)`
         .map_err(ChainstateError::from)
     }
 
From 99f40cf944c844e4bb83e7187c7d5ecde3fb7829 Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Thu, 12 Sep 2024 14:57:47 -0400
Subject: [PATCH 538/910] test: Add integration test for `/v3/signer` endpoint

---
 .github/workflows/bitcoin-tests.yml    |   1 +
 .../src/tests/nakamoto_integrations.rs | 130 ++++++++++++++++++
 2 files changed, 131 insertions(+)

diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index a5710c7c661..713ab5a986b 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -113,6 +113,7 @@ jobs:
           - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles
           - tests::nakamoto_integrations::utxo_check_on_startup_panic
           - tests::nakamoto_integrations::utxo_check_on_startup_recover
+          - tests::nakamoto_integrations::v3_signer_api_endpoint
          - tests::signer::v0::multiple_miners_with_nakamoto_blocks
          - tests::signer::v0::partial_tenure_fork # Do not run this one until we figure out why it fails in CI
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index b9b9c9844d6..8296c153d4c 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -7910,3 +7910,133 @@ fn utxo_check_on_startup_recover() {
     run_loop_stopper.store(false, Ordering::SeqCst);
     run_loop_thread.join().unwrap();
 }
+
+/// Test `/v3/signer` API endpoint
+///
+/// This endpoint returns a count of how many
blocks a signer has signed during a given reward cycle
+#[test]
+#[ignore]
+fn v3_signer_api_endpoint() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut conf, _miner_account) = naka_neon_integration_conf(None);
+    let password = "12345".to_string();
+    conf.connection_options.auth_token = Some(password.clone());
+    let stacker_sk = setup_stacker(&mut conf);
+    let signer_sk = Secp256k1PrivateKey::new();
+    let signer_addr = tests::to_addr(&signer_sk);
+    let signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk);
+    conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000);
+
+    // only subscribe to the block proposal events
+    test_observer::spawn();
+    let observer_port = test_observer::EVENT_OBSERVER_PORT;
+    conf.events_observers.insert(EventObserverConfig {
+        endpoint: format!("localhost:{observer_port}"),
+        events_keys: vec![EventKeyType::BlockProposal],
+    });
+
+    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap();
+    let run_loop_stopper = run_loop.get_termination_switch();
+    let Counters {
+        blocks_processed,
+        naka_submitted_commits: commits_submitted,
+        naka_proposed_blocks: proposals_submitted,
+        ..
+    } = run_loop.counters();
+
+    let coord_channel = run_loop.coordinator_channels();
+
+    let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
+    let mut signers = TestSigners::new(vec![signer_sk.clone()]);
+    wait_for_runloop(&blocks_processed);
+    boot_to_epoch_3(
+        &conf,
+        &blocks_processed,
+        &[stacker_sk],
+        &[signer_sk],
+        &mut Some(&mut signers),
+        &mut btc_regtest_controller,
+    );
+
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    blind_signer(&conf, &signers, proposals_submitted);
+    wait_for_first_naka_block_commit(60, &commits_submitted);
+
+    // TODO (hack) instantiate the sortdb in the burnchain
+    _ = btc_regtest_controller.sortdb_mut();
+
+    info!("------------------------- Setup finished, run test -------------------------");
+
+    let naka_tenures = 20;
+    let pre_naka_reward_cycle = 1;
+    let http_origin = format!("http://{}", &conf.node.rpc_bind);
+
+    let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| {
+        let url = format!(
+            "{http_origin}/v3/signer/{pk}/{reward_cycle}",
+            pk = pubkey.to_hex()
+        );
+        info!("Sending GET {url}");
+        reqwest::blocking::get(url)
+            .unwrap_or_else(|e| panic!("GET request failed: {e}"))
+            .text()
+            .expect("Empty response")
+            .parse::<u64>()
+            .unwrap_or_else(|e| panic!("Failed to parse response as `u64`: {e}"))
+    };
+
+    // Check reward cycle 1, should be 0 (pre-nakamoto)
+    let blocks_signed_pre_naka = get_v3_signer(&signer_pubkey, pre_naka_reward_cycle);
+    assert_eq!(blocks_signed_pre_naka, 0);
+
+    // Keep track of reward cycles encountered
+    let mut reward_cycles = HashSet::new();
+
+    // Mine some nakamoto tenures
+    for _ in 0..naka_tenures {
+        next_block_and_mine_commit(
+            &mut btc_regtest_controller,
+            60,
+            &coord_channel,
+            &commits_submitted,
+        )
+        .unwrap();
+        let block_height = btc_regtest_controller.get_headers_height();
+        let reward_cycle = btc_regtest_controller
+            .get_burnchain()
+            .block_height_to_reward_cycle(block_height)
+            .unwrap();
+        reward_cycles.insert(reward_cycle);
+    }
+
+    // Make sure we got a couple cycles
+    assert!(reward_cycles.len() > 1);
+    assert!(!reward_cycles.contains(&pre_naka_reward_cycle));
+
+    // Since we have only one signer, it must be signing at least 1 block per reward cycle
+    for reward_cycle in reward_cycles.into_iter() {
+        let blocks_signed = get_v3_signer(&signer_pubkey, reward_cycle);
+        assert_ne!(blocks_signed, 0);
+    }
+
+    info!("------------------------- Test finished, clean up -------------------------");
+
+    coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper.store(false, Ordering::SeqCst);
+
+    run_loop_thread.join().unwrap();
+}

From 25008560b6f8a1969e6aaa1749d8ca2e8aa518b3 Mon Sep 17 00:00:00 2001
From: Jeff Bencin
Date: Thu, 12 Sep 2024 17:52:03 -0400
Subject: [PATCH 539/910] fix: Various fixes so that `/v3/signer/` endpoint works now

---
 stackslib/src/chainstate/nakamoto/mod.rs  | 11 +----------
 stackslib/src/chainstate/stacks/db/mod.rs | 16 ++++++++--------
 stackslib/src/net/api/getsigner.rs        |  6 ++----
 stackslib/src/net/api/mod.rs              |  1 +
 .../src/tests/nakamoto_integrations.rs    |  5 +++--
 5 files changed, 15 insertions(+), 24 deletions(-)

diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs
index ff38a476912..821af99ed0e 100644
--- a/stackslib/src/chainstate/nakamoto/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/mod.rs
@@ -3305,16 +3305,7 @@ impl NakamotoChainState {
             "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2";
         let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle];
         chainstate_db
-            .query_row(sql, params, |row| {
-                let value: String = row.get(2)?;
-                value.parse::<u64>().map_err(|e| {
-                    rusqlite::Error::FromSqlConversionFailure(
-                        size_of::<u64>(),
-                        rusqlite::types::Type::Integer,
-                        e.into(),
-                    )
-                })
-            })
+            .query_row(sql, params, |row| row.get("blocks_signed"))
             .optional()
             .map(Option::unwrap_or_default) // It's fine to map `NONE` to `0`, because it's impossible to have `Some(0)`
             .map_err(ChainstateError::from)
diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs
index 198de0354e4..e829a8c0309 100644
--- a/stackslib/src/chainstate/stacks/db/mod.rs
+++ b/stackslib/src/chainstate/stacks/db/mod.rs
@@ -298,14 +298,14 @@ impl DBConfig {
         });
         match epoch_id {
             StacksEpochId::Epoch10 => true,
-            StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 6,
-            StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 6,
-            StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 6,
-            StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 6,
-            StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 6,
-            StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 6,
-            StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 6,
-            StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 6,
+            StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 7,
+            StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 7,
+            StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 7,
+            StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 7,
+            StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 7,
+            StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 7,
+            StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 7,
+            StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 7,
         }
     }
 }
diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs
index 7a9b418cc3c..1231e195c1b 100644
---
a/stackslib/src/net/api/getsigner.rs
+++ b/stackslib/src/net/api/getsigner.rs
@@ -61,10 +61,8 @@ impl HttpRequest for GetSignerRequestHandler {
     }
 
     fn path_regex(&self) -> Regex {
-        Regex::new(
-            r#"^/v3/stacker_set/(?P<signer_pubkey>[0-9a-f]{66})/(?P<cycle_num>[0-9]{1,10})$"#,
-        )
-        .unwrap()
+        Regex::new(r#"^/v3/signer/(?P<signer_pubkey>[0-9a-f]{66})/(?P<cycle_num>[0-9]{1,10})$"#)
+            .unwrap()
     }
 
     fn metrics_identifier(&self) -> &str {
diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs
index 6eff1a1c53f..eac48761153 100644
--- a/stackslib/src/net/api/mod.rs
+++ b/stackslib/src/net/api/mod.rs
@@ -125,6 +125,7 @@ impl StacksHttp {
         self.register_rpc_endpoint(
             gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(),
         );
+        self.register_rpc_endpoint(getsigner::GetSignerRequestHandler::default());
         self.register_rpc_endpoint(
             liststackerdbreplicas::RPCListStackerDBReplicasRequestHandler::new(),
         );
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 8296c153d4c..e241c083261 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -7983,14 +7983,15 @@ fn v3_signer_api_endpoint() {
     let http_origin = format!("http://{}", &conf.node.rpc_bind);
 
     let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| {
-        let url = format!(
+        let url = &format!(
             "{http_origin}/v3/signer/{pk}/{reward_cycle}",
             pk = pubkey.to_hex()
         );
-        info!("Sending GET {url}");
+        info!("Send request: GET {url}");
         reqwest::blocking::get(url)
             .unwrap_or_else(|e| panic!("GET request failed: {e}"))
             .text()
+            .inspect(|response| info!("Received response: GET {url} -> {response}"))
             .expect("Empty response")
             .parse::<u64>()
             .unwrap_or_else(|e| panic!("Failed to parse response as `u64`: {e}"))

From 6945aa8a7a68b1f3107e95d602149730741939ad Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 12 Sep 2024 20:48:13 -0400
Subject: [PATCH 540/910] chore: address PR feedback -- the signer coordinator
 should exit immediately on burnchain tip change

---
 stacks-signer/src/signerdb.rs                 |  5 ++++-
 .../src/nakamoto_node/sign_coordinator.rs     | 20 ++++---------------
 2 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index 6f5b6c6e061..06b1e2efbe6 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -272,7 +272,10 @@ impl BlockInfo {
                 matches!(prev_state, BlockState::Unprocessed)
             }
             BlockState::LocallyAccepted => {
-                matches!(prev_state, BlockState::Unprocessed)
+                matches!(
+                    prev_state,
+                    BlockState::Unprocessed | BlockState::LocallyAccepted
+                )
             }
             BlockState::LocallyRejected => {
                 matches!(prev_state, BlockState::Unprocessed)
diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
index e86bb724e50..29a64cfb27f 100644
--- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
+++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
@@ -17,7 +17,7 @@ use std::collections::BTreeMap;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::Receiver;
 use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Duration;
 
 use hashbrown::{HashMap, HashSet};
 use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0};
@@ -78,7 +78,6 @@ pub struct SignCoordinator {
     signer_entries: HashMap<u32, NakamotoSignerEntry>,
     weight_threshold: u32,
     total_weight: u32,
-    config: Config,
     keep_running: Arc<AtomicBool>,
     pub
next_signer_bitvec: BitVec<4000>, } @@ -310,7 +309,6 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, - config: config.clone(), keep_running, }; return Ok(sign_coordinator); @@ -333,7 +331,6 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, - config: config.clone(), keep_running, }) } @@ -752,8 +749,6 @@ impl SignCoordinator { "threshold" => self.weight_threshold, ); - let mut new_burn_tip_ts = None; - loop { // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold @@ -774,16 +769,9 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if new_burn_tip_ts.is_none() { - if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { - new_burn_tip_ts = Some(Instant::now()); - } - } - if let Some(ref new_burn_tip_ts) = new_burn_tip_ts.as_ref() { - if new_burn_tip_ts.elapsed() >= self.config.miner.wait_on_interim_blocks { - debug!("SignCoordinator: Exiting due to new burnchain tip"); - return Err(NakamotoNodeError::BurnchainTipChanged); - } + if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); } // one of two things can happen: From c77c024db6192cf8bbbac99249fd985418d246d5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 13 Sep 2024 10:52:47 -0400 Subject: [PATCH 541/910] chore: add info to signature gathering failure logs --- .../stacks-node/src/nakamoto_node/miner.rs | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index c3a1baea600..ef2a5e609dd 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -358,22 +358,32 @@ impl BlockMinerThread { &mut attempts, ) { Ok(x) => x, - Err(e) => { - match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"); - return Err(e); - } - _ => { - error!("Error while gathering signatures: {e:?}. Will try mining again."); - continue; - } + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); } - } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + error!("Error while gathering signatures: {e:?}. 
Will try mining again."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + continue; + } + }, }; new_block.header.signer_signature = signer_signature; From d8ac38b270e8f168e1f4fff81ef49feebeb85e1e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 11:31:09 -0500 Subject: [PATCH 542/910] test: fix nakamoto_integrations::mock_mining() --- .../src/tests/nakamoto_integrations.rs | 65 ++++++++++++------- 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 583fea43092..d696c53688d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -99,9 +99,9 @@ use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, - get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, - wait_for_runloop, + call_read_only, get_account, get_account_result, get_chain_info_opt, get_chain_info_result, + get_neighbors, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, + test_observer, wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -7723,7 +7723,21 @@ fn mock_mining() { .spawn(move || follower_run_loop.start(None, 0)) .unwrap(); - debug!("Booted follower-thread"); + info!("Booting follower-thread, waiting for the follower to sync to the chain tip"); + + wait_for(120, || { + let Some(miner_node_info) = get_chain_info_opt(&naka_conf) else { + return Ok(false); + }; + let Some(follower_node_info) = get_chain_info_opt(&follower_conf) else { + return Ok(false); + }; + Ok(miner_node_info.stacks_tip_height == follower_node_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + let miner_node_info = get_chain_info(&naka_conf); + let follower_node_info = get_chain_info(&follower_conf); + info!("Node heights"; "miner" => miner_node_info.stacks_tip_height, "follower" => follower_node_info.stacks_tip_height); // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { @@ -7767,25 +7781,26 @@ fn mock_mining() { last_tip_height = info.stacks_tip_height; } - let mock_miner_timeout = Instant::now(); - while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before - { - if mock_miner_timeout.elapsed() >= Duration::from_secs(60) { - panic!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - ); - } - thread::sleep(Duration::from_millis(100)); - } + let miner_node_info = get_chain_info(&naka_conf); + let follower_node_info = get_chain_info(&follower_conf); + info!("Node heights"; "miner" => miner_node_info.stacks_tip_height, "follower" => follower_node_info.stacks_tip_height); - let start_time = Instant::now(); - while commits_submitted.load(Ordering::SeqCst) <= commits_before { - if start_time.elapsed() >= Duration::from_secs(20) { - panic!("Timed out waiting for block-commit"); - } - thread::sleep(Duration::from_millis(100)); - } + wait_for(60, || { + Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) + > 
follower_naka_mined_blocks_before) + }) + .expect(&format!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + )); + + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .expect(&format!( + "Timed out waiting for block-commit {}", + commits_before + 1 + )); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -7809,9 +7824,9 @@ fn mock_mining() { // Check follower's mock miner let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; - assert_eq!( - blocks_mock_mined, tenure_count, - "Should have mock mined `tenure_count` nakamoto blocks" + assert!( + blocks_mock_mined > tenure_count, + "Should have mock mined more than `tenure_count` nakamoto blocks" ); // wait for follower to reach the chain tip From 16bad25cd6667c506eb8364c78f0aabcf7954695 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 13:08:57 -0500 Subject: [PATCH 543/910] test: fix signer::v0::end_of_tenure() --- testnet/stacks-node/src/tests/signer/v0.rs | 38 ++++++---------------- 1 file changed, 10 insertions(+), 28 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 229a99fe6bf..5a1b5082289 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1926,17 +1926,13 @@ fn end_of_tenure() { // give the system a chance to mine a Nakamoto block // But it doesn't have to mine one for this test to succeed? - let start = Instant::now(); - while start.elapsed() <= short_timeout { + wait_for(short_timeout.as_secs(), || { let mined_blocks = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - if mined_blocks > blocks_before { - break; - } - sleep_ms(100); - } + Ok(mined_blocks > blocks_before) + }); info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -1957,10 +1953,7 @@ fn end_of_tenure() { .running_nodes .nakamoto_blocks_proposed .load(Ordering::SeqCst); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + let blocks_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; let info = get_chain_info(&signer_test.running_nodes.conf); let start_height = info.stacks_tip_height; @@ -2020,29 +2013,18 @@ fn end_of_tenure() { info!("Block proposed and burn blocks consumed.
Verifying that stacks block is still not processed"); assert_eq!( - signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst), + get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height, blocks_before ); info!("Unpausing block validation and waiting for block to be processed"); // Disable the stall and wait for the block to be processed TEST_VALIDATE_STALL.lock().unwrap().replace(false); - let start_time = Instant::now(); - while signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst) - <= blocks_before - { - assert!( - start_time.elapsed() <= short_timeout, - "Timed out waiting for block to be mined" - ); - std::thread::sleep(Duration::from_millis(100)); - } + wait_for(short_timeout.as_secs(), || { + let processed_now = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + Ok(processed_now > blocks_before) + }) + .expect("Timed out waiting for block to be mined"); let info = get_chain_info(&signer_test.running_nodes.conf); assert_eq!(info.stacks_tip_height, start_height + 1); From 12a06ead0b138dd14db9fa55a8eeeb0f164cb04f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 13 Sep 2024 14:28:28 -0400 Subject: [PATCH 544/910] chore: add debug logs before all `stacks_node_client` use --- stacks-signer/src/client/stacks_client.rs | 28 +++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914bd..e8ad18fd879 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -219,6 +219,7 @@ impl StacksClient { &self, tx: &StacksTransaction, ) -> Result { + debug!("stacks_node_client: Getting estimated fee..."); let request = FeeRateEstimateRequestBody { estimated_len: Some(tx.tx_len()), transaction_payload: to_hex(&tx.payload.serialize_to_vec()), @@ -283,6 +284,11 @@ impl StacksClient { /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. 
pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { + debug!("stacks_node_client: Submitting block for validation..."; + "signer_sighash" => %block.header.signer_signature_hash(), + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, + ); let block_proposal = NakamotoBlockProposal { block, chain_id: self.chain_id, @@ -416,6 +422,10 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { + debug!("stacks_node_client: Getting tenure forking info..."; + "chosen_parent" => %chosen_parent, + "last_sortition" => %last_sortition, + ); let send_request = || { self.stacks_node_client .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) @@ -433,6 +443,7 @@ impl StacksClient { /// Get the sortition information for the latest sortition pub fn get_latest_sortition(&self) -> Result { + debug!("stacks_node_client: Getting latest sortition..."); let send_request = || { self.stacks_node_client .get(self.sortition_info_path()) @@ -452,6 +463,7 @@ impl StacksClient { /// Get the sortition information for a given sortition pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); let send_request = || { self.stacks_node_client .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) @@ -471,7 +483,7 @@ impl StacksClient { /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { - debug!("Getting stacks node info..."); + debug!("stacks_node_client: Getting peer info..."); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { @@ -521,6 +533,7 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result>, ClientError> { + debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); let timer = crate::monitoring::new_rpc_call_timer( &self.reward_set_path(reward_cycle), &self.http_origin, @@ -558,7 +571,7 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { - debug!("Getting pox data..."); + debug!("stacks_node_client: Getting pox data..."); #[cfg(feature = "monitoring_prom")] let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { @@ -606,7 +619,7 @@ impl StacksClient { &self, address: &StacksAddress, ) -> Result { - debug!("Getting account info..."); + debug!("stacks_node_client: Getting account info..."); let timer = crate::monitoring::new_rpc_call_timer(&self.accounts_path(address), &self.http_origin); let send_request = || { @@ -683,6 +696,10 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. 
pub fn post_block(&self, block: &NakamotoBlock) -> Result { + debug!("stacks_node_client: Posting block to the stacks node..."; + "block_id" => block.header.block_id(), + "block_height" => block.header.chain_length, + ); let response = self .stacks_node_client .post(format!( @@ -705,6 +722,9 @@ impl StacksClient { pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { let txid = tx.txid(); let tx = tx.serialize_to_vec(); + debug!("stacks_node_client: Submitting transaction to the stacks node..."; + "txid" => %txid, + ); let timer = crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); let send_request = || { @@ -734,7 +754,7 @@ impl StacksClient { function_name: &ClarityName, function_args: &[ClarityValue], ) -> Result { - debug!("Calling read-only function {function_name} with args {function_args:?}..."); + debug!("stacks_node_client: Calling read-only function {function_name} with args {function_args:?}..."); let args = function_args .iter() .filter_map(|arg| arg.serialize_to_hex().ok()) From ebd4234da987371b515b80791a9f7dc8b051ff8b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 13 Sep 2024 15:16:34 -0400 Subject: [PATCH 545/910] chore: fix typo in new logs --- stacks-signer/src/client/stacks_client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index e8ad18fd879..4bd844b181f 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -697,8 +697,8 @@ impl StacksClient { /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { debug!("stacks_node_client: Posting block to the stacks node..."; - "block_id" => block.header.block_id(), - "block_height" => block.header.chain_length, + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, ); let response = self .stacks_node_client From e89746a55c8932825e5989a1505ab7076656024e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 14:47:23 -0500 Subject: [PATCH 546/910] test: fix signer::v0::miner_forking() --- testnet/stacks-node/src/tests/signer/v0.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5a1b5082289..ecda37c66e2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1742,6 +1742,10 @@ fn miner_forking() { TEST_BROADCAST_STALL.lock().unwrap().replace(true); let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); + let rl1_commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); signer_test .running_nodes @@ -1755,6 +1759,15 @@ fn miner_forking() { Ok(commits_count > rl2_commits_before) }) .unwrap(); + // wait until a commit is submitted by run_loop_1 + wait_for(60, || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > rl1_commits_before) + }) + .unwrap(); // fetch the current sortition info let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); @@ -1815,7 +1828,10 @@ fn miner_forking() { let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) .into_iter() - .map(|header| (header.consensus_hash.clone(), header)) + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => 
%header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); + (header.consensus_hash.clone(), header) + }) .collect(); if had_tenure { From fcc24986c44c30e90bb6bd273d62c716825bb5f3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Sep 2024 13:45:58 -0700 Subject: [PATCH 547/910] Do not have an overflow occur in the test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a2d7be2bacd..af7d6462047 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3141,6 +3141,7 @@ fn min_gap_between_blocks() { .as_stacks_nakamoto() .unwrap() .timestamp; + assert!(blocks.len() >= 2, "Expected at least 2 mined blocks"); let penultimate_block = blocks.get(blocks.len() - 2).unwrap(); let penultimate_block_time = penultimate_block .anchored_header From 02cc0c5583bea83eb5129af25800bb3791be16aa Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 16:14:08 -0500 Subject: [PATCH 548/910] test: fix signer::v0::partial_tenure_fork() --- testnet/stacks-node/src/tests/signer/v0.rs | 114 ++++++++++++++++++--- 1 file changed, 100 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ecda37c66e2..687b06c2a57 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -72,8 +72,8 @@ use crate::tests::nakamoto_integrations::{ wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ - get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, - submit_tx_fallible, test_observer, + get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, + run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController, Config, Keychain}; @@ -1948,7 +1948,8 @@ fn end_of_tenure() { .nakamoto_blocks_mined .load(Ordering::SeqCst); Ok(mined_blocks > blocks_before) - }); + }) + .unwrap(); info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -3591,12 +3592,7 @@ fn partial_tenure_fork() { (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { - let node_host = if signer_config.endpoint.port() % 2 == 0 { - &node_1_rpc_bind - } else { - &node_2_rpc_bind - }; - signer_config.node_host = node_host.to_string(); + signer_config.node_host = node_1_rpc_bind.clone(); }, |config| { let localhost = "127.0.0.1"; @@ -3656,6 +3652,17 @@ fn partial_tenure_fork() { signer_test.boot_to_epoch_3(); let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + info!("------------------------- Reached Epoch 3.0 -------------------------"); // due to the random nature of mining sortitions, the way this test is structured @@ -3680,8 +3687,26 @@ fn partial_tenure_fork() { let 
mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_before_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); sleep_ms(1000); + + info!( + "Next tenure checking"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_1_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + ); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { let mined_1 = blocks_mined1.load(Ordering::SeqCst); @@ -3695,7 +3720,32 @@ fn partial_tenure_fork() { || mined_2 > mined_before_2) }, ) - .unwrap(); + .unwrap_or_else(|_| { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + error!( + "Next tenure failed to tick"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_1_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + "mined_1" => mined_1, + "mined_2" => mined_2, + "proposed_1" => proposed_1, + "proposed_2" => proposed_2, + ); + panic!(); + }); btc_blocks_mined += 1; let mined_1 = blocks_mined1.load(Ordering::SeqCst); @@ -3720,12 +3770,23 @@ fn partial_tenure_fork() { } // mine (or attempt to mine) the interim blocks - info!("Mining interim blocks"); for interim_block_ix in 0..inter_blocks_per_tenure { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + info!( + "Mining interim blocks"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_1_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + ); + // submit a tx so that the miner will mine an extra block let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; let transfer_tx = @@ -3743,7 +3804,32 @@ fn partial_tenure_fork() { || mined_1 > mined_before_1 || mined_2 > mined_before_2) }) - .unwrap(); + .unwrap_or_else(|_| { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + error!( + "Interim block failed to mine"; + "fork_initiated?"
=> fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_1_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + "mined_1" => mined_1, + "mined_2" => mined_2, + "proposed_1" => proposed_1, + "proposed_2" => proposed_2, + ); + panic!(); + }); } Err(e) => { if e.to_string().contains("TooMuchChaining") { @@ -3766,8 +3852,8 @@ fn partial_tenure_fork() { miner_2_tenures += 1; } info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures + "Miner 1 tenures: {}, Miner 2 tenures: {}, Miner 1 before: {}, Miner 2 before: {}", + miner_1_tenures, miner_2_tenures, mined_before_1, mined_before_2, ); let mined_1 = blocks_mined1.load(Ordering::SeqCst); From ff5b49d2fd9dc51fc75ad5e66f1afc9b0647464a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 16:32:30 -0500 Subject: [PATCH 549/910] reduce some flakiness in partial_tenure_fork() --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 687b06c2a57..66aad1a00f1 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3582,7 +3582,6 @@ fn partial_tenure_fork() { let node_2_p2p = 51025; let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); - let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); // All signers are listening to node 1
} = run_loop_2.counters(); + + signer_test.boot_to_epoch_3(); let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); - signer_test.boot_to_epoch_3(); let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; wait_for(120, || { From 8c077d5104766c102733a4006efdff47e9f0193a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Sep 2024 18:10:54 -0700 Subject: [PATCH 550/910] Fix confusion over what "configured for cycle" means by adding is_registered_for_cycle Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 9e1083047b5..ad29ded57a3 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -421,7 +421,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo "reward_cycle_before_refresh" => reward_cycle_before_refresh, "current_reward_cycle" => current_reward_cycle, "configured_for_current" => Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle), + "registered_for_current" => Self::is_registered_for_cycle(&self.stacks_signers, current_reward_cycle), "configured_for_next" => Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle), + "registered_for_next" => Self::is_registered_for_cycle(&self.stacks_signers, next_reward_cycle), "is_in_next_prepare_phase" => is_in_next_prepare_phase, ); @@ -456,6 +458,17 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo signer.reward_cycle() == reward_cycle } + fn is_registered_for_cycle( + stacks_signers: &HashMap<u64, ConfiguredSigner<Signer, T>>, + reward_cycle: u64, + ) -> bool { + let Some(signer) = stacks_signers.get(&(reward_cycle % 2)) else { + return false; + }; + signer.reward_cycle() == reward_cycle + && matches!(signer, ConfiguredSigner::RegisteredSigner(_)) + } + fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { From df5623aec39a1591f2f54b80b0b69590514ec56f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 20:36:04 -0500 Subject: [PATCH 551/910] test: fix CI flake in signer::v0::min_gap_between_blocks() --- testnet/stacks-node/src/tests/signer/v0.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 66aad1a00f1..0e62e0f05dd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3127,11 +3127,6 @@ fn min_gap_between_blocks() { signer_test.boot_to_epoch_3(); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - info!("Ensure that the first Nakamoto block is mined after the gap is exceeded"); let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); assert_eq!(blocks.len(), 1); @@ -3164,11 +3159,8 @@ fn min_gap_between_blocks() { info!("Submitted transfer tx and waiting for block to be processed.
Ensure it does not arrive before the gap is exceeded"); wait_for(60, || { - let blocks_processed = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - Ok(blocks_processed > blocks_before) + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + Ok(blocks.len() >= 2) }) .unwrap(); From 9822ef15484acdf04d2f364d5175e96ab4228a6e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 14 Sep 2024 08:41:11 -0500 Subject: [PATCH 552/910] test: attempt to fix signers::v0::signer_set_rollover() in CI --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 0e62e0f05dd..c4b1ae7d559 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -26,7 +26,6 @@ use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; -use rand::RngCore; use stacks::address::AddressHashMode; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -2850,11 +2849,8 @@ fn signer_set_rollover() { initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); let run_stamp = rand::random(); - let mut rng = rand::thread_rng(); - let mut buf = [0u8; 2]; - rng.fill_bytes(&mut buf); - let rpc_port = u16::from_be_bytes(buf.try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let rpc_port = 51024; let rpc_bind = format!("127.0.0.1:{}", rpc_port); // Setup the new signers that will take over From ec89da91a178d3f507c72cd04298b0a0532e7234 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 14 Sep 2024 13:14:19 -0500 Subject: [PATCH 553/910] CI: disable signer_set_rollover and mine_2_nakamoto_reward_cycles * these tests work fine locally, but in CI, they just timeout after 30 minutes without any logging --- .github/workflows/bitcoin-tests.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index db108d85f03..40afc15f3e0 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -86,7 +86,6 @@ jobs: - tests::nakamoto_integrations::nakamoto_attempt_time - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures - - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::end_of_tenure - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid @@ -95,7 +94,6 @@ jobs: - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 - tests::signer::v0::multiple_miners_mock_sign_epoch_25 - - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks @@ -120,6 +118,11 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork + # The following 2 tests work locally, but fail on CI. + # Locally, they both run consistently quite quickly, but on + # CI, they timeout without any logging. Disabling in CI for now. 
+ # - tests::signer::v0::mine_2_nakamoto_reward_cycles + # - tests::signer::v0::signer_set_rollover # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From b09ffc6ee42ce54c68e2085bc6a2bb85ac467b3b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 14 Sep 2024 14:06:39 -0500 Subject: [PATCH 554/910] chore: address PR review, uncomment CI tests --- .github/workflows/bitcoin-tests.yml | 10 ++++------ stackslib/src/net/api/postblock_proposal.rs | 4 +++- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 40afc15f3e0..69878ce403f 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -45,7 +45,8 @@ jobs: - tests::neon_integrations::liquid_ustx_integration - tests::neon_integrations::microblock_fork_poison_integration_test - tests::neon_integrations::microblock_integration_test - - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY + # Disable this flaky test. Microblocks are no longer supported anyways. + # - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY - tests::neon_integrations::microblock_limit_hit_integration_test - tests::neon_integrations::miner_submit_twice - tests::neon_integrations::mining_events_integration_test @@ -118,11 +119,8 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork - # The following 2 tests work locally, but fail on CI. - # Locally, they both run consistently quite quickly, but on - # CI, they timeout without any logging. Disabling in CI for now. - # - tests::signer::v0::mine_2_nakamoto_reward_cycles - # - tests::signer::v0::signer_set_rollover + - tests::signer::v0::mine_2_nakamoto_reward_cycles + - tests::signer::v0::signer_set_rollover # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 9c5ab712c3a..6c54b05342b 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -196,11 +196,13 @@ impl NakamotoBlockProposal { }) } + /// DO NOT CALL FROM CONSENSUS CODE + /// /// Check to see if a block builds atop the highest block in a given tenure. /// That is: /// - its parent must exist, and /// - its parent must be as high as the highest block in the given tenure. 
- pub(crate) fn check_block_builds_on_highest_block_in_tenure( + fn check_block_builds_on_highest_block_in_tenure( chainstate: &StacksChainState, tenure_id: &ConsensusHash, parent_block_id: &StacksBlockId, From 35f866be8c8e3415b9fc95a25fc514d99ef3b4c8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 14 Sep 2024 13:07:13 -0700 Subject: [PATCH 555/910] Do not have an infinite loop inside mine_and_verify_confirmed_naka_block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c4b1ae7d559..3f08b71f737 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -311,6 +311,11 @@ impl SignerTest { let mut signer_index = 0; let mut signature_index = 0; let mut signing_keys = HashSet::new(); + let start = Instant::now(); + debug!( + "Validating {} signatures against {num_signers} signers", + signature.len() + ); let validated = loop { // Since we've already checked `signature.len()`, this means we've // validated all the signatures in this loop @@ -341,6 +346,11 @@ impl SignerTest { signer_index += 1; signature_index += 1; } + // Shouldn't really ever timeout, but do this in case there is some sort of overflow/underflow happening. + assert!( + start.elapsed() < timeout, + "Timed out waiting to confirm block signatures" + ); }; assert!(validated); From 0fc9509d19b4e535a0ce9a607b38f73ac9d78fa7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sun, 15 Sep 2024 11:47:10 -0700 Subject: [PATCH 556/910] Fix unhandled result in block state machine move_to calls Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index b424c6e7d62..1b8a57abbb9 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -251,14 +251,14 @@ impl BlockInfo { /// Mark the block as locally rejected and invalid pub fn mark_locally_rejected(&mut self) -> Result<(), String> { - self.move_to(BlockState::LocallyRejected); + self.move_to(BlockState::LocallyRejected)?; self.valid = Some(false); Ok(()) } /// Mark the block as globally rejected and invalid pub fn mark_globally_rejected(&mut self) -> Result<(), String> { - self.move_to(BlockState::GloballyRejected); + self.move_to(BlockState::GloballyRejected)?; self.valid = Some(false); Ok(()) } From ec3aea8ceed0bf48ba70b9dc638084672cc4155e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 15 Sep 2024 12:55:57 -0700 Subject: [PATCH 557/910] fix: add missing prometheus timers to stacks_client --- stacks-signer/src/client/stacks_client.rs | 44 +++++++++++------------ 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 5afec3f76e0..75790dc54e5 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -439,13 +439,16 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { + let path = self.tenure_forking_info_path(chosen_parent, last_sortition); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { self.stacks_node_client - .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) + .get(&path) .send() 
.map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -456,16 +459,16 @@ impl StacksClient { /// Get the sortition information for the latest sortition pub fn get_latest_sortition(&self) -> Result { + let path = self.sortition_info_path(); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { - self.stacks_node_client - .get(self.sortition_info_path()) - .send() - .map_err(|e| { - warn!("Signer failed to request latest sortition"; "err" => ?e); - e - }) + self.stacks_node_client.get(&path).send().map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) }; let response = send_request()?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -475,16 +478,16 @@ impl StacksClient { /// Get the sortition information for a given sortition pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { - self.stacks_node_client - .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) - .send() - .map_err(|e| { - warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); - e - }) + self.stacks_node_client.get(&path).send().map_err(|e| { + warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); + e + }) }; let response = send_request()?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -582,7 +585,6 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { debug!("Getting pox data..."); - #[cfg(feature = "monitoring_prom")] let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client @@ -591,7 +593,6 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; - #[cfg(feature = "monitoring_prom")] timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); @@ -706,13 +707,11 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. 
pub fn post_block(&self, block: &NakamotoBlock) -> Result { + let path = format!("{}{}?broadcast=1", self.http_origin, postblock_v3::PATH); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { self.stacks_node_client - .post(format!( - "{}{}?broadcast=1", - self.http_origin, - postblock_v3::PATH - )) + .post(&path) .header("Content-Type", "application/octet-stream") .header(AUTHORIZATION, self.auth_password.clone()) .body(block.serialize_to_vec()) @@ -723,6 +722,7 @@ impl StacksClient { }) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } From 8af3d2ef9ff1f378710bb0823a65da616615f903 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sun, 15 Sep 2024 14:08:06 -0700 Subject: [PATCH 558/910] Fix bug and add percentage missing Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 44 +++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 64e5c48a96e..513382e843b 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -220,6 +220,7 @@ struct RewardCycleState { signers_slots: HashMap, signers_keys: HashMap, signers_addresses: HashMap, + signers_weights: HashMap, slot_ids: Vec, /// Reward cycle is not known until the first successful call to the node reward_cycle: Option, @@ -257,12 +258,6 @@ impl SignerMonitor { self.cycle_state.signers_slots = self.stacks_client.get_parsed_signer_slots(reward_cycle)?; - self.cycle_state.slot_ids = self - .cycle_state - .signers_slots - .values() - .map(|value| value.0) - .collect(); let entries = self .stacks_client @@ -277,6 +272,9 @@ impl SignerMonitor { self.cycle_state .signers_keys .insert(stacks_address, public_key); + self.cycle_state + .signers_weights + .insert(stacks_address, entry.weight); } for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { self.cycle_state @@ -284,9 +282,6 @@ impl SignerMonitor { .insert(*slot_id, *signer_address); } - self.cycle_state.signers_slots = - self.stacks_client.get_parsed_signer_slots(reward_cycle)?; - for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { self.cycle_state .signers_addresses @@ -318,8 +313,14 @@ impl SignerMonitor { }) .collect::>() .join(", "); + let missing_weight = missing_signers + .iter() + .map(|addr| self.cycle_state.signers_weights.get(addr).unwrap()) + .sum::(); + let total_weight = self.cycle_state.signers_weights.values().sum::(); + let percentage_missing = missing_weight as f64 / total_weight as f64 * 100.00; warn!( - "Missing messages for {} of {} signer(s). ", missing_signers.len(), self.cycle_state.signers_addresses.len(); + "Missing messages for {} of {} signer(s). 
Missing {percentage_missing:.2}% of signing weight ({missing_weight}/{total_weight})", missing_signers.len(), self.cycle_state.signers_addresses.len(); "signer_addresses" => formatted_signers, "signer_keys" => formatted_keys ); @@ -442,18 +443,21 @@ impl SignerMonitor { chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) }) .collect(); - for ((signer_address, slot_id), signer_message_opt) in self - .cycle_state - .signers_slots - .clone() - .into_iter() - .zip(new_messages) + + for (signer_message_opt, slot_id) in + new_messages.into_iter().zip(&self.cycle_state.slot_ids) { + let signer_slot_id = SignerSlotID(*slot_id); + let signer_address = *self + .cycle_state + .signers_addresses + .get(&signer_slot_id) + .expect("BUG: missing signer address for given slot id"); let Some(signer_message) = signer_message_opt else { missing_signers.push(signer_address); continue; }; - if let Some(last_message) = last_messages.get(&slot_id) { + if let Some(last_message) = last_messages.get(&signer_slot_id) { if last_message == &signer_message { continue; } @@ -467,11 +471,11 @@ impl SignerMonitor { || (epoch > StacksEpochId::Epoch25 && !matches!(signer_message, SignerMessage::BlockResponse(_))) { - unexpected_messages.insert(signer_address, (signer_message, slot_id)); + unexpected_messages.insert(signer_address, (signer_message, signer_slot_id)); continue; } - last_messages.insert(slot_id, signer_message); - last_updates.insert(slot_id, std::time::Instant::now()); + last_messages.insert(signer_slot_id, signer_message); + last_updates.insert(signer_slot_id, std::time::Instant::now()); } for (slot_id, last_update_time) in last_updates.iter() { if last_update_time.elapsed().as_secs() > self.args.max_age { From 572fb815cdba65175c84b41992cdcad748be7269 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 21:09:25 -0400 Subject: [PATCH 559/910] chore: stdext and rlimit dev-dependencies --- Cargo.lock | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 10b65eb7458..e56e4400b44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2795,6 +2795,15 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" +[[package]] +name = "rlimit" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" +dependencies = [ + "libc", +] + [[package]] name = "rstest" version = "0.11.0" @@ -3411,6 +3420,9 @@ dependencies = [ "regex", "reqwest", "ring 0.16.20", + "rlimit", + "rstest 0.17.0", + "rstest_reuse 0.5.0", "rusqlite", "serde", "serde_derive", @@ -3419,6 +3431,7 @@ dependencies = [ "stacks-common", "stacks-signer", "stackslib", + "stdext", "stx-genesis", "tikv-jemallocator", "tiny_http", From 95b72cda4d8d4313746b12a91dd85579bb5fd3d2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 21:10:11 -0400 Subject: [PATCH 560/910] bugfix: always save new neighbor data when we get it --- stackslib/src/net/neighbors/db.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index c0e65a6f854..0289875f113 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -557,9 +557,12 @@ impl NeighborWalkDB for PeerDBNeighborWalk { if let Some(data) = new_data { cur_neighbor.handshake_update(&tx, &data.handshake)?; - if let Some(db_data) = 
new_db_data { - cur_neighbor.save_update(&tx, Some(db_data.smart_contracts.as_slice()))?; - } + } + + if let Some(db_data) = new_db_data { + cur_neighbor.save_update(&tx, Some(db_data.smart_contracts.as_slice()))?; + } else { + cur_neighbor.save_update(&tx, None)?; } tx.commit()?; From b602455aed2a67c5a12fdb69ba4a697568dd1042 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 15 Sep 2024 22:13:08 -0400 Subject: [PATCH 561/910] test: increase open file descriptor limit in CI --- .github/workflows/bitcoin-tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 69878ce403f..9465b90e29d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -140,6 +140,11 @@ jobs: with: btc-version: "25.0" + ## Increase open file descriptors limit + - name: Increase Open File Descriptors + run: | + sudo prlimit --nofile=4096:4096 + ## Run test matrix using restored cache of archive file ## - Test will timeout after env.TEST_TIMEOUT minutes - name: Run Tests From 69ecf2e9be5557b489791ecb702631068edeac9a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:20:24 -0400 Subject: [PATCH 562/910] chore: reduce some debug noise, and log connection name for bandwidth-exceeded conditions --- stackslib/src/net/chat.rs | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 926340d7fef..5949db0bbf6 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -510,7 +510,7 @@ impl Neighbor { } }; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer @@ -1433,6 +1433,8 @@ impl ConversationP2P { // get neighbors at random as long as they're fresh, and as long as they're compatible with // the current system epoch. + // Alternate at random between serving public-only and public/private-mixed IPs, since for + // the time being, the remote peer has no way of asking for a particular subset. let mut neighbors = PeerDB::get_fresh_random_neighbors( peer_dbconn, self.network_id, @@ -1441,6 +1443,7 @@ impl ConversationP2P { MAX_NEIGHBORS_DATA_LEN, chain_view.burn_block_height, false, + thread_rng().gen(), ) .map_err(net_error::DBError)?; @@ -1917,10 +1920,12 @@ impl ConversationP2P { /// Generates a Nack if we don't have this DB, or if the request's consensus hash is invalid. 
fn make_stacker_db_getchunkinv_response( network: &PeerNetwork, + naddr: NeighborAddress, chainstate: &mut StacksChainState, getchunkinv: &StackerDBGetChunkInvData, ) -> Result { Ok(network.make_StackerDBChunksInv_or_Nack( + naddr, chainstate, &getchunkinv.contract_id, &getchunkinv.rc_consensus_hash, @@ -1938,6 +1943,7 @@ impl ConversationP2P { ) -> Result { let response = ConversationP2P::make_stacker_db_getchunkinv_response( network, + self.to_neighbor_address(), chainstate, getchunkinv, )?; @@ -2120,7 +2126,8 @@ impl ConversationP2P { > (self.connection.options.max_block_push_bandwidth as f64) { debug!( - "Neighbor {:?} exceeded max block-push bandwidth of {} bytes/sec (currently at {})", + "{:?}: Neighbor {:?} exceeded max block-push bandwidth of {} bytes/sec (currently at {})", + &self, &self.to_neighbor_key(), self.connection.options.max_block_push_bandwidth, self.stats.get_block_push_bandwidth() @@ -2162,7 +2169,7 @@ impl ConversationP2P { && self.stats.get_microblocks_push_bandwidth() > (self.connection.options.max_microblocks_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max microblocks-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_microblocks_push_bandwidth, self.stats.get_microblocks_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max microblocks-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_microblocks_push_bandwidth, self.stats.get_microblocks_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2199,7 +2206,7 @@ impl ConversationP2P { && self.stats.get_transaction_push_bandwidth() > (self.connection.options.max_transaction_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max transaction-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_transaction_push_bandwidth, self.stats.get_transaction_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max transaction-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_transaction_push_bandwidth, self.stats.get_transaction_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2237,7 +2244,7 @@ impl ConversationP2P { && self.stats.get_stackerdb_push_bandwidth() > (self.connection.options.max_stackerdb_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max stackerdb-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_stackerdb_push_bandwidth, self.stats.get_stackerdb_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max stackerdb-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_stackerdb_push_bandwidth, self.stats.get_stackerdb_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2276,7 +2283,7 @@ impl ConversationP2P { && self.stats.get_nakamoto_block_push_bandwidth() > (self.connection.options.max_nakamoto_block_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); + 
debug!("{:?}: Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2415,11 +2422,11 @@ impl ConversationP2P { Ok(num_recved) => { total_recved += num_recved; if num_recved > 0 { - debug!("{:?}: received {} bytes", self, num_recved); + test_debug!("{:?}: received {} bytes", self, num_recved); self.stats.last_recv_time = get_epoch_time_secs(); self.stats.bytes_rx += num_recved as u64; } else { - debug!("{:?}: received {} bytes, stopping", self, num_recved); + test_debug!("{:?}: received {} bytes, stopping", self, num_recved); break; } } @@ -2436,7 +2443,7 @@ impl ConversationP2P { } } } - debug!("{:?}: received {} bytes", self, total_recved); + test_debug!("{:?}: received {} bytes", self, total_recved); Ok(total_recved) } @@ -2464,7 +2471,7 @@ impl ConversationP2P { } } } - debug!("{:?}: sent {} bytes", self, total_sent); + test_debug!("{:?}: sent {} bytes", self, total_sent); Ok(total_sent) } @@ -3051,8 +3058,9 @@ impl ConversationP2P { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { + #![allow(unused)] use std::fs; use std::io::prelude::*; use std::io::{Read, Write}; From 62bdacd6359fa5ff502a481027cb24e946a472da Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:20:54 -0400 Subject: [PATCH 563/910] chore: document neighbor walk connection opts, and add `log_neighbors_freq` and `walk_seed_probability` options --- stackslib/src/net/connection.rs | 36 ++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 0d4d5aafd6e..db50c46333d 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -43,7 +43,7 @@ use crate::net::inv::{INV_REWARD_CYCLES, INV_SYNC_INTERVAL}; use crate::net::neighbors::{ MAX_NEIGHBOR_AGE, NEIGHBOR_REQUEST_TIMEOUT, NEIGHBOR_WALK_INTERVAL, NUM_INITIAL_WALKS, WALK_MAX_DURATION, WALK_MIN_DURATION, WALK_RESET_INTERVAL, WALK_RESET_PROB, WALK_RETRY_COUNT, - WALK_STATE_TIMEOUT, + WALK_SEED_PROBABILITY, WALK_STATE_TIMEOUT, }; use crate::net::{ Error as net_error, MessageSequence, Preamble, ProtocolFamily, RelayData, StacksHttp, StacksP2P, @@ -235,9 +235,10 @@ impl<P: ProtocolFamily> NetworkReplyHandle<P>
{ None } else { // still have data to send, or we will send more. - debug!( + test_debug!( "Still have data to send, drop_on_success = {}, ret = {}", - drop_on_success, ret + drop_on_success, + ret ); Some(fd) } @@ -345,15 +346,37 @@ pub struct ConnectionOptions { pub max_http_clients: u64, pub neighbor_request_timeout: u64, pub max_neighbor_age: u64, + /// How many walk steps to take when the node has booted up. This influences how quickly the + /// node will find new peers on start-up. This describes the maximum length of such walks. pub num_initial_walks: u64, + /// How many walk state-machine restarts to take when the node has booted up. This influences + /// how quickly the node will find new peers on start-up. This describes the maximum number of + /// such walk state-machine run-throughs. pub walk_retry_count: u64, + /// How often, in seconds, to run the walk state machine. pub walk_interval: u64, + /// The regularity of doing an inbound neighbor walk (as opposed to an outbound neighbor walk). + /// Every `walk_inbound_ratio + 1`-th walk will be an inbound neighbor walk. pub walk_inbound_ratio: u64, + /// Minimum number of steps a walk will run until it can be reset. pub walk_min_duration: u64, + /// Maximum number of steps a walk will run until forcibly reset. pub walk_max_duration: u64, + /// Probability that the walk will be reset once `walk_min_duration` steps are taken. pub walk_reset_prob: f64, + /// Maximum number of seconds a walk can last before being reset. pub walk_reset_interval: u64, + /// Maximum number of seconds a walk can remain in the same state before being reset. pub walk_state_timeout: u64, + /// If the node is booting up, or if the node is not connected to an always-allowed peer and + /// there are one or more such peers in the peers DB, then this controls the probability that + /// the node will attempt to start a walk to an always-allowed peer. It's good to have this + /// close to, but not equal to 1.0, so that if the node can't reach any always-allowed peer for + /// some reason but can reach other neighbors, then neighbor walks can continue. + pub walk_seed_probability: f64, + /// How often, if ever, to log our neighbors via DEBG. + /// Units are milliseconds. A value of 0 means "never". + pub log_neighbors_freq: u64, pub inv_sync_interval: u64, pub inv_reward_cycles: u64, pub download_interval: u64, @@ -494,6 +517,8 @@ impl std::default::Default for ConnectionOptions { walk_reset_prob: WALK_RESET_PROB, walk_reset_interval: WALK_RESET_INTERVAL, walk_state_timeout: WALK_STATE_TIMEOUT, + walk_seed_probability: WALK_SEED_PROBABILITY, + log_neighbors_freq: 60_000, inv_sync_interval: INV_SYNC_INTERVAL, // how often to synchronize block inventories inv_reward_cycles: INV_REWARD_CYCLES, // how many reward cycles of blocks to sync in a non-full inventory sync download_interval: BLOCK_DOWNLOAD_INTERVAL, // how often to scan for blocks to download @@ -1024,7 +1049,7 @@ impl ConnectionInbox

{ total_read += num_read; if num_read > 0 || total_read > 0 { - debug!("read {} bytes; {} total", num_read, total_read); + test_debug!("read {} bytes; {} total", num_read, total_read); } if num_read > 0 { @@ -1479,8 +1504,9 @@ pub type ReplyHandleP2P = NetworkReplyHandle; pub type ConnectionHttp = NetworkConnection; pub type ReplyHandleHttp = NetworkReplyHandle; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { + #![allow(unused)] use std::io::prelude::*; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; From 71bdd843f2dab1bea44284c652cc661de6ae2215 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:21:26 -0400 Subject: [PATCH 564/910] fix: if we're not in IBD and we don't have a seed node connection (which can happen due to misconfiguration), then still occasionally attempt to walk to non-seed nodes (fixes #5159) --- stackslib/src/net/neighbors/mod.rs | 68 +++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 6447a6ec00b..28355d0e1a0 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -42,9 +42,9 @@ pub use db::{NeighborReplacements, NeighborWalkDB, PeerDBNeighborWalk}; pub use walk::{NeighborPingback, NeighborWalk, NeighborWalkResult}; /// How often we can contact other neighbors, at a minimum -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 0; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 600; /// Default number of seconds to wait for a reply from a neighbor @@ -79,33 +79,37 @@ pub const WALK_STATE_TIMEOUT: u64 = 60; /// Total number of seconds for which a particular walk can exist. It will be reset if it exceeds /// this age. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const WALK_RESET_INTERVAL: u64 = 60; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const WALK_RESET_INTERVAL: u64 = 600; /// How often the node will consider pruning neighbors from its neighbor set. The node will prune /// neighbors from over-represented hosts and IP ranges in order to maintain connections to a /// diverse set of neighbors. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const PRUNE_FREQUENCY: u64 = 0; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const PRUNE_FREQUENCY: u64 = 43200; /// Not all neighbors discovered will have an up-to-date chain tip. This value is the highest /// discrepancy between the local burnchain block height and the remote node's burnchain block /// height for which the neighbor will be considered as a worthwhile peer to remember. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 25; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 288; /// How often to kick off neighbor walks.
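A pattern worth noting across these patches: every `#[cfg(test)]` / `#[cfg(not(test))]` pair becomes `#[cfg(any(test, feature = "testing"))]` / `#[cfg(not(any(test, feature = "testing")))]`, so builds compiled with a `testing` Cargo feature get the same fast-path constants and test modules as unit-test builds. A minimal standalone sketch of the pattern, using the `NEIGHBOR_WALK_INTERVAL` values from the hunk that follows (an empty `testing` feature is assumed to be declared in Cargo.toml):

```rust
// Fast value for unit tests or `cargo build --features testing`.
#[cfg(any(test, feature = "testing"))]
pub const NEIGHBOR_WALK_INTERVAL: u64 = 0;

// Production pacing otherwise.
#[cfg(not(any(test, feature = "testing")))]
pub const NEIGHBOR_WALK_INTERVAL: u64 = 120; // seconds

fn main() {
    println!("neighbor walk interval: {}s", NEIGHBOR_WALK_INTERVAL);
}
```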
-#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const NEIGHBOR_WALK_INTERVAL: u64 = 0; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const NEIGHBOR_WALK_INTERVAL: u64 = 120; // seconds +/// Probability that we begin an always-allowed peer walk if we're either in IBD or if we're not +/// connected to at least one always-allowed node +pub const WALK_SEED_PROBABILITY: f64 = 0.9; + impl PeerNetwork { /// Begin an outbound walk or a pingback walk, depending on whether or not we have pingback /// state. @@ -115,6 +119,10 @@ impl PeerNetwork { &self, ) -> Result, net_error> { if self.get_walk_pingbacks().len() == 0 { + debug!( + "{:?}: no walk pingbacks, so instantiate a normal neighbor walk", + self.get_local_peer() + ); // unconditionally do an outbound walk return NeighborWalk::instantiate_walk( self.get_neighbor_walk_db(), @@ -127,6 +135,10 @@ impl PeerNetwork { // If one fails, then try the other let do_outbound = thread_rng().gen::<bool>(); if do_outbound { + debug!( + "{:?}: instantiate a normal neighbor walk", + self.get_local_peer() + ); match NeighborWalk::instantiate_walk( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -148,6 +160,10 @@ impl PeerNetwork { } } } else { + debug!( + "{:?}: instantiate a pingback neighbor walk", + self.get_local_peer() + ); match NeighborWalk::instantiate_walk_from_pingback( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -216,9 +232,17 @@ impl PeerNetwork { .unwrap_or((0, 0)); // always ensure we're connected to always-allowed outbound peers other than ourselves - let walk_res = if ibd || (num_always_connected == 0 && total_always_connected > 0) { + let walk_seed = + thread_rng().gen::<f64>() < self.get_connection_opts().walk_seed_probability; + let walk_res = if ibd + || (num_always_connected == 0 && total_always_connected > 0 && walk_seed) + { // always connect to bootstrap peers if in IBD, or if we're not connected to an // always-allowed peer already + debug!("{:?}: Instantiate walk to always allowed", self.get_local_peer(); + "num_always_connected" => num_always_connected, + "total_always_connected" => total_always_connected, + "ibd" => ibd); NeighborWalk::instantiate_walk_to_always_allowed( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -226,12 +250,26 @@ impl PeerNetwork { ibd, ) } else if self.walk_attempts % (self.connection_opts.walk_inbound_ratio + 1) == 0 { - // not IBD. Time to try an inbound neighbor + // not IBD, or not walk_seed, or connected to an always-allowed peer, or no always-allowed. + // Time to try an inbound neighbor + debug!("{:?}: Instantiate walk to inbound neighbor", self.get_local_peer(); + "walk_attempts" => self.walk_attempts, + "walk_inbound_ratio" => self.connection_opts.walk_inbound_ratio, + "num_always_connected" => num_always_connected, + "total_always_connected" => total_always_connected, + "walk_seed" => walk_seed); + self.new_maybe_inbound_walk() } else { - // not IBD, and not time to try an inbound neighbor. + // no need to walk to an always-allowed peer, and not time to try an inbound neighbor. // Either do an outbound walk, or do a pingback walk. // If one fails, then try the other.
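The `walk_seed` coin-flip above is the core of the #5159 fix: outside of IBD, a node with no always-allowed connection now starts a seed walk only with probability `walk_seed_probability` (0.9 by default), leaving a chance per attempt to walk to other neighbors even when the seed peers are unreachable. A self-contained sketch of the decision, assuming the `rand` 0.8 crate (not the exact stackslib code):

```rust
use rand::{thread_rng, Rng};

/// Should this round walk to an always-allowed ("seed") peer?
fn should_walk_to_seed(
    ibd: bool,
    num_always_connected: u64,
    total_always_connected: u64,
    walk_seed_probability: f64, // WALK_SEED_PROBABILITY = 0.9 by default
) -> bool {
    // Weighted coin: true ~90% of the time with the default setting.
    let walk_seed = thread_rng().gen::<f64>() < walk_seed_probability;
    // In IBD, always seek a seed; otherwise only if we lack a seed
    // connection, seeds exist in the peer DB, and the coin came up.
    ibd || (num_always_connected == 0 && total_always_connected > 0 && walk_seed)
}

fn main() {
    println!("{}", should_walk_to_seed(false, 0, 1, 0.9));
}
```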
+ debug!("{:?}: Instantiate walk to either outbound or pingback neighbor", self.get_local_peer(); + "walk_attempts" => self.walk_attempts, + "walk_inbound_ratio" => self.connection_opts.walk_inbound_ratio, + "num_always_connected" => num_always_connected, + "total_always_connected" => total_always_connected, + "walk_seed" => walk_seed); self.new_outbound_or_pingback_walk() }; @@ -329,7 +367,7 @@ impl PeerNetwork { return true; } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn print_walk_diagnostics(&mut self) { let (mut inbound, mut outbound) = self.dump_peer_table(); @@ -359,7 +397,7 @@ impl PeerNetwork { debug!("{:?}: Walk finished ===================", &self.local_peer); } - #[cfg(not(test))] + #[cfg(not(any(test, feature = "testing")))] fn print_walk_diagnostics(&self) {} /// Update the state of our peer graph walk. From 6c56f71961c3b802da70581f2272db6b2a8ef771 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:22:27 -0400 Subject: [PATCH 565/910] fix: collate peers by public key and only report the one with the latest last-contact time. Don't query rows in `frontier` directly, unless it's for a specific `slot` (fixes #5169) --- stackslib/src/net/db.rs | 383 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 351 insertions(+), 32 deletions(-) diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 07f0bb5d74c..ff6b5a9a052 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::{fmt, fs}; use clarity::vm::types::{ @@ -45,7 +45,7 @@ use crate::util_lib::db::{ }; use crate::util_lib::strings::UrlString; -pub const PEERDB_VERSION: &'static str = "2"; +pub const PEERDB_VERSION: &'static str = "3"; const NUM_SLOTS: usize = 8; @@ -394,13 +394,20 @@ const PEERDB_SCHEMA_2: &'static [&'static str] = &[ CREATE INDEX IF NOT EXISTS index_stackedb_peers_by_slot ON stackerdb_peers(peer_slot); "#, r#" - ALTER TABLE local_peer ADD COLUMN stacker_dbs TEXT + ALTER TABLE local_peer ADD COLUMN stacker_dbs TEXT; "#, r#" UPDATE db_config SET version = 2; "#, ]; +const PEERDB_SCHEMA_3: &'static [&'static str] = &[ + r#" + ALTER TABLE frontier ADD COLUMN public BOOL NOT NULL DEFAULT 0; + "#, + "UPDATE db_config SET version = 3;", +]; + #[derive(Debug)] pub struct PeerDB { pub conn: Connection, @@ -516,6 +523,18 @@ impl PeerDB { Ok(version) } + /// Tag each address in the peer DB as public if its address is not private. + /// Happens as part of the schema 3 migration + fn update_peerdb_public_addrs(tx: &Transaction) -> Result<(), db_error> { + let all_peers = Self::get_all_peers(tx)?; + for peer in all_peers { + let public = !peer.addr.addrbytes.is_in_private_range(); + debug!("Marking peer {:?} as public? 
{}", &peer, public); + Self::update_peer(tx, &peer)?; + } + Ok(()) + } + #[cfg_attr(test, mutants::skip)] fn apply_schema_2(tx: &Transaction) -> Result<(), db_error> { test_debug!("Apply schema 2 to peer DB"); @@ -525,6 +544,16 @@ impl PeerDB { Ok(()) } + #[cfg_attr(test, mutants::skip)] + fn apply_schema_3(tx: &Transaction) -> Result<(), db_error> { + test_debug!("Apply schema 3 to peer DB"); + for row_text in PEERDB_SCHEMA_3 { + tx.execute_batch(row_text).map_err(db_error::SqliteError)?; + } + Self::update_peerdb_public_addrs(tx)?; + Ok(()) + } + fn apply_schema_migrations(tx: &Transaction) -> Result { test_debug!("Apply any schema migrations"); let expected_version = PEERDB_VERSION.to_string(); @@ -537,6 +566,8 @@ impl PeerDB { } if version == "1" { PeerDB::apply_schema_2(tx)?; + } else if version == "2" { + PeerDB::apply_schema_3(tx)?; } else if version == expected_version { return Ok(ret.expect("unreachable")); } else { @@ -675,9 +706,12 @@ impl PeerDB { } } } else { - let tx = db.tx_begin()?; - PeerDB::apply_schema_migrations(&tx)?; - tx.commit()?; + let peerdb_version = PeerDB::get_schema_version(&db.conn)?; + if peerdb_version != PEERDB_VERSION { + let tx = db.tx_begin()?; + PeerDB::apply_schema_migrations(&tx)?; + tx.commit()?; + } db.update_local_peer( network_id, @@ -748,7 +782,7 @@ impl PeerDB { } /// Open a peer database in memory (used for testing) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn connect_memory( network_id: u32, parent_network_id: u32, @@ -903,8 +937,38 @@ impl PeerDB { Ok(ret) } + /// Group a list of peers by public key, and return the one with the highest last-contact time + fn query_peers( + conn: &Connection, + qry: &str, + args: &[&dyn ToSql], + ) -> Result, db_error> { + let peers: Vec = query_rows(conn, qry, args)?; + let mut grouped_by_public_key: HashMap = HashMap::new(); + for peer in peers.into_iter() { + if let Some(cur_peer) = grouped_by_public_key.get_mut(&peer.public_key) { + if cur_peer.last_contact_time < peer.last_contact_time { + *cur_peer = peer; + } + } else { + grouped_by_public_key.insert(peer.public_key.clone(), peer); + } + } + Ok(grouped_by_public_key.into_values().collect()) + } + + /// Query a single peer. + /// If multiple rows are returned, then only the first-found row is reported. + fn query_peer( + conn: &Connection, + qry: &str, + args: &[&dyn ToSql], + ) -> Result, db_error> { + let mut peers = Self::query_peers(conn, qry, args)?; + Ok(peers.pop()) + } + /// Get a peer from the DB. - /// Panics if the peer was inserted twice -- this shouldn't happen. 
pub fn get_peer( conn: &DBConn, network_id: u32, @@ -913,7 +977,7 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; let args = params![network_id, peer_addr.to_bin(), peer_port,]; - query_row::(conn, qry, args) + Self::query_peer(conn, qry, args) } pub fn has_peer( @@ -930,7 +994,7 @@ impl PeerDB { } /// Get peer by port (used in tests where the IP address doesn't really matter) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_peer_by_port( conn: &DBConn, network_id: u32, @@ -938,7 +1002,7 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND port = ?2"; let args = params![network_id, peer_port]; - query_row::(conn, &qry, args) + Self::query_peer(conn, qry, args) } /// Get a peer record at a particular slot @@ -949,6 +1013,8 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND slot = ?2"; let args = params![network_id, slot]; + + // N.B. we don't use Self::query_peer() here because `slot` is the primary key query_row::(conn, &qry, args) } @@ -1012,15 +1078,24 @@ impl PeerDB { conn: &DBConn, network_id: u32, ) -> Result, db_error> { + let local_peer = Self::get_local_peer(conn)?; let sql = "SELECT * FROM frontier WHERE allowed < 0 AND network_id = ?1 ORDER BY RANDOM()"; - let allow_rows = query_rows::(conn, sql, &[&network_id])?; - Ok(allow_rows) + let allow_rows: Vec = Self::query_peers(conn, sql, params![&network_id])?; + Ok(allow_rows + .into_iter() + .filter(|neighbor| { + // omit local peer if it ever gets entered by mistake, since we can't talk to + // ourselves. + neighbor.public_key.to_bytes_compressed() + != StacksPublicKey::from_private(&local_peer.private_key).to_bytes_compressed() + }) + .collect()) } /// Get the bootstrap peers pub fn get_bootstrap_peers(conn: &DBConn, network_id: u32) -> Result, db_error> { let sql = "SELECT * FROM frontier WHERE initial = 1 AND network_id = ?1 ORDER BY RANDOM()"; - let allow_rows = query_rows::(conn, sql, &[&network_id])?; + let allow_rows = Self::query_peers(conn, sql, params![&network_id])?; Ok(allow_rows) } @@ -1070,10 +1145,11 @@ impl PeerDB { neighbor.out_degree, 0i64, slot, + !neighbor.addr.addrbytes.is_in_private_range() ]; - tx.execute("INSERT OR REPLACE INTO frontier (peer_version, network_id, addrbytes, port, public_key, expire_block_height, last_contact_time, asn, org, allowed, denied, in_degree, out_degree, initial, slot) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", neighbor_args) + tx.execute("INSERT OR REPLACE INTO frontier (peer_version, network_id, addrbytes, port, public_key, expire_block_height, last_contact_time, asn, org, allowed, denied, in_degree, out_degree, initial, slot, public) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)", neighbor_args) .map_err(db_error::SqliteError)?; if let Some(old_peer) = old_peer_opt { @@ -1111,6 +1187,7 @@ impl PeerDB { } /// Is a peer one of this node's initial neighbors? + /// Only checks IP address. 
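Stepping back to the schema change at the top of this patch: `apply_schema_migrations` upgrades one version at a time (a version-1 DB gets schema 2 applied, a version-2 DB gets schema 3) until the stored version matches `PEERDB_VERSION`. A hedged sketch of that stepwise flow; the function shape and version handling here are illustrative, not the stackslib API:

```rust
/// Walk the DB forward one schema version at a time.
fn apply_schema_migrations(version: &mut u32) -> Result<(), String> {
    const EXPECTED: u32 = 3; // PEERDB_VERSION in this patch
    while *version < EXPECTED {
        match *version {
            1 => *version = 2, // e.g. add the stackerdb_peers tables
            2 => *version = 3, // e.g. add the `public` column and backfill it
            v => return Err(format!("unsupported schema version {v}")),
        }
    }
    Ok(())
}

fn main() {
    let mut version = 1;
    apply_schema_migrations(&mut version).unwrap();
    assert_eq!(version, 3);
}
```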
pub fn is_initial_peer( conn: &DBConn, network_id: u32, @@ -1119,7 +1196,7 @@ impl PeerDB { ) -> Result { let res: Option = query_row( conn, - "SELECT initial FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", + "SELECT initial FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3 ORDER BY last_contact_time DESC LIMIT 1", params![network_id, peer_addr.to_bin(), peer_port], )?; @@ -1129,7 +1206,8 @@ impl PeerDB { } } - /// Set a peer as an initial peer + /// Set a peer as an initial peer. + /// Does so for all rows with the given IP address. pub fn set_initial_peer( tx: &Transaction, network_id: u32, @@ -1258,13 +1336,14 @@ impl PeerDB { neighbor.denied, neighbor.in_degree, neighbor.out_degree, + !neighbor.addr.addrbytes.is_in_private_range(), neighbor.addr.network_id, to_bin(neighbor.addr.addrbytes.as_bytes()), neighbor.addr.port, ]; - tx.execute("UPDATE frontier SET peer_version = ?1, public_key = ?2, expire_block_height = ?3, last_contact_time = ?4, asn = ?5, org = ?6, allowed = ?7, denied = ?8, in_degree = ?9, out_degree = ?10 \ - WHERE network_id = ?11 AND addrbytes = ?12 AND port = ?13", args) + tx.execute("UPDATE frontier SET peer_version = ?1, public_key = ?2, expire_block_height = ?3, last_contact_time = ?4, asn = ?5, org = ?6, allowed = ?7, denied = ?8, in_degree = ?9, out_degree = ?10, public = ?11 \ + WHERE network_id = ?12 AND addrbytes = ?13 AND port = ?14", args) .map_err(db_error::SqliteError)?; if let Some(old_peer) = old_peer_opt { @@ -1586,7 +1665,8 @@ impl PeerDB { Ok(()) } - /// Get random neighbors, optionally always including allowed neighbors + /// Get random neighbors, optionally always including allowed neighbors. + /// Private IPs may be returned, if known. pub fn get_random_neighbors( conn: &DBConn, network_id: u32, @@ -1603,6 +1683,7 @@ impl PeerDB { count, block_height, always_include_allowed, + false, ) } @@ -1615,6 +1696,7 @@ impl PeerDB { count: u32, block_height: u64, always_include_allowed: bool, + public_only: bool, ) -> Result, db_error> { let mut ret = vec![]; @@ -1630,7 +1712,7 @@ impl PeerDB { u64_to_sql(now_secs)?, network_epoch, ]; - let mut allow_rows = query_rows::(conn, &allow_qry, allow_args)?; + let mut allow_rows = Self::query_peers(conn, &allow_qry, allow_args)?; if allow_rows.len() >= (count as usize) { // return a random subset @@ -1646,12 +1728,14 @@ impl PeerDB { } // fill in with non-allowed, randomly-chosen, fresh peers + let use_public = if public_only { "AND public = 1" } else { "" }; + let random_peers_qry = if always_include_allowed { - "SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ - (allowed >= 0 AND allowed <= ?5) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" + format!("SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ + (allowed >= 0 AND allowed <= ?5) AND (peer_version & 0x000000ff) >= ?6 {use_public} ORDER BY RANDOM() LIMIT ?7") } else { - "SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ - (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" + format!("SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ + (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 {use_public} ORDER BY 
RANDOM() LIMIT ?7") }; let random_peers_args = params![ @@ -1663,8 +1747,7 @@ impl PeerDB { network_epoch, (count - (ret.len() as u32)), ]; - let mut random_peers = - query_rows::(conn, &random_peers_qry, random_peers_args)?; + let mut random_peers = Self::query_peers(conn, &random_peers_qry, random_peers_args)?; ret.append(&mut random_peers); Ok(ret) @@ -1686,6 +1769,7 @@ impl PeerDB { /// Get a randomized set of peers for walking the peer graph. /// -- selects peers at random even if not allowed + /// -- may include private IPs #[cfg_attr(test, mutants::skip)] pub fn get_random_walk_neighbors( conn: &DBConn, @@ -1703,6 +1787,7 @@ impl PeerDB { count, block_height, false, + false, ) } @@ -1767,7 +1852,7 @@ impl PeerDB { pub fn get_all_peers(conn: &DBConn) -> Result, db_error> { let qry = "SELECT * FROM frontier ORDER BY addrbytes ASC, port ASC"; - let rows = query_rows::(conn, &qry, NO_PARAMS)?; + let rows = Self::query_peers(conn, &qry, NO_PARAMS)?; Ok(rows) } @@ -1784,7 +1869,7 @@ impl PeerDB { if max_count == 0 { return Ok(vec![]); } - let qry = "SELECT DISTINCT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; + let qry = "SELECT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; let max_count_u32 = u32::try_from(max_count).unwrap_or(u32::MAX); let args = params![ smart_contract.to_string(), @@ -1792,11 +1877,11 @@ impl PeerDB { u64_to_sql(min_age)?, max_count_u32, ]; - query_rows(conn, qry, args) + Self::query_peers(conn, qry, args) } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use clarity::vm::types::{StacksAddressExtensions, StandardPrincipalData}; use stacks_common::types::chainstate::StacksAddress; @@ -1806,6 +1891,21 @@ mod test { use super::*; use crate::net::{Neighbor, NeighborKey}; + impl PeerDB { + /// test the `public` flag + pub fn is_public( + conn: &DBConn, + network_id: u32, + peer_addr: &PeerAddress, + peer_port: u16, + ) -> Result { + let qry = "SELECT public FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; + let args = params![network_id, peer_addr.to_bin(), peer_port,]; + let public: bool = query_row(conn, qry, args)?.ok_or(db_error::NotFoundError)?; + Ok(public) + } + } + /// Test storage, retrieval, and mutation of LocalPeer, including its stacker DB contract IDs #[test] fn test_local_peer() { @@ -3543,4 +3643,223 @@ mod test { ) .unwrap(); } + + /// Test `public` setting in DB migration + #[test] + fn test_db_schema_3_public_ip_migration() { + let key = Secp256k1PrivateKey::new(); + + let path = "/tmp/test-peerdb-schema-3-public-ip-migration.db".to_string(); + if fs::metadata(&path).is_ok() { + fs::remove_file(&path).unwrap(); + } + let mut db = PeerDB::connect( + &path, + true, + 0x80000000, + 0, + Some(key.clone()), + i64::MAX as u64, + PeerAddress::from_ipv4(127, 0, 0, 1), + 12345, + UrlString::try_from("http://foo.com").unwrap(), + &vec![], + None, + &[], + ) + .unwrap(); + + let private_addrbytes = vec![ + PeerAddress::from_ipv4(127, 0, 0, 1), + PeerAddress::from_ipv4(192, 168, 0, 1), + PeerAddress::from_ipv4(172, 16, 0, 1), + PeerAddress::from_ipv4(10, 0, 0, 1), + PeerAddress([ + 0xfc, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 
0x0c, + 0x0d, 0x0e, + ]), + PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, + ]), + ]; + + let public_addrbytes = vec![ + PeerAddress::from_ipv4(1, 2, 3, 4), + PeerAddress([ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, 0x00, + ]), + ]; + + let mut neighbor = Neighbor { + addr: NeighborKey { + peer_version: 0x12345678, + network_id: 0x9abcdef0, + addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + port: 12345, + }, + public_key: Secp256k1PublicKey::from_hex( + "02fa66b66f8971a8cd4d20ffded09674e030f0f33883f337f34b95ad4935bac0e3", + ) + .unwrap(), + expire_block: 23456, + last_contact_time: 1552509642, + allowed: -1, + denied: -1, + asn: 34567, + org: 45678, + in_degree: 1, + out_degree: 1, + }; + + // force public and see if it gets reverted + let tx = db.tx_begin().unwrap(); + + for private in private_addrbytes.iter() { + neighbor.addr.addrbytes = private.clone(); + neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()); + assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); + } + for public in public_addrbytes.iter() { + neighbor.addr.addrbytes = public.clone(); + neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()); + assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); + } + tx.execute("UPDATE frontier SET public = 1", params![]) + .unwrap(); + tx.commit().unwrap(); + + // make sure they're all listed as public (even if erroneously) + for private in private_addrbytes.iter() { + assert!(PeerDB::is_public( + &db.conn, + neighbor.addr.network_id, + private, + neighbor.addr.port + ) + .unwrap()); + } + for public in public_addrbytes.iter() { + assert!(PeerDB::is_public( + &db.conn, + neighbor.addr.network_id, + public, + neighbor.addr.port + ) + .unwrap()); + } + + let tx = db.tx_begin().unwrap(); + PeerDB::update_peerdb_public_addrs(&tx).unwrap(); + + // fixed + for private in private_addrbytes.iter() { + assert!( + !PeerDB::is_public(&tx, neighbor.addr.network_id, private, neighbor.addr.port) + .unwrap() + ); + } + for public in public_addrbytes.iter() { + assert!( + PeerDB::is_public(&tx, neighbor.addr.network_id, public, neighbor.addr.port) + .unwrap() + ); + } + + // now do the opposite + tx.execute("UPDATE frontier SET public = 0", params![]) + .unwrap(); + tx.commit().unwrap(); + + let tx = db.tx_begin().unwrap(); + PeerDB::update_peerdb_public_addrs(&tx).unwrap(); + + // fixed + for private in private_addrbytes.iter() { + assert!( + !PeerDB::is_public(&tx, neighbor.addr.network_id, private, neighbor.addr.port) + .unwrap() + ); + } + for public in public_addrbytes.iter() { + assert!( + PeerDB::is_public(&tx, neighbor.addr.network_id, public, neighbor.addr.port) + .unwrap() + ); + } + tx.commit().unwrap(); + } + + /// Verify that multiple peers with the same public key are coalesced by last-contact-time + #[test] + fn test_query_peers() { + let key = Secp256k1PrivateKey::new(); + + let path = "/tmp/test-query-peers.db".to_string(); + if fs::metadata(&path).is_ok() { + fs::remove_file(&path).unwrap(); + } + let mut db = PeerDB::connect( + &path, + true, + 0x80000000, + 0, + Some(key.clone()), + i64::MAX as u64, + PeerAddress::from_ipv4(127, 0, 0, 1), + 12345, + UrlString::try_from("http://foo.com").unwrap(), + &vec![], + None, + &[], + ) + .unwrap(); + + let mut neighbor = Neighbor { + addr: NeighborKey { + peer_version: 0x12345678, + network_id: 0x9abcdef0, + addrbytes: 
PeerAddress::from_ipv4(127, 0, 0, 1), + port: 12345, + }, + public_key: Secp256k1PublicKey::from_hex( + "02fa66b66f8971a8cd4d20ffded09674e030f0f33883f337f34b95ad4935bac0e3", + ) + .unwrap(), + expire_block: 23456, + last_contact_time: 1552509642, + allowed: -1, + denied: -1, + asn: 34567, + org: 45678, + in_degree: 1, + out_degree: 1, + }; + + let tx = db.tx_begin().unwrap(); + for i in 0..10 { + neighbor.addr.port = (i + 1024) as u16; + neighbor.last_contact_time = (i + 1552509642) as u64; + assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); + } + tx.commit().unwrap(); + + // only one peer returned, and it's the one with the highest last-contact time + let mut peers = PeerDB::query_peers( + &db.conn, + "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", + params![ + &neighbor.addr.network_id, + &to_bin(neighbor.addr.addrbytes.as_bytes()), + &neighbor.addr.port + ], + ) + .unwrap(); + assert_eq!(peers.len(), 1); + + let peer = peers.pop().unwrap(); + assert_eq!(peer.addr.port, 1033); + assert_eq!(peer.last_contact_time, 1552509651); + } } From 43d1ba9044ceb804e7a1be9efdccb54838dd8fc5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:23:09 -0400 Subject: [PATCH 566/910] feat: log all p2p conversations every `log_neighbors_freq` milliseconds --- stackslib/src/net/p2p.rs | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f0693c10a0f..3796a6c5f2d 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -348,6 +348,9 @@ pub struct PeerNetwork { pub walk_pingbacks: HashMap, // inbound peers for us to try to ping back and add to our frontier, mapped to (peer_version, network_id, timeout, pubkey) pub walk_result: NeighborWalkResult, // last successful neighbor walk result + /// last time we logged neighbors + last_neighbor_log: u128, + /// Epoch 2.x inventory state pub inv_state: Option, /// Epoch 3.x inventory state @@ -537,6 +540,8 @@ impl PeerNetwork { walk_pingbacks: HashMap::new(), walk_result: NeighborWalkResult::new(), + last_neighbor_log: 0, + inv_state: None, inv_state_nakamoto: None, pox_id: PoxId::initial(), @@ -5017,6 +5022,33 @@ impl PeerNetwork { false } + /// Log our neighbors. + /// Used for testing and debugging + fn log_neighbors(&mut self) { + if self.get_connection_opts().log_neighbors_freq == 0 { + return; + } + + let now = get_epoch_time_ms(); + if self.last_neighbor_log + u128::from(self.get_connection_opts().log_neighbors_freq) >= now + { + return; + } + + let convo_strs: Vec<_> = self + .peers + .values() + .map(|convo| format!("{:?}", &convo)) + .collect(); + + debug!( + "{:?}: current neighbors are {:?}", + self.get_local_peer(), + &convo_strs + ); + self.last_neighbor_log = now; + } + /// Top-level main-loop circuit to take.
/// -- polls the peer network and http network server sockets to get new sockets and detect ready sockets /// -- carries out network conversations @@ -5130,12 +5162,13 @@ impl PeerNetwork { p2p_poll_state, ); + self.log_neighbors(); debug!("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< End Network Dispatch <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"); Ok(network_result) } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::{thread, time}; From 32b57260649d1f5903322bff758f7727ce84f719 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:23:59 -0400 Subject: [PATCH 567/910] fix: when discovering a new neighbor, don't replace its inbound peer address with its outbound address if both addresses are private. Also, log more walk instantiation data --- stackslib/src/net/neighbors/walk.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index e1207941e06..d4f1cd089b2 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -359,8 +359,8 @@ impl NeighborWalk { // pick a random search index let mut idx = thread_rng().gen::() % event_ids.len(); - test_debug!( - "{:?}: try inbound neighbors -- sample out of {}. idx = {}", + debug!( + "{:?}: instantiate inbound walk: try inbound neighbors -- sample out of {}. idx = {}", network.get_local_peer(), network.get_num_p2p_convos(), idx @@ -410,6 +410,10 @@ impl NeighborWalk { } // no inbound peers + debug!( + "{:?}: no inbound peers to talk to", + network.get_local_peer() + ); return Err(net_error::NoSuchNeighbor); } @@ -426,13 +430,14 @@ impl NeighborWalk { network: &PeerNetwork, ) -> Result, net_error> { if network.get_walk_pingbacks().len() == 0 { + debug!("{:?}: no walk pingbacks", network.get_local_peer()); return Err(net_error::NoSuchNeighbor); } // random search let idx = thread_rng().gen::() % network.get_walk_pingbacks().len(); - test_debug!( + debug!( "{:?}: try pingback candidates -- sample out of {}. idx = {}", network.get_local_peer(), network.get_walk_pingbacks().len(), @@ -490,7 +495,7 @@ impl NeighborWalk { next_neighbor: Neighbor, next_neighbor_outbound: bool, ) -> NeighborWalkResult { - test_debug!( + debug!( "{:?}: Walk reset to {} neighbor {:?}", local_peer, if self.next_walk_outbound { @@ -686,9 +691,10 @@ impl NeighborWalk { // if the neighbor accidentally gave us a private IP address, then // just use the one we used to contact it. This can happen if the // node is behind a load-balancer, or is doing port-forwarding, - // etc. - if neighbor_from_handshake.addr.addrbytes.is_in_private_range() - || neighbor_from_handshake.addr.addrbytes.is_anynet() + // etc. But do nothing if both cur_neighbor and its reported address are private. 
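The guard above hinges on `is_in_private_range()`, stackslib's own test over `PeerAddress` bytes. For intuition, here is a rough `std::net` equivalent covering the ranges the schema-3 migration test earlier in this series exercises (10/8, 172.16/12, 192.168/16, loopback, IPv6 fc00::/7); this is a sketch, not the actual implementation:

```rust
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

fn is_in_private_range(addr: &IpAddr) -> bool {
    match addr {
        // 10/8, 172.16/12, 192.168/16, plus 127/8 loopback
        IpAddr::V4(v4) => v4.is_private() || v4.is_loopback(),
        // ::1 loopback, plus fc00::/7 unique-local addresses
        IpAddr::V6(v6) => v6.is_loopback() || (v6.octets()[0] & 0xfe) == 0xfc,
    }
}

fn main() {
    assert!(is_in_private_range(&IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1))));
    assert!(is_in_private_range(&IpAddr::V6(Ipv6Addr::LOCALHOST)));
    assert!(!is_in_private_range(&IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))));
}
```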
+ if (neighbor_from_handshake.addr.addrbytes.is_in_private_range() + || neighbor_from_handshake.addr.addrbytes.is_anynet()) + && !self.cur_neighbor.addr.addrbytes.is_in_private_range() { debug!( "{}: outbound neighbor gave private IP address {:?}; assuming it meant {:?}", From 88c9a5023150ddb578054b4ea654cd6061a9ccbb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:24:43 -0400 Subject: [PATCH 568/910] fix: don't forward stackerdb chunks that are known to be locally stale --- stackslib/src/net/relay.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 123f78f422c..4537ddff6f5 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2374,6 +2374,7 @@ impl Relayer { /// this far at all means that they were novel, and thus potentially novel to our neighbors). pub fn process_uploaded_stackerdb_chunks( &mut self, + rc_consensus_hash: &ConsensusHash, uploaded_chunks: Vec, event_observer: Option<&dyn StackerDBEventDispatcher>, ) { @@ -2381,12 +2382,25 @@ impl Relayer { let mut all_events: HashMap> = HashMap::new(); for chunk in uploaded_chunks.into_iter() { - debug!("Got uploaded StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), "slot_id" => chunk.chunk_data.slot_id, "slot_version" => chunk.chunk_data.slot_version); if let Some(events) = all_events.get_mut(&chunk.contract_id) { events.push(chunk.chunk_data.clone()); } else { all_events.insert(chunk.contract_id.clone(), vec![chunk.chunk_data.clone()]); } + + // forward if not stale + if chunk.rc_consensus_hash != *rc_consensus_hash { + debug!("Drop stale uploaded StackerDB chunk"; + "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), + "slot_id" => chunk.chunk_data.slot_id, + "slot_version" => chunk.chunk_data.slot_version, + "chunk.rc_consensus_hash" => %chunk.rc_consensus_hash, + "network.rc_consensus_hash" => %rc_consensus_hash); + continue; + } + + debug!("Got uploaded StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), "slot_id" => chunk.chunk_data.slot_id, "slot_version" => chunk.chunk_data.slot_version); + let msg = StacksMessageType::StackerDBPushChunk(chunk); if let Err(e) = self.p2p.broadcast_message(vec![], msg) { warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); @@ -2918,6 +2932,7 @@ impl Relayer { // push events for HTTP-uploaded stacker DB chunks self.process_uploaded_stackerdb_chunks( + &network_result.rc_consensus_hash, mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), ); From f9b94dc21404aff61f0157315b15927863fb1f52 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:25:26 -0400 Subject: [PATCH 569/910] chore: log the neighbor address which sent the chunk --- stackslib/src/net/stackerdb/mod.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index b022746d6ac..e971d9ebfc7 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -111,7 +111,7 @@ /// state periodically (whereas Gaia stores data for as long as the back-end storage provider's SLA /// indicates). -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub mod config; @@ -386,6 +386,8 @@ pub enum StackerDBSyncState { pub struct StackerDBSync { /// what state are we in? 
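The staleness check this patch adds to `process_uploaded_stackerdb_chunks` compares each uploaded chunk's reward-cycle consensus hash against the network's current one before re-broadcast; event observers still see every chunk, but stale ones are no longer forwarded. A minimal sketch of the filter, with stand-in types for the stackslib originals:

```rust
#[derive(Clone, Debug, PartialEq, Eq)]
struct ConsensusHash([u8; 20]); // stand-in for the real ConsensusHash

#[derive(Clone, Debug)]
struct PushChunk {
    rc_consensus_hash: ConsensusHash,
    slot_id: u32,
    slot_version: u32,
}

/// Keep only chunks written against our current reward-cycle view.
fn forwardable_chunks(chunks: Vec<PushChunk>, local: &ConsensusHash) -> Vec<PushChunk> {
    chunks
        .into_iter()
        .filter(|c| c.rc_consensus_hash == *local)
        .collect()
}

fn main() {
    let local = ConsensusHash([1u8; 20]);
    let stale = ConsensusHash([2u8; 20]);
    let chunks = vec![
        PushChunk { rc_consensus_hash: local.clone(), slot_id: 0, slot_version: 1 },
        PushChunk { rc_consensus_hash: stale, slot_id: 1, slot_version: 1 },
    ];
    assert_eq!(forwardable_chunks(chunks, &local).len(), 1);
}
```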
state: StackerDBSyncState, + /// What was the rc consensus hash at the start of sync? + pub rc_consensus_hash: Option, /// which contract this is a replica for pub smart_contract_id: QualifiedContractIdentifier, /// number of chunks in this DB @@ -507,6 +509,7 @@ impl PeerNetwork { /// Runs in response to a received StackerDBGetChunksInv or a StackerDBPushChunk pub fn make_StackerDBChunksInv_or_Nack( &self, + naddr: NeighborAddress, chainstate: &mut StacksChainState, contract_id: &QualifiedContractIdentifier, rc_consensus_hash: &ConsensusHash, @@ -537,10 +540,10 @@ impl PeerNetwork { &tip_block_id, &rc_consensus_hash, ) { - debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (remote is stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk from {} since {} != {} (remote is stale)", self.get_local_peer(), &naddr, &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); } else { - debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (local is potentially stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk from {} since {} != {} (local is potentially stale)", self.get_local_peer(), &naddr, &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::FutureView)); } } @@ -655,7 +658,19 @@ impl PeerNetwork { chunk_data: &StackerDBPushChunkData, send_reply: bool, ) -> Result<(bool, bool), net_error> { + let Some(naddr) = self + .get_p2p_convo(event_id) + .map(|convo| convo.to_neighbor_address()) + else { + debug!( + "Drop unsolicited StackerDBPushChunk: event ID {} is not connected", + event_id + ); + return Ok((false, false)); + }; + let mut payload = self.make_StackerDBChunksInv_or_Nack( + naddr, chainstate, &chunk_data.contract_id, &chunk_data.rc_consensus_hash, From 39265dd1ee634b9764bc89760a9da9f516d5e2b8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:26:01 -0400 Subject: [PATCH 570/910] fix: check local rc_consensus_hash against rc_consensus_hash of a scheduled message we're about to send, and abort stackerdb sync if they differ (indicates that the p2p network advanced its stackerdb state, and this sync is acting on stale data). 
Also, log the local peer _and_ contract ID in debug messages --- stackslib/src/net/stackerdb/sync.rs | 221 ++++++++++++++++++++-------- 1 file changed, 158 insertions(+), 63 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index fa94c5be557..467bc608e15 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -50,6 +50,7 @@ impl StackerDBSync { ) -> StackerDBSync { let mut dbsync = StackerDBSync { state: StackerDBSyncState::ConnectBegin, + rc_consensus_hash: None, smart_contract_id: smart_contract, num_slots: config.num_slots() as usize, write_freq: config.write_freq, @@ -90,6 +91,9 @@ impl StackerDBSync { let mut found = HashSet::new(); let mut min_age = get_epoch_time_secs().saturating_sub(network.get_connection_opts().max_neighbor_age); + + let local_naddr = network.get_local_peer().to_neighbor_addr(); + while found.len() < self.max_neighbors { let peers_iter = PeerDB::find_stacker_db_replicas( network.peerdb_conn(), @@ -109,6 +113,10 @@ impl StackerDBSync { if naddr.addrbytes.is_anynet() { return false; } + if naddr.public_key_hash == local_naddr.public_key_hash { + // don't talk to us by another address + return false; + } if !network.get_connection_opts().private_neighbors && naddr.addrbytes.is_in_private_range() { @@ -169,7 +177,10 @@ impl StackerDBSync { network: Option<&PeerNetwork>, config: &StackerDBConfig, ) -> StackerDBSyncResult { - debug!("Reset {} with config {:?}", &self.smart_contract_id, config); + debug!( + "{}: Reset with config {:?}", + &self.smart_contract_id, config + ); let mut chunks = vec![]; let downloaded_chunks = mem::replace(&mut self.downloaded_chunks, HashMap::new()); for (_, mut data) in downloaded_chunks.into_iter() { @@ -220,6 +231,7 @@ impl StackerDBSync { self.num_connections = 0; self.num_attempted_connections = 0; self.rounds += 1; + self.rc_consensus_hash = None; result } @@ -258,7 +270,7 @@ impl StackerDBSync { .get_slot_write_timestamps(&self.smart_contract_id)?; if local_slot_versions.len() != local_write_timestamps.len() { - let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}) for {}; abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len(), &self.smart_contract_id); + let msg = format!("{}: Local slot versions ({}) out of sync with DB slot versions ({}); abandoning sync and trying again", &self.smart_contract_id, local_slot_versions.len(), local_write_timestamps.len()); warn!("{}", &msg); return Err(net_error::Transient(msg)); } @@ -270,10 +282,11 @@ impl StackerDBSync { // who has data we need? for (i, local_version) in local_slot_versions.iter().enumerate() { let write_ts = local_write_timestamps[i]; - if write_ts + self.write_freq > now { + if self.write_freq > 0 && write_ts + self.write_freq > now { debug!( - "{:?}: Chunk {} was written too frequently ({} + {} >= {}) in {}, so will not fetch chunk", + "{:?}: {}: Chunk {} was written too frequently ({} + {} > {}) in {}, so will not fetch chunk", network.get_local_peer(), + &self.smart_contract_id, i, write_ts, self.write_freq, @@ -346,10 +359,10 @@ impl StackerDBSync { schedule.reverse(); debug!( - "{:?}: Will request up to {} chunks for {}. Schedule: {:?}", + "{:?}: {}: Will request up to {} chunks. 
Schedule: {:?}", network.get_local_peer(), - &schedule.len(), &self.smart_contract_id, + &schedule.len(), &schedule ); Ok(schedule) @@ -415,7 +428,7 @@ impl StackerDBSync { }; debug!( - "{:?}: Can push chunk StackerDBChunk(db={},id={},ver={}) to {}. Replicate? {}", + "{:?}: {}: Can push chunk StackerDBChunk(id={},ver={}) to {}. Replicate? {}", &network.get_local_peer(), &self.smart_contract_id, our_chunk.chunk_data.slot_id, @@ -448,10 +461,10 @@ impl StackerDBSync { schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len())); debug!( - "{:?}: Will push up to {} chunks for {}", + "{:?}: {}: Will push up to {} chunks", network.get_local_peer(), - &schedule.len(), &self.smart_contract_id, + &schedule.len(), ); Ok(schedule) } @@ -524,13 +537,13 @@ impl StackerDBSync { if *old_version < new_inv.slot_versions[old_slot_id] { // remote peer indicated that it has a newer version of this chunk. debug!( - "{:?}: peer {:?} has a newer version of slot {} ({} < {}) in {}", + "{:?}: {}: peer {:?} has a newer version of slot {} ({} < {})", _network.get_local_peer(), + &self.smart_contract_id, &naddr, old_slot_id, old_version, new_inv.slot_versions[old_slot_id], - &self.smart_contract_id, ); resync = true; break; @@ -614,11 +627,12 @@ impl StackerDBSync { } for (naddr, chunks_req) in to_send.into_iter() { - debug!("{:?}: send_getchunksinv_to_inbound_neighbors: Send StackerDBGetChunkInv to inbound {:?}", network.get_local_peer(), &naddr); + debug!("{:?}: {}: send_getchunksinv_to_inbound_neighbors: Send StackerDBGetChunkInv at {} to inbound {:?}", network.get_local_peer(), &self.smart_contract_id, &network.get_chain_view().rc_consensus_hash, &naddr); if let Err(_e) = self.comms.neighbor_send(network, &naddr, chunks_req) { info!( - "{:?}: Failed to send StackerDBGetChunkInv to inbound {:?}: {:?}", + "{:?}: {}: Failed to send StackerDBGetChunkInv to inbound {:?}: {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &_e ); @@ -639,10 +653,12 @@ impl StackerDBSync { self.replicas = replicas; } debug!( - "{:?}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", + "{:?}: {}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", network.get_local_peer(), + &self.smart_contract_id, self.replicas.len(), - network.get_num_p2p_convos() + network.get_num_p2p_convos(); + "replicas" => ?self.replicas ); if self.replicas.len() == 0 { // nothing to do @@ -653,8 +669,9 @@ impl StackerDBSync { for naddr in naddrs.into_iter() { if self.comms.is_neighbor_connecting(network, &naddr) { debug!( - "{:?}: connect_begin: already connecting to StackerDB peer {:?}", + "{:?}: {}: connect_begin: already connecting to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.replicas.insert(naddr); @@ -662,8 +679,9 @@ impl StackerDBSync { } if self.comms.has_neighbor_session(network, &naddr) { debug!( - "{:?}: connect_begin: already connected to StackerDB peer {:?}", + "{:?}: {}: connect_begin: already connected to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.connected_replicas.insert(naddr); @@ -671,16 +689,18 @@ impl StackerDBSync { } debug!( - "{:?}: connect_begin: Send Handshake to StackerDB peer {:?}", + "{:?}: {}: connect_begin: Send Handshake to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); match self.comms.neighbor_session_begin(network, &naddr) { Ok(true) => { // connected! 
debug!( - "{:?}: connect_begin: connected to StackerDB peer {:?}", + "{:?}: {}: connect_begin: connected to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.num_attempted_connections += 1; @@ -692,7 +712,13 @@ impl StackerDBSync { self.num_attempted_connections += 1; } Err(_e) => { - debug!("Failed to begin session with {:?}: {:?}", &naddr, &_e); + debug!( + "{:?}: {}: Failed to begin session with {:?}: {:?}", + &network.get_local_peer(), + &self.smart_contract_id, + &naddr, + &_e + ); } } } @@ -710,8 +736,9 @@ impl StackerDBSync { if network.get_chain_view().rc_consensus_hash != db_data.rc_consensus_hash { // stale or inconsistent view. Do not proceed debug!( - "{:?}: remote peer {:?} has stale view ({} != {})", + "{:?}: {}: remote peer {:?} has stale view ({} != {})", network.get_local_peer(), + &self.smart_contract_id, &naddr, &network.get_chain_view().rc_consensus_hash, &db_data.rc_consensus_hash @@ -723,8 +750,9 @@ impl StackerDBSync { } StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBHandshake with code {}", + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBHandshake with code {}", &network.get_local_peer(), + &self.smart_contract_id, &naddr, data.error_code ); @@ -737,7 +765,12 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + &network.get_local_peer(), + &self.smart_contract_id, + &x + ); continue; } }; @@ -749,7 +782,7 @@ impl StackerDBSync { .is_none() { debug!( - "{:?}: remote peer does not replicate {}", + "{:?}: {}: remote peer does not replicate", network.get_local_peer(), &self.smart_contract_id ); @@ -760,8 +793,9 @@ impl StackerDBSync { } debug!( - "{:?}: connect_try_finish: Received StackerDBHandshakeAccept from {:?} for {:?}", + "{:?}: {}: connect_try_finish: Received StackerDBHandshakeAccept from {:?} for {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &data ); @@ -778,7 +812,8 @@ impl StackerDBSync { if self.connected_replicas.len() == 0 { // no one to talk to debug!( - "{:?}: connect_try_finish: no valid replicas", + "{:?}: {}: connect_try_finish: no valid replicas", + &self.smart_contract_id, network.get_local_peer() ); return Err(net_error::PeerNotConnected); @@ -796,21 +831,26 @@ impl StackerDBSync { let naddrs = mem::replace(&mut self.connected_replicas, HashSet::new()); let mut already_sent = vec![]; debug!( - "{:?}: getchunksinv_begin: Send StackerDBGetChunksInv to {} replicas", + "{:?}: {}: getchunksinv_begin: Send StackerDBGetChunksInv to {} replicas", network.get_local_peer(), - naddrs.len() + &self.smart_contract_id, + naddrs.len(); + "connected_replicas" => ?naddrs, ); for naddr in naddrs.into_iter() { debug!( - "{:?}: getchunksinv_begin: Send StackerDBGetChunksInv to {:?}", + "{:?}: {}: getchunksinv_begin: Send StackerDBGetChunksInv at {} to {:?}", network.get_local_peer(), - &naddr + &self.smart_contract_id, + &network.get_chain_view().rc_consensus_hash, + &naddr, ); let chunks_req = self.make_getchunkinv(&network.get_chain_view().rc_consensus_hash); if let Err(e) = self.comms.neighbor_send(network, &naddr, chunks_req) { debug!( - "{:?}: failed to send StackerDBGetChunkInv to {:?}: {:?}", + "{:?}: {}: failed to send StackerDBGetChunkInv to {:?}: {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &e ); @@ -833,7 +873,7 @@ impl StackerDBSync { let chunk_inv_opt = match message.payload { StacksMessageType::StackerDBChunkInv(data) => { if 
data.slot_versions.len() != self.num_slots { - info!("{:?}: Received malformed StackerDBChunkInv for {} from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, data.slot_versions.len()); + info!("{:?}: {}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, data.slot_versions.len()); None } else { Some(data) @@ -841,10 +881,10 @@ impl StackerDBSync { } StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us (on {}) with code {}", - &network.get_local_peer(), - &naddr, + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBGetChunksInv with code {}", + network.get_local_peer(), &self.smart_contract_id, + &naddr, data.error_code ); self.connected_replicas.remove(&naddr); @@ -856,14 +896,20 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &x + ); self.connected_replicas.remove(&naddr); continue; } }; debug!( - "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}", + "{:?}: {}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &chunk_inv_opt ); @@ -893,15 +939,22 @@ impl StackerDBSync { pub fn getchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.chunk_fetch_priorities.len() == 0 { // done + debug!( + "{:?}: {}: getchunks_begin: no chunks prioritized", + network.get_local_peer(), + &self.smart_contract_id + ); return Ok(true); } let mut cur_priority = self.next_chunk_fetch_priority % self.chunk_fetch_priorities.len(); debug!( - "{:?}: getchunks_begin: Issue up to {} StackerDBGetChunk requests", + "{:?}: {}: getchunks_begin: Issue up to {} StackerDBGetChunk requests", &network.get_local_peer(), - self.request_capacity + &self.smart_contract_id, + self.request_capacity; + "chunk_fetch_priorities" => ?self.chunk_fetch_priorities, ); let mut requested = 0; @@ -926,11 +979,12 @@ impl StackerDBSync { }; debug!( - "{:?}: getchunks_begin: Send StackerDBGetChunk(db={},id={},ver={}) to {}", + "{:?}: {}: getchunks_begin: Send StackerDBGetChunk(id={},ver={}) at {} to {}", &network.get_local_peer(), &self.smart_contract_id, chunk_request.slot_id, chunk_request.slot_version, + &chunk_request.rc_consensus_hash, &selected_neighbor ); @@ -940,10 +994,10 @@ impl StackerDBSync { StacksMessageType::StackerDBGetChunk(chunk_request.clone()), ) { info!( - "{:?}: Failed to request chunk {} of {} from {:?}: {:?}", + "{:?}: {} Failed to request chunk {} from {:?}: {:?}", network.get_local_peer(), - chunk_request.slot_id, &self.smart_contract_id, + chunk_request.slot_id, &selected_neighbor, &e ); @@ -981,10 +1035,10 @@ impl StackerDBSync { StacksMessageType::StackerDBChunk(data) => data, StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk (on {}) with code {}", + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBGetChunk with code {}", network.get_local_peer(), - &naddr, &self.smart_contract_id, + &naddr, data.error_code ); if data.error_code == NackErrorCodes::StaleView @@ -998,7 +1052,12 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &x + ); 
self.connected_replicas.remove(&naddr); continue; } @@ -1007,8 +1066,11 @@ impl StackerDBSync { // validate if !self.validate_downloaded_chunk(network, config, &data)? { info!( - "Remote neighbor {:?} served an invalid chunk for ID {}", - &naddr, data.slot_id + "{:?}: {}: Remote neighbor {:?} served an invalid chunk for ID {}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + data.slot_id ); self.connected_replicas.remove(&naddr); continue; @@ -1016,8 +1078,9 @@ impl StackerDBSync { // update bookkeeping debug!( - "{:?}: getchunks_try_finish: Received StackerDBChunk from {:?}", + "{:?}: {}, getchunks_try_finish: Received StackerDBChunk from {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.add_downloaded_chunk(naddr, data); @@ -1038,15 +1101,22 @@ impl StackerDBSync { } if self.chunk_push_priorities.len() == 0 { // done + debug!( + "{:?}:{}: pushchunks_begin: no chunks prioritized", + network.get_local_peer(), + &self.smart_contract_id + ); return Ok(true); } let mut cur_priority = self.next_chunk_push_priority % self.chunk_push_priorities.len(); debug!( - "{:?}: pushchunks_begin: Send up to {} StackerDBChunk pushes", + "{:?}: {}: pushchunks_begin: Send up to {} StackerDBChunk pushes", &network.get_local_peer(), - self.chunk_push_priorities.len() + &self.smart_contract_id, + self.chunk_push_priorities.len(); + "chunk_push_priorities" => ?self.chunk_push_priorities ); // fill up our comms with $capacity requests @@ -1063,7 +1133,7 @@ impl StackerDBSync { .map(|neighbor| (0, neighbor)); let Some((idx, selected_neighbor)) = selected_neighbor_opt else { - debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to", + debug!("{:?}: {}: pushchunks_begin: no available neighbor to send StackerDBChunk(id={},ver={}) to", &network.get_local_peer(), &self.smart_contract_id, chunk_push.chunk_data.slot_id, @@ -1073,11 +1143,12 @@ impl StackerDBSync { }; debug!( - "{:?}: pushchunks_begin: Send StackerDBChunk(db={},id={},ver={}) to {}", + "{:?}: {}: pushchunks_begin: Send StackerDBChunk(id={},ver={}) at {} to {}", &network.get_local_peer(), &self.smart_contract_id, chunk_push.chunk_data.slot_id, chunk_push.chunk_data.slot_version, + &chunk_push.rc_consensus_hash, &selected_neighbor ); @@ -1089,10 +1160,10 @@ impl StackerDBSync { StacksMessageType::StackerDBPushChunk(chunk_push), ) { info!( - "{:?}: Failed to send chunk {} of {} from {:?}: {:?}", + "{:?}: {}: Failed to send chunk {} from {:?}: {:?}", network.get_local_peer(), - slot_id, &self.smart_contract_id, + slot_id, &selected_neighbor, &e ); @@ -1130,8 +1201,9 @@ impl StackerDBSync { StacksMessageType::StackerDBChunkInv(data) => data, StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBChunk with code {}", + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBChunk with code {}", network.get_local_peer(), + &self.smart_contract_id, &naddr, data.error_code ); @@ -1143,21 +1215,27 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &x + ); continue; } }; // must be well-formed if new_chunk_inv.slot_versions.len() != self.num_slots { - info!("{:?}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &naddr, self.num_slots, new_chunk_inv.slot_versions.len()); + info!("{:?}: {}: Received malformed StackerDBChunkInv from {:?}: expected {} 
chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, new_chunk_inv.slot_versions.len()); continue; } // update bookkeeping debug!( - "{:?}: pushchunks_try_finish: Received StackerDBChunkInv from {:?}", + "{:?}: {}: pushchunks_try_finish: Received StackerDBChunkInv from {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); @@ -1169,7 +1247,7 @@ impl StackerDBSync { let inflight = self.comms.count_inflight(); debug!( - "{:?}: inflight messages for {}: {:?}", + "{:?}: {}: inflight messages: {:?}", network.get_local_peer(), &self.smart_contract_id, inflight @@ -1220,10 +1298,26 @@ impl StackerDBSync { return Ok(None); } + // make sure we have an up-to-date chain view. + // If not, then abort and immediately retry the sync (since any queued messages we have are + // likely gonna fail) + if let Some(rc_consensus_hash) = self.rc_consensus_hash.as_ref() { + if network.get_chain_view().rc_consensus_hash != *rc_consensus_hash { + debug!("{:?}: {}: Resetting and restarting running StackerDB sync due to chain view change", network.get_local_peer(), &self.smart_contract_id); + let result = self.reset(Some(network), config); + self.state = StackerDBSyncState::ConnectBegin; + self.rc_consensus_hash = Some(network.get_chain_view().rc_consensus_hash.clone()); + self.wakeup(); + return Ok(Some(result)); + } + } else { + self.rc_consensus_hash = Some(network.get_chain_view().rc_consensus_hash.clone()); + } + // throttle to write_freq if self.last_run_ts + config.write_freq.max(1) > get_epoch_time_secs() { debug!( - "{:?}: stacker DB sync for {} is throttled until {}", + "{:?}: {}: stacker DB sync is throttled until {}", network.get_local_peer(), &self.smart_contract_id, self.last_run_ts + config.write_freq @@ -1233,11 +1327,12 @@ impl StackerDBSync { loop { debug!( - "{:?}: stacker DB sync state for {} is {:?}", + "{:?}: {}: stacker DB sync state is {:?}", network.get_local_peer(), &self.smart_contract_id, &self.state ); + let mut blocked = true; match self.state { StackerDBSyncState::ConnectBegin => { @@ -1297,7 +1392,7 @@ impl StackerDBSync { // someone pushed newer chunk data to us, and getting chunks is // enabled, so immediately go request them debug!( - "{:?}: immediately retry StackerDB GetChunks on {} due to PushChunk NACK", + "{:?}: {}: immediately retry StackerDB GetChunks due to PushChunk NACK", network.get_local_peer(), &self.smart_contract_id ); @@ -1319,7 +1414,7 @@ impl StackerDBSync { if stale_inv { debug!( - "{:?}: immediately retry StackerDB sync on {} due to stale inventory", + "{:?}: {}: immediately retry StackerDB sync due to stale inventory", network.get_local_peer(), &self.smart_contract_id ); From b389d5e59755e200239103c74230776f9ef5b87f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:27:21 -0400 Subject: [PATCH 571/910] fix/refactor: make it so the small-scale neighbor tests will bind to a kernel-chosen port (avoids clobbering), and move topology tests to integration test CI --- stackslib/src/net/tests/neighbors.rs | 1344 ++------------------------ 1 file changed, 102 insertions(+), 1242 deletions(-) diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1937cb89bb..03b1224312d 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -35,19 +35,16 @@ use crate::util_lib::test::*; const TEST_IN_OUT_DEGREES: u64 = 0x1; #[test] -#[ignore] fn test_step_walk_1_neighbor_plain() { with_timeout(600, || { - let mut peer_1_config = 
TestPeerConfig::from_port(31890); - let peer_2_config = TestPeerConfig::from_port(31892); - - // peer 1 crawls peer 2, but not vice versa - // (so only peer 1 will learn its public IP) - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -142,7 +139,7 @@ fn test_step_walk_1_neighbor_plain() { PeerAddress::from_socketaddr( &format!("127.0.0.1:1").parse::<SocketAddr>().unwrap() ), - 31890 + peer_1.config.server_port, ) ); assert!(peer_1.network.public_ip_learned); @@ -156,21 +153,20 @@ } #[test] -#[ignore] fn test_step_walk_1_neighbor_plain_no_natpunch() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31980); - let mut peer_2_config = TestPeerConfig::from_port(31982); + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); // simulate peer 2 not knowing how to handle a natpunch request peer_2_config.connection_opts.disable_natpunch = true; - // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -265,14 +261,10 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_denied() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31994); - let mut peer_2_config = TestPeerConfig::from_port(31996); - - // peer 1 crawls peer 2, but peer 1 has denied peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.walk_retry_count = 10; peer_2_config.connection_opts.walk_retry_count = 10; @@ -282,6 +274,8 @@ fn test_step_walk_1_neighbor_denied() { let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, but peer 1 has denied peer 2 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); { let mut tx = peer_1.network.peerdb.tx_begin().unwrap(); PeerDB::add_deny_cidr(&mut tx, &PeerAddress::from_ipv4(127, 0, 0, 1), 128).unwrap(); @@ -344,11 +338,10 @@ fn test_step_walk_1_neighbor_denied() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_bad_epoch() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31998); - let mut peer_2_config = TestPeerConfig::from_port(31990); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.walk_retry_count = 10; peer_2_config.connection_opts.walk_retry_count = 10; @@ -375,14 +368,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { network_epoch: PEER_VERSION_EPOCH_2_05, }]); - // peers know about each other, but peer 2 never talks to peer 1 since it believes that - // it's in a wholly different epoch - 
peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peers know about each other, but peer 2 never talks to peer 1 since it believes that + // it's in a wholly different epoch + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -441,21 +434,20 @@ } #[test] -#[ignore] fn test_step_walk_1_neighbor_heartbeat_ping() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32992); - let mut peer_2_config = TestPeerConfig::from_port(32994); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.heartbeat = 10; peer_2_config.connection_opts.heartbeat = 10; - // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -547,19 +539,18 @@ } #[test] -#[ignore] fn test_step_walk_1_neighbor_bootstrapping() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32100); - let peer_2_config = TestPeerConfig::from_port(32102); - - // peer 1 crawls peer 2, but peer 1 doesn't add peer 2 to its frontier because peer 2 is - // too far behind. - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, but peer 1 doesn't add peer 2 to its frontier because peer 2 is + // too far behind. 
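
Every conversion in this patch swaps a hard-coded `TestPeerConfig::from_port(...)` for `TestPeerConfig::new(function_name!(), 0, 0)`, where the zero ports ask the kernel to choose. The underlying trick is plain `std::net`; a minimal, self-contained sketch (names and the loopback address are illustrative only, not the test harness's internals):

```rust
use std::net::TcpListener;

fn main() {
    // Binding to port 0 asks the kernel for any free port; reading
    // local_addr() back recovers the port that was actually assigned.
    let l1 = TcpListener::bind("127.0.0.1:0").expect("bind failed");
    let l2 = TcpListener::bind("127.0.0.1:0").expect("bind failed");
    let (p1, p2) = (
        l1.local_addr().unwrap().port(),
        l2.local_addr().unwrap().port(),
    );
    // Two live sockets can never share a port, so concurrently running
    // tests cannot clobber each other the way fixed port numbers did.
    assert_ne!(p1, p2);
    println!("peer 1 bound to {p1}, peer 2 bound to {p2}");
}
```

Because both listeners are alive at the same time, the kernel can never hand out the same port twice, which is exactly what removes the clobbering between concurrently running tests.
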
+ peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + // advance peer 1 for i in 0..MAX_NEIGHBOR_BLOCK_DELAY + 1 { peer_1.add_empty_burnchain_block(); @@ -623,22 +614,21 @@ fn test_step_walk_1_neighbor_bootstrapping() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_behind() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32200); - let mut peer_2_config = TestPeerConfig::from_port(32202); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.disable_natpunch = true; peer_2_config.connection_opts.disable_natpunch = true; - // peer 1 crawls peer 2, and peer 1 adds peer 2 to its frontier even though peer 2 does - // not, because peer 2 is too far ahead - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 1 adds peer 2 to its frontier even though peer 2 does + // not, because peer 2 is too far ahead + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + // advance peer 2 for i in 0..MAX_NEIGHBOR_BLOCK_DELAY + 1 { peer_2.add_empty_burnchain_block(); @@ -743,14 +733,13 @@ fn test_step_walk_1_neighbor_behind() { } #[test] -#[ignore] fn test_step_walk_10_neighbors_of_neighbor_plain() { with_timeout(600, || { // peer 1 has peer 2 as its neighbor. // peer 2 has 10 other neighbors. // Goal: peer 1 learns about the 10 other neighbors. - let mut peer_1_config = TestPeerConfig::from_port(32300); - let mut peer_2_config = TestPeerConfig::from_port(32302); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.disable_inv_sync = true; peer_1_config.connection_opts.disable_block_download = true; @@ -758,25 +747,25 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { peer_2_config.connection_opts.disable_inv_sync = true; peer_2_config.connection_opts.disable_block_download = true; + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_2_neighbors = vec![]; for i in 0..10 { - let mut n = TestPeerConfig::from_port(2 * i + 4 + 32300); + let mut n = TestPeerConfig::new(function_name!(), 0, 0); // turn off features we don't use n.connection_opts.disable_inv_sync = true; n.connection_opts.disable_block_download = true; - peer_2_config.add_neighbor(&n.to_neighbor()); - let p = TestPeer::new(n); + + peer_2.add_neighbor(&mut p.to_neighbor(), None, false); peer_2_neighbors.push(p); } // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); // next, make peer 1 discover peer 2's neighbors and peer 2's in/out degree. 
// Do two full walks @@ -840,6 +829,7 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { Some(p) => { assert_eq!(p.public_key, n.public_key); assert_eq!(p.expire_block, n.expire_block); + test_debug!("confirmed handshake with peer {:?}", &n.addr); num_handshakes += 1; } } @@ -863,6 +853,7 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { test_debug!("no peer 2"); } Some(p2) => { + test_debug!("p2 degrees = {}/{}", p2.in_degree, p2.out_degree); if p2.out_degree >= 11 && p2.in_degree >= 1 { assert_eq!(p2.out_degree, 11); did_connect = true; @@ -888,14 +879,13 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { } #[test] -#[ignore] fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { with_timeout(600, || { // peer 1 has peer 2 as its neighbor. // peer 2 has 10 other neighbors, 5 of which are too far behind peer 1. // Goal: peer 1 learns about the 5 fresher neighbors. - let mut peer_1_config = TestPeerConfig::from_port(32400); - let mut peer_2_config = TestPeerConfig::from_port(32402); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.disable_inv_sync = true; peer_1_config.connection_opts.disable_block_download = true; @@ -903,25 +893,24 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { peer_2_config.connection_opts.disable_inv_sync = true; peer_2_config.connection_opts.disable_block_download = true; + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_2_neighbors = vec![]; for i in 0..10 { - let mut n = TestPeerConfig::from_port(2 * i + 4 + 32400); + let mut n = TestPeerConfig::new(function_name!(), 0, 0); // turn off features we don't use n.connection_opts.disable_inv_sync = true; n.connection_opts.disable_block_download = true; - peer_2_config.add_neighbor(&n.to_neighbor()); - let p = TestPeer::new(n); + peer_2.add_neighbor(&mut p.to_neighbor(), None, true); peer_2_neighbors.push(p); } // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); // advance peer 1 and peer 2, and some of peer 2's neighbors for i in 0..MAX_NEIGHBOR_BLOCK_DELAY + 1 { @@ -1069,8 +1058,8 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { #[test] fn test_step_walk_2_neighbors_plain() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32500); - let mut peer_2_config = TestPeerConfig::from_port(32502); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1079,13 +1068,13 @@ fn test_step_walk_2_neighbors_plain() { peer_1_config.connection_opts.walk_max_duration = 10; peer_2_config.connection_opts.walk_max_duration = 10; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -1201,8 
+1190,8 @@ fn test_step_walk_2_neighbors_plain() { #[test] fn test_step_walk_2_neighbors_state_timeout() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32504); - let mut peer_2_config = TestPeerConfig::from_port(32506); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1214,13 +1203,13 @@ fn test_step_walk_2_neighbors_state_timeout() { peer_1_config.connection_opts.walk_state_timeout = 1; peer_2_config.connection_opts.walk_state_timeout = 1; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + for _i in 0..10 { let _ = peer_1.step(); let _ = peer_2.step(); @@ -1246,8 +1235,8 @@ fn test_step_walk_2_neighbors_state_timeout() { #[test] fn test_step_walk_2_neighbors_walk_timeout() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32508); - let mut peer_2_config = TestPeerConfig::from_port(32510); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1262,13 +1251,13 @@ fn test_step_walk_2_neighbors_walk_timeout() { peer_1_config.connection_opts.walk_reset_interval = 10; peer_2_config.connection_opts.walk_reset_interval = 10; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_step_count = 0; let mut walk_2_step_count = 0; @@ -1317,12 +1306,11 @@ fn test_step_walk_2_neighbors_walk_timeout() { } #[test] -#[ignore] fn test_step_walk_3_neighbors_inbound() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32520); - let mut peer_2_config = TestPeerConfig::from_port(32522); - let mut peer_3_config = TestPeerConfig::from_port(32524); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_3_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1340,16 +1328,20 @@ fn test_step_walk_3_neighbors_inbound() { peer_2_config.connection_opts.disable_block_download = true; peer_3_config.connection_opts.disable_block_download = true; - // Peer 2 and peer 3 are public nodes that don't know about each other, but peer 1 lists - // both of them as outbound neighbors. Goal is for peer 2 to learn about peer 3, and vice - // versa, by crawling peer 1 through an inbound neighbor walk. 
- peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_1_config.add_neighbor(&peer_3_config.to_neighbor()); + peer_1_config.connection_opts.log_neighbors_freq = 1; + peer_2_config.connection_opts.log_neighbors_freq = 1; + peer_3_config.connection_opts.log_neighbors_freq = 1; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); let mut peer_3 = TestPeer::new(peer_3_config); + // Peer 2 and peer 3 are public nodes that don't know about each other, but peer 1 lists + // both of them as outbound neighbors. Goal is for peer 2 to learn about peer 3, and vice + // versa, by crawling peer 1 through an inbound neighbor walk. + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_1.add_neighbor(&mut peer_3.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -1512,11 +1504,10 @@ fn test_step_walk_3_neighbors_inbound() { } #[test] -#[ignore] fn test_step_walk_2_neighbors_rekey() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32600); - let mut peer_2_config = TestPeerConfig::from_port(32602); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1537,13 +1528,13 @@ fn test_step_walk_2_neighbors_rekey() { peer_1_config.connection_opts.private_key_lifetime = 5; peer_2_config.connection_opts.private_key_lifetime = 5; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let initial_public_key_1 = peer_1.get_public_key(); let initial_public_key_2 = peer_2.get_public_key(); @@ -1630,30 +1621,19 @@ fn test_step_walk_2_neighbors_rekey() { #[test] fn test_step_walk_2_neighbors_different_networks() { with_timeout(600, || { - // peer 1 and 2 try to handshake but never succeed since they have different network IDs - let mut peer_1_config = TestPeerConfig::from_port(32700); - let mut peer_2_config = TestPeerConfig::from_port(32702); - - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - - // peer 2 thinks peer 1 has the same network ID that it does - println!("1 ~~~ {}", peer_1_config.network_id); - println!("2 ~~~ {}", peer_2_config.network_id); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.network_id = peer_1_config.network_id + 1; - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - peer_1_config.network_id = peer_1_config.network_id - 1; - - // different network IDs - peer_2_config.network_id = peer_1_config.network_id + 1; - - println!("3 ~~~ {}", peer_1_config.network_id); - println!("4 ~~~ {}", peer_2_config.network_id); + peer_1_config.network_id = peer_2_config.network_id + 1; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); - println!("5 ~~~"); + + let mut peer_1_neighbor = peer_1.to_neighbor(); + peer_1_neighbor.addr.network_id = peer_2.config.network_id; + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, 
true); + peer_2.add_neighbor(&mut peer_1_neighbor, None, true); let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -1703,1125 +1683,5 @@ fn test_step_walk_2_neighbors_different_networks() { .network .get_neighbor_stats(&peer_1.to_neighbor().addr); assert!(stats_2.is_none()); - - let neighbor_1 = peer_1.to_neighbor(); - let neighbor_2 = peer_2.to_neighbor(); - - // peer 1 was NOT added to the peer DB of peer 2 - assert!(PeerDB::get_peer( - peer_1.network.peerdb.conn(), - neighbor_2.addr.network_id, - &neighbor_2.addr.addrbytes, - neighbor_2.addr.port - ) - .unwrap() - .is_none()); - - // peer 2 was NOT added to the peer DB of peer 1 - assert!(PeerDB::get_peer( - peer_2.network.peerdb.conn(), - neighbor_1.addr.network_id, - &neighbor_1.addr.addrbytes, - neighbor_1.addr.port - ) - .unwrap() - .is_none()); - }) -} - -fn stacker_db_id(i: usize) -> QualifiedContractIdentifier { - QualifiedContractIdentifier::new( - StandardPrincipalData(0x01, [i as u8; 20]), - format!("db-{}", i).as_str().into(), - ) -} - -fn make_stacker_db_ids(i: usize) -> Vec { - let mut dbs = vec![]; - for j in 0..i { - dbs.push(stacker_db_id(j)); - } - dbs -} - -fn setup_peer_config( - i: usize, - port_base: u16, - neighbor_count: usize, - peer_count: usize, -) -> TestPeerConfig { - let mut conf = TestPeerConfig::from_port(port_base + (2 * i as u16)); - conf.connection_opts.num_neighbors = neighbor_count as u64; - conf.connection_opts.soft_num_neighbors = neighbor_count as u64; - - conf.connection_opts.num_clients = 256; - conf.connection_opts.soft_num_clients = 128; - - conf.connection_opts.max_http_clients = 1000; - conf.connection_opts.max_neighbors_of_neighbor = neighbor_count as u64; - - conf.connection_opts.max_clients_per_host = MAX_NEIGHBORS_DATA_LEN as u64; - conf.connection_opts.soft_max_clients_per_host = peer_count as u64; - - conf.connection_opts.max_neighbors_per_host = MAX_NEIGHBORS_DATA_LEN as u64; - conf.connection_opts.soft_max_neighbors_per_host = (neighbor_count / 2) as u64; - conf.connection_opts.soft_max_neighbors_per_org = (neighbor_count / 2) as u64; - - conf.connection_opts.walk_interval = 0; - - conf.connection_opts.disable_inv_sync = true; - conf.connection_opts.disable_block_download = true; - - let j = i as u32; - conf.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer - - // even-number peers support stacker DBs. 
- // odd-number peers do not - if i % 2 == 0 { - conf.services = (ServiceFlags::RELAY as u16) - | (ServiceFlags::RPC as u16) - | (ServiceFlags::STACKERDB as u16); - conf.stacker_dbs = make_stacker_db_ids(i); - } else { - conf.services = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); - conf.stacker_dbs = vec![]; - } - - conf -} - -#[test] -#[ignore] -fn test_walk_ring_allow_15() { - with_timeout(600, || { - // all initial peers are allowed - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 32800, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = -1; // always allowed - conf.denied = 0; - - conf.connection_opts.timeout = 100000; - conf.connection_opts.handshake_timeout = 100000; - conf.connection_opts.disable_natpunch = true; // breaks allow checks - - peer_configs.push(conf); - } - - test_walk_ring(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_ring_15_plain() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 32900, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - - peer_configs.push(conf); - } - - test_walk_ring(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_ring_15_pingback() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 32950, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = true; - conf.connection_opts.disable_inbound_walks = false; - - peer_configs.push(conf); - } - - test_walk_ring_pingback(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_ring_15_org_biased() { - with_timeout(600, || { - // one outlier peer has a different org than the others. - use std::env; - - // ::33000 is in AS 1 - env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33000", "1"); - - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33000, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - if i == 0 { - conf.asn = 1; - conf.org = 1; - } else { - conf.asn = 0; - conf.org = 0; - } - - peer_configs.push(conf); - } - - // all peers see peer ::33000 as having ASN and Org ID 1 - let peer_0 = peer_configs[0].to_neighbor(); - - let peers = test_walk_ring(&mut peer_configs, NEIGHBOR_COUNT); - - for i in 1..PEER_COUNT { - match PeerDB::get_peer( - peers[i].network.peerdb.conn(), - peer_0.addr.network_id, - &peer_0.addr.addrbytes, - peer_0.addr.port, - ) - .unwrap() - { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} - } - } - - // no peer pruned peer ::33000 - for i in 1..PEER_COUNT { - match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { - None => {} - Some(count) => { - assert_eq!(*count, 0); - } - } - } - }) -} - -fn test_walk_ring_ex( - peer_configs: &mut Vec, - neighbor_count: usize, - test_pingback: bool, -) -> Vec { - // arrange neighbors into a "ring" topology, where - // neighbor N is connected to neighbor (N-1)%NUM_NEIGHBORS and (N+1)%NUM_NEIGHBORS. 
- // If test_pingback is true, then neighbor N is only connected to (N+1)%NUM_NEIGHBORS - let mut peers = vec![]; - - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - - for i in 0..PEER_COUNT { - let n = (i + 1) % PEER_COUNT; - let neighbor = peer_configs[n].to_neighbor(); - peer_configs[i].add_neighbor(&neighbor); - } - - if !test_pingback { - for i in 1..PEER_COUNT + 1 { - let p = i - 1; - let neighbor = peer_configs[p].to_neighbor(); - peer_configs[i % PEER_COUNT].add_neighbor(&neighbor); - } - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test(&mut peers, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -fn test_walk_ring(peer_configs: &mut Vec, neighbor_count: usize) -> Vec { - test_walk_ring_ex(peer_configs, neighbor_count, false) -} - -fn test_walk_ring_pingback( - peer_configs: &mut Vec, - neighbor_count: usize, -) -> Vec { - test_walk_ring_ex(peer_configs, neighbor_count, true) -} - -#[test] -#[ignore] -fn test_walk_line_allowed_15() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33100, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = -1; - conf.denied = 0; - - conf.connection_opts.timeout = 100000; - conf.connection_opts.handshake_timeout = 100000; - conf.connection_opts.disable_natpunch = true; // breaks allow checks - - peer_configs.push(conf); - } - - test_walk_line(&mut peer_configs, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - }) -} - -#[test] -#[ignore] -fn test_walk_line_15_plain() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33200, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - - peer_configs.push(conf); - } - - test_walk_line(&mut peer_configs, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - }) -} - -#[test] -#[ignore] -fn test_walk_line_15_org_biased() { - with_timeout(600, || { - // one outlier peer has a different org than the others. 
- use std::env; - - // ::33300 is in AS 1 - env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33300", "1"); - - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; // make this a little bigger to speed this test up - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33300, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - if i == 0 { - conf.asn = 1; - conf.org = 1; - } else { - conf.asn = 0; - conf.org = 0; - } - - peer_configs.push(conf); - } - // all peers see peer ::33300 as having ASN and Org ID 1 - let peer_0 = peer_configs[0].to_neighbor(); - - let peers = test_walk_line(&mut peer_configs, NEIGHBOR_COUNT, 0); - - for i in 1..PEER_COUNT { - match PeerDB::get_peer( - peers[i].network.peerdb.conn(), - peer_0.addr.network_id, - &peer_0.addr.addrbytes, - peer_0.addr.port, - ) - .unwrap() - { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} - } - } - - // no peer pruned peer ::33300 - for i in 1..PEER_COUNT { - match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { - None => {} - Some(count) => { - assert_eq!(*count, 0); - } - } - } - }) -} - -#[test] -#[ignore] -fn test_walk_line_15_pingback() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33350, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = false; - conf.connection_opts.disable_inbound_walks = true; - - peer_configs.push(conf); - } - - test_walk_line_pingback(&mut peer_configs, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - }) -} - -fn test_walk_line( - peer_configs: &mut Vec, - neighbor_count: usize, - tests: u64, -) -> Vec { - test_walk_line_ex(peer_configs, neighbor_count, tests, false) -} - -fn test_walk_line_pingback( - peer_configs: &mut Vec, - neighbor_count: usize, - tests: u64, -) -> Vec { - test_walk_line_ex(peer_configs, neighbor_count, tests, true) -} - -fn test_walk_line_ex( - peer_configs: &mut Vec, - neighbor_count: usize, - tests: u64, - pingback_test: bool, -) -> Vec { - // arrange neighbors into a "line" topology. - // If pingback_test is true, then the topology is unidirectional: - // - // 0 ---> 1 ---> 2 ---> ... ---> NEIGHBOR_COUNT - // - // If pingback_test is false, then the topology is bidirectional - // - // 0 <--> 1 <--> 2 <--> ... 
<--> NEIGHBOR_COUNT - // - // all initial peers are allowed - let mut peers = vec![]; - - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - for i in 0..PEER_COUNT - 1 { - let n = i + 1; - let neighbor = peer_configs[n].to_neighbor(); - peer_configs[i].add_neighbor(&neighbor); - } - - if !pingback_test { - for i in 1..PEER_COUNT { - let p = i - 1; - let neighbor = peer_configs[p].to_neighbor(); - peer_configs[i].add_neighbor(&neighbor); - } - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test(&mut peers, NEIGHBOR_COUNT, tests); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -#[test] -#[ignore] -fn test_walk_star_allowed_15() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33400, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = -1; // always allowed - conf.denied = 0; - - conf.connection_opts.timeout = 100000; - conf.connection_opts.handshake_timeout = 100000; - conf.connection_opts.disable_natpunch = true; // breaks allow checks - - peer_configs.push(conf); - } - - test_walk_star(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_star_15_plain() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33500, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - - peer_configs.push(conf); - } - - test_walk_star(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_star_15_pingback() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33550, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = false; - conf.connection_opts.disable_inbound_walks = true; - conf.connection_opts.soft_max_neighbors_per_org = PEER_COUNT as u64; - - peer_configs.push(conf); - } - - test_walk_star_pingback(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_star_15_org_biased() { - with_timeout(600, || { - // one outlier peer has a different org than the others. 
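
The ring and line builders above, and the star builder below, differ only in the adjacency rule used to seed each peer's initial frontier. A compact distillation over peer indices (a sketch; the real helpers clone `TestPeerConfig`s and call `add_neighbor`, and `unidirectional` mirrors the pingback variants):

```rust
/// For each peer i, return the indices seeded into its initial frontier.
fn ring(n: usize, unidirectional: bool) -> Vec<Vec<usize>> {
    let mut f = vec![Vec::new(); n];
    for i in 0..n {
        f[i].push((i + 1) % n); // successor
        if !unidirectional {
            f[i].push((i + n - 1) % n); // predecessor
        }
    }
    f
}

fn line(n: usize, unidirectional: bool) -> Vec<Vec<usize>> {
    let mut f = vec![Vec::new(); n];
    for i in 0..n.saturating_sub(1) {
        f[i].push(i + 1);
        if !unidirectional {
            f[i + 1].push(i);
        }
    }
    f
}

fn star(n: usize, unidirectional: bool) -> Vec<Vec<usize>> {
    let mut f = vec![Vec::new(); n];
    for i in 1..n {
        f[i].push(0); // every spoke knows hub 0
        if !unidirectional {
            f[0].push(i); // and the hub knows the spoke back
        }
    }
    f
}

fn main() {
    assert_eq!(ring(3, true), [vec![1], vec![2], vec![0]]);
    assert_eq!(line(3, true), [vec![1], vec![2], vec![]]);
    assert!(star(15, true)[0].is_empty()); // pingback star: the hub starts blind
}
```
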
- use std::env; - - // ::33600 is in AS 1 - env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33600", "1"); - - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33600, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - if i == 0 { - conf.asn = 1; - conf.org = 1; - } else { - conf.asn = 0; - conf.org = 0; - } - - peer_configs.push(conf); - } - // all peers see peer ::33600 as having ASN and Org ID 1 - let peer_0 = peer_configs[0].to_neighbor(); - - let peers = test_walk_star(&mut peer_configs, NEIGHBOR_COUNT); - - for i in 1..PEER_COUNT { - match PeerDB::get_peer( - peers[i].network.peerdb.conn(), - peer_0.addr.network_id, - &peer_0.addr.addrbytes, - peer_0.addr.port, - ) - .unwrap() - { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} - } - } - - // no peer pruned peer ::33600 - for i in 1..PEER_COUNT { - match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { - None => {} - Some(count) => { - assert_eq!(*count, 0); - } - } - } - }) -} - -fn test_walk_star(peer_configs: &mut Vec, neighbor_count: usize) -> Vec { - test_walk_star_ex(peer_configs, neighbor_count, false) -} - -fn test_walk_star_pingback( - peer_configs: &mut Vec, - neighbor_count: usize, -) -> Vec { - test_walk_star_ex(peer_configs, neighbor_count, true) -} - -fn test_walk_star_ex( - peer_configs: &mut Vec, - neighbor_count: usize, - pingback_test: bool, -) -> Vec { - // arrange neighbors into a "star" topology. - // If pingback_test is true, then initial connections are unidirectional -- each neighbor (except - // for 0) only knows about 0. Neighbor 0 knows about no one. - // If pingback_test is false, then initial connections are bidirectional. - - let mut peers = vec![]; - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - - for i in 1..PEER_COUNT { - let neighbor = peer_configs[i].to_neighbor(); - let hub = peer_configs[0].to_neighbor(); - if !pingback_test { - peer_configs[0].add_neighbor(&neighbor); - } - - peer_configs[i].add_neighbor(&hub); - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test(&mut peers, NEIGHBOR_COUNT, 0); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -fn test_walk_inbound_line( - peer_configs: &mut Vec, - neighbor_count: usize, -) -> Vec { - // arrange neighbors into a two-tiered "line" topology, where even-numbered neighbors are - // "NAT'ed" but connected to both the predecessor and successor odd neighbors. Odd - // numbered neighbors are not connected to anyone. The first and last even-numbered - // neighbor is only connected to its successor and predecessor, respectively. - // - // 1 3 5 - // ^ ^ ^ ^ ^ ^ - // / \ / \ / \ ... etc ... 
- // 0 2 4 6 - // - // The goal of this test is that odd-numbered neighbors all learn about each other - - let mut peers = vec![]; - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - - for i in 0..PEER_COUNT { - if i % 2 == 0 { - if i > 0 { - let predecessor = peer_configs[i - 1].to_neighbor(); - peer_configs[i].add_neighbor(&predecessor); - } - if i + 1 < PEER_COUNT { - let successor = peer_configs[i + 1].to_neighbor(); - peer_configs[i].add_neighbor(&successor); - } - } - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test_ex( - &mut peers, - NEIGHBOR_COUNT, - 0, - |peers: &Vec| { - let mut done = true; - for i in 0..PEER_COUNT { - // only check "public" peers - if i % 2 != 0 { - let all_neighbors = - PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - if (all_neighbors.len() as u64) < ((PEER_COUNT / 2 - 1) as u64) { - let nk = peers[i].config.to_neighbor().addr; - test_debug!( - "waiting for public peer {:?} to fill up its frontier: {}", - &nk, - all_neighbors.len() - ); - done = false; - } - } - } - done - }, - true, - ); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -#[test] -#[ignore] -fn test_walk_inbound_line_15() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 15; // make this test go faster - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33250, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = true; - conf.connection_opts.disable_inbound_walks = false; - conf.connection_opts.walk_inbound_ratio = 2; - // basically, don't timeout (so public nodes can ask non-public inbound nodes about - // neighbors indefinitely) - conf.connection_opts.connect_timeout = 60000; - conf.connection_opts.timeout = 60000; - conf.connection_opts.handshake_timeout = 60000; - conf.connection_opts.soft_max_neighbors_per_org = (NEIGHBOR_COUNT + 1) as u64; - conf.connection_opts.soft_max_neighbors_per_host = (NEIGHBOR_COUNT + 1) as u64; - - peer_configs.push(conf); - } - - test_walk_inbound_line(&mut peer_configs, NEIGHBOR_COUNT); }) } - -fn dump_peers(peers: &Vec) -> () { - test_debug!("\n=== PEER DUMP ==="); - for i in 0..peers.len() { - let mut neighbor_index = vec![]; - let mut outbound_neighbor_index = vec![]; - for j in 0..peers.len() { - let stats_opt = peers[i] - .network - .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } - } - None => {} - } - } - - let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - let num_allowed = all_neighbors.iter().fold(0, |mut sum, ref n2| { - sum += if n2.allowed < 0 { 1 } else { 0 }; - sum - }); - test_debug!("Neighbor {} (all={}, outbound={}) (total neighbors = {}, total allowed = {}): outbound={:?} all={:?}", i, neighbor_index.len(), outbound_neighbor_index.len(), all_neighbors.len(), num_allowed, &outbound_neighbor_index, &neighbor_index); - } - test_debug!("\n"); -} - -fn dump_peer_histograms(peers: &Vec) -> () { - let mut outbound_hist: 
HashMap<usize, usize> = HashMap::new(); - let mut inbound_hist: HashMap<usize, usize> = HashMap::new(); - let mut all_hist: HashMap<usize, usize> = HashMap::new(); - for i in 0..peers.len() { - let mut neighbor_index = vec![]; - let mut inbound_neighbor_index = vec![]; - let mut outbound_neighbor_index = vec![]; - for j in 0..peers.len() { - let stats_opt = peers[i] - .network - .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } - } - None => {} - } - } - for inbound in inbound_neighbor_index.iter() { - if inbound_hist.contains_key(inbound) { - let c = inbound_hist.get(inbound).unwrap().to_owned(); - inbound_hist.insert(*inbound, c + 1); - } else { - inbound_hist.insert(*inbound, 1); - } - } - for outbound in outbound_neighbor_index.iter() { - if outbound_hist.contains_key(outbound) { - let c = outbound_hist.get(outbound).unwrap().to_owned(); - outbound_hist.insert(*outbound, c + 1); - } else { - outbound_hist.insert(*outbound, 1); - } - } - for n in neighbor_index.iter() { - if all_hist.contains_key(n) { - let c = all_hist.get(n).unwrap().to_owned(); - all_hist.insert(*n, c + 1); - } else { - all_hist.insert(*n, 1); - } - } - } - - test_debug!("=== PEER HISTOGRAM ==="); - for i in 0..peers.len() { - test_debug!( - "Neighbor {}: #in={} #out={} #all={}", - i, - inbound_hist.get(&i).unwrap_or(&0), - outbound_hist.get(&i).unwrap_or(&0), - all_hist.get(&i).unwrap_or(&0) - ); - } - test_debug!("\n"); -} - -fn run_topology_test(peers: &mut Vec<TestPeer>, neighbor_count: usize, test_bits: u64) -> () { - run_topology_test_ex(peers, neighbor_count, test_bits, |_| false, false) -} - -fn run_topology_test_ex<F>( - peers: &mut Vec<TestPeer>, - neighbor_count: usize, - test_bits: u64, - mut finished_check: F, - use_finished_check: bool, -) -> () -where - F: FnMut(&Vec<TestPeer>) -> bool, -{ - let PEER_COUNT = peers.len(); - - let mut initial_allowed: HashMap<NeighborKey, Vec<NeighborKey>> = HashMap::new(); - let mut initial_denied: HashMap<NeighborKey, Vec<NeighborKey>> = HashMap::new(); - - for i in 0..PEER_COUNT { - // turn off components we don't need - peers[i].config.connection_opts.disable_inv_sync = true; - peers[i].config.connection_opts.disable_block_download = true; - let nk = peers[i].config.to_neighbor().addr.clone(); - for j in 0..peers[i].config.initial_neighbors.len() { - let initial = &peers[i].config.initial_neighbors[j]; - if initial.allowed < 0 { - if !initial_allowed.contains_key(&nk) { - initial_allowed.insert(nk.clone(), vec![]); - } - initial_allowed - .get_mut(&nk) - .unwrap() - .push(initial.addr.clone()); - } - if initial.denied < 0 { - if !initial_denied.contains_key(&nk) { - initial_denied.insert(nk.clone(), vec![]); - } - initial_denied - .get_mut(&nk) - .unwrap() - .push(initial.addr.clone()); - } - } - } - - for i in 0..PEER_COUNT { - peers[i].connect_initial().unwrap(); - } - - // go until each neighbor knows about each other neighbor - let mut finished = false; - let mut count = 0; - while !finished { - finished = true; - let mut peer_counts = 0; - let mut random_order = vec![0usize; PEER_COUNT]; - for i in 0..PEER_COUNT { - random_order[i] = i; - } - let mut rng = thread_rng(); - random_order.shuffle(&mut rng); - - debug!("Random order = {:?}", &random_order); - for i in random_order.into_iter() { - let _ = peers[i].step_with_ibd(false); - let nk = peers[i].config.to_neighbor().addr; - debug!("Step peer {:?}", &nk); - - // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(ref peer_list) => { - for pnk in 
peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } - } - } - None => {} - }; - - // denied peers are never connected - match initial_denied.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { - error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } - } - } - None => {} - }; - - // all ports are unique in the p2p socket table - let mut ports: HashSet<u16> = HashSet::new(); - for k in peers[i].network.events.keys() { - if ports.contains(&k.port) { - error!("duplicate port {} from {:?}", k.port, k); - assert!(false); - } - ports.insert(k.port); - } - - // done? - let now_finished = if use_finished_check { - finished_check(&peers) - } else { - let mut done = true; - let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - peer_counts += all_neighbors.len(); - test_debug!("Peer {} ({}) has {} neighbors", i, &nk, all_neighbors.len()); - - if (all_neighbors.len() as u64) < ((PEER_COUNT - 1) as u64) { - test_debug!( - "waiting for {:?} to fill up its frontier: {} < {}", - &nk, - all_neighbors.len(), - PEER_COUNT - 1 - ); - done = false; - } else { - test_debug!( - "not waiting for {:?} to fill up its frontier: {} >= {}", - &nk, - all_neighbors.len(), - PEER_COUNT - 1 - ); - } - done - }; - - finished = finished && now_finished; - } - - count += 1; - - test_debug!( - "Network convergence rate: {}%", - (100.0 * (peer_counts as f64)) / ((PEER_COUNT * PEER_COUNT) as f64), - ); - - if finished { - break; - } - - test_debug!("Finished walking the network {} times", count); - dump_peers(&peers); - dump_peer_histograms(&peers); - } - - test_debug!("Converged after {} calls to network.run()", count); - dump_peers(&peers); - dump_peer_histograms(&peers); - - // each peer learns each other peer's stacker DBs - for (i, peer) in peers.iter().enumerate() { - if i % 2 != 0 { - continue; - } - let mut expected_dbs = PeerDB::get_local_peer(peer.network.peerdb.conn()) - .unwrap() - .stacker_dbs; - expected_dbs.sort(); - for (j, other_peer) in peers.iter().enumerate() { - if i == j { - continue; - } - - let all_neighbors = PeerDB::get_all_peers(other_peer.network.peerdb.conn()).unwrap(); - - if (all_neighbors.len() as u64) < ((PEER_COUNT - 1) as u64) { - // this is a simulated-NAT'ed node -- it won't learn about other NAT'ed nodes' - // DBs - continue; - } - - // what does the other peer see as this peer's stacker DBs? 
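
The stacker DB assertion that follows pairs with `make_stacker_db_ids` above: peer i, if even-numbered, advertises DBs `db-0` through `db-(i-1)`, and every well-connected peer should converge on exactly that list. A toy restatement of the expected advertisement (hypothetical function, same naming scheme as the removed helpers):

```rust
/// Toy version of the invariant checked below: what DBs peer `i` advertises.
fn dbs_advertised(peer_index: usize) -> Vec<String> {
    if peer_index % 2 == 0 {
        (0..peer_index).map(|j| format!("db-{j}")).collect()
    } else {
        vec![] // odd-numbered peers do not run stacker DBs
    }
}

fn main() {
    assert_eq!(dbs_advertised(4), vec!["db-0", "db-1", "db-2", "db-3"]);
    assert!(dbs_advertised(5).is_empty());
}
```
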
- let mut other_peer_dbs = other_peer - .network - .peerdb - .get_peer_stacker_dbs(&peer.config.to_neighbor()) - .unwrap(); - other_peer_dbs.sort(); - - if j % 2 == 0 { - test_debug!( - "Compare stacker DBs of {} vs {}", - &peer.config.to_neighbor(), - &other_peer.config.to_neighbor() - ); - assert_eq!(expected_dbs, other_peer_dbs); - } else { - // this peer doesn't support Stacker DBs - assert_eq!(other_peer_dbs, vec![]); - } - } - } -} From 6f37fa6de5da01eccce1a3d7a90d8f77ed89cc9b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:28:05 -0400 Subject: [PATCH 572/910] fix: sort_unstable_by() for sorting peers by health, since our comparison function is not a total order (oops) --- stackslib/src/net/prune.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index d7991a67c31..87b16d7bbab 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -82,7 +82,7 @@ impl PeerNetwork { }; } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { test_debug!( "==== ORG NEIGHBOR DISTRIBUTION OF {:?} ===", @@ -199,9 +199,11 @@ impl PeerNetwork { match org_neighbors.get_mut(&org) { None => {} Some(ref mut neighbor_infos) => { - neighbor_infos.sort_by(|&(ref _nk1, ref stats1), &(ref _nk2, ref stats2)| { - PeerNetwork::compare_neighbor_uptime_health(stats1, stats2) - }); + neighbor_infos.sort_unstable_by( + |&(ref _nk1, ref stats1), &(ref _nk2, ref stats2)| { + PeerNetwork::compare_neighbor_uptime_health(stats1, stats2) + }, + ); } } } @@ -374,7 +376,7 @@ impl PeerNetwork { } /// Dump our peer table - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn dump_peer_table(&mut self) -> (Vec, Vec) { let mut inbound: Vec = vec![]; let mut outbound: Vec = vec![]; @@ -445,7 +447,7 @@ impl PeerNetwork { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { if pruned_by_ip.len() > 0 || pruned_by_org.len() > 0 { let (mut inbound, mut outbound) = self.dump_peer_table(); From 50a967bc8d73637c2ceaabed3e4138d14a5f4c2e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:28:38 -0400 Subject: [PATCH 573/910] chore: add pathological reward cycles to downloader tests where the only sortitions in the reward cycle are to confirm the anchor block --- stackslib/src/net/tests/download/nakamoto.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 31f88b50f8c..44bbaed7d29 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2066,6 +2066,19 @@ fn test_make_tenure_downloaders() { fn test_nakamoto_download_run_2_peers() { let observer = TestEventObserver::new(); let bitvecs = vec![ + // a reward cycle with one prepare phase sortition at the start + vec![ + true, true, true, true, true, true, true, false, false, false, + ], + // a reward cycle with one prepare phase sortition at the end, + // and no tenures in the first three reward phase sortitions + vec![ + false, false, false, true, true, false, false, true, true, false, + ], + // full reward cycle, minus the first three tenures + vec![ + false, false, false, true, true, true, true, true, true, true, + ], // full reward cycle vec![true, true, true, true, true, true, true, true, true, true], // alternating reward cycle, but with a full prepare phase From d442f2ce09b26c489fd97e52fd677891521a452e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 
2024 22:29:12 -0400 Subject: [PATCH 574/910] build: build tests with `feature = "testing"`, and disable unused warnings for tests --- stackslib/src/net/api/getneighbors.rs | 4 +++- stackslib/src/net/asn.rs | 2 +- stackslib/src/net/atlas/db.rs | 6 +++--- stackslib/src/net/codec.rs | 2 +- stackslib/src/net/dns.rs | 2 +- stackslib/src/net/download/epoch2x.rs | 12 ++++++------ stackslib/src/net/httpcore.rs | 22 +++++++++++----------- stackslib/src/net/inv/epoch2x.rs | 9 +++------ stackslib/src/net/mod.rs | 19 ++++++++++++------- stackslib/src/net/poll.rs | 2 +- stackslib/src/net/stackerdb/db.rs | 2 +- stackslib/src/net/stackerdb/tests/mod.rs | 1 + stackslib/src/net/stackerdb/tests/sync.rs | 1 - stackslib/src/net/tests/download/mod.rs | 1 + stackslib/src/net/tests/inv/mod.rs | 1 + stackslib/src/net/tests/mempool/mod.rs | 1 + stackslib/src/net/tests/mod.rs | 1 + stackslib/src/net/tests/relay/mod.rs | 1 + 18 files changed, 49 insertions(+), 40 deletions(-) diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 6707ed3ba16..9e7d0402daf 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -19,6 +19,7 @@ use std::io::{Read, Write}; use clarity::vm::types::QualifiedContractIdentifier; use regex::{Captures, Regex}; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; use crate::net::db::PeerDB; @@ -145,10 +146,11 @@ impl RPCNeighborsInfo { peerdb_conn, network_id, network_epoch, - max_neighbor_age, + get_epoch_time_secs().saturating_sub(max_neighbor_age), MAX_NEIGHBORS_DATA_LEN, burnchain_view.burn_block_height, false, + true, ) .map_err(NetError::DBError)?; diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index f38c6c54d4d..bb31146c819 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -222,7 +222,7 @@ impl ASEntry4 { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::io; use std::io::BufRead; diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index d6bdbb301eb..37ed22a26b1 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -374,7 +374,7 @@ impl AtlasDB { } // Open an atlas database in memory (used for testing) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn connect_memory(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; let mut db = AtlasDB { @@ -387,7 +387,7 @@ impl AtlasDB { Ok(db) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] /// Only ever to be used in testing, open and instantiate a V1 atlasdb pub fn connect_memory_db_v1(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory()?; @@ -432,7 +432,7 @@ impl AtlasDB { Ok(db) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] /// Only ever to be used in testing, connect to db, but using existing sqlconn pub fn connect_with_sqlconn( atlas_config: AtlasConfig, diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index bd8154e414b..4cb4099fb49 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -1600,7 +1600,7 @@ impl ProtocolFamily for StacksP2P { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { use stacks_common::bitvec::BitVec; use stacks_common::codec::NEIGHBOR_ADDRESS_ENCODED_SIZE; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 
aedb73bd626..c63d1b4fedf 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -355,7 +355,7 @@ impl DNSClient { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::collections::HashMap; use std::error::Error; diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index c57d9d19bc8..5c926c41923 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -56,22 +56,22 @@ use crate::net::{ }; use crate::util_lib::db::{DBConn, Error as db_error}; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 180; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 0; /// If a URL never connects, don't use it again for this many seconds -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 300; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 60; /// If we created a request to download a block or microblock, don't do so again until this many /// seconds have passed. -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const BLOCK_REREQUEST_INTERVAL: u64 = 60; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const BLOCK_REREQUEST_INTERVAL: u64 = 30; /// This module is responsible for downloading blocks and microblocks from other peers, using block diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 804add6f331..fc296b9f2bf 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -547,14 +547,14 @@ impl StacksHttpRequest { (self.preamble, self.contents) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; Ok(ret) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_response_handler_index(&self) -> Option { self.response_handler_index } @@ -676,7 +676,7 @@ impl StacksHttpResponse { self.preamble.headers.clear(); } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; @@ -700,7 +700,7 @@ pub enum StacksHttpPreamble { } impl StacksHttpPreamble { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn expect_request(self) -> HttpRequestPreamble { match self { Self::Request(x) => x, @@ -708,7 +708,7 @@ impl StacksHttpPreamble { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn expect_response(self) -> HttpResponsePreamble { match self { Self::Response(x) => x, @@ -1004,7 +1004,7 @@ impl StacksHttp { } /// Force the state machine to expect a response - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { let handler_index = self .find_response_handler(request_verb, request_path) @@ -1016,7 +1016,7 @@ impl StacksHttp { } /// Try to parse an inbound HTTP request using a given handler, preamble, and body - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn handle_try_parse_request( &self, handler: &mut dyn RPCRequestHandler, @@ -1202,7 +1202,7 @@ impl StacksHttp { Ok((response_preamble, response_contents)) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn num_pending(&self) -> usize { self.reply.as_ref().map(|_| 1).unwrap_or(0) } @@ -1346,10 +1346,10 @@ impl StacksHttp { } /// Given a fully-formed 
single HTTP response, parse it (used by clients). - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn parse_response( - verb: &str, - request_path: &str, + _verb: &str, + _request_path: &str, response_buf: &[u8], ) -> Result { let mut http = StacksHttp::new( diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index fc5f073b2e9..7068db7accc 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -47,9 +47,9 @@ use crate::net::{ use crate::util_lib::db::{DBConn, Error as db_error}; /// This module is responsible for synchronizing block inventories with other peers -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const INV_SYNC_INTERVAL: u64 = 150; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const INV_SYNC_INTERVAL: u64 = 3; pub const INV_REWARD_CYCLES: u64 = 2; @@ -1143,7 +1143,7 @@ impl InvState { self.block_stats.get_mut(nk) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn add_peer(&mut self, nk: NeighborKey, is_bootstrap_peer: bool) -> () { self.block_stats.insert( nk.clone(), @@ -2848,6 +2848,3 @@ impl PeerNetwork { work_state } } - -#[cfg(test)] -mod test {} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 7f8dea93291..8b36377a251 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -143,7 +143,7 @@ pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; #[derive(Debug)] @@ -571,7 +571,7 @@ impl From for Error { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] impl PartialEq for Error { /// (make I/O errors comparable for testing purposes) fn eq(&self, other: &Self) -> bool { @@ -1293,9 +1293,9 @@ pub const MAX_BROADCAST_INBOUND_RECEIVERS: usize = 16; pub const BLOCKS_AVAILABLE_MAX_LEN: u32 = 32; // maximum number of PoX reward cycles we can ask about -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const GETPOXINV_MAX_BITLEN: u64 = 4096; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const GETPOXINV_MAX_BITLEN: u64 = 8; // maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). 
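The hunks above and below all apply one pattern: items that used to be compiled only under `cfg(test)` are now also compiled when stackslib is built with its `testing` feature. The distinction matters because `cfg(test)` is active only while compiling a crate's own unit tests; when stackslib is pulled in as a dependency (e.g. by stacks-node), those items would otherwise vanish. A minimal sketch of the pattern, using a hypothetical `EXAMPLE_INTERVAL` constant rather than any real stackslib item:

```rust
// Production value: compiled unless this is a unit-test build or the
// crate was built with `--features testing`.
#[cfg(not(any(test, feature = "testing")))]
pub const EXAMPLE_INTERVAL: u64 = 180;

// Shortened test value: compiled for `cargo test` and for dependent
// crates that enable the `testing` feature, so both kinds of tests
// see the same timing behavior.
#[cfg(any(test, feature = "testing"))]
pub const EXAMPLE_INTERVAL: u64 = 0;
```

Because the two `cfg` predicates are exact complements, exactly one definition exists in every build configuration.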
@@ -1455,9 +1455,9 @@ pub const MAX_MICROBLOCKS_UNCONFIRMED: usize = 1024; pub const MAX_HEADERS: usize = 2100; // how long a peer will be denied for if it misbehaves -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const DENY_BAN_DURATION: u64 = 30; // seconds -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; @@ -1719,8 +1719,9 @@ pub trait Requestable: std::fmt::Display { fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest; } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::collections::HashMap; use std::io::{Cursor, ErrorKind, Read, Write}; use std::net::*; @@ -3919,6 +3920,10 @@ pub mod test { self.network.peerdb.conn() } + pub fn peerdb_mut(&mut self) -> &mut PeerDB { + &mut self.network.peerdb + } + pub fn get_burnchain_view(&mut self) -> Result { let sortdb = self.sortdb.take().unwrap(); let view_res = { diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index bdda12e6d42..ed24bc1168f 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -481,7 +481,7 @@ impl NetworkState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::collections::HashSet; diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 2b735668ac4..c06e495514e 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -515,7 +515,7 @@ impl StackerDBs { Self::instantiate(path, readwrite) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn connect_memory() -> StackerDBs { Self::instantiate(":memory:", true).unwrap() } diff --git a/stackslib/src/net/stackerdb/tests/mod.rs b/stackslib/src/net/stackerdb/tests/mod.rs index 0838342100d..17c73daa045 100644 --- a/stackslib/src/net/stackerdb/tests/mod.rs +++ b/stackslib/src/net/stackerdb/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod config; pub mod db; diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index f45e3acb93e..b16b10291ff 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -51,7 +51,6 @@ const NUM_NEIGHBORS: usize = 8; /// Some testable configurations for stacker DB configs impl StackerDBConfig { - #[cfg(test)] pub fn template() -> StackerDBConfig { StackerDBConfig { chunk_size: CHUNK_SIZE, diff --git a/stackslib/src/net/tests/download/mod.rs b/stackslib/src/net/tests/download/mod.rs index 430b92e4144..5b191a1161c 100644 --- a/stackslib/src/net/tests/download/mod.rs +++ b/stackslib/src/net/tests/download/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/inv/mod.rs b/stackslib/src/net/tests/inv/mod.rs index 04e8e0fd4fd..04386e20970 100644 --- a/stackslib/src/net/tests/inv/mod.rs +++ b/stackslib/src/net/tests/inv/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 7a44a56788d..602f943cb30 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::cell::RefCell; use std::{thread, time}; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index d8ee197f420..47cec3b0b4f 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod download; pub mod httpcore; diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs index c408e9ee60f..d75bae21e8f 100644 --- a/stackslib/src/net/tests/relay/mod.rs +++ b/stackslib/src/net/tests/relay/mod.rs @@ -12,6 +12,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod epoch2x; pub mod nakamoto; From f20061ff0e592db91da899a81bfa8fc2b46b6214 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:30:14 -0400 Subject: [PATCH 575/910] chore: add dev-dependencies that will allow test modules to compile for stackslib when it's a dependency (e.g. so stackslib test code can be used in stacks-node tests) --- stackslib/Cargo.toml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index b7967fe2491..909e2375021 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -59,6 +59,10 @@ siphasher = "0.3.7" wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } +rstest = { version = "0.17.0", optional = true } +rstest_reuse = { version = "0.5.0", optional = true } +stdext = { version = "0.3.1", optional = true } +stx-genesis = { path = "../stx-genesis", optional = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -95,7 +99,7 @@ features = ["std"] assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" -stx-genesis = { path = "../stx-genesis"} +stx-genesis = { path = "../stx-genesis" } clarity = { features = ["default", "testing"], path = "../clarity" } stacks-common = { features = ["default", "testing"], path = "../stacks-common" } rstest = "0.17.0" @@ -109,7 +113,7 @@ disable-costs = [] developer-mode = ["clarity/developer-mode"] monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks-common/slog_json", "clarity/slog_json", "pox-locking/slog_json"] -testing = [] +testing = ["stdext", "rstest", "rstest_reuse", "stx-genesis"] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } From 2ee25e43a6f79473ab1750f105d59b4e275fa27a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:30:45 -0400 Subject: [PATCH 576/910] build: use `feature = "testing"` to build stackslib test code, and suppress `unused` warnings in tests --- stackslib/src/burnchains/bitcoin/indexer.rs | 10 ++-- stackslib/src/burnchains/bitcoin/spv.rs | 10 ++-- stackslib/src/burnchains/burnchain.rs | 2 +- 
stackslib/src/burnchains/mod.rs | 8 +-- stackslib/src/burnchains/tests/db.rs | 2 +- stackslib/src/burnchains/tests/mod.rs | 1 + stackslib/src/chainstate/burn/db/sortdb.rs | 4 +- stackslib/src/chainstate/burn/distribution.rs | 2 +- .../burn/operations/leader_block_commit.rs | 8 +-- .../burn/operations/leader_key_register.rs | 8 +-- .../src/chainstate/burn/operations/mod.rs | 6 +-- .../chainstate/burn/operations/stack_stx.rs | 4 +- .../burn/operations/transfer_stx.rs | 2 +- stackslib/src/chainstate/coordinator/mod.rs | 8 +-- stackslib/src/chainstate/coordinator/tests.rs | 1 + .../chainstate/nakamoto/coordinator/mod.rs | 4 +- .../chainstate/nakamoto/coordinator/tests.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 6 +-- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 1 + .../chainstate/stacks/boot/contract_tests.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 17 +++--- .../src/chainstate/stacks/boot/pox_2_tests.rs | 1 + .../src/chainstate/stacks/boot/pox_3_tests.rs | 1 + .../src/chainstate/stacks/boot/pox_4_tests.rs | 1 + .../chainstate/stacks/boot/signers_tests.rs | 1 + stackslib/src/chainstate/stacks/db/blocks.rs | 29 +++++----- stackslib/src/chainstate/stacks/db/mod.rs | 5 +- .../src/chainstate/stacks/db/transactions.rs | 1 + stackslib/src/chainstate/stacks/index/file.rs | 6 +-- stackslib/src/chainstate/stacks/index/marf.rs | 4 +- .../src/chainstate/stacks/index/storage.rs | 24 ++++----- .../src/chainstate/stacks/index/trie_sql.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 10 ++-- stackslib/src/chainstate/stacks/mod.rs | 5 +- stackslib/src/chainstate/stacks/tests/mod.rs | 1 + stackslib/src/clarity_vm/clarity.rs | 12 ++--- stackslib/src/clarity_vm/database/marf.rs | 4 +- stackslib/src/clarity_vm/mod.rs | 2 +- stackslib/src/clarity_vm/tests/mod.rs | 1 + stackslib/src/core/mempool.rs | 20 +++---- stackslib/src/core/mod.rs | 54 +++++++++---------- stackslib/src/cost_estimates/mod.rs | 2 +- stackslib/src/cost_estimates/tests/common.rs | 1 - stackslib/src/cost_estimates/tests/mod.rs | 1 + stackslib/src/lib.rs | 4 +- stackslib/src/util_lib/bloom.rs | 2 +- stackslib/src/util_lib/boot.rs | 2 +- stackslib/src/util_lib/mod.rs | 2 +- 49 files changed, 162 insertions(+), 147 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 40cabd86d30..7c9083985bc 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -167,7 +167,7 @@ impl BitcoinIndexerConfig { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_default(spv_headers_path: String) -> BitcoinIndexerConfig { BitcoinIndexerConfig { peer_host: "127.0.0.1".to_string(), @@ -203,7 +203,7 @@ impl BitcoinIndexerRuntime { } impl BitcoinIndexer { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( config: BitcoinIndexerConfig, runtime: BitcoinIndexerRuntime, @@ -216,7 +216,7 @@ impl BitcoinIndexer { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_unit_test(working_dir: &str) -> BitcoinIndexer { let mut working_dir_path = PathBuf::from(working_dir); if fs::metadata(&working_dir_path).is_err() { @@ -861,7 +861,7 @@ impl BitcoinIndexer { Ok(new_tip) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn raw_store_header(&mut self, header: BurnchainBlockHeader) -> Result<(), btc_error> { let mut spv_client = SpvClient::new( &self.config.spv_headers_path, @@ -887,7 +887,7 @@ impl BitcoinIndexer { Ok(()) } - 
#[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn mock_bitcoin_header( parent_block_hash: &BurnchainHeaderHash, timestamp: u32, diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index 82cbb7b7f66..b2b886bdc40 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -182,7 +182,7 @@ impl SpvClient { Ok(client) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_without_migration( headers_path: &str, start_block: u64, @@ -211,7 +211,7 @@ impl SpvClient { Ok(client) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn disable_check_txcount(&mut self) { self.check_txcount = false; } @@ -220,7 +220,7 @@ impl SpvClient { &self.headers_db } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn conn_mut(&mut self) -> &mut DBConn { &mut self.headers_db } @@ -277,7 +277,7 @@ impl SpvClient { .and_then(|_| Ok(())) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_db_migrate(conn: &mut DBConn) -> Result<(), btc_error> { SpvClient::db_migrate(conn) } @@ -925,7 +925,7 @@ impl SpvClient { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_write_block_headers( &mut self, height: u64, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index a5ecaa04588..60f663c0de2 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -623,7 +623,7 @@ impl Burnchain { ret } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn default_unittest( first_block_height: u64, first_block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0bc68897cbd..2720d48e8ca 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -57,7 +57,7 @@ pub mod burnchain; pub mod db; pub mod indexer; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub struct Txid(pub [u8; 32]); @@ -351,7 +351,7 @@ impl PoxConstants { _shadow: PhantomData, } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots PoxConstants::new( @@ -369,7 +369,7 @@ impl PoxConstants { ) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] /// Create a PoX constants used in tests with 5-block cycles, /// 3-block prepare phases, a threshold of 3, rejection fraction of 25%, /// a participation threshold of 5% and no sunset or transition to pox-2 or beyond. @@ -821,7 +821,7 @@ impl From for Error { } impl BurnchainView { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn make_test_data(&mut self) { let oldest_height = if self.burn_stable_block_height < MAX_NEIGHBOR_BLOCK_DELAY { 0 diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index f14243d049d..8b69449d746 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -49,7 +49,7 @@ impl BurnchainDB { /// Get back all of the parsed burnchain operations for a given block. /// Used in testing to replay burnchain data. 
- #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_burnchain_block_ops( &self, block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 31e29c0b26e..f1bc0613af4 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod affirmation; pub mod burnchain; diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 942e6774bde..39647323299 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5482,7 +5482,7 @@ impl<'a> SortitionHandleTx<'a> { sn.canonical_stacks_tip_height, )?; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { let (block_consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); @@ -6566,7 +6566,7 @@ impl ChainstateDB for SortitionDB { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests { use std::sync::mpsc::sync_channel; use std::thread; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index ed01ae014b5..d91f158c27a 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -424,7 +424,7 @@ impl BurnSamplePoint { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod tests { use std::marker::PhantomData; diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cea03d44353..a1e5ee500a4 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -90,7 +90,7 @@ pub static OUTPUTS_PER_COMMIT: usize = 2; pub static BURN_BLOCK_MINED_AT_MODULUS: u64 = 5; impl LeaderBlockCommitOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn initial( block_header_hash: &BlockHeaderHash, block_height: u64, @@ -131,10 +131,10 @@ impl LeaderBlockCommitOp { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( block_header_hash: &BlockHeaderHash, - block_height: u64, + _block_height: u64, new_seed: &VRFSeed, parent: &LeaderBlockCommitOp, key_block_ptr: u32, @@ -170,7 +170,7 @@ impl LeaderBlockCommitOp { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_burn_height(&mut self, height: u64) { self.block_height = height; let new_burn_parent_modulus = if height > 0 { diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 44402adc0c8..87b13d8f50e 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -44,7 +44,7 @@ pub struct ParsedData { } impl LeaderKeyRegisterOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new(public_key: &VRFPublicKey) -> LeaderKeyRegisterOp { LeaderKeyRegisterOp { public_key: public_key.clone(), @@ -59,10 +59,10 @@ impl LeaderKeyRegisterOp { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_from_secrets( - num_sigs: u16, - hash_mode: &AddressHashMode, + _num_sigs: u16, + _hash_mode: &AddressHashMode, prover_key: &VRFPrivateKey, ) -> Option { let prover_pubk = 
VRFPublicKey::from_private(prover_key); diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 0843e03b1ec..fd0d63ef59a 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -47,7 +47,7 @@ pub mod stack_stx; pub mod transfer_stx; pub mod vote_for_aggregate_key; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test; /// This module contains all burn-chain operations @@ -439,7 +439,7 @@ impl BlockstackOperationType { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_block_height(&mut self, height: u64) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => data.block_height = height, @@ -456,7 +456,7 @@ impl BlockstackOperationType { }; } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_burn_header_hash(&mut self, hash: BurnchainHeaderHash) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => { diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index c4c54b97374..52e4d6bf3bb 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -57,7 +57,7 @@ struct ParsedData { pub static OUTPUTS_PER_COMMIT: usize = 2; impl PreStxOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new(sender: &StacksAddress) -> PreStxOp { PreStxOp { output: sender.clone(), @@ -155,7 +155,7 @@ impl PreStxOp { } impl StackStxOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( sender: &StacksAddress, reward_addr: &PoxAddress, diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index 9d1d562d9cb..a70075ff7c6 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -46,7 +46,7 @@ struct ParsedData { } impl TransferStxOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( sender: &StacksAddress, recipient: &StacksAddress, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 2849b749047..666fd976575 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -82,7 +82,7 @@ use crate::net::atlas::{AtlasConfig, AtlasDB, AttachmentInstance}; use crate::util_lib::db::{DBConn, DBTx, Error as DBError}; pub mod comm; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; /// The 3 different states for the current @@ -110,7 +110,7 @@ impl NewBurnchainBlockStatus { /// Test helper to convert this status into the optional hash of the missing PoX anchor block. /// Because there are unit tests that expect a Some(..) result if PoX cannot proceed, the /// missing Nakamoto anchor block case is converted into a placeholder Some(..) 
value - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn into_missing_block_hash(self) -> Option { match self { Self::Ready => None, @@ -624,7 +624,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader ChainsCoordinator<'a, T, (), U, (), (), B> { /// Create a coordinator for testing, with some parameters defaulted to None - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_new( burnchain: &Burnchain, chain_id: u32, @@ -644,7 +644,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader } /// Create a coordinator for testing allowing for all configurable params - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_new_full( burnchain: &Burnchain, chain_id: u32, diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 50127af1760..73b4349c2b8 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::cmp; use std::collections::{BTreeMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index d4ef14ba9a1..aa8ac218911 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -58,7 +58,7 @@ use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; use crate::util_lib::db::Error as DBError; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; macro_rules! err_or_debug { @@ -506,7 +506,7 @@ pub fn load_nakamoto_reward_set( Err(e) => return Some(Err(e)), Ok(None) => { // no header for this snapshot (possibly invalid) - info!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + debug!("Failed to find Stacks block by consensus hash"; "consensus_hash" => %sn.consensus_hash); return None } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index ddeea515735..5372853a8db 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet}; use std::sync::Mutex; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index cb515a860c9..d25c106d1c0 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -122,7 +122,7 @@ pub mod signer_set; pub mod staging_blocks; pub mod tenure; pub mod test_signers; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub use self::staging_blocks::{ @@ -270,7 +270,7 @@ lazy_static! 
{ ]; } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test_stall { pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); @@ -1757,7 +1757,7 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] test_stall::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 81380cc93d0..4f09fd1f573 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -484,7 +484,7 @@ impl NakamotoChainState { /// Drop a nakamoto tenure. /// Used for testing - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub(crate) fn delete_nakamoto_tenure( tx: &Connection, ch: &ConsensusHash, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 722cfa541af..fa02a34a09b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::borrow::BorrowMut; use std::collections::HashMap; diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 04b74ba2e90..650617ab495 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -361,7 +361,7 @@ impl BurnStateDB for TestSimBurnStateDB { panic!("Not implemented in TestSim"); } - fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { panic!("Not implemented in TestSim"); } @@ -525,7 +525,7 @@ impl BurnStateDB for TestSimBurnStateDB { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] impl HeadersDB for TestSimHeadersDB { fn get_burn_header_hash_for_block( &self, diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 88ecc8887e2..77a24b938f2 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -626,7 +626,7 @@ impl StacksChainState { /// Determine the minimum amount of STX per reward address required to stack in the _next_ /// reward cycle - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_stacking_minimum( &mut self, sortdb: &SortitionDB, @@ -688,7 +688,7 @@ impl StacksChainState { } /// Determine how many uSTX are stacked in a given reward cycle - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_get_total_ustx_stacked( &mut self, sortdb: &SortitionDB, @@ -1379,19 +1379,20 @@ impl StacksChainState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod contract_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod pox_2_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod pox_3_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod pox_4_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod signers_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::collections::{HashMap, HashSet}; use std::fs; diff --git 
a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 7ae25d00f6f..2ac7d0e6f58 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 3134b4773a7..b34b7eb6c75 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 0968cc4de3f..affb4bcf7bf 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index bf3b5f312c6..ba1a97556eb 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use clarity::vm::clarity::ClarityConnection; use clarity::vm::contexts::OwnedEnvironment; diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 47cace8c4b0..8df89833013 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -415,7 +415,7 @@ impl FromRow for StagingBlock { } impl StagingMicroblock { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn try_into_microblock(self) -> Result { StacksMicroblock::consensus_deserialize(&mut &self.block_data[..]).map_err(|_e| self) } @@ -660,7 +660,7 @@ impl StacksChainState { } /// Store an empty block to the chunk store, named by its hash. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn store_empty_block( blocks_path: &str, consensus_hash: &ConsensusHash, @@ -760,10 +760,10 @@ impl StacksChainState { } /// Get a list of all microblocks' hashes, and their anchored blocks' hashes - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn list_microblocks( blocks_conn: &DBConn, - blocks_dir: &str, + _blocks_dir: &str, ) -> Result)>, Error> { let mut blocks = StacksChainState::list_blocks(blocks_conn)?; let mut ret = vec![]; @@ -1025,7 +1025,7 @@ impl StacksChainState { .map_err(|e| Error::DBError(db_error::from(e))) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn load_staging_block_data( block_conn: &DBConn, blocks_path: &str, @@ -1493,7 +1493,7 @@ impl StacksChainState { /// Get an anchored block's parent block header. /// Doesn't matter if it's staging or not. 
- #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn load_parent_block_header( sort_ic: &SortitionDBConn, blocks_path: &str, @@ -2500,7 +2500,7 @@ impl StacksChainState { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn set_block_orphaned<'a>( tx: &mut DBTx<'a>, blocks_path: &str, @@ -2522,7 +2522,7 @@ impl StacksChainState { // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; - let orphaned_microblock_hashes = query_row_columns::( + let _orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, find_orphaned_microblocks_args, @@ -2801,7 +2801,7 @@ impl StacksChainState { /// Do we have any microblock available to serve in any capacity, given its parent anchored block's /// index block hash? - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn has_microblocks_indexed( &self, parent_index_block_hash: &StacksBlockId, @@ -2867,7 +2867,7 @@ impl StacksChainState { /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn stream_microblock_get_rowid( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -2883,7 +2883,7 @@ impl StacksChainState { /// Load up the metadata on a microblock stream (but don't get the data itself) /// DO NOT USE IN PRODUCTION -- doesn't work for microblock forks. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn stream_microblock_get_info( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -3576,7 +3576,7 @@ impl StacksChainState { /// Given a burnchain snapshot, a Stacks block and a microblock stream, preprocess them all. 
/// This does not work when forking - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn preprocess_stacks_epoch( &mut self, sort_ic: &SortitionDBConn, @@ -6438,7 +6438,7 @@ impl StacksChainState { /// PoX aware (i.e., unit tests, and old stacks-node loops), /// Elsewhere, block processing is invoked by the ChainsCoordinator, /// which handles tracking the chain tip itself - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn process_blocks_at_tip( &mut self, burnchain_db_conn: &DBConn, @@ -6936,8 +6936,9 @@ impl StacksChainState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::fs; use clarity::vm::ast::ASTRules; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index a942ec7fd15..81e954a0908 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -543,7 +543,7 @@ impl<'a, 'b> ClarityTx<'a, 'b> { self.block.seal() } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn commit_block(self) -> () { self.block.commit_block(); } @@ -2713,8 +2713,9 @@ impl StacksChainState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::{env, fs}; use clarity::vm::test_util::TEST_BURN_STATE_DB; diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 35ba5326678..99e92aac320 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1571,6 +1571,7 @@ impl StacksChainState { #[cfg(test)] pub mod test { + #![allow(unused)] use clarity::vm::clarity::TransactionConnection; use clarity::vm::contracts::Contract; use clarity::vm::representations::{ClarityName, ContractName}; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 4123b1310aa..53df16b7614 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -168,7 +168,7 @@ impl TrieFile { } /// Read a trie blob in its entirety from the blobs file - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_trie_blob(&mut self, db: &Connection, block_id: u32) -> Result, Error> { let (offset, length) = trie_sql::get_external_trie_offset_length(db, block_id)?; self.seek(SeekFrom::Start(offset))?; @@ -410,7 +410,7 @@ impl TrieFile { } /// Obtain a TrieHash for a node, given the node's block's hash (used only in testing) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_node_hash_bytes_by_bhh( &mut self, db: &Connection, @@ -424,7 +424,7 @@ impl TrieFile { } /// Get all (root hash, trie hash) pairs for this TrieFile - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_all_block_hashes_and_roots( &mut self, db: &Connection, diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index d5dd77c51f8..73d387c07b5 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1494,12 +1494,12 @@ impl MARF { } /// Access internal storage - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn borrow_storage_backend(&mut self) -> TrieStorageConnection { self.storage.connection() } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn borrow_storage_transaction(&mut self) -> TrieStorageTransaction { self.storage.transaction().unwrap() } diff --git 
a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 6994c7ad053..9397145fcbd 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -364,7 +364,7 @@ impl UncommittedState { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn print_to_stderr(&self) { self.trie_ram_ref().print_to_stderr() } @@ -535,7 +535,7 @@ impl TrieRAM { result } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] #[allow(dead_code)] pub fn stats(&mut self) -> (u64, u64) { let r = self.read_count; @@ -545,7 +545,7 @@ impl TrieRAM { (r, w) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] #[allow(dead_code)] pub fn node_stats(&mut self) -> (u64, u64, u64) { let nr = self.read_node_count; @@ -559,7 +559,7 @@ impl TrieRAM { (nr, br, nw) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] #[allow(dead_code)] pub fn leaf_stats(&mut self) -> (u64, u64) { let lr = self.read_leaf_count; @@ -677,7 +677,7 @@ impl TrieRAM { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_inner_seal( &mut self, storage_tx: &mut TrieStorageTransaction, @@ -1113,14 +1113,14 @@ impl TrieRAM { Ok(self.data.len() as u32) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn print_to_stderr(&self) { for dat in self.data.iter() { eprintln!("{}: {:?}", &dat.1, &dat.0); } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn data(&self) -> &Vec<(TrieNodeType, TrieHash)> { &self.data } @@ -2035,7 +2035,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Read the Trie root node's hash from the block table. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_block_root_hash(&mut self, bhh: &T) -> Result { let root_hash_ptr = TriePtr::new( TrieNodeID::Node256 as u8, @@ -2051,7 +2051,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn inner_read_persisted_root_to_blocks(&mut self) -> Result, Error> { let ret = match self.blobs.as_mut() { Some(blobs) => { @@ -2065,7 +2065,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Generate a mapping between Trie root hashes and the blocks that contain them - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_root_to_block_table(&mut self) -> Result, Error> { let mut ret = self.inner_read_persisted_root_to_blocks()?; let uncommitted_writes = match self.data.uncommitted_writes.take() { @@ -2738,12 +2738,12 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { self.bench.reset(); } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn transient_data(&self) -> &TrieStorageTransientData { &self.data } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn transient_data_mut(&mut self) -> &mut TrieStorageTransientData { &mut self.data } diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index c9d3b40dcef..1d54cce0d0b 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -422,7 +422,7 @@ pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Resul Ok(blob) } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn read_all_block_hashes_and_roots( conn: &Connection, ) -> Result, Error> { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 0195385d3b0..cb4709d123f 100644 --- 
a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1603,7 +1603,7 @@ impl StacksBlockBuilder { /// Append a transaction if doing so won't exceed the epoch data size. /// Does not check for errors - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn force_mine_tx( &mut self, clarity_tx: &mut ClarityTx, @@ -1626,7 +1626,7 @@ impl StacksBlockBuilder { if !self.anchored_done { // save match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { + Ok((fee, _receipt)) => { self.total_anchored_fees += fee; } Err(e) => { @@ -1637,7 +1637,7 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { + Ok((fee, _receipt)) => { self.total_streamed_fees += fee; } Err(e) => { @@ -2003,7 +2003,7 @@ impl StacksBlockBuilder { } /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, @@ -2022,7 +2022,7 @@ impl StacksBlockBuilder { /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 35c82f9b94e..ed9cf98e84c 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -69,7 +69,7 @@ pub mod index; pub mod miner; pub mod transaction; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub use stacks_common::address::{ @@ -1131,8 +1131,9 @@ pub const MAX_EPOCH_SIZE: u32 = 2 * 1024 * 1024; // $MAX_EPOCH_SIZE bytes (so the average microblock size needs to be 4kb if there are 256 of them) pub const MAX_MICROBLOCK_SIZE: u32 = 65536; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index cda74cb46d1..87601268c09 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index c89679f4145..eb38daf68e6 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -176,7 +176,7 @@ macro_rules! using { } impl<'a, 'b> ClarityBlockConnection<'a, 'b> { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_test_conn( datastore: WritableMarfStore<'a>, header_db: &'b dyn HeadersDB, @@ -731,7 +731,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// Commits all changes in the current block by /// (1) committing the current MARF tip to storage, /// (2) committing side-storage. 
- #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn commit_block(self) -> LimitedCostTracker { debug!("Commit Clarity datastore"); self.datastore.test_commit(); @@ -1591,7 +1591,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { self.datastore } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_epoch(&mut self, epoch_id: StacksEpochId) { self.epoch = epoch_id; } @@ -1856,7 +1856,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } /// Evaluate a raw Clarity snippit - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn clarity_eval_raw(&mut self, code: &str) -> Result { let (result, _, _, _) = self.with_abort_callback( |vm_env| vm_env.eval_raw(code).map_err(Error::from), @@ -1865,7 +1865,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { Ok(result) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn eval_read_only( &mut self, contract: &QualifiedContractIdentifier, @@ -1879,7 +1879,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod tests { use std::fs; diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index fed0e70e95c..eaec528c178 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -257,7 +257,7 @@ impl MarfedKV { &mut self.marf } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn sql_conn(&self) -> &Connection { self.marf.sqlite_conn() } @@ -526,7 +526,7 @@ impl<'a> WritableMarfStore<'a> { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_commit(self) { let bhh = self.chain_tip.clone(); self.commit_to(&bhh).unwrap(); diff --git a/stackslib/src/clarity_vm/mod.rs b/stackslib/src/clarity_vm/mod.rs index a3e6d23b8cc..4e1688da117 100644 --- a/stackslib/src/clarity_vm/mod.rs +++ b/stackslib/src/clarity_vm/mod.rs @@ -6,5 +6,5 @@ pub mod special; /// Stacks blockchain specific Clarity database implementations and wrappers pub mod database; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod tests; diff --git a/stackslib/src/clarity_vm/tests/mod.rs b/stackslib/src/clarity_vm/tests/mod.rs index 5855d61f318..1cc597b3d13 100644 --- a/stackslib/src/clarity_vm/tests/mod.rs +++ b/stackslib/src/clarity_vm/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+#![allow(unused)] pub mod analysis_costs; pub mod ast; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index fe75d62bd2c..73d1fc1c942 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1025,7 +1025,7 @@ impl NonceCache { where C: ClarityConnection, { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] assert!(self.cache.len() <= self.max_cache_size); // Check in-memory cache @@ -1111,7 +1111,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d query_row(conn, sql, params![addr_str]) } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; @@ -1162,7 +1162,7 @@ impl CandidateCache { self.next.push_back(tx); } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] assert!(self.cache.len() + self.next.len() <= self.max_cache_size); } @@ -1177,7 +1177,7 @@ impl CandidateCache { self.next.append(&mut self.cache); self.cache = std::mem::take(&mut self.next); - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { assert!(self.cache.len() <= self.max_cache_size + 1); assert!(self.next.len() <= self.max_cache_size + 1); @@ -1365,7 +1365,7 @@ impl MemPoolDB { .map(String::from) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn open_test( mainnet: bool, chain_id: u32, @@ -1934,7 +1934,7 @@ impl MemPoolDB { } /// Get all transactions across all tips - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_all_txs(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM mempool"; let rows = query_rows::(conn, &sql, NO_PARAMS)?; @@ -1942,7 +1942,7 @@ impl MemPoolDB { } /// Get all transactions at a specific block - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_num_tx_at_block( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -1955,7 +1955,7 @@ impl MemPoolDB { } /// Get a number of transactions after a given timestamp on a given chain tip. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_txs_after( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -2283,7 +2283,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn clear_before_coinbase_height( &mut self, min_coinbase_height: u64, @@ -2666,7 +2666,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn dump_txs(&self) { let sql = "SELECT * FROM mempool"; let txs: Vec = query_rows(&self.db, sql, NO_PARAMS).unwrap(); diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index ade8a825899..9a3d67e7521 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,7 +30,7 @@ use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; use std::cmp::Ordering; @@ -68,9 +68,9 @@ pub const GENESIS_EPOCH: StacksEpochId = StacksEpochId::Epoch20; /// The number of blocks which will share the block bonus /// from burn blocks that occurred without a sortition. 
/// (See: https://forum.stacks.org/t/pox-consensus-and-stx-future-supply) -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10_000; pub const STACKS_2_0_LAST_BLOCK_TO_PROCESS: u64 = 700_000; @@ -557,29 +557,29 @@ fn test_ord_for_stacks_epoch_id() { ); } pub trait StacksEpochExtension { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test(stacks_epoch_id: StacksEpochId, epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_pre_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -615,7 +615,7 @@ impl StacksEpochExtension for StacksEpoch { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -640,7 +640,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -678,7 +678,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -716,7 +716,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -767,7 +767,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_2(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -831,7 +831,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_3(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_3 first_burn_height = {}", @@ -908,7 +908,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_4(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_4 first_burn_height = {}", @@ -998,7 +998,7 @@ impl 
StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_5(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_5 first_burn_height = {}", @@ -1101,7 +1101,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_3_0 first_burn_height = {}", @@ -1217,7 +1217,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1268,7 +1268,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1342,7 +1342,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { match stacks_epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 => { diff --git a/stackslib/src/cost_estimates/mod.rs b/stackslib/src/cost_estimates/mod.rs index fc4aa5b1b2e..0992aa180a6 100644 --- a/stackslib/src/cost_estimates/mod.rs +++ b/stackslib/src/cost_estimates/mod.rs @@ -20,7 +20,7 @@ pub mod fee_scalar; pub mod metrics; pub mod pessimistic; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; use self::metrics::CostMetric; diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index fe6527ff53e..9ecfee27746 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -15,7 +15,6 @@ use crate::chainstate::stacks::{ use crate::core::StacksEpochId; /// Make a block receipt from `tx_receipts` with some dummy values filled for test. 
-#[cfg(test)] pub fn make_block_receipt(tx_receipts: Vec<StacksTransactionReceipt>) -> StacksEpochReceipt { StacksEpochReceipt { header: StacksHeaderInfo { diff --git a/stackslib/src/cost_estimates/tests/mod.rs b/stackslib/src/cost_estimates/tests/mod.rs index 792ecb778e7..e9292447bf4 100644 --- a/stackslib/src/cost_estimates/tests/mod.rs +++ b/stackslib/src/cost_estimates/tests/mod.rs @@ -1,3 +1,4 @@ +#![allow(unused)] use crate::cost_estimates::FeeRateEstimate; pub mod common; diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 31f97628a6e..8a6919412a0 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -27,11 +27,11 @@ extern crate slog; #[macro_use] extern crate serde_derive; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] #[macro_use] extern crate rstest; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] #[macro_use] extern crate rstest_reuse; diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index d1632f0b14f..d37802150fc 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -592,7 +592,7 @@ impl BloomHash for BloomNodeHasher { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { use std::fs; diff --git a/stackslib/src/util_lib/boot.rs b/stackslib/src/util_lib/boot.rs index 95cfca9c412..2585fe1b753 100644 --- a/stackslib/src/util_lib/boot.rs +++ b/stackslib/src/util_lib/boot.rs @@ -43,7 +43,7 @@ pub fn boot_code_acc(boot_code_address: StacksAddress, boot_code_nonce: u64) -> } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn boot_code_test_addr() -> StacksAddress { boot_code_addr(false) } diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 83a7ab2a25b..44a2772c001 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -5,7 +5,7 @@ pub mod boot; pub mod signed_structured_data; pub mod strings; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { use std::sync::mpsc::sync_channel; use std::{panic, process, thread}; From 6ed83d00f568ece3e21c98d18e2b5ddb78675578 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:31:17 -0400 Subject: [PATCH 577/910] build: plumb through features --- stx-genesis/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stx-genesis/Cargo.toml b/stx-genesis/Cargo.toml index 39e97465ced..6914ca14a5e 100644 --- a/stx-genesis/Cargo.toml +++ b/stx-genesis/Cargo.toml @@ -15,3 +15,6 @@ path = "src/lib.rs" [build-dependencies] libflate = "1.0.3" sha2 = { version = "0.10" } + +[features] +testing = [] From b721cf6bfea2b34eea2252b822e8f375ce1ff1d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:31:37 -0400 Subject: [PATCH 578/910] build: build stackslib and deps with "testing" feature so we can use stackslib test code in integration tests --- testnet/stacks-node/Cargo.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 19165db0a82..fb05aa0355f 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -43,15 +43,20 @@ warp = "0.3.5" tokio = "1.15" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } clarity = { path = "../../clarity", features = ["default", "testing"]} +rstest = "0.17.0" +rstest_reuse = "0.5.0" stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] }
stacks-signer = { path = "../../stacks-signer", features = ["testing"] } +stx-genesis = { path = "../../stx-genesis", features = ["testing"] } +stdext = "0.3.1" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" +rlimit = "0.10.2" [[bin]] name = "stacks-node" From 6324422580a7d427f9bfdb2ee4353c24b30a69fe Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:32:00 -0400 Subject: [PATCH 579/910] chore: expose walk_seed_probability and log_neighbors_freq connection opts --- testnet/stacks-node/src/config.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f1c37750566..81159486b44 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -558,6 +558,8 @@ lazy_static! { max_http_clients: 1000, // maximum number of HTTP connections max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) walk_interval: 60, // how often, in seconds, we do a neighbor walk + walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node + log_neighbors_freq: 60_000, // every minute, log all peer connections inv_sync_interval: 45, // how often, in seconds, we refresh block inventories inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) @@ -2424,6 +2426,8 @@ pub struct ConnectionOptionsFile { pub soft_max_clients_per_host: Option, pub max_sockets: Option, pub walk_interval: Option, + pub walk_seed_probability: Option, + pub log_neighbors_freq: Option, pub dns_timeout: Option, pub max_inflight_blocks: Option, pub max_inflight_attachments: Option, @@ -2528,6 +2532,12 @@ impl ConnectionOptionsFile { walk_interval: self .walk_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval.clone()), + walk_seed_probability: self + .walk_seed_probability + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_seed_probability), + log_neighbors_freq: self + .log_neighbors_freq + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.log_neighbors_freq), dns_timeout: self .dns_timeout .map(|dns_timeout| dns_timeout as u128) From f7c2c2a0be80c4be5c1a4ca0a22595310db236b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:32:29 -0400 Subject: [PATCH 580/910] chore: move topology neighbor convergence tests to integration test CI --- testnet/stacks-node/src/tests/mod.rs | 1 + .../stacks-node/src/tests/p2p/convergence.rs | 1136 +++++++++++++++++ testnet/stacks-node/src/tests/p2p/mod.rs | 18 + 3 files changed, 1155 insertions(+) create mode 100644 testnet/stacks-node/src/tests/p2p/convergence.rs create mode 100644 testnet/stacks-node/src/tests/p2p/mod.rs diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a7892b9a2db..0ac8e151a90 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -62,6 +62,7 @@ mod integrations; mod mempool; pub mod nakamoto_integrations; pub mod neon_integrations; +pub mod p2p; mod signer; mod stackerdb; diff --git a/testnet/stacks-node/src/tests/p2p/convergence.rs b/testnet/stacks-node/src/tests/p2p/convergence.rs new file mode 
100644 index 00000000000..8c273e43ce2 --- /dev/null +++ b/testnet/stacks-node/src/tests/p2p/convergence.rs @@ -0,0 +1,1136 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// You are going to need `ulimit -n` to be 4096 for these tests. +/// In Linux, the default is 1024. +use std::collections::{HashMap, HashSet}; + +use clarity::vm::types::{QualifiedContractIdentifier, StandardPrincipalData}; +use rand::prelude::*; +use rand::thread_rng; +use rlimit; +use stacks::core::PEER_VERSION_TESTNET; +use stacks::net::db::*; +use stacks::net::test::*; +use stacks::net::*; +use stacks::util_lib::test::*; + +fn setup_rlimit_nofiles() { + info!("Attempt to set nofile rlimit to 4096 (required for these tests to run)"); + assert!(rlimit::Resource::NOFILE.get().is_ok()); + let (slimit, hlimit) = rlimit::getrlimit(rlimit::Resource::NOFILE).unwrap(); + rlimit::setrlimit(rlimit::Resource::NOFILE, 4096.max(slimit), hlimit).unwrap(); + info!("Successfully set nofile rlimit to 4096"); +} + +fn stacker_db_id(i: usize) -> QualifiedContractIdentifier { + QualifiedContractIdentifier::new( + StandardPrincipalData(0x01, [i as u8; 20]), + format!("db-{}", i).as_str().into(), + ) +} + +fn make_stacker_db_ids(i: usize) -> Vec { + let mut dbs = vec![]; + for j in 0..i { + dbs.push(stacker_db_id(j)); + } + dbs +} + +fn setup_peer_config( + i: usize, + port_base: u16, + neighbor_count: usize, + peer_count: usize, +) -> TestPeerConfig { + let mut conf = TestPeerConfig::from_port(port_base + (2 * i as u16)); + conf.connection_opts.num_neighbors = neighbor_count as u64; + conf.connection_opts.soft_num_neighbors = neighbor_count as u64; + + conf.connection_opts.num_clients = 256; + conf.connection_opts.soft_num_clients = 128; + + conf.connection_opts.max_http_clients = 1000; + conf.connection_opts.max_neighbors_of_neighbor = neighbor_count as u64; + + conf.connection_opts.max_clients_per_host = MAX_NEIGHBORS_DATA_LEN as u64; + conf.connection_opts.soft_max_clients_per_host = peer_count as u64; + + conf.connection_opts.max_neighbors_per_host = MAX_NEIGHBORS_DATA_LEN as u64; + conf.connection_opts.soft_max_neighbors_per_host = (neighbor_count / 2) as u64; + conf.connection_opts.soft_max_neighbors_per_org = (neighbor_count / 2) as u64; + + conf.connection_opts.walk_interval = 0; + + conf.connection_opts.disable_inv_sync = true; + conf.connection_opts.disable_block_download = true; + + let j = i as u32; + conf.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer + + // even-number peers support stacker DBs. 
+ // odd-number peers do not + if i % 2 == 0 { + conf.services = (ServiceFlags::RELAY as u16) + | (ServiceFlags::RPC as u16) + | (ServiceFlags::STACKERDB as u16); + conf.stacker_dbs = make_stacker_db_ids(i); + } else { + conf.services = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); + conf.stacker_dbs = vec![]; + } + + conf +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. Peers are always allowed, so always peered with. +#[test] +#[ignore] +fn test_walk_ring_allow_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // all initial peers are allowed + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 32800, neighbor_count, peer_count); + + conf.allowed = -1; // always allowed + conf.denied = 0; + + conf.connection_opts.timeout = 100000; + conf.connection_opts.handshake_timeout = 100000; + conf.connection_opts.disable_natpunch = true; // breaks allow checks + + peer_configs.push(conf); + } + + test_walk_ring(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. No peer is always-allowed, and all walks are allowed. +#[test] +#[ignore] +fn test_walk_ring_15_plain() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 32900, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + + peer_configs.push(conf); + } + + test_walk_ring(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. No inbound walks, but pingback walks are allowed. +#[test] +#[ignore] +fn test_walk_ring_15_pingback() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 32950, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = false; + conf.connection_opts.disable_inbound_walks = true; + + peer_configs.push(conf); + } + + test_walk_ring_pingback(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. Puts one peer in a different AS to bias the neighbor walk towards it. +#[test] +#[ignore] +fn test_walk_ring_15_org_biased() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // one outlier peer has a different org than the others. 
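+ // The BLOCKSTACK_NEIGHBOR_TEST_<port> env var set below maps port 33000
+ // (peer 0, since port_base is 33000) to ASN 1 for the test harness; the
+ // conf.asn/conf.org values chosen in the loop are set to match it.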
+ use std::env; + + // ::33000 is in AS 1 + env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33000", "1"); + + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33000, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + if i == 0 { + conf.asn = 1; + conf.org = 1; + } else { + conf.asn = 0; + conf.org = 0; + } + + peer_configs.push(conf); + } + + // all peers see peer ::33000 as having ASN and Org ID 1 + let peer_0 = peer_configs[0].to_neighbor(); + + let peers = test_walk_ring(&mut peer_configs); + + for i in 1..peer_count { + match PeerDB::get_peer( + peers[i].network.peerdb.conn(), + peer_0.addr.network_id, + &peer_0.addr.addrbytes, + peer_0.addr.port, + ) + .unwrap() + { + Some(p) => { + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); + } + None => {} + } + } + + // no peer pruned peer ::33000 + for i in 1..peer_count { + match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { + None => {} + Some(count) => { + assert_eq!(*count, 0); + } + } + } + }) +} + +fn test_walk_ring_ex(peer_configs: &mut Vec, test_pingback: bool) -> Vec { + // arrange neighbors into a "ring" topology, where + // neighbor N is connected to neighbor (N-1)%NUM_NEIGHBORS and (N+1)%NUM_NEIGHBORS. + // If test_pingback is true, then neighbor N is only connected to (N+1)%NUM_NEIGHBORS + let mut peers = vec![]; + + let peer_count = peer_configs.len(); + + for i in 0..peer_count { + let n = (i + 1) % peer_count; + let neighbor = peer_configs[n].to_neighbor(); + peer_configs[i].add_neighbor(&neighbor); + } + + if !test_pingback { + for i in 1..peer_count + 1 { + let p = i - 1; + let neighbor = peer_configs[p].to_neighbor(); + peer_configs[i % peer_count].add_neighbor(&neighbor); + } + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test(&mut peers); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +fn test_walk_ring(peer_configs: &mut Vec) -> Vec { + test_walk_ring_ex(peer_configs, false) +} + +fn test_walk_ring_pingback(peer_configs: &mut Vec) -> Vec { + test_walk_ring_ex(peer_configs, true) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. All peers are whitelisted to one another. +#[test] +#[ignore] +fn test_walk_line_allowed_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33100, neighbor_count, peer_count); + + conf.allowed = -1; + conf.denied = 0; + + conf.connection_opts.timeout = 100000; + conf.connection_opts.handshake_timeout = 100000; + conf.connection_opts.disable_natpunch = true; // breaks allow checks + + peer_configs.push(conf); + } + + test_walk_line(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. No peers are whitelisted to one another, and all walk types are allowed. 
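+/// Structurally this mirrors the other cases in this file: build 15 configs
+/// with setup_peer_config, apply the allow/deny policy under test, then let
+/// test_walk_line wire the initial neighbor graph and step the network via
+/// run_topology_test until every peer's frontier is full.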
+#[test] +#[ignore] +fn test_walk_line_15_plain() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33200, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + + peer_configs.push(conf); + } + + test_walk_line(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. One peer is in a different AS. +#[test] +#[ignore] +fn test_walk_line_15_org_biased() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // one outlier peer has a different org than the others. + use std::env; + + // ::33300 is in AS 1 + env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33300", "1"); + + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; // make this a little bigger to speed this test up + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33300, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + if i == 0 { + conf.asn = 1; + conf.org = 1; + } else { + conf.asn = 0; + conf.org = 0; + } + + peer_configs.push(conf); + } + // all peers see peer ::33300 as having ASN and Org ID 1 + let peer_0 = peer_configs[0].to_neighbor(); + + let peers = test_walk_line(&mut peer_configs); + + for i in 1..peer_count { + match PeerDB::get_peer( + peers[i].network.peerdb.conn(), + peer_0.addr.network_id, + &peer_0.addr.addrbytes, + peer_0.addr.port, + ) + .unwrap() + { + Some(p) => { + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); + } + None => {} + } + } + + // no peer pruned peer ::33300 + for i in 1..peer_count { + match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { + None => {} + Some(count) => { + assert_eq!(*count, 0); + } + } + } + }) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. No inbound walks allowed; only pingback walks. +#[test] +#[ignore] +fn test_walk_line_15_pingback() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33350, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = false; + conf.connection_opts.disable_inbound_walks = true; + + peer_configs.push(conf); + } + + test_walk_line_pingback(&mut peer_configs); + }) +} + +fn test_walk_line(peer_configs: &mut Vec) -> Vec { + test_walk_line_ex(peer_configs, false) +} + +fn test_walk_line_pingback(peer_configs: &mut Vec) -> Vec { + test_walk_line_ex(peer_configs, true) +} + +fn test_walk_line_ex(peer_configs: &mut Vec, pingback_test: bool) -> Vec { + // arrange neighbors into a "line" topology. + // If pingback_test is true, then the topology is unidirectional: + // + // 0 ---> 1 ---> 2 ---> ... ---> peer_count + // + // If pingback_test is false, then the topology is bidirectional + // + // 0 <--> 1 <--> 2 <--> ... 
<--> peer_count + // + // all initial peers are allowed + let mut peers = vec![]; + + let peer_count = peer_configs.len(); + for i in 0..peer_count - 1 { + let n = i + 1; + let neighbor = peer_configs[n].to_neighbor(); + peer_configs[i].add_neighbor(&neighbor); + } + + if !pingback_test { + for i in 1..peer_count { + let p = i - 1; + let neighbor = peer_configs[p].to_neighbor(); + peer_configs[i].add_neighbor(&neighbor); + } + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test(&mut peers); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. All peers whitelist each other. +#[test] +#[ignore] +fn test_walk_star_allowed_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33400, neighbor_count, peer_count); + + conf.allowed = -1; // always allowed + conf.denied = 0; + + conf.connection_opts.timeout = 100000; + conf.connection_opts.handshake_timeout = 100000; + conf.connection_opts.disable_natpunch = true; // breaks allow checks + + peer_configs.push(conf); + } + + test_walk_star(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. No peers whitelist each other, and all walk types are allowed. +#[test] +#[ignore] +fn test_walk_star_15_plain() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33500, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + + peer_configs.push(conf); + } + + test_walk_star(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. No peers whitelist each other, and inbound walks (but not pingbacks) are disabled. +#[test] +#[ignore] +fn test_walk_star_15_pingback() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33550, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = false; + conf.connection_opts.disable_inbound_walks = true; + conf.connection_opts.soft_max_neighbors_per_org = peer_count as u64; + + peer_configs.push(conf); + } + + test_walk_star_pingback(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. One peer is in a separate AS. +#[test] +#[ignore] +fn test_walk_star_15_org_biased() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // one outlier peer has a different org than the others.
+ use std::env; + + // ::33600 is in AS 1 + env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33600", "1"); + + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33600, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + if i == 0 { + conf.asn = 1; + conf.org = 1; + } else { + conf.asn = 0; + conf.org = 0; + } + + peer_configs.push(conf); + } + // all peers see peer ::33600 as having ASN and Org ID 1 + let peer_0 = peer_configs[0].to_neighbor(); + + let peers = test_walk_star(&mut peer_configs); + + for i in 1..peer_count { + match PeerDB::get_peer( + peers[i].network.peerdb.conn(), + peer_0.addr.network_id, + &peer_0.addr.addrbytes, + peer_0.addr.port, + ) + .unwrap() + { + Some(p) => { + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); + } + None => {} + } + } + + // no peer pruned peer ::33600 + for i in 1..peer_count { + match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { + None => {} + Some(count) => { + assert_eq!(*count, 0); + } + } + } + }) +} + +fn test_walk_star(peer_configs: &mut Vec) -> Vec { + test_walk_star_ex(peer_configs, false) +} + +fn test_walk_star_pingback(peer_configs: &mut Vec) -> Vec { + test_walk_star_ex(peer_configs, true) +} + +fn test_walk_star_ex(peer_configs: &mut Vec, pingback_test: bool) -> Vec { + // arrange neighbors into a "star" topology. + // If pingback_test is true, then initial connections are unidirectional -- each neighbor (except + // for 0) only knows about 0. Neighbor 0 knows about no one. + // If pingback_test is false, then initial connections are bidirectional. + + let mut peers = vec![]; + let peer_count = peer_configs.len(); + + for i in 1..peer_count { + let neighbor = peer_configs[i].to_neighbor(); + let hub = peer_configs[0].to_neighbor(); + if !pingback_test { + peer_configs[0].add_neighbor(&neighbor); + } + + peer_configs[i].add_neighbor(&hub); + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test(&mut peers); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +fn test_walk_inbound_line(peer_configs: &mut Vec) -> Vec { + // arrange neighbors into a two-tiered "line" topology, where even-numbered neighbors are + // "NAT'ed" but connected to both the predecessor and successor odd neighbors. Odd + // numbered neighbors are not connected to anyone. The first and last even-numbered + // neighbor is only connected to its successor and predecessor, respectively. + // + // 1 3 5 + // ^ ^ ^ ^ ^ ^ + // / \ / \ / \ ... etc ... 
+ // 0 2 4 6 + // + // The goal of this test is that odd-numbered neighbors all learn about each other + + let mut peers = vec![]; + let peer_count = peer_configs.len(); + + for i in 0..peer_count { + if i % 2 == 0 { + if i > 0 { + let predecessor = peer_configs[i - 1].to_neighbor(); + peer_configs[i].add_neighbor(&predecessor); + } + if i + 1 < peer_count { + let successor = peer_configs[i + 1].to_neighbor(); + peer_configs[i].add_neighbor(&successor); + } + } + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test_ex( + &mut peers, + |peers: &Vec| { + let mut done = true; + for i in 0..peer_count { + // only check "public" peers + if i % 2 != 0 { + let all_neighbors = + PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); + if (all_neighbors.len() as u64) < ((peer_count / 2 - 1) as u64) { + let nk = peers[i].config.to_neighbor().addr; + test_debug!( + "waiting for public peer {:?} to fill up its frontier: {}", + &nk, + all_neighbors.len() + ); + done = false; + } + } + } + done + }, + true, + ); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +/// Arrange 15 peers into an alternating line topology, and verify that each peer learns of each +/// other peer over time. Odd peers have no outbound neighbors initially, but share one or two +/// inbound peers. +#[test] +#[ignore] +fn test_walk_inbound_line_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 15; // make this test go faster + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33250, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = true; + conf.connection_opts.disable_inbound_walks = false; + conf.connection_opts.walk_inbound_ratio = 2; + // basically, don't timeout (so public nodes can ask non-public inbound nodes about + // neighbors indefinitely) + conf.connection_opts.connect_timeout = 60000; + conf.connection_opts.timeout = 60000; + conf.connection_opts.handshake_timeout = 60000; + conf.connection_opts.soft_max_neighbors_per_org = (neighbor_count + 1) as u64; + conf.connection_opts.soft_max_neighbors_per_host = (neighbor_count + 1) as u64; + + peer_configs.push(conf); + } + + test_walk_inbound_line(&mut peer_configs); + }) +} + +fn dump_peers(peers: &Vec) -> () { + test_debug!("\n=== PEER DUMP ==="); + for i in 0..peers.len() { + let mut neighbor_index = vec![]; + let mut outbound_neighbor_index = vec![]; + for j in 0..peers.len() { + let stats_opt = peers[i] + .network + .get_neighbor_stats(&peers[j].to_neighbor().addr); + match stats_opt { + Some(stats) => { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } + } + None => {} + } + } + + let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); + let num_allowed = all_neighbors.iter().fold(0, |mut sum, ref n2| { + sum += if n2.allowed < 0 { 1 } else { 0 }; + sum + }); + test_debug!("Neighbor {} (all={}, outbound={}) (total neighbors = {}, total allowed = {}): outbound={:?} all={:?}", i, neighbor_index.len(), outbound_neighbor_index.len(), 
all_neighbors.len(), num_allowed, &outbound_neighbor_index, &neighbor_index); + } + test_debug!("\n"); +} + +fn dump_peer_histograms(peers: &Vec) -> () { + let mut outbound_hist: HashMap = HashMap::new(); + let mut inbound_hist: HashMap = HashMap::new(); + let mut all_hist: HashMap = HashMap::new(); + for i in 0..peers.len() { + let mut neighbor_index = vec![]; + let mut inbound_neighbor_index = vec![]; + let mut outbound_neighbor_index = vec![]; + for j in 0..peers.len() { + let stats_opt = peers[i] + .network + .get_neighbor_stats(&peers[j].to_neighbor().addr); + match stats_opt { + Some(stats) => { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); + } + } + None => {} + } + } + for inbound in inbound_neighbor_index.iter() { + if inbound_hist.contains_key(inbound) { + let c = inbound_hist.get(inbound).unwrap().to_owned(); + inbound_hist.insert(*inbound, c + 1); + } else { + inbound_hist.insert(*inbound, 1); + } + } + for outbound in outbound_neighbor_index.iter() { + if outbound_hist.contains_key(outbound) { + let c = outbound_hist.get(outbound).unwrap().to_owned(); + outbound_hist.insert(*outbound, c + 1); + } else { + outbound_hist.insert(*outbound, 1); + } + } + for n in neighbor_index.iter() { + if all_hist.contains_key(n) { + let c = all_hist.get(n).unwrap().to_owned(); + all_hist.insert(*n, c + 1); + } else { + all_hist.insert(*n, 1); + } + } + } + + test_debug!("=== PEER HISTOGRAM ==="); + for i in 0..peers.len() { + test_debug!( + "Neighbor {}: #in={} #out={} #all={}", + i, + inbound_hist.get(&i).unwrap_or(&0), + outbound_hist.get(&i).unwrap_or(&0), + all_hist.get(&i).unwrap_or(&0) + ); + } + test_debug!("\n"); +} + +fn run_topology_test(peers: &mut Vec) -> () { + run_topology_test_ex(peers, |_| false, false) +} + +fn run_topology_test_ex( + peers: &mut Vec, + mut finished_check: F, + use_finished_check: bool, +) -> () +where + F: FnMut(&Vec) -> bool, +{ + let peer_count = peers.len(); + + let mut initial_allowed: HashMap> = HashMap::new(); + let mut initial_denied: HashMap> = HashMap::new(); + + for i in 0..peer_count { + // turn off components we don't need + peers[i].config.connection_opts.disable_inv_sync = true; + peers[i].config.connection_opts.disable_block_download = true; + let nk = peers[i].config.to_neighbor().addr.clone(); + for j in 0..peers[i].config.initial_neighbors.len() { + let initial = &peers[i].config.initial_neighbors[j]; + if initial.allowed < 0 { + if !initial_allowed.contains_key(&nk) { + initial_allowed.insert(nk.clone(), vec![]); + } + initial_allowed + .get_mut(&nk) + .unwrap() + .push(initial.addr.clone()); + } + if initial.denied < 0 { + if !initial_denied.contains_key(&nk) { + initial_denied.insert(nk.clone(), vec![]); + } + initial_denied + .get_mut(&nk) + .unwrap() + .push(initial.addr.clone()); + } + } + } + + for i in 0..peer_count { + peers[i].connect_initial().unwrap(); + } + + // go until each neighbor knows about each other neighbor + let mut finished = false; + let mut count = 0; + while !finished { + finished = true; + let mut peer_counts = 0; + let mut random_order = vec![0usize; peer_count]; + for i in 0..peer_count { + random_order[i] = i; + } + let mut rng = thread_rng(); + random_order.shuffle(&mut rng); + + debug!("Random order = {:?}", &random_order); + for i in random_order.into_iter() { + let _ = peers[i].step_with_ibd(false); + let nk = peers[i].config.to_neighbor().addr; + debug!("Step peer {:?}", &nk); + + // allowed peers are still connected + match 
initial_allowed.get(&nk) { + Some(ref peer_list) => { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); + } + } + } + None => {} + }; + + // denied peers are never connected + match initial_denied.get(&nk) { + Some(ref peer_list) => { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); + } + } + } + None => {} + }; + + // all ports are unique in the p2p socket table + let mut ports: HashSet = HashSet::new(); + for k in peers[i].network.events.keys() { + if ports.contains(&k.port) { + error!("duplicate port {} from {:?}", k.port, k); + assert!(false); + } + ports.insert(k.port); + } + + // done? + let now_finished = if use_finished_check { + finished_check(&peers) + } else { + let mut done = true; + let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); + peer_counts += all_neighbors.len(); + test_debug!("Peer {} ({}) has {} neighbors", i, &nk, all_neighbors.len()); + + if (all_neighbors.len() as u64) < ((peer_count - 1) as u64) { + test_debug!( + "waiting for {:?} to fill up its frontier: {} < {}", + &nk, + all_neighbors.len(), + peer_count - 1 + ); + done = false; + } else { + test_debug!( + "not waiting for {:?} to fill up its frontier: {} >= {}", + &nk, + all_neighbors.len(), + peer_count - 1 + ); + } + done + }; + + finished = finished && now_finished; + } + + count += 1; + + test_debug!( + "Network convergence rate: {}%", + (100.0 * (peer_counts as f64)) / ((peer_count * peer_count) as f64), + ); + + if finished { + break; + } + + test_debug!("Finished walking the network {} times", count); + dump_peers(&peers); + dump_peer_histograms(&peers); + } + + test_debug!("Converged after {} calls to network.run()", count); + dump_peers(&peers); + dump_peer_histograms(&peers); + + // each peer learns each other peer's stacker DBs + for (i, peer) in peers.iter().enumerate() { + if i % 2 != 0 { + continue; + } + let mut expected_dbs = PeerDB::get_local_peer(peer.network.peerdb.conn()) + .unwrap() + .stacker_dbs; + expected_dbs.sort(); + for (j, other_peer) in peers.iter().enumerate() { + if i == j { + continue; + } + + let all_neighbors = PeerDB::get_all_peers(other_peer.network.peerdb.conn()).unwrap(); + + if (all_neighbors.len() as u64) < ((peer_count - 1) as u64) { + // this is a simulated-NAT'ed node -- it won't learn about other NAT'ed nodes' + // DBs + continue; + } + + // what does the other peer see as this peer's stacker DBs? 
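+ // (Recall from setup_peer_config that only even-numbered peers advertise
+ // STACKERDB support, which is why the assertion below differs by parity.)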
+ let mut other_peer_dbs = other_peer + .network + .peerdb + .get_peer_stacker_dbs(&peer.config.to_neighbor()) + .unwrap(); + other_peer_dbs.sort(); + + if j % 2 == 0 { + test_debug!( + "Compare stacker DBs of {} vs {}", + &peer.config.to_neighbor(), + &other_peer.config.to_neighbor() + ); + assert_eq!(expected_dbs, other_peer_dbs); + } else { + // this peer doesn't support Stacker DBs + assert_eq!(other_peer_dbs, vec![]); + } + } + } +} diff --git a/testnet/stacks-node/src/tests/p2p/mod.rs b/testnet/stacks-node/src/tests/p2p/mod.rs new file mode 100644 index 00000000000..c2a61de8ac8 --- /dev/null +++ b/testnet/stacks-node/src/tests/p2p/mod.rs @@ -0,0 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Integration tests that verify that sets of nodes in various initial topologies will, over time, +/// learn about every other node in the network +pub mod convergence; From ca9c5169f042bc3724ad7414796b00f9285f75c5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:32:51 -0400 Subject: [PATCH 581/910] chore: activate p2p convergence tests --- .github/workflows/bitcoin-tests.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6c3aca0e140..360b4f74cb5 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -117,6 +117,19 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover + - tests::p2p::convergence::test_walk_ring_allow_15 + - tests::p2p::convergence::test_walk_ring_15_plain + - tests::p2p::convergence::test_walk_ring_15_pingback + - tests::p2p::convergence::test_walk_ring_15_org_biased + - tests::p2p::convergence::test_walk_line_allowed_15 + - tests::p2p::convergence::test_walk_line_15_plain + - tests::p2p::convergence::test_walk_line_15_org_biased + - tests::p2p::convergence::test_walk_line_15_pingback + - tests::p2p::convergence::test_walk_star_allowed_15 + - tests::p2p::convergence::test_walk_star_15_plain + - tests::p2p::convergence::test_walk_star_15_pingback + - tests::p2p::convergence::test_walk_star_15_org_biased + - tests::p2p::convergence::test_walk_inbound_line_15 # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From 57351cf11afac7019bd9b4c680dde29d58df6c90 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 16 Sep 2024 08:34:43 -0500 Subject: [PATCH 582/910] fix build error when built without prom --- stacks-signer/src/client/stacks_client.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 130e4d98325..cc780166afa 100644 --- 
a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -598,7 +598,6 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { debug!("stacks_node_client: Getting pox data..."); - #[cfg(feature = "monitoring_prom")] let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client From c2ec5ef35e4206c8e2f2673cc990ddabec982bc2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 09:49:40 -0700 Subject: [PATCH 583/910] feat: revert change to bubble error from announce_new_stacks_block --- stackslib/src/net/relay.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 35627d9dd4e..d022148b3ae 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2049,11 +2049,9 @@ impl Relayer { } } if !http_uploaded_blocks.is_empty() { - if let Some(comm) = coord_comms { - if !comm.announce_new_stacks_block() { - return Err(net_error::CoordinatorClosed); - } - }; + coord_comms.inspect(|comm| { + comm.announce_new_stacks_block(); + }); } accepted_nakamoto_blocks_and_relayers.extend(pushed_blocks_and_relayers); From 1002d23f485055125e1cd1e261b8a441fd11f16b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 09:53:44 -0700 Subject: [PATCH 584/910] fix: revert default staging_db schema handling --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index be904395c25..f17dda37a8f 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -673,7 +673,7 @@ impl StacksChainState { Ok(x) => x, Err(e) => { debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); - return Ok(NAKAMOTO_STAGING_DB_SCHEMA_LATEST); + return Ok(1); } }; From 8cdc6dc01c2e50458e1be31eb977bcf8c47e9c04 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 09:58:29 -0700 Subject: [PATCH 585/910] fix: add missing insert_block failure catch after merge --- stacks-signer/src/v0/signer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 522ef4e249b..a5f635cf16a 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -783,6 +783,7 @@ impl Signer { } if let Err(e) = self.signer_db.insert_block(&block_info) { warn!("{self}: Failed to update block state: {e:?}",); + panic!("{self} Failed to update block state: {e}"); } } @@ -903,7 +904,7 @@ impl Signer { "Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e ); - e + panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] { From 0ca9d3971023765c8fc96539ebb320a13a942938 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 10:09:24 -0700 Subject: [PATCH 586/910] fix: remove extra unused param from replace_blocks query --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index f17dda37a8f..0fcdaffad81 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ 
b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -600,7 +600,6 @@ impl<'a> NakamotoStagingBlocksTx<'a> { &block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), - u64_to_sql(get_epoch_time_secs())?, &block.header.consensus_hash, &block.header.block_hash(), ])?; From 565a37ff16e999b3d92cdbf7bd6a3581beb18338 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 11:09:21 -0700 Subject: [PATCH 587/910] fix: use lower-cardinality prometheus metrics in signer --- stacks-signer/src/client/stacks_client.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cc780166afa..f44d988138f 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -450,7 +450,10 @@ impl StacksClient { "last_sortition" => %last_sortition, ); let path = self.tenure_forking_info_path(chosen_parent, last_sortition); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let timer = crate::monitoring::new_rpc_call_timer( + "/v3/tenures/fork_info/:start/:stop", + &self.http_origin, + ); let send_request = || { self.stacks_node_client .get(&path) @@ -491,7 +494,8 @@ impl StacksClient { pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let timer_label = format!("{}/consensus/:consensus_hash", self.sortition_info_path()); + let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { self.stacks_node_client.get(&path).send().map_err(|e| { warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); @@ -561,7 +565,7 @@ impl StacksClient { ) -> Result>, ClientError> { debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); let timer = crate::monitoring::new_rpc_call_timer( - &self.reward_set_path(reward_cycle), + &format!("{}/v3/stacker_set/:reward_cycle", self.http_origin), &self.http_origin, ); let send_request = || { @@ -644,8 +648,8 @@ impl StacksClient { address: &StacksAddress, ) -> Result { debug!("stacks_node_client: Getting account info..."); - let timer = - crate::monitoring::new_rpc_call_timer(&self.accounts_path(address), &self.http_origin); + let timer_label = format!("{}/v2/accounts/:stacks_address", self.http_origin); + let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { self.stacks_node_client .get(self.accounts_path(address)) @@ -797,7 +801,11 @@ impl StacksClient { let body = json!({"sender": self.stacks_address.to_string(), "arguments": args}).to_string(); let path = self.read_only_path(contract_addr, contract_name, function_name); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let timer_label = format!( + "{}/v2/contracts/call-read/:principal/{contract_name}/{function_name}", + self.http_origin + ); + let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let response = self .stacks_node_client .post(path) From 8ad5e95b85a7b4cbb918e63048f5eea70229e8ce Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 16 Sep 2024 13:53:29 -0500 Subject: [PATCH 588/910] chore: move p2p::conv tests back to stackslib --- 
.github/workflows/bitcoin-tests.yml | 13 ----- Cargo.lock | 5 +- stackslib/Cargo.toml | 9 ++-- stackslib/src/burnchains/bitcoin/indexer.rs | 10 ++-- stackslib/src/burnchains/bitcoin/spv.rs | 10 ++-- stackslib/src/burnchains/burnchain.rs | 2 +- stackslib/src/burnchains/mod.rs | 8 +-- stackslib/src/burnchains/tests/db.rs | 2 +- stackslib/src/burnchains/tests/mod.rs | 1 - stackslib/src/chainstate/burn/db/sortdb.rs | 4 +- stackslib/src/chainstate/burn/distribution.rs | 2 +- .../burn/operations/leader_block_commit.rs | 8 +-- .../burn/operations/leader_key_register.rs | 8 +-- .../src/chainstate/burn/operations/mod.rs | 6 +-- .../chainstate/burn/operations/stack_stx.rs | 4 +- .../burn/operations/transfer_stx.rs | 2 +- stackslib/src/chainstate/coordinator/mod.rs | 8 +-- stackslib/src/chainstate/coordinator/tests.rs | 1 - .../chainstate/nakamoto/coordinator/mod.rs | 2 +- .../chainstate/nakamoto/coordinator/tests.rs | 1 - stackslib/src/chainstate/nakamoto/mod.rs | 6 +-- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 1 - .../chainstate/stacks/boot/contract_tests.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 17 +++--- .../src/chainstate/stacks/boot/pox_2_tests.rs | 1 - .../src/chainstate/stacks/boot/pox_3_tests.rs | 1 - .../src/chainstate/stacks/boot/pox_4_tests.rs | 1 - .../chainstate/stacks/boot/signers_tests.rs | 1 - stackslib/src/chainstate/stacks/db/blocks.rs | 29 +++++----- stackslib/src/chainstate/stacks/db/mod.rs | 5 +- .../src/chainstate/stacks/db/transactions.rs | 1 - stackslib/src/chainstate/stacks/index/file.rs | 6 +-- stackslib/src/chainstate/stacks/index/marf.rs | 4 +- .../src/chainstate/stacks/index/storage.rs | 24 ++++----- .../src/chainstate/stacks/index/trie_sql.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 10 ++-- stackslib/src/chainstate/stacks/mod.rs | 5 +- stackslib/src/chainstate/stacks/tests/mod.rs | 1 - stackslib/src/clarity_vm/clarity.rs | 12 ++--- stackslib/src/clarity_vm/database/marf.rs | 4 +- stackslib/src/clarity_vm/mod.rs | 2 +- stackslib/src/clarity_vm/tests/mod.rs | 1 - stackslib/src/core/mempool.rs | 20 +++---- stackslib/src/core/mod.rs | 54 +++++++++---------- stackslib/src/cost_estimates/mod.rs | 2 +- stackslib/src/cost_estimates/tests/common.rs | 1 + stackslib/src/cost_estimates/tests/mod.rs | 1 - stackslib/src/lib.rs | 4 +- stackslib/src/net/asn.rs | 2 +- stackslib/src/net/atlas/db.rs | 6 +-- stackslib/src/net/chat.rs | 2 +- stackslib/src/net/codec.rs | 2 +- stackslib/src/net/connection.rs | 3 +- stackslib/src/net/dns.rs | 2 +- stackslib/src/net/download/epoch2x.rs | 12 ++--- stackslib/src/net/httpcore.rs | 22 ++++---- stackslib/src/net/inv/epoch2x.rs | 9 ++-- stackslib/src/net/mod.rs | 19 +++---- stackslib/src/net/neighbors/mod.rs | 20 +++---- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/poll.rs | 2 +- stackslib/src/net/prune.rs | 6 +-- stackslib/src/net/stackerdb/db.rs | 2 +- stackslib/src/net/stackerdb/mod.rs | 2 +- stackslib/src/net/stackerdb/tests/mod.rs | 1 - stackslib/src/net/stackerdb/tests/sync.rs | 1 + .../src/net/tests}/convergence.rs | 11 ++-- stackslib/src/net/tests/download/mod.rs | 1 - stackslib/src/net/tests/inv/mod.rs | 1 - stackslib/src/net/tests/mempool/mod.rs | 1 - stackslib/src/net/tests/mod.rs | 2 +- stackslib/src/net/tests/relay/mod.rs | 1 - stackslib/src/util_lib/bloom.rs | 2 +- stackslib/src/util_lib/boot.rs | 2 +- stackslib/src/util_lib/mod.rs | 2 +- stx-genesis/Cargo.toml | 3 -- testnet/stacks-node/Cargo.toml | 5 -- testnet/stacks-node/src/tests/mod.rs | 1 - 
testnet/stacks-node/src/tests/p2p/mod.rs | 18 ------- 80 files changed, 213 insertions(+), 280 deletions(-) rename {testnet/stacks-node/src/tests/p2p => stackslib/src/net/tests}/convergence.rs (99%) delete mode 100644 testnet/stacks-node/src/tests/p2p/mod.rs diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e411179a706..a7a483665ee 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -121,19 +121,6 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - - tests::p2p::convergence::test_walk_ring_allow_15 - - tests::p2p::convergence::test_walk_ring_15_plain - - tests::p2p::convergence::test_walk_ring_15_pingback - - tests::p2p::convergence::test_walk_ring_15_org_biased - - tests::p2p::convergence::test_walk_line_allowed_15 - - tests::p2p::convergence::test_walk_line_15_plain - - tests::p2p::convergence::test_walk_line_15_org_biased - - tests::p2p::convergence::test_walk_line_15_pingback - - tests::p2p::convergence::test_walk_star_allowed_15 - - tests::p2p::convergence::test_walk_star_15_plain - - tests::p2p::convergence::test_walk_star_15_pingback - - tests::p2p::convergence::test_walk_star_15_org_biased - - tests::p2p::convergence::test_walk_inbound_line_15 # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/Cargo.lock b/Cargo.lock index e56e4400b44..b9b45849d31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3420,9 +3420,6 @@ dependencies = [ "regex", "reqwest", "ring 0.16.20", - "rlimit", - "rstest 0.17.0", - "rstest_reuse 0.5.0", "rusqlite", "serde", "serde_derive", @@ -3431,7 +3428,6 @@ dependencies = [ "stacks-common", "stacks-signer", "stackslib", - "stdext", "stx-genesis", "tikv-jemallocator", "tiny_http", @@ -3507,6 +3503,7 @@ dependencies = [ "rand_core 0.6.4", "regex", "ripemd", + "rlimit", "rstest 0.17.0", "rstest_reuse 0.5.0", "rusqlite", diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index 909e2375021..d04fc3b1af3 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -59,10 +59,6 @@ siphasher = "0.3.7" wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } -rstest = { version = "0.17.0", optional = true } -rstest_reuse = { version = "0.5.0", optional = true } -stdext = { version = "0.3.1", optional = true } -stx-genesis = { path = "../stx-genesis", optional = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -99,12 +95,13 @@ features = ["std"] assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" -stx-genesis = { path = "../stx-genesis" } +stx-genesis = { path = "../stx-genesis"} clarity = { features = ["default", "testing"], path = "../clarity" } stacks-common = { features = ["default", "testing"], path = "../stacks-common" } rstest = "0.17.0" rstest_reuse = "0.5.0" mutants = "0.0.3" +rlimit = "0.10.2" [features] default = [] @@ -113,7 +110,7 @@ disable-costs = [] developer-mode = ["clarity/developer-mode"] monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks-common/slog_json", "clarity/slog_json", "pox-locking/slog_json"] -testing = ["stdext", "rstest", "rstest_reuse", "stx-genesis"] +testing = [] [target.'cfg(all(any(target_arch = "x86_64", 
target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 7c9083985bc..40cabd86d30 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -167,7 +167,7 @@ impl BitcoinIndexerConfig { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_default(spv_headers_path: String) -> BitcoinIndexerConfig { BitcoinIndexerConfig { peer_host: "127.0.0.1".to_string(), @@ -203,7 +203,7 @@ impl BitcoinIndexerRuntime { } impl BitcoinIndexer { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( config: BitcoinIndexerConfig, runtime: BitcoinIndexerRuntime, @@ -216,7 +216,7 @@ impl BitcoinIndexer { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_unit_test(working_dir: &str) -> BitcoinIndexer { let mut working_dir_path = PathBuf::from(working_dir); if fs::metadata(&working_dir_path).is_err() { @@ -861,7 +861,7 @@ impl BitcoinIndexer { Ok(new_tip) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn raw_store_header(&mut self, header: BurnchainBlockHeader) -> Result<(), btc_error> { let mut spv_client = SpvClient::new( &self.config.spv_headers_path, @@ -887,7 +887,7 @@ impl BitcoinIndexer { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn mock_bitcoin_header( parent_block_hash: &BurnchainHeaderHash, timestamp: u32, diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index b2b886bdc40..82cbb7b7f66 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -182,7 +182,7 @@ impl SpvClient { Ok(client) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_without_migration( headers_path: &str, start_block: u64, @@ -211,7 +211,7 @@ impl SpvClient { Ok(client) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn disable_check_txcount(&mut self) { self.check_txcount = false; } @@ -220,7 +220,7 @@ impl SpvClient { &self.headers_db } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn conn_mut(&mut self) -> &mut DBConn { &mut self.headers_db } @@ -277,7 +277,7 @@ impl SpvClient { .and_then(|_| Ok(())) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_db_migrate(conn: &mut DBConn) -> Result<(), btc_error> { SpvClient::db_migrate(conn) } @@ -925,7 +925,7 @@ impl SpvClient { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_write_block_headers( &mut self, height: u64, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 60f663c0de2..a5ecaa04588 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -623,7 +623,7 @@ impl Burnchain { ret } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn default_unittest( first_block_height: u64, first_block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 2720d48e8ca..0bc68897cbd 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -57,7 +57,7 @@ pub mod burnchain; pub mod db; pub mod indexer; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub struct Txid(pub [u8; 32]); @@ -351,7 +351,7 @@ impl PoxConstants { _shadow: PhantomData, } } - #[cfg(any(test, feature = "testing"))] 
+ #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots PoxConstants::new( @@ -369,7 +369,7 @@ impl PoxConstants { ) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] /// Create a PoX constants used in tests with 5-block cycles, /// 3-block prepare phases, a threshold of 3, rejection fraction of 25%, /// a participation threshold of 5% and no sunset or transition to pox-2 or beyond. @@ -821,7 +821,7 @@ impl From for Error { } impl BurnchainView { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn make_test_data(&mut self) { let oldest_height = if self.burn_stable_block_height < MAX_NEIGHBOR_BLOCK_DELAY { 0 diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 8b69449d746..f14243d049d 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -49,7 +49,7 @@ impl BurnchainDB { /// Get back all of the parsed burnchain operations for a given block. /// Used in testing to replay burnchain data. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_burnchain_block_ops( &self, block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index f1bc0613af4..31e29c0b26e 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod affirmation; pub mod burnchain; diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 39647323299..942e6774bde 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5482,7 +5482,7 @@ impl<'a> SortitionHandleTx<'a> { sn.canonical_stacks_tip_height, )?; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { let (block_consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); @@ -6566,7 +6566,7 @@ impl ChainstateDB for SortitionDB { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests { use std::sync::mpsc::sync_channel; use std::thread; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index d91f158c27a..ed01ae014b5 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -424,7 +424,7 @@ impl BurnSamplePoint { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod tests { use std::marker::PhantomData; diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index a1e5ee500a4..cea03d44353 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -90,7 +90,7 @@ pub static OUTPUTS_PER_COMMIT: usize = 2; pub static BURN_BLOCK_MINED_AT_MODULUS: u64 = 5; impl LeaderBlockCommitOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn initial( block_header_hash: &BlockHeaderHash, block_height: u64, @@ -131,10 +131,10 @@ impl LeaderBlockCommitOp { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( block_header_hash: &BlockHeaderHash, - _block_height: u64, + block_height: u64, new_seed: &VRFSeed, parent: &LeaderBlockCommitOp, key_block_ptr: u32, @@ -170,7 +170,7 @@ impl LeaderBlockCommitOp { } } 
- #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_burn_height(&mut self, height: u64) { self.block_height = height; let new_burn_parent_modulus = if height > 0 { diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 87b13d8f50e..44402adc0c8 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -44,7 +44,7 @@ pub struct ParsedData { } impl LeaderKeyRegisterOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new(public_key: &VRFPublicKey) -> LeaderKeyRegisterOp { LeaderKeyRegisterOp { public_key: public_key.clone(), @@ -59,10 +59,10 @@ impl LeaderKeyRegisterOp { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_from_secrets( - _num_sigs: u16, - _hash_mode: &AddressHashMode, + num_sigs: u16, + hash_mode: &AddressHashMode, prover_key: &VRFPrivateKey, ) -> Option { let prover_pubk = VRFPublicKey::from_private(prover_key); diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index fd0d63ef59a..0843e03b1ec 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -47,7 +47,7 @@ pub mod stack_stx; pub mod transfer_stx; pub mod vote_for_aggregate_key; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test; /// This module contains all burn-chain operations @@ -439,7 +439,7 @@ impl BlockstackOperationType { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_block_height(&mut self, height: u64) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => data.block_height = height, @@ -456,7 +456,7 @@ impl BlockstackOperationType { }; } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_burn_header_hash(&mut self, hash: BurnchainHeaderHash) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => { diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 52e4d6bf3bb..c4c54b97374 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -57,7 +57,7 @@ struct ParsedData { pub static OUTPUTS_PER_COMMIT: usize = 2; impl PreStxOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new(sender: &StacksAddress) -> PreStxOp { PreStxOp { output: sender.clone(), @@ -155,7 +155,7 @@ impl PreStxOp { } impl StackStxOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( sender: &StacksAddress, reward_addr: &PoxAddress, diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index a70075ff7c6..9d1d562d9cb 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -46,7 +46,7 @@ struct ParsedData { } impl TransferStxOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( sender: &StacksAddress, recipient: &StacksAddress, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 666fd976575..2849b749047 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -82,7 +82,7 @@ use crate::net::atlas::{AtlasConfig, AtlasDB, AttachmentInstance}; use 
crate::util_lib::db::{DBConn, DBTx, Error as DBError}; pub mod comm; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; /// The 3 different states for the current @@ -110,7 +110,7 @@ impl NewBurnchainBlockStatus { /// Test helper to convert this status into the optional hash of the missing PoX anchor block. /// Because there are unit tests that expect a Some(..) result if PoX cannot proceed, the /// missing Nakamoto anchor block case is converted into a placeholder Some(..) value - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn into_missing_block_hash(self) -> Option { match self { Self::Ready => None, @@ -624,7 +624,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader ChainsCoordinator<'a, T, (), U, (), (), B> { /// Create a coordinator for testing, with some parameters defaulted to None - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_new( burnchain: &Burnchain, chain_id: u32, @@ -644,7 +644,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader } /// Create a coordinator for testing allowing for all configurable params - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_new_full( burnchain: &Burnchain, chain_id: u32, diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 73b4349c2b8..50127af1760 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::cmp; use std::collections::{BTreeMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index aa8ac218911..cb1966d8060 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -58,7 +58,7 @@ use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; use crate::util_lib::db::Error as DBError; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; macro_rules! err_or_debug { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 902efc8a83f..6a2a484790c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet}; use std::sync::Mutex; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8bdc48c933f..cc8d6caaa2c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -122,7 +122,7 @@ pub mod signer_set; pub mod staging_blocks; pub mod tenure; pub mod test_signers; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub use self::staging_blocks::{ @@ -270,7 +270,7 @@ lazy_static! 
{ ]; } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod fault_injection { static PROCESS_BLOCK_STALL: std::sync::Mutex = std::sync::Mutex::new(false); @@ -1756,7 +1756,7 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fault_injection::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 4f09fd1f573..81380cc93d0 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -484,7 +484,7 @@ impl NakamotoChainState { /// Drop a nakamoto tenure. /// Used for testing - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub(crate) fn delete_nakamoto_tenure( tx: &Connection, ch: &ConsensusHash, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index fa02a34a09b..722cfa541af 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::borrow::BorrowMut; use std::collections::HashMap; diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 650617ab495..04b74ba2e90 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -361,7 +361,7 @@ impl BurnStateDB for TestSimBurnStateDB { panic!("Not implemented in TestSim"); } - fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { panic!("Not implemented in TestSim"); } @@ -525,7 +525,7 @@ impl BurnStateDB for TestSimBurnStateDB { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] impl HeadersDB for TestSimHeadersDB { fn get_burn_header_hash_for_block( &self, diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 77a24b938f2..88ecc8887e2 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -626,7 +626,7 @@ impl StacksChainState { /// Determine the minimum amount of STX per reward address required to stack in the _next_ /// reward cycle - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_stacking_minimum( &mut self, sortdb: &SortitionDB, @@ -688,7 +688,7 @@ impl StacksChainState { } /// Determine how many uSTX are stacked in a given reward cycle - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_get_total_ustx_stacked( &mut self, sortdb: &SortitionDB, @@ -1379,20 +1379,19 @@ impl StacksChainState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod contract_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod pox_2_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod pox_3_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod pox_4_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod signers_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::collections::{HashMap, HashSet}; use std::fs; diff --git 
a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 2ac7d0e6f58..7ae25d00f6f 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index b34b7eb6c75..3134b4773a7 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index affb4bcf7bf..0968cc4de3f 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index ba1a97556eb..bf3b5f312c6 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use clarity::vm::clarity::ClarityConnection; use clarity::vm::contexts::OwnedEnvironment; diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 8df89833013..47cace8c4b0 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -415,7 +415,7 @@ impl FromRow for StagingBlock { } impl StagingMicroblock { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn try_into_microblock(self) -> Result { StacksMicroblock::consensus_deserialize(&mut &self.block_data[..]).map_err(|_e| self) } @@ -660,7 +660,7 @@ impl StacksChainState { } /// Store an empty block to the chunk store, named by its hash. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn store_empty_block( blocks_path: &str, consensus_hash: &ConsensusHash, @@ -760,10 +760,10 @@ impl StacksChainState { } /// Get a list of all microblocks' hashes, and their anchored blocks' hashes - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn list_microblocks( blocks_conn: &DBConn, - _blocks_dir: &str, + blocks_dir: &str, ) -> Result)>, Error> { let mut blocks = StacksChainState::list_blocks(blocks_conn)?; let mut ret = vec![]; @@ -1025,7 +1025,7 @@ impl StacksChainState { .map_err(|e| Error::DBError(db_error::from(e))) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn load_staging_block_data( block_conn: &DBConn, blocks_path: &str, @@ -1493,7 +1493,7 @@ impl StacksChainState { /// Get an anchored block's parent block header. /// Doesn't matter if it's staging or not. 
- #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn load_parent_block_header( sort_ic: &SortitionDBConn, blocks_path: &str, @@ -2500,7 +2500,7 @@ impl StacksChainState { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn set_block_orphaned<'a>( tx: &mut DBTx<'a>, blocks_path: &str, @@ -2522,7 +2522,7 @@ impl StacksChainState { // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; - let _orphaned_microblock_hashes = query_row_columns::( + let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, find_orphaned_microblocks_args, @@ -2801,7 +2801,7 @@ impl StacksChainState { /// Do we have any microblock available to serve in any capacity, given its parent anchored block's /// index block hash? - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn has_microblocks_indexed( &self, parent_index_block_hash: &StacksBlockId, @@ -2867,7 +2867,7 @@ impl StacksChainState { /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn stream_microblock_get_rowid( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -2883,7 +2883,7 @@ impl StacksChainState { /// Load up the metadata on a microblock stream (but don't get the data itself) /// DO NOT USE IN PRODUCTION -- doesn't work for microblock forks. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn stream_microblock_get_info( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -3576,7 +3576,7 @@ impl StacksChainState { /// Given a burnchain snapshot, a Stacks block and a microblock stream, preprocess them all. 
/// This does not work when forking - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn preprocess_stacks_epoch( &mut self, sort_ic: &SortitionDBConn, @@ -6438,7 +6438,7 @@ impl StacksChainState { /// PoX aware (i.e., unit tests, and old stacks-node loops), /// Elsewhere, block processing is invoked by the ChainsCoordinator, /// which handles tracking the chain tip itself - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn process_blocks_at_tip( &mut self, burnchain_db_conn: &DBConn, @@ -6936,9 +6936,8 @@ impl StacksChainState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::fs; use clarity::vm::ast::ASTRules; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 81e954a0908..a942ec7fd15 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -543,7 +543,7 @@ impl<'a, 'b> ClarityTx<'a, 'b> { self.block.seal() } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn commit_block(self) -> () { self.block.commit_block(); } @@ -2713,9 +2713,8 @@ impl StacksChainState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::{env, fs}; use clarity::vm::test_util::TEST_BURN_STATE_DB; diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 99e92aac320..35ba5326678 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1571,7 +1571,6 @@ impl StacksChainState { #[cfg(test)] pub mod test { - #![allow(unused)] use clarity::vm::clarity::TransactionConnection; use clarity::vm::contracts::Contract; use clarity::vm::representations::{ClarityName, ContractName}; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 53df16b7614..4123b1310aa 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -168,7 +168,7 @@ impl TrieFile { } /// Read a trie blob in its entirety from the blobs file - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_trie_blob(&mut self, db: &Connection, block_id: u32) -> Result, Error> { let (offset, length) = trie_sql::get_external_trie_offset_length(db, block_id)?; self.seek(SeekFrom::Start(offset))?; @@ -410,7 +410,7 @@ impl TrieFile { } /// Obtain a TrieHash for a node, given the node's block's hash (used only in testing) - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_node_hash_bytes_by_bhh( &mut self, db: &Connection, @@ -424,7 +424,7 @@ impl TrieFile { } /// Get all (root hash, trie hash) pairs for this TrieFile - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_all_block_hashes_and_roots( &mut self, db: &Connection, diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 73d387c07b5..d5dd77c51f8 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1494,12 +1494,12 @@ impl MARF { } /// Access internal storage - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn borrow_storage_backend(&mut self) -> TrieStorageConnection { self.storage.connection() } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn borrow_storage_transaction(&mut self) -> TrieStorageTransaction { self.storage.transaction().unwrap() } diff --git 
a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 9397145fcbd..6994c7ad053 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -364,7 +364,7 @@ impl UncommittedState { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn print_to_stderr(&self) { self.trie_ram_ref().print_to_stderr() } @@ -535,7 +535,7 @@ impl TrieRAM { result } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] #[allow(dead_code)] pub fn stats(&mut self) -> (u64, u64) { let r = self.read_count; @@ -545,7 +545,7 @@ impl TrieRAM { (r, w) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] #[allow(dead_code)] pub fn node_stats(&mut self) -> (u64, u64, u64) { let nr = self.read_node_count; @@ -559,7 +559,7 @@ impl TrieRAM { (nr, br, nw) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] #[allow(dead_code)] pub fn leaf_stats(&mut self) -> (u64, u64) { let lr = self.read_leaf_count; @@ -677,7 +677,7 @@ impl TrieRAM { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_inner_seal( &mut self, storage_tx: &mut TrieStorageTransaction, @@ -1113,14 +1113,14 @@ impl TrieRAM { Ok(self.data.len() as u32) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn print_to_stderr(&self) { for dat in self.data.iter() { eprintln!("{}: {:?}", &dat.1, &dat.0); } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn data(&self) -> &Vec<(TrieNodeType, TrieHash)> { &self.data } @@ -2035,7 +2035,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Read the Trie root node's hash from the block table. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_block_root_hash(&mut self, bhh: &T) -> Result { let root_hash_ptr = TriePtr::new( TrieNodeID::Node256 as u8, @@ -2051,7 +2051,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn inner_read_persisted_root_to_blocks(&mut self) -> Result, Error> { let ret = match self.blobs.as_mut() { Some(blobs) => { @@ -2065,7 +2065,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Generate a mapping between Trie root hashes and the blocks that contain them - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_root_to_block_table(&mut self) -> Result, Error> { let mut ret = self.inner_read_persisted_root_to_blocks()?; let uncommitted_writes = match self.data.uncommitted_writes.take() { @@ -2738,12 +2738,12 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { self.bench.reset(); } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn transient_data(&self) -> &TrieStorageTransientData { &self.data } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn transient_data_mut(&mut self) -> &mut TrieStorageTransientData { &mut self.data } diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 1d54cce0d0b..c9d3b40dcef 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -422,7 +422,7 @@ pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Resul Ok(blob) } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub fn read_all_block_hashes_and_roots( conn: &Connection, ) -> Result, Error> { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index cb4709d123f..0195385d3b0 100644 --- 
a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1603,7 +1603,7 @@ impl StacksBlockBuilder { /// Append a transaction if doing so won't exceed the epoch data size. /// Does not check for errors - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn force_mine_tx( &mut self, clarity_tx: &mut ClarityTx, @@ -1626,7 +1626,7 @@ impl StacksBlockBuilder { if !self.anchored_done { // save match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, _receipt)) => { + Ok((fee, receipt)) => { self.total_anchored_fees += fee; } Err(e) => { @@ -1637,7 +1637,7 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, _receipt)) => { + Ok((fee, receipt)) => { self.total_streamed_fees += fee; } Err(e) => { @@ -2003,7 +2003,7 @@ impl StacksBlockBuilder { } /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, @@ -2022,7 +2022,7 @@ impl StacksBlockBuilder { /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index ed9cf98e84c..35c82f9b94e 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -69,7 +69,7 @@ pub mod index; pub mod miner; pub mod transaction; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub use stacks_common::address::{ @@ -1131,9 +1131,8 @@ pub const MAX_EPOCH_SIZE: u32 = 2 * 1024 * 1024; // $MAX_EPOCH_SIZE bytes (so the average microblock size needs to be 4kb if there are 256 of them) pub const MAX_MICROBLOCK_SIZE: u32 = 65536; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 87601268c09..cda74cb46d1 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index eb38daf68e6..c89679f4145 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -176,7 +176,7 @@ macro_rules! using { } impl<'a, 'b> ClarityBlockConnection<'a, 'b> { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_test_conn( datastore: WritableMarfStore<'a>, header_db: &'b dyn HeadersDB, @@ -731,7 +731,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// Commits all changes in the current block by /// (1) committing the current MARF tip to storage, /// (2) committing side-storage. 
- #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn commit_block(self) -> LimitedCostTracker { debug!("Commit Clarity datastore"); self.datastore.test_commit(); @@ -1591,7 +1591,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { self.datastore } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_epoch(&mut self, epoch_id: StacksEpochId) { self.epoch = epoch_id; } @@ -1856,7 +1856,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } /// Evaluate a raw Clarity snippit - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn clarity_eval_raw(&mut self, code: &str) -> Result { let (result, _, _, _) = self.with_abort_callback( |vm_env| vm_env.eval_raw(code).map_err(Error::from), @@ -1865,7 +1865,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { Ok(result) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn eval_read_only( &mut self, contract: &QualifiedContractIdentifier, @@ -1879,7 +1879,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod tests { use std::fs; diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index eaec528c178..fed0e70e95c 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -257,7 +257,7 @@ impl MarfedKV { &mut self.marf } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn sql_conn(&self) -> &Connection { self.marf.sqlite_conn() } @@ -526,7 +526,7 @@ impl<'a> WritableMarfStore<'a> { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_commit(self) { let bhh = self.chain_tip.clone(); self.commit_to(&bhh).unwrap(); diff --git a/stackslib/src/clarity_vm/mod.rs b/stackslib/src/clarity_vm/mod.rs index 4e1688da117..a3e6d23b8cc 100644 --- a/stackslib/src/clarity_vm/mod.rs +++ b/stackslib/src/clarity_vm/mod.rs @@ -6,5 +6,5 @@ pub mod special; /// Stacks blockchain specific Clarity database implementations and wrappers pub mod database; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod tests; diff --git a/stackslib/src/clarity_vm/tests/mod.rs b/stackslib/src/clarity_vm/tests/mod.rs index 1cc597b3d13..5855d61f318 100644 --- a/stackslib/src/clarity_vm/tests/mod.rs +++ b/stackslib/src/clarity_vm/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
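Alongside the cfg tightening, this series strips blanket `#![allow(unused)]` attributes from test modules (another is removed just below). A small illustrative contrast, with made-up names; the targeted form matches the `#[allow(dead_code)]` retained in the `storage.rs` hunks earlier:

```rust
mod before {
    #![allow(unused)] // blanket: silences unused imports, variables,
                      // and dead code for the entire module

    use std::collections::HashMap; // unused, but no warning is emitted
}

mod after {
    // lints stay active module-wide; one known-unused item is
    // exempted explicitly
    #[allow(dead_code)]
    fn helper_kept_for_future_tests() {}
}
```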
-#![allow(unused)] pub mod analysis_costs; pub mod ast; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 73d1fc1c942..fe75d62bd2c 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1025,7 +1025,7 @@ impl NonceCache { where C: ClarityConnection, { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] assert!(self.cache.len() <= self.max_cache_size); // Check in-memory cache @@ -1111,7 +1111,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d query_row(conn, sql, params![addr_str]) } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; @@ -1162,7 +1162,7 @@ impl CandidateCache { self.next.push_back(tx); } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] assert!(self.cache.len() + self.next.len() <= self.max_cache_size); } @@ -1177,7 +1177,7 @@ impl CandidateCache { self.next.append(&mut self.cache); self.cache = std::mem::take(&mut self.next); - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { assert!(self.cache.len() <= self.max_cache_size + 1); assert!(self.next.len() <= self.max_cache_size + 1); @@ -1365,7 +1365,7 @@ impl MemPoolDB { .map(String::from) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn open_test( mainnet: bool, chain_id: u32, @@ -1934,7 +1934,7 @@ impl MemPoolDB { } /// Get all transactions across all tips - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_all_txs(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM mempool"; let rows = query_rows::(conn, &sql, NO_PARAMS)?; @@ -1942,7 +1942,7 @@ impl MemPoolDB { } /// Get all transactions at a specific block - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_num_tx_at_block( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -1955,7 +1955,7 @@ impl MemPoolDB { } /// Get a number of transactions after a given timestamp on a given chain tip. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_txs_after( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -2283,7 +2283,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn clear_before_coinbase_height( &mut self, min_coinbase_height: u64, @@ -2666,7 +2666,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn dump_txs(&self) { let sql = "SELECT * FROM mempool"; let txs: Vec = query_rows(&self.db, sql, NO_PARAMS).unwrap(); diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 9a3d67e7521..ade8a825899 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,7 +30,7 @@ use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; use std::cmp::Ordering; @@ -68,9 +68,9 @@ pub const GENESIS_EPOCH: StacksEpochId = StacksEpochId::Epoch20; /// The number of blocks which will share the block bonus /// from burn blocks that occurred without a sortition. 
/// (See: https://forum.stacks.org/t/pox-consensus-and-stx-future-supply) -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10_000; pub const STACKS_2_0_LAST_BLOCK_TO_PROCESS: u64 = 700_000; @@ -557,29 +557,29 @@ fn test_ord_for_stacks_epoch_id() { ); } pub trait StacksEpochExtension { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_pre_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -615,7 +615,7 @@ impl StacksEpochExtension for StacksEpoch { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -640,7 +640,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -678,7 +678,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -716,7 +716,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -767,7 +767,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_2(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -831,7 +831,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_3(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_3 first_burn_height = {}", @@ -908,7 +908,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_4(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_4 first_burn_height = {}", @@ -998,7 +998,7 @@ impl 
StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_5(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_5 first_burn_height = {}", @@ -1101,7 +1101,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_3_0 first_burn_height = {}", @@ -1217,7 +1217,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1268,7 +1268,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1342,7 +1342,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { match stacks_epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 => { diff --git a/stackslib/src/cost_estimates/mod.rs b/stackslib/src/cost_estimates/mod.rs index 0992aa180a6..fc4aa5b1b2e 100644 --- a/stackslib/src/cost_estimates/mod.rs +++ b/stackslib/src/cost_estimates/mod.rs @@ -20,7 +20,7 @@ pub mod fee_scalar; pub mod metrics; pub mod pessimistic; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; use self::metrics::CostMetric; diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 9ecfee27746..fe6527ff53e 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -15,6 +15,7 @@ use crate::chainstate::stacks::{ use crate::core::StacksEpochId; /// Make a block receipt from `tx_receipts` with some dummy values filled for test. 
+#[cfg(test)] pub fn make_block_receipt(tx_receipts: Vec) -> StacksEpochReceipt { StacksEpochReceipt { header: StacksHeaderInfo { diff --git a/stackslib/src/cost_estimates/tests/mod.rs b/stackslib/src/cost_estimates/tests/mod.rs index e9292447bf4..792ecb778e7 100644 --- a/stackslib/src/cost_estimates/tests/mod.rs +++ b/stackslib/src/cost_estimates/tests/mod.rs @@ -1,4 +1,3 @@ -#![allow(unused)] use crate::cost_estimates::FeeRateEstimate; pub mod common; diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 8a6919412a0..31f97628a6e 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -27,11 +27,11 @@ extern crate slog; #[macro_use] extern crate serde_derive; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] #[macro_use] extern crate rstest; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] #[macro_use] extern crate rstest_reuse; diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index bb31146c819..f38c6c54d4d 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -222,7 +222,7 @@ impl ASEntry4 { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::io; use std::io::BufRead; diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index 37ed22a26b1..d6bdbb301eb 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -374,7 +374,7 @@ impl AtlasDB { } // Open an atlas database in memory (used for testing) - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn connect_memory(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; let mut db = AtlasDB { @@ -387,7 +387,7 @@ impl AtlasDB { Ok(db) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] /// Only ever to be used in testing, open and instantiate a V1 atlasdb pub fn connect_memory_db_v1(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory()?; @@ -432,7 +432,7 @@ impl AtlasDB { Ok(db) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] /// Only ever to be used in testing, connect to db, but using existing sqlconn pub fn connect_with_sqlconn( atlas_config: AtlasConfig, diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 5949db0bbf6..99b07a60559 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3058,7 +3058,7 @@ impl ConversationP2P { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { #![allow(unused)] use std::fs; diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index 4cb4099fb49..bd8154e414b 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -1600,7 +1600,7 @@ impl ProtocolFamily for StacksP2P { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { use stacks_common::bitvec::BitVec; use stacks_common::codec::NEIGHBOR_ADDRESS_ENCODED_SIZE; diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index db50c46333d..c360d7a548b 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -1504,9 +1504,8 @@ pub type ReplyHandleP2P = NetworkReplyHandle; pub type ConnectionHttp = NetworkConnection; pub type ReplyHandleHttp = NetworkReplyHandle; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { - #![allow(unused)] use std::io::prelude::*; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index c63d1b4fedf..aedb73bd626 100644 --- a/stackslib/src/net/dns.rs +++ 
b/stackslib/src/net/dns.rs @@ -355,7 +355,7 @@ impl DNSClient { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::collections::HashMap; use std::error::Error; diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index 5c926c41923..c57d9d19bc8 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -56,22 +56,22 @@ use crate::net::{ }; use crate::util_lib::db::{DBConn, Error as db_error}; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 180; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 0; /// If a URL never connects, don't use it again for this many seconds -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 300; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 60; /// If we created a request to download a block or microblock, don't do so again until this many /// seconds have passed. -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const BLOCK_REREQUEST_INTERVAL: u64 = 60; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const BLOCK_REREQUEST_INTERVAL: u64 = 30; /// This module is responsible for downloading blocks and microblocks from other peers, using block diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index fc296b9f2bf..804add6f331 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -547,14 +547,14 @@ impl StacksHttpRequest { (self.preamble, self.contents) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; Ok(ret) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_response_handler_index(&self) -> Option { self.response_handler_index } @@ -676,7 +676,7 @@ impl StacksHttpResponse { self.preamble.headers.clear(); } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; @@ -700,7 +700,7 @@ pub enum StacksHttpPreamble { } impl StacksHttpPreamble { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn expect_request(self) -> HttpRequestPreamble { match self { Self::Request(x) => x, @@ -708,7 +708,7 @@ impl StacksHttpPreamble { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn expect_response(self) -> HttpResponsePreamble { match self { Self::Response(x) => x, @@ -1004,7 +1004,7 @@ impl StacksHttp { } /// Force the state machine to expect a response - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { let handler_index = self .find_response_handler(request_verb, request_path) @@ -1016,7 +1016,7 @@ impl StacksHttp { } /// Try to parse an inbound HTTP request using a given handler, preamble, and body - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn handle_try_parse_request( &self, handler: &mut dyn RPCRequestHandler, @@ -1202,7 +1202,7 @@ impl StacksHttp { Ok((response_preamble, response_contents)) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn num_pending(&self) -> usize { self.reply.as_ref().map(|_| 1).unwrap_or(0) } @@ -1346,10 +1346,10 @@ impl StacksHttp { } /// Given a fully-formed single HTTP response, parse it (used by clients). 
- #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn parse_response( - _verb: &str, - _request_path: &str, + verb: &str, + request_path: &str, response_buf: &[u8], ) -> Result { let mut http = StacksHttp::new( diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 7068db7accc..fc5f073b2e9 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -47,9 +47,9 @@ use crate::net::{ use crate::util_lib::db::{DBConn, Error as db_error}; /// This module is responsible for synchronizing block inventories with other peers -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const INV_SYNC_INTERVAL: u64 = 150; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const INV_SYNC_INTERVAL: u64 = 3; pub const INV_REWARD_CYCLES: u64 = 2; @@ -1143,7 +1143,7 @@ impl InvState { self.block_stats.get_mut(nk) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn add_peer(&mut self, nk: NeighborKey, is_bootstrap_peer: bool) -> () { self.block_stats.insert( nk.clone(), @@ -2848,3 +2848,6 @@ impl PeerNetwork { work_state } } + +#[cfg(test)] +mod test {} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 8b36377a251..7f8dea93291 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -143,7 +143,7 @@ pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; #[derive(Debug)] @@ -571,7 +571,7 @@ impl From for Error { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] impl PartialEq for Error { /// (make I/O errors comparable for testing purposes) fn eq(&self, other: &Self) -> bool { @@ -1293,9 +1293,9 @@ pub const MAX_BROADCAST_INBOUND_RECEIVERS: usize = 16; pub const BLOCKS_AVAILABLE_MAX_LEN: u32 = 32; // maximum number of PoX reward cycles we can ask about -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const GETPOXINV_MAX_BITLEN: u64 = 4096; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const GETPOXINV_MAX_BITLEN: u64 = 8; // maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). 
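One hunk above applies the same tightening to a trait implementation: `PartialEq for Error` now exists only in test builds, since real I/O errors are not comparable but unit tests still want `assert_eq!` on error values. A hedged sketch of that pattern with a stand-in error type:

```rust
#[derive(Debug)]
pub enum NetError {
    Io(std::io::Error), // io::Error itself does not implement PartialEq
    Timeout,
}

// Compiled only for this crate's unit tests, so tests can compare
// errors without exposing an approximate equality in production code.
#[cfg(test)]
impl PartialEq for NetError {
    fn eq(&self, other: &Self) -> bool {
        // variant-level comparison; payloads are ignored
        std::mem::discriminant(self) == std::mem::discriminant(other)
    }
}
```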
@@ -1455,9 +1455,9 @@ pub const MAX_MICROBLOCKS_UNCONFIRMED: usize = 1024; pub const MAX_HEADERS: usize = 2100; // how long a peer will be denied for if it misbehaves -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const DENY_BAN_DURATION: u64 = 30; // seconds -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; @@ -1719,9 +1719,8 @@ pub trait Requestable: std::fmt::Display { fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest; } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::collections::HashMap; use std::io::{Cursor, ErrorKind, Read, Write}; use std::net::*; @@ -3920,10 +3919,6 @@ pub mod test { self.network.peerdb.conn() } - pub fn peerdb_mut(&mut self) -> &mut PeerDB { - &mut self.network.peerdb - } - pub fn get_burnchain_view(&mut self) -> Result { let sortdb = self.sortdb.take().unwrap(); let view_res = { diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 28355d0e1a0..efe368efa1e 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -79,31 +79,31 @@ pub const WALK_STATE_TIMEOUT: u64 = 60; /// Total number of seconds for which a particular walk can exist. It will be reset if it exceeds /// this age. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const WALK_RESET_INTERVAL: u64 = 60; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const WALK_RESET_INTERVAL: u64 = 600; /// How often the node will consider pruning neighbors from its neighbor set. The node will prune /// neighbors from over-represented hosts and IP ranges in order to maintain connections to a /// diverse set of neighbors. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const PRUNE_FREQUENCY: u64 = 0; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const PRUNE_FREQUENCY: u64 = 43200; /// Not all neighbors discovered will have an up-to-date chain tip. This value is the highest /// discrepancy between the local burnchain block height and the remote node's burnchain block /// height for which the neighbor will be considered as a worthwhile peer to remember. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 25; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 288; /// How often to kick off neighbor walks. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const NEIGHBOR_WALK_INTERVAL: u64 = 0; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const NEIGHBOR_WALK_INTERVAL: u64 = 120; // seconds /// Probability that we begin an always-allowed peer walk if we're either in IBD or if we're not @@ -367,7 +367,7 @@ impl PeerNetwork { return true; } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn print_walk_diagnostics(&mut self) { let (mut inbound, mut outbound) = self.dump_peer_table(); @@ -397,7 +397,7 @@ impl PeerNetwork { debug!("{:?}: Walk finished ===================", &self.local_peer); } - #[cfg(not(any(test, feature = "testing")))] + #[cfg(not(test))] fn print_walk_diagnostics(&self) {} /// Update the state of our peer graph walk. 
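The `neighbors/mod.rs` hunks above all follow the paired-constant pattern: one name, two definitions, with `cfg` guaranteeing that exactly one survives into any given build, so tests run with aggressive timings while production keeps conservative ones. Reassembled from the `NEIGHBOR_WALK_INTERVAL` hunk above:

```rust
/// How often to kick off neighbor walks.
#[cfg(test)]
pub const NEIGHBOR_WALK_INTERVAL: u64 = 0; // tests: walk immediately

#[cfg(not(test))]
pub const NEIGHBOR_WALK_INTERVAL: u64 = 120; // production: every 2 minutes
```

The two predicates must remain exact complements, which is why each hunk changes both sides together: if one side said `any(test, feature = "testing")` while the other said `not(test)`, a build with only the `testing` feature enabled would see two conflicting definitions of the same constant.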
diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 3796a6c5f2d..45183cdf1b7 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5168,7 +5168,7 @@ impl PeerNetwork { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::{thread, time}; diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index ed24bc1168f..bdda12e6d42 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -481,7 +481,7 @@ impl NetworkState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::collections::HashSet; diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 87b16d7bbab..c33b7fea769 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -82,7 +82,7 @@ impl PeerNetwork { }; } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { test_debug!( "==== ORG NEIGHBOR DISTRIBUTION OF {:?} ===", @@ -376,7 +376,7 @@ impl PeerNetwork { } /// Dump our peer table - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn dump_peer_table(&mut self) -> (Vec, Vec) { let mut inbound: Vec = vec![]; let mut outbound: Vec = vec![]; @@ -447,7 +447,7 @@ impl PeerNetwork { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { if pruned_by_ip.len() > 0 || pruned_by_org.len() > 0 { let (mut inbound, mut outbound) = self.dump_peer_table(); diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index c06e495514e..2b735668ac4 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -515,7 +515,7 @@ impl StackerDBs { Self::instantiate(path, readwrite) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn connect_memory() -> StackerDBs { Self::instantiate(":memory:", true).unwrap() } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index e971d9ebfc7..40fbc7711a0 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -111,7 +111,7 @@ /// state periodically (whereas Gaia stores data for as long as the back-end storage provider's SLA /// indicates). -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub mod config; diff --git a/stackslib/src/net/stackerdb/tests/mod.rs b/stackslib/src/net/stackerdb/tests/mod.rs index 17c73daa045..0838342100d 100644 --- a/stackslib/src/net/stackerdb/tests/mod.rs +++ b/stackslib/src/net/stackerdb/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-#![allow(unused)] pub mod config; pub mod db; diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index b16b10291ff..f45e3acb93e 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -51,6 +51,7 @@ const NUM_NEIGHBORS: usize = 8; /// Some testable configurations for stacker DB configs impl StackerDBConfig { + #[cfg(test)] pub fn template() -> StackerDBConfig { StackerDBConfig { chunk_size: CHUNK_SIZE, diff --git a/testnet/stacks-node/src/tests/p2p/convergence.rs b/stackslib/src/net/tests/convergence.rs similarity index 99% rename from testnet/stacks-node/src/tests/p2p/convergence.rs rename to stackslib/src/net/tests/convergence.rs index 8c273e43ce2..8494f4ea46e 100644 --- a/testnet/stacks-node/src/tests/p2p/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -22,11 +22,12 @@ use clarity::vm::types::{QualifiedContractIdentifier, StandardPrincipalData}; use rand::prelude::*; use rand::thread_rng; use rlimit; -use stacks::core::PEER_VERSION_TESTNET; -use stacks::net::db::*; -use stacks::net::test::*; -use stacks::net::*; -use stacks::util_lib::test::*; + +use crate::core::PEER_VERSION_TESTNET; +use crate::net::db::*; +use crate::net::test::*; +use crate::net::*; +use crate::util_lib::test::*; fn setup_rlimit_nofiles() { info!("Attempt to set nofile rlimit to 4096 (required for these tests to run)"); diff --git a/stackslib/src/net/tests/download/mod.rs b/stackslib/src/net/tests/download/mod.rs index 5b191a1161c..430b92e4144 100644 --- a/stackslib/src/net/tests/download/mod.rs +++ b/stackslib/src/net/tests/download/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/inv/mod.rs b/stackslib/src/net/tests/inv/mod.rs index 04386e20970..04e8e0fd4fd 100644 --- a/stackslib/src/net/tests/inv/mod.rs +++ b/stackslib/src/net/tests/inv/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 602f943cb30..7a44a56788d 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::cell::RefCell; use std::{thread, time}; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index ebb7158b1ce..a74cb0fd2cc 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -13,8 +13,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] +pub mod convergence; pub mod download; pub mod httpcore; pub mod inv; diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs index d75bae21e8f..c408e9ee60f 100644 --- a/stackslib/src/net/tests/relay/mod.rs +++ b/stackslib/src/net/tests/relay/mod.rs @@ -12,7 +12,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-#![allow(unused)]
 
 pub mod epoch2x;
 pub mod nakamoto;
diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs
index d37802150fc..d1632f0b14f 100644
--- a/stackslib/src/util_lib/bloom.rs
+++ b/stackslib/src/util_lib/bloom.rs
@@ -592,7 +592,7 @@ impl BloomHash for BloomNodeHasher {
     }
 }
 
-#[cfg(any(test, feature = "testing"))]
+#[cfg(test)]
 pub mod test {
     use std::fs;
 
diff --git a/stackslib/src/util_lib/boot.rs b/stackslib/src/util_lib/boot.rs
index 2585fe1b753..95cfca9c412 100644
--- a/stackslib/src/util_lib/boot.rs
+++ b/stackslib/src/util_lib/boot.rs
@@ -43,7 +43,7 @@ pub fn boot_code_acc(boot_code_address: StacksAddress, boot_code_nonce: u64) ->
     }
 }
 
-#[cfg(any(test, feature = "testing"))]
+#[cfg(test)]
 pub fn boot_code_test_addr() -> StacksAddress {
     boot_code_addr(false)
 }
diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs
index 44a2772c001..83a7ab2a25b 100644
--- a/stackslib/src/util_lib/mod.rs
+++ b/stackslib/src/util_lib/mod.rs
@@ -5,7 +5,7 @@ pub mod boot;
 pub mod signed_structured_data;
 pub mod strings;
 
-#[cfg(any(test, feature = "testing"))]
+#[cfg(test)]
 pub mod test {
     use std::sync::mpsc::sync_channel;
     use std::{panic, process, thread};
diff --git a/stx-genesis/Cargo.toml b/stx-genesis/Cargo.toml
index 6914ca14a5e..39e97465ced 100644
--- a/stx-genesis/Cargo.toml
+++ b/stx-genesis/Cargo.toml
@@ -15,6 +15,3 @@ path = "src/lib.rs"
 [build-dependencies]
 libflate = "1.0.3"
 sha2 = { version = "0.10" }
-
-[features]
-testing = []
diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml
index fb05aa0355f..19165db0a82 100644
--- a/testnet/stacks-node/Cargo.toml
+++ b/testnet/stacks-node/Cargo.toml
@@ -43,20 +43,15 @@ warp = "0.3.5"
 tokio = "1.15"
 reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] }
 clarity = { path = "../../clarity", features = ["default", "testing"]}
-rstest = "0.17.0"
-rstest_reuse = "0.5.0"
 stacks-common = { path = "../../stacks-common", features = ["default", "testing"] }
 stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] }
 stacks-signer = { path = "../../stacks-signer", features = ["testing"] }
-stx-genesis = { path = "../../stx-genesis", features = ["testing"] }
-stdext = "0.3.1"
 tracing = "0.1.37"
 tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
 wsts = {workspace = true}
 mutants = "0.0.3"
 tiny_http = "0.12.0"
 http-types = "2.12"
-rlimit = "0.10.2"
 
 [[bin]]
 name = "stacks-node"
diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs
index 0ac8e151a90..a7892b9a2db 100644
--- a/testnet/stacks-node/src/tests/mod.rs
+++ b/testnet/stacks-node/src/tests/mod.rs
@@ -62,7 +62,6 @@ mod integrations;
 mod mempool;
 pub mod nakamoto_integrations;
 pub mod neon_integrations;
-pub mod p2p;
 mod signer;
 mod stackerdb;
diff --git a/testnet/stacks-node/src/tests/p2p/mod.rs b/testnet/stacks-node/src/tests/p2p/mod.rs
deleted file mode 100644
index c2a61de8ac8..00000000000
--- a/testnet/stacks-node/src/tests/p2p/mod.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (C) 2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see .
-
-/// Integration tests that verify that sets of nodes in various initial topologies will, over time,
-/// learn about every other node in the network
-pub mod convergence;

From e5b9a731e18d070b3142bcabcb4d1cc9f338549d Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Mon, 16 Sep 2024 14:26:39 -0500
Subject: [PATCH 589/910] chore: last cleanup from p2p::conv test movement

---
 stackslib/src/net/chat.rs          | 3 +--
 stackslib/src/net/neighbors/mod.rs | 4 ++--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs
index 99b07a60559..6cdf0b7e494 100644
--- a/stackslib/src/net/chat.rs
+++ b/stackslib/src/net/chat.rs
@@ -510,7 +510,7 @@ impl Neighbor {
             }
         };
 
-        #[cfg(any(test, feature = "testing"))]
+        #[cfg(test)]
         {
             // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization
            // for this peer
@@ -3060,7 +3060,6 @@ impl ConversationP2P {
 
 #[cfg(test)]
 mod test {
-    #![allow(unused)]
     use std::fs;
     use std::io::prelude::*;
     use std::io::{Read, Write};
diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs
index efe368efa1e..450dc04463d 100644
--- a/stackslib/src/net/neighbors/mod.rs
+++ b/stackslib/src/net/neighbors/mod.rs
@@ -42,9 +42,9 @@ pub use db::{NeighborReplacements, NeighborWalkDB, PeerDBNeighborWalk};
 pub use walk::{NeighborPingback, NeighborWalk, NeighborWalkResult};
 
 /// How often we can contact other neighbors, at a minimim
-#[cfg(any(test, feature = "testing"))]
+#[cfg(test)]
 pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 0;
-#[cfg(not(any(test, feature = "testing")))]
+#[cfg(not(test))]
 pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 600;
 
 /// Default number of seconds to wait for a reply from a neighbor

From 23482ebd1a5d4399d92aff5c529dd117ae75a25e Mon Sep 17 00:00:00 2001
From: wileyj <2847772+wileyj@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:40:40 -0700
Subject: [PATCH 590/910] Add new workflow for p2p convergence tests

---
 .github/workflows/ci.yml               | 23 +++++++
 .github/workflows/p2p-tests.yml        | 87 ++++++++++++++++++++++++++
 .github/workflows/standalone-tests.yml | 18 ++++++
 3 files changed, 128 insertions(+)
 create mode 100644 .github/workflows/p2p-tests.yml

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d1ae6522663..1c59f23e8db 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -186,6 +186,29 @@ jobs:
       - check-release
     uses: ./.github/workflows/bitcoin-tests.yml
 
+
+  p2p-tests:
+    if: |
+      needs.check-release.outputs.is_release == 'true' || (
+        github.event_name == 'workflow_dispatch' ||
+        github.event_name == 'pull_request' ||
+        github.event_name == 'merge_group' ||
+        (
+          contains('
+            refs/heads/master
+            refs/heads/develop
+            refs/heads/next
+          ', github.event.pull_request.head.ref) &&
+          github.event_name == 'push'
+        )
+      )
+    name: P2P Tests
+    needs:
+      - rustfmt
+      - create-cache
+      - check-release
+    uses: ./.github/workflows/p2p-tests.yml
+
   ## Test to run on a tagged release
   ##
   ## Runs when:
diff --git a/.github/workflows/p2p-tests.yml b/.github/workflows/p2p-tests.yml
new file mode 100644
index 00000000000..a8346e2948f
--- /dev/null
+++ b/.github/workflows/p2p-tests.yml
@@ -0,0 +1,87 @@
+## Github workflow to run p2p tests
+
+name: Tests::P2P
+
+on:
+  workflow_call:
+
+## env vars are transferred to composite action steps
+env:
+  BITCOIND_TEST: 0
+  RUST_BACKTRACE: full
+  SEGMENT_DOWNLOAD_TIMEOUT_MINS: 15
+  TEST_TIMEOUT: 30
+
+concurrency:
+  group: stackslib-tests-${{ github.head_ref || github.ref || github.run_id}}
+  ## Only cancel in progress if this is for a PR
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+jobs:
+  # p2p integration tests with code coverage
+  integration-tests:
+    name: Integration Tests
+    runs-on: ubuntu-latest
+    strategy:
+      ## Continue with the test matrix even if we've had a failure
+      fail-fast: false
+      ## Run a maximum of 32 concurrent tests from the test matrix
+      max-parallel: 32
+      matrix:
+        test-name:
+          - net::tests::convergence::test_walk_ring_allow_15
+          - net::tests::convergence::test_walk_ring_15_plain
+          - net::tests::convergence::test_walk_ring_15_pingback
+          - net::tests::convergence::test_walk_ring_15_org_biased
+          - net::tests::convergence::test_walk_line_allowed_15
+          - net::tests::convergence::test_walk_line_15_plain
+          - net::tests::convergence::test_walk_line_15_org_biased
+          - net::tests::convergence::test_walk_line_15_pingback
+          - net::tests::convergence::test_walk_star_allowed_15
+          - net::tests::convergence::test_walk_star_15_plain
+          - net::tests::convergence::test_walk_star_15_pingback
+          - net::tests::convergence::test_walk_star_15_org_biased
+          - net::tests::convergence::test_walk_inbound_line_15
+    steps:
+      ## Setup test environment
+      - name: Setup Test Environment
+        id: setup_tests
+        uses: stacks-network/actions/stacks-core/testenv@main
+        with:
+          btc-version: "25.0"
+
+      ## Increase open file descriptors limit
+      - name: Increase Open File Descriptors
+        run: |
+          sudo prlimit --nofile=4096:4096
+
+      ## Run test matrix using restored cache of archive file
+      ## - Test will timeout after env.TEST_TIMEOUT minutes
+      - name: Run Tests
+        id: run_tests
+        timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }}
+        uses: stacks-network/actions/stacks-core/run-tests@main
+        with:
+          test-name: ${{ matrix.test-name }}
+          threads: 1
+
+      ## Create and upload code coverage file
+      - name: Code Coverage
+        id: codecov
+        uses: stacks-network/actions/codecov@main
+        with:
+          test-name: ${{ matrix.test-name }}
+
+  check-tests:
+    name: Check Tests
+    runs-on: ubuntu-latest
+    if: always()
+    needs:
+      - integration-tests
+    steps:
+      - name: Check Tests Status
+        id: check_tests_status
+        uses: stacks-network/actions/check-jobs-status@main
+        with:
+          jobs: ${{ toJson(needs) }}
+          summary_print: "true"
diff --git a/.github/workflows/standalone-tests.yml b/.github/workflows/standalone-tests.yml
index e0fe2d345b4..8a56acc3ec6 100644
--- a/.github/workflows/standalone-tests.yml
+++ b/.github/workflows/standalone-tests.yml
@@ -21,6 +21,7 @@ on:
         - Atlas Tests
         - Bitcoin Tests
         - Epoch Tests
+        - P2P Tests
         - Slow Tests
         - Stacks-Core Tests
         - SBTC Tests
@@ -69,6 +70,23 @@ jobs:
       - create-cache
     uses: ./.github/workflows/bitcoin-tests.yml
 
+  ## Runs when:
+  ##  either or of the following:
+  ##  - workflow is 'Release Tests'
+  ##  - workflow is 'CI Tests'
+  ##  - workflow is 'P2P Tests'
+  p2p-tests:
+    if: |
+      (
+        inputs.workflow == 'Release Tests' ||
+        inputs.workflow == 'CI Tests' ||
+        inputs.workflow == 'P2P Tests'
+      )
+    name: P2P Tests
+    needs:
+      - create-cache
+    uses: ./.github/workflows/p2p-tests.yml
+
 #####################################################
 ## Runs when:
 ##   either or of the following:
2d3dc50f670cdc312546e435cf0fd8bda178c8a7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 16 Sep 2024 15:44:14 -0400 Subject: [PATCH 591/910] fix: correct logic handling submit_operation errors --- testnet/stacks-node/src/neon_node.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b7e2843ece2..4ba1e691115 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2753,19 +2753,21 @@ impl BlockMinerThread { } = self.config.get_node_config(false); let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); - self.failed_to_submit_last_attempt = match res { - Ok(_) => false, - Err(BurnchainControllerError::IdenticalOperation) => { - info!("Relayer: Block-commit already submitted"); - true - } + match res { + Ok(_) => self.failed_to_submit_last_attempt = false, Err(_) if mock_mining => { debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - true + self.failed_to_submit_last_attempt = true; + } + Err(BurnchainControllerError::IdenticalOperation) => { + info!("Relayer: Block-commit already submitted"); + self.failed_to_submit_last_attempt = true; + return None; } Err(e) => { warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); - true + self.failed_to_submit_last_attempt = true; + return None; } }; From edc73180f786baa2f4fc87e240193c0ffbcdb7d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:18:53 -0400 Subject: [PATCH 592/910] feat: report peer age in /v2/neighbors --- stackslib/src/net/api/getneighbors.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 9e7d0402daf..06f01e6e85a 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -54,6 +54,8 @@ pub struct RPCNeighbor { #[serde(skip_serializing_if = "Option::is_none")] #[serde(with = "serde_opt_vec_qci")] pub stackerdbs: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub age: Option, } /// Serialize and deserialize `Option>` @@ -95,6 +97,7 @@ impl RPCNeighbor { pkh: Hash160, auth: bool, stackerdbs: Vec, + age: Option, ) -> RPCNeighbor { RPCNeighbor { network_id: nk.network_id, @@ -104,6 +107,7 @@ impl RPCNeighbor { public_key_hash: pkh, authenticated: auth, stackerdbs: Some(stackerdbs), + age, } } } @@ -138,6 +142,7 @@ impl RPCNeighborsInfo { Hash160::from_node_public_key(&n.public_key), true, stackerdb_contract_ids, + None, ) }) .collect(); @@ -164,6 +169,7 @@ impl RPCNeighborsInfo { Hash160::from_node_public_key(&n.public_key), true, stackerdb_contract_ids, + None, ) }) .collect(); @@ -185,6 +191,7 @@ impl RPCNeighborsInfo { naddr.public_key_hash, convo.is_authenticated(), convo.get_stackerdb_contract_ids().to_vec(), + Some(convo.age()), )); } else { inbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( @@ -192,6 +199,7 @@ impl RPCNeighborsInfo { naddr.public_key_hash, convo.is_authenticated(), convo.get_stackerdb_contract_ids().to_vec(), + Some(convo.age()), )); } } From 95301f32b6c5fa7d5010edc0316c9a5ccb823b1b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:19:06 -0400 Subject: [PATCH 593/910] feat: compute peer age for p2p convo --- stackslib/src/net/chat.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 6cdf0b7e494..ba0b70b1a5a 100644 --- 
a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -609,6 +609,10 @@ impl ConversationP2P { } } + pub fn age(&self) -> u64 { + get_epoch_time_secs().saturating_sub(self.instantiated) + } + pub fn set_public_key(&mut self, pubkey_opt: Option) -> () { self.connection.set_public_key(pubkey_opt); } From 6af50f51f471c17fe97dca898a7778cebb4dce2b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:19:35 -0400 Subject: [PATCH 594/910] fix: pin connections to peers we're inv-syncing with, so they don't get pruned --- stackslib/src/net/inv/epoch2x.rs | 52 +++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index fc5f073b2e9..bbdd8f68aeb 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -973,6 +973,9 @@ pub struct InvState { /// What's the last reward cycle we _started_ the inv scan at? pub block_sortition_start: u64, + + /// event IDs of connections we established, so they don't get pruned + pinned: HashSet, } impl InvState { @@ -994,11 +997,13 @@ impl InvState { num_inv_syncs: 0, block_sortition_start: 0, + pinned: HashSet::new(), } } fn reset_sync_peers( &mut self, + network: &PeerNetwork, peers: HashSet, bootstrap_peers: &HashSet, max_neighbors: usize, @@ -1042,6 +1047,24 @@ impl InvState { added, &peers ); + + // if we're still connected to these peers, then keep them pinned + self.pinned.clear(); + for peer in peers.iter() { + if let Some(event_id) = network.get_event_id(&peer) { + self.pinned.insert(event_id); + } + } + } + + /// Pin a connection + pub fn pin_connection(&mut self, event_id: usize) { + self.pinned.insert(event_id); + } + + /// Get the set of connections this state machine is using + pub fn get_pinned_connections(&self) -> &HashSet { + &self.pinned } pub fn get_peer_status(&self, nk: &NeighborKey) -> NodeStatus { @@ -1801,6 +1824,7 @@ impl PeerNetwork { /// Start requesting the next batch of PoX inventories fn inv_getpoxinv_begin( &mut self, + pins: &mut HashSet, sortdb: &SortitionDB, nk: &NeighborKey, stats: &mut NeighborBlockStats, @@ -1821,6 +1845,8 @@ impl PeerNetwork { }; let payload = StacksMessageType::GetPoxInv(getpoxinv); + let event_id_opt = self.get_event_id(&nk); + let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) @@ -1830,6 +1856,10 @@ impl PeerNetwork { })?; stats.getpoxinv_begin(request, target_pox_reward_cycle); + if let Some(event_id) = event_id_opt { + pins.insert(event_id); + } + Ok(()) } @@ -1988,6 +2018,7 @@ impl PeerNetwork { /// Start requesting the next batch of block inventories fn inv_getblocksinv_begin( &mut self, + pins: &mut HashSet, sortdb: &SortitionDB, nk: &NeighborKey, stats: &mut NeighborBlockStats, @@ -2008,6 +2039,7 @@ impl PeerNetwork { let num_blocks_expected = getblocksinv.num_blocks; let payload = StacksMessageType::GetBlocksInv(getblocksinv); + let event_id_opt = self.get_event_id(nk); let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) @@ -2017,6 +2049,9 @@ impl PeerNetwork { })?; stats.getblocksinv_begin(request, target_block_reward_cycle, num_blocks_expected); + if let Some(event_id) = event_id_opt { + pins.insert(event_id); + } Ok(()) } @@ -2114,6 +2149,7 @@ impl PeerNetwork { /// Run a single state-machine to completion fn inv_sync_run( &mut self, + pins: &mut HashSet, sortdb: &SortitionDB, nk: &NeighborKey, stats: 
&mut NeighborBlockStats, @@ -2130,13 +2166,13 @@ impl PeerNetwork { debug!("Inv sync state is {:?}", &stats.state); let again = match stats.state { InvWorkState::GetPoxInvBegin => self - .inv_getpoxinv_begin(sortdb, nk, stats, request_timeout) + .inv_getpoxinv_begin(pins, sortdb, nk, stats, request_timeout) .and_then(|_| Ok(true))?, InvWorkState::GetPoxInvFinish => { self.inv_getpoxinv_try_finish(sortdb, nk, stats, ibd)? } InvWorkState::GetBlocksInvBegin => self - .inv_getblocksinv_begin(sortdb, nk, stats, request_timeout) + .inv_getblocksinv_begin(pins, sortdb, nk, stats, request_timeout) .and_then(|_| Ok(true))?, InvWorkState::GetBlocksInvFinish => { self.inv_getblocksinv_try_finish(nk, stats, ibd)? @@ -2231,9 +2267,10 @@ impl PeerNetwork { ) -> (bool, bool, Vec, Vec) { PeerNetwork::with_inv_state(self, |network, inv_state| { debug!( - "{:?}: Inventory state has {} block stats tracked", + "{:?}: Inventory state has {} block stats tracked on connections {:?}", &network.local_peer, - inv_state.block_stats.len() + inv_state.block_stats.len(), + inv_state.pinned, ); let mut all_done = true; @@ -2261,6 +2298,7 @@ impl PeerNetwork { return (true, true, vec![], vec![]); } + let mut new_pins = HashSet::new(); for (nk, stats) in inv_state.block_stats.iter_mut() { debug!( "{:?}: inv state-machine for {:?} is in state {:?}, at PoX {},target={}; blocks {},target={}; status {:?}, done={}", @@ -2275,7 +2313,7 @@ impl PeerNetwork { stats.done ); if !stats.done { - match network.inv_sync_run(sortdb, nk, stats, inv_state.request_timeout, ibd) { + match network.inv_sync_run(&mut new_pins, sortdb, nk, stats, inv_state.request_timeout, ibd) { Ok(d) => d, Err(net_error::StaleView) => { // stop work on this state machine -- it needs to be restarted. @@ -2341,6 +2379,9 @@ impl PeerNetwork { } } } + let _ = new_pins + .into_iter() + .map(|event_id| inv_state.pin_connection(event_id)); if all_done { let mut new_sync_peers = network.get_outbound_sync_peers(); @@ -2450,6 +2491,7 @@ impl PeerNetwork { } inv_state.reset_sync_peers( + network, good_sync_peers_set, &bootstrap_peers, network.connection_opts.num_neighbors as usize, From 25e84f23e5d0d9783dc781d01c21dd414495dd3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:19:59 -0400 Subject: [PATCH 595/910] fix: report pinned connections so the pruner won't disconnect nakamoto inv sync peers --- stackslib/src/net/inv/nakamoto.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index f24ad1a87ce..d5d4931e344 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::StacksBlockId; @@ -557,6 +557,10 @@ impl NakamotoInvStateMachine { self.comms.reset(); } + pub fn get_pinned_connections(&self) -> &HashSet { + self.comms.get_pinned_connections() + } + /// Remove state for a particular neighbor pub fn del_peer(&mut self, peer: &NeighborAddress) { self.inventories.remove(peer); From c514062a53feab5fab1bd26ae12440c048232d0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:20:53 -0400 Subject: [PATCH 596/910] fix: don't unpin a connection once it connects --- stackslib/src/net/neighbors/comms.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 8fdf38d87b3..f3e160ff578 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -50,6 +50,8 @@ pub trait NeighborComms { fn get_connecting(&self, network: &PeerNetwork, nk: &NK) -> Option; /// Remove a neighbor from connecting state fn remove_connecting(&mut self, network: &PeerNetwork, nk: &NK); + /// Remove a neighbor from connecting state due to an error + fn remove_connecting_error(&mut self, network: &PeerNetwork, nk: &NK); /// Mark a neighbor as dead (inactive, unreachable, etc.) fn add_dead(&mut self, network: &PeerNetwork, nk: &NK); /// Mark a neighbor as broken (in protocol violation) @@ -150,7 +152,7 @@ pub trait NeighborComms { // is the peer network still working? if !network.is_connecting(event_id) { debug!("{:?}: Failed to connect to {:?} (event {} no longer connecting; assumed timed out)", network.get_local_peer(), event_id, &nk); - self.remove_connecting(network, &nk); + self.remove_connecting_error(network, &nk); return Err(net_error::PeerNotConnected); } @@ -518,7 +520,13 @@ impl NeighborComms for PeerNetworkComms { .map(|event_ref| *event_ref) } + /// Remove a connecting neighbor because it connected fn remove_connecting(&mut self, network: &PeerNetwork, nk: &NK) { + self.connecting.remove(&nk.to_neighbor_key(network)); + } + + /// Remove a connecting neighbor due to an error. The connection will be unpinned. 
+ fn remove_connecting_error(&mut self, network: &PeerNetwork, nk: &NK) { let event_id_opt = self.connecting.remove(&nk.to_neighbor_key(network)); if let Some(event_id) = event_id_opt { self.unpin_connection(event_id); From a76ffa45bcf261d8e9f7b8b1647d661aaf4fec64 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:21:07 -0400 Subject: [PATCH 597/910] fix: don't prune connections the inv state machines and stackerdb state machine are using --- stackslib/src/net/p2p.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 45183cdf1b7..20144f0d723 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -2399,7 +2399,7 @@ impl PeerNetwork { } /// Prune inbound and outbound connections if we can - fn prune_connections(&mut self) -> () { + pub(crate) fn prune_connections(&mut self) -> () { if cfg!(test) && self.connection_opts.disable_network_prune { return; } @@ -2443,6 +2443,22 @@ impl PeerNetwork { } } + // if we're in the middle of epoch2 inv sync, then don't prune any connections it + // established + if let Some(inv_state) = self.inv_state.as_ref() { + if inv_state.get_pinned_connections().contains(event_id) { + safe.insert(*event_id); + } + } + + // if we're in the middle of nakamoto inv sync, then don't prune any connections it + // established + if let Some(nakamoto_inv) = self.inv_state_nakamoto.as_ref() { + if nakamoto_inv.get_pinned_connections().contains(event_id) { + safe.insert(*event_id); + } + } + // if we're running stacker DBs, then don't prune any outbound connections it // established if let Some(stacker_db_syncs) = self.stacker_db_syncs.as_ref() { @@ -2454,6 +2470,7 @@ impl PeerNetwork { } } + debug!("Pinned connections: {:?}", &safe); self.prune_frontier(&safe); } From fdee274c40a94d8a65f292297eb5f950eea10914 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:21:32 -0400 Subject: [PATCH 598/910] chore: keep stackerdb replicas pinned across restarts, and only unpin on irrecoverable error --- stackslib/src/net/stackerdb/sync.rs | 50 ++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 467bc608e15..08e6e978eab 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -27,6 +27,7 @@ use stacks_common::util::hash::Hash160; use crate::net::chat::ConversationP2P; use crate::net::connection::ReplyHandleP2P; use crate::net::db::PeerDB; +use crate::net::neighbors::comms::ToNeighborKey; use crate::net::neighbors::NeighborComms; use crate::net::p2p::PeerNetwork; use crate::net::stackerdb::{ @@ -216,8 +217,22 @@ impl StackerDBSync { self.expected_versions.clear(); self.downloaded_chunks.clear(); - // reset comms, but keep all replicas pinned + // reset comms, but keep all connected replicas pinned self.comms.reset(); + if let Some(network) = network { + for naddr in self.replicas.iter() { + if let Some(event_id) = network.get_event_id(&naddr.to_neighbor_key(network)) { + self.comms.pin_connection(event_id); + debug!( + "{:?}: {}: reuse connection for replica {:?} on event {}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + event_id + ); + } + } + } // reload from config self.num_slots = config.num_slots() as usize; @@ -240,6 +255,15 @@ impl StackerDBSync { self.comms.get_pinned_connections() } + /// Unpin and remove a connected replica by naddr + pub fn 
unpin_connected_replica(&mut self, network: &PeerNetwork, naddr: &NeighborAddress) { + let nk = naddr.to_neighbor_key(network); + if let Some(event_id) = network.get_event_id(&nk) { + self.comms.unpin_connection(event_id); + } + self.connected_replicas.remove(&naddr); + } + /// Make a chunk inv request pub fn make_getchunkinv(&self, rc_consensus_hash: &ConsensusHash) -> StacksMessageType { StacksMessageType::StackerDBGetChunkInv(StackerDBGetChunkInvData { @@ -743,6 +767,7 @@ impl StackerDBSync { &network.get_chain_view().rc_consensus_hash, &db_data.rc_consensus_hash ); + // don't unpin, since it's usually transient self.connected_replicas.remove(&naddr); continue; } @@ -756,11 +781,13 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView || data.error_code == NackErrorCodes::FutureView { + self.connected_replicas.remove(&naddr); self.stale_neighbors.insert(naddr); + } else { + self.unpin_connected_replica(network, &naddr); } continue; } @@ -788,7 +815,7 @@ impl StackerDBSync { ); // disconnect - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } @@ -887,11 +914,13 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView || data.error_code == NackErrorCodes::FutureView { + self.connected_replicas.remove(&naddr); self.stale_neighbors.insert(naddr); + } else { + self.unpin_connected_replica(network, &naddr); } continue; } @@ -902,7 +931,7 @@ impl StackerDBSync { &self.smart_contract_id, &x ); - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } }; @@ -958,6 +987,7 @@ impl StackerDBSync { ); let mut requested = 0; + let mut unpin = HashSet::new(); // fill up our comms with $capacity requests for _i in 0..self.request_capacity { @@ -1001,7 +1031,7 @@ impl StackerDBSync { &selected_neighbor, &e ); - self.connected_replicas.remove(&selected_neighbor); + unpin.insert(selected_neighbor.clone()); continue; } @@ -1013,6 +1043,10 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_fetch_priorities.len(); } + let _ = unpin + .into_iter() + .map(|naddr| self.unpin_connected_replica(network, &naddr)); + if requested == 0 && self.comms.count_inflight() == 0 { return Err(net_error::PeerNotConnected); } @@ -1058,7 +1092,7 @@ impl StackerDBSync { &self.smart_contract_id, &x ); - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } }; @@ -1072,7 +1106,7 @@ impl StackerDBSync { &naddr, data.slot_id ); - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } From 7f34262c6c81414efdaf02603f120a68a29a83f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:21:54 -0400 Subject: [PATCH 599/910] chore: enhance stackerdb test to force the network pruner to run, so as to verify that connection pinning prevents decoherence --- stackslib/src/net/stackerdb/tests/sync.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index f45e3acb93e..565a97f4222 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -1070,6 +1070,19 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, 
peer_config.connection_opts.disable_stackerdb_get_chunks = true; } + // run up against pruner limits + peer_config.connection_opts.disable_network_prune = false; + peer_config.connection_opts.num_neighbors = 5; + peer_config.connection_opts.num_clients = 5; + peer_config.connection_opts.soft_num_neighbors = 5; + peer_config.connection_opts.soft_num_clients = 5; + peer_config.connection_opts.max_neighbors_per_host = 5; + peer_config.connection_opts.max_clients_per_host = 5; + peer_config.connection_opts.soft_max_neighbors_per_host = 5; + peer_config.connection_opts.soft_max_neighbors_per_org = 5; + peer_config.connection_opts.soft_max_clients_per_host = 5; + peer_config.connection_opts.max_neighbors_of_neighbor = 5; + // short-lived walks... peer_config.connection_opts.walk_max_duration = 10; let idx = add_stackerdb(&mut peer_config, Some(StackerDBConfig::template())); @@ -1129,6 +1142,9 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, peers[i].network.stacker_db_configs = peer_db_configs[i].clone(); let res = peers[i].step_with_ibd(false); + // force this to run + peers[i].network.prune_connections(); + if let Ok(res) = res { check_sync_results(&res); let rc_ch = peers[i].network.get_chain_view().rc_consensus_hash.clone(); From dbf7bf5312028e747039d2fa329135fd127ae709 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:22:23 -0400 Subject: [PATCH 600/910] chore: fix test --- stackslib/src/net/tests/httpcore.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index d9c62eedf67..4bcf52605c2 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -418,6 +418,7 @@ fn test_http_response_type_codec() { .unwrap(), authenticated: true, stackerdbs: Some(vec![]), + age: None, }, RPCNeighbor { network_id: 3, @@ -433,6 +434,7 @@ fn test_http_response_type_codec() { .unwrap(), authenticated: false, stackerdbs: Some(vec![]), + age: None, }, ], inbound: vec![], From dc454e171799eb902723659b0959552435bafa20 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 18:29:55 -0700 Subject: [PATCH 601/910] fix: use `:principal` in metric name --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f44d988138f..2727205f640 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -648,7 +648,7 @@ impl StacksClient { address: &StacksAddress, ) -> Result { debug!("stacks_node_client: Getting account info..."); - let timer_label = format!("{}/v2/accounts/:stacks_address", self.http_origin); + let timer_label = format!("{}/v2/accounts/:principal", self.http_origin); let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { self.stacks_node_client From 02fe4cb2416b66dc403c45032877ff76adbf5d0d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Sep 2024 19:19:44 -0700 Subject: [PATCH 602/910] Check that stackerdb is set before configuring the signer Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 5 ++++ stacks-signer/src/client/stacks_client.rs | 36 +++++++++++++++++++---- stacks-signer/src/runloop.rs | 26 ++++++++++++++-- 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 
5ce87062747..d93e03f1ba6 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -603,4 +603,9 @@ pub(crate) mod tests { serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); format!("HTTP/1.1 200 OK\n\n{response_json}") } + + pub fn build_get_last_set_cycle_response(cycle: u64) -> String { + let clarity_value = ClarityValue::UInt(cycle as u128); + build_read_only_response(&clarity_value) + } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cc780166afa..58c88c1cc49 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -19,7 +19,7 @@ use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ @@ -162,6 +162,20 @@ impl StacksClient { Ok(sortition_info) } + /// Get the last set reward cycle stored within the stackerdb contract + pub fn get_last_set_cycle(&self) -> Result { + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); + let function_name_str = "stackerdb-get-last-set-cycle"; + let function_name = ClarityName::from(function_name_str); + let value = self.read_only_contract_call( + &signer_stackerdb_contract_id.issuer.clone().into(), + &signer_stackerdb_contract_id.name, + &function_name, + &[], + )?; + Ok(value.expect_u128()?) + } + /// Retrieve the signer slots stored within the stackerdb contract pub fn get_stackerdb_signer_slots( &self, @@ -962,11 +976,11 @@ mod tests { use super::*; use crate::client::tests::{ build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_medium_estimated_fee_ustx_response, - build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, - build_get_tenure_tip_response, build_get_vote_for_aggregate_key_response, - build_get_weight_threshold_response, build_read_only_response, write_response, - MockServerClient, + build_get_last_round_response, build_get_last_set_cycle_response, + build_get_medium_estimated_fee_ustx_response, build_get_peer_info_response, + build_get_pox_data_response, build_get_round_info_response, build_get_tenure_tip_response, + build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, + build_read_only_response, write_response, MockServerClient, }; #[test] @@ -1623,4 +1637,14 @@ mod tests { write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), header); } + + #[test] + fn get_last_set_cycle_should_succeed() { + let mock = MockServerClient::new(); + let reward_cycle = thread_rng().next_u64(); + let response = build_get_last_set_cycle_response(reward_cycle); + let h = spawn(move || mock.client.get_last_set_cycle()); + write_response(mock.server, response.as_bytes()); + assert_eq!(h.join().unwrap().unwrap(), reward_cycle as u128); + } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 5b05393bd78..7f16210ebd6 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -34,6 +34,17 @@ use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, S use 
crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; +#[derive(thiserror::Error, Debug)] +/// Configuration error type +pub enum ConfigurationError { + /// Error occurred while fetching data from the stacks node + #[error("{0}")] + ClientError(#[from] ClientError), + /// The stackerdb signer config is not yet updated + #[error("The stackerdb config is not yet updated")] + StackerDBNotUpdated, +} + /// The internal signer state info #[derive(PartialEq, Clone, Debug)] pub struct StateInfo { @@ -274,14 +285,23 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo fn get_signer_config( &mut self, reward_cycle: u64, - ) -> Result, ClientError> { + ) -> Result, ConfigurationError> { + // We can only register for a reward cycle if its stackerdb has been updated + let last_calculated_reward_cycle = + self.stacks_client.get_last_set_cycle().inspect_err(|e| { + warn!("Error while fetching last calculated reward cycle: {e:?}"); + })?; + if last_calculated_reward_cycle < reward_cycle as u128 { + return Err(ConfigurationError::StackerDBNotUpdated); + } + // We can only register for a reward cycle if a reward set exists. let signer_entries = match self.get_parsed_reward_set(reward_cycle) { Ok(Some(x)) => x, Ok(None) => return Ok(None), Err(e) => { warn!("Error while fetching reward set {reward_cycle}: {e:?}"); - return Err(e); + return Err(e.into()); } }; let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) @@ -289,7 +309,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(x) => x, Err(e) => { warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); - return Err(e); + return Err(e.into()); } }; let current_addr = self.stacks_client.get_signer_address(); From 78b715cf7eb52e77bbcd41d1fc4a720d45df6af9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Sep 2024 19:29:30 -0700 Subject: [PATCH 603/910] Ensure the last set cycle is set by putting it on an exponential backoff to prevent waiting needlessly for a burn block Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 7f16210ebd6..970b04d0259 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -286,15 +286,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo &mut self, reward_cycle: u64, ) -> Result, ConfigurationError> { - // We can only register for a reward cycle if its stackerdb has been updated - let last_calculated_reward_cycle = - self.stacks_client.get_last_set_cycle().inspect_err(|e| { - warn!("Error while fetching last calculated reward cycle: {e:?}"); - })?; - if last_calculated_reward_cycle < reward_cycle as u128 { - return Err(ConfigurationError::StackerDBNotUpdated); - } - // We can only register for a reward cycle if a reward set exists. let signer_entries = match self.get_parsed_reward_set(reward_cycle) { Ok(Some(x)) => x, @@ -304,6 +295,25 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo return Err(e.into()); } }; + + // Ensure that the stackerdb has been updated for the reward cycle before proceeding + retry_with_exponential_backoff(|| { + let last_calculated_reward_cycle = self + .stacks_client + .get_last_set_cycle() + .map_err(|e| backoff::Error::transient(e.into()))?; + if last_calculated_reward_cycle < reward_cycle as u128 { + warn!( + "Stackerdb has not been updated for reward cycle {reward_cycle}. 
Last calculated reward cycle is {last_calculated_reward_cycle}." + ); + Err(backoff::Error::transient( + ConfigurationError::StackerDBNotUpdated, + )) + } else { + Ok(()) + } + })?; + let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) { Ok(x) => x, From 9b09f0b3776d65ab25fc9cd21ee4c1c15f465c2f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 10:29:15 -0500 Subject: [PATCH 604/910] call correct .signers function, error fast rather than retry --- stacks-signer/src/client/mod.rs | 2 +- stacks-signer/src/client/stacks_client.rs | 4 +-- stacks-signer/src/runloop.rs | 39 +++++++++++------------ 3 files changed, 21 insertions(+), 24 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index d93e03f1ba6..ccf7a993f5a 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -605,7 +605,7 @@ pub(crate) mod tests { } pub fn build_get_last_set_cycle_response(cycle: u64) -> String { - let clarity_value = ClarityValue::UInt(cycle as u128); + let clarity_value = ClarityValue::okay(ClarityValue::UInt(cycle as u128)).unwrap(); build_read_only_response(&clarity_value) } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 58c88c1cc49..c10ceba7795 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -165,7 +165,7 @@ impl StacksClient { /// Get the last set reward cycle stored within the stackerdb contract pub fn get_last_set_cycle(&self) -> Result { let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); - let function_name_str = "stackerdb-get-last-set-cycle"; + let function_name_str = "get-last-set-cycle"; let function_name = ClarityName::from(function_name_str); let value = self.read_only_contract_call( &signer_stackerdb_contract_id.issuer.clone().into(), @@ -173,7 +173,7 @@ impl StacksClient { &function_name, &[], )?; - Ok(value.expect_u128()?) + Ok(value.expect_result_ok()?.expect_u128()?) } /// Retrieve the signer slots stored within the stackerdb contract diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 970b04d0259..1988be47858 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -297,31 +297,28 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo }; // Ensure that the stackerdb has been updated for the reward cycle before proceeding - retry_with_exponential_backoff(|| { - let last_calculated_reward_cycle = self - .stacks_client - .get_last_set_cycle() - .map_err(|e| backoff::Error::transient(e.into()))?; - if last_calculated_reward_cycle < reward_cycle as u128 { + let last_calculated_reward_cycle = + self.stacks_client.get_last_set_cycle().map_err(|e| { warn!( - "Stackerdb has not been updated for reward cycle {reward_cycle}. Last calculated reward cycle is {last_calculated_reward_cycle}." + "Failed to fetch last calculated stackerdb cycle from stacks-node"; + "reward_cycle" => reward_cycle, + "err" => ?e ); - Err(backoff::Error::transient( - ConfigurationError::StackerDBNotUpdated, - )) - } else { - Ok(()) - } - })?; + ConfigurationError::StackerDBNotUpdated + })?; + if last_calculated_reward_cycle < reward_cycle as u128 { + warn!( + "Stackerdb has not been updated for reward cycle {reward_cycle}. Last calculated reward cycle is {last_calculated_reward_cycle}." 
+ ); + return Err(ConfigurationError::StackerDBNotUpdated); + } - let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) - { - Ok(x) => x, - Err(e) => { + let signer_slot_ids = self + .get_parsed_signer_slots(&self.stacks_client, reward_cycle) + .map_err(|e| { warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); - return Err(e.into()); - } - }; + e + })?; let current_addr = self.stacks_client.get_signer_address(); let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { From 0fb886db46a97929ee3793cf094f56d1d809efc0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 10:38:45 -0500 Subject: [PATCH 605/910] test: update reloads_signer_set_in --- testnet/stacks-node/src/tests/signer/v0.rs | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b3a78fcf6fc..04f848670aa 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -831,9 +831,7 @@ fn reloads_signer_set_in() { ); info!("Waiting for signer set calculation."); - let mut reward_set_calculated = false; let short_timeout = Duration::from_secs(30); - let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event let reward_cycle = signer_test.get_current_reward_cycle() + 1; @@ -841,21 +839,23 @@ fn reloads_signer_set_in() { .running_nodes .btc_regtest_controller .build_next_block(1); - while !reward_set_calculated { - let reward_set = signer_test + wait_for(short_timeout.as_secs(), || { + let reward_set = match signer_test .stacks_client .get_reward_set_signers(reward_cycle) - .expect("Failed to check if reward set is calculated"); - reward_set_calculated = reward_set.is_some(); - if reward_set_calculated { - info!("Signer set: {:?}", reward_set.unwrap()); + { + Ok(x) => x, + Err(e) => { + warn!("Failed to check if reward set is calculated yet: {e:?}. Will try again"); + return Ok(false); + } + }; + if let Some(ref set) = reward_set { + info!("Signer set: {:?}", set); } - std::thread::sleep(Duration::from_secs(1)); - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for reward set calculation" - ); - } + Ok(reward_set.is_some()) + }) + .expect("Timed out waiting for reward set to be calculated"); info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state From bba65534ecff2e1965bcfac7ee73d708bc44eae0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 11:56:07 -0400 Subject: [PATCH 606/910] fix: limit number of UTXOs retrieved with `listunspent` This prevents the response from being too large and exceeding the 16MB limit that we support. 
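The fix below bounds the RPC response by passing bitcoind's `maximumCount` query option to `listunspent`. A minimal sketch of the resulting positional parameter list — built with `serde_json` directly; the helper name and arguments here are illustrative, not the node's actual config plumbing:

```rust
use serde_json::{json, Value};

/// Build the positional params for a bitcoind `listunspent` JSON-RPC call,
/// capping the number of UTXOs returned via the `maximumCount` query option
/// so the response stays well under the node's message-size limit.
fn listunspent_params(addresses: &[String], min_btc: &str, max_count: u64) -> Vec<Value> {
    let min_conf = 0i64;
    let max_conf = 9_999_999i64;
    vec![
        min_conf.into(),
        max_conf.into(),
        addresses.into(),
        false.into(), // include_unsafe
        // query options: filter dust and bound the number of results
        json!({ "minimumAmount": min_btc, "maximumCount": max_count }),
    ]
}

fn main() {
    let params = listunspent_params(&["bcrt1qexample".to_string()], "0.00010000", 1024);
    println!("{}", serde_json::to_string_pretty(&params).unwrap());
}
```
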
--- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index e342b452fcd..9399ff1eaed 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2599,6 +2599,9 @@ impl BitcoinRPCRequest { let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); + // Specify the maximum number of UTXOs to get from listunspent, to + // ensure the response is not too large. + let maximum_count = 1024; let payload = BitcoinRPCRequest { method: "listunspent".to_string(), @@ -2607,7 +2610,7 @@ impl BitcoinRPCRequest { max_conf.into(), addresses.into(), include_unsafe.into(), - json!({ "minimumAmount": minimum_amount }), + json!({ "minimumAmount": minimum_amount, "maximumCount": maximum_count }), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), From e8e7de132ead7384422a3b34c09d0d5af3e9d172 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 12:44:03 -0500 Subject: [PATCH 607/910] Update stacks-signer/src/client/stacks_client.rs Co-authored-by: Brice Dobry --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 2dc9f4688a2..d96bea94c08 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -581,7 +581,7 @@ impl StacksClient { backoff::Error::permanent(e.into()) })?; if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)); + Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)) } else { warn!("Got error response ({status}): {}", error_data.err_msg); Err(backoff::Error::permanent(ClientError::RequestFailure( From 8c368eceda90ce71a9bb1681443928c7add5ee9f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 15:57:05 -0400 Subject: [PATCH 608/910] test: add test validating new config option `max_unspent_utxos` --- .../burnchains/bitcoin_regtest_controller.rs | 9 ++-- testnet/stacks-node/src/config.rs | 13 +++++ .../src/tests/nakamoto_integrations.rs | 2 - .../src/tests/neon_integrations.rs | 53 ++++++++++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 1 - 5 files changed, 70 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 9399ff1eaed..b42007da426 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2314,6 +2314,10 @@ impl UTXOSet { pub fn total_available(&self) -> u64 { self.utxos.iter().map(|o| o.amount).sum() } + + pub fn num_utxos(&self) -> usize { + self.utxos.len() + } } #[derive(Debug, Clone)] @@ -2599,9 +2603,6 @@ impl BitcoinRPCRequest { let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); - // Specify the maximum number of UTXOs to get from listunspent, to - // ensure the response is not too large. 
- let maximum_count = 1024; let payload = BitcoinRPCRequest { method: "listunspent".to_string(), @@ -2610,7 +2611,7 @@ impl BitcoinRPCRequest { max_conf.into(), addresses.into(), include_unsafe.into(), - json!({ "minimumAmount": minimum_amount, "maximumCount": maximum_count }), + json!({ "minimumAmount": minimum_amount, "maximumCount": config.burnchain.max_unspent_utxos }), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3ff7e8bdb60..3852bf42241 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1446,6 +1446,10 @@ pub struct BurnchainConfig { /// fault injection to simulate a slow burnchain peer. /// Delay burnchain block downloads by the given number of millseconds pub fault_injection_burnchain_block_delay: u64, + /// The maximum number of unspent UTXOs to request from the bitcoin node. + /// This value is passed as the `maximumCount` query option to the + /// `listunspent` RPC call. + pub max_unspent_utxos: Option, } impl BurnchainConfig { @@ -1486,6 +1490,7 @@ impl BurnchainConfig { ast_precheck_size_height: None, affirmation_overrides: HashMap::new(), fault_injection_burnchain_block_delay: 0, + max_unspent_utxos: Some(1024), } } pub fn get_rpc_url(&self, wallet: Option) -> String { @@ -1582,6 +1587,7 @@ pub struct BurnchainConfigFile { pub ast_precheck_size_height: Option, pub affirmation_overrides: Option>, pub fault_injection_burnchain_block_delay: Option, + pub max_unspent_utxos: Option, } impl BurnchainConfigFile { @@ -1797,6 +1803,13 @@ impl BurnchainConfigFile { fault_injection_burnchain_block_delay: self .fault_injection_burnchain_block_delay .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), + max_unspent_utxos: self + .max_unspent_utxos + .map(|val| { + assert!(val <= 1024, "Value for max_unspent_utxos should be <= 1024"); + val + }) + .or(default_burnchain_config.max_unspent_utxos), }; if let BitcoinNetworkType::Mainnet = config.get_bitcoin_network().1 { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7379f1f16a5..32924ab7b78 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2274,8 +2274,6 @@ fn correct_burn_outs() { let mut last_block_time = None; for block in new_blocks_with_reward_set.iter() { - let cycle_number = block["cycle_number"].as_u64().unwrap(); - let reward_set = block["reward_set"].as_object().unwrap(); if let Some(block_time) = block["block_time"].as_u64() { if let Some(last) = last_block_time { assert!(block_time > last, "Block times should be increasing"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 6b02a3fac8a..5494f41302e 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -62,6 +62,7 @@ use stacks::net::atlas::{ AtlasConfig, AtlasDB, GetAttachmentResponse, GetAttachmentsInvResponse, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, }; +use stacks::types::PublicKey; use stacks::util_lib::boot::{boot_code_addr, boot_code_id}; use stacks::util_lib::db::{query_row_columns, query_rows, u64_to_sql}; use stacks::util_lib::signed_structured_data::pox4::{ @@ -82,7 +83,7 @@ use super::{ make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, SK_2, SK_3, }; -use 
crate::burnchains::bitcoin_regtest_controller::{self, BitcoinRPCRequest, UTXO}; +use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; @@ -12794,3 +12795,53 @@ fn mock_miner_replay() { miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +#[test] +#[ignore] +/// Verify that the config option, `burnchain.max_unspent_utxos`, is respected. +fn listunspent_max_utxos() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = neon_integration_test_conf(); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + conf.node.prometheus_bind = Some(prom_bind.clone()); + + conf.burnchain.max_rbf = 1000000; + conf.burnchain.max_unspent_utxos = Some(10); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let keychain = Keychain::default(conf.node.seed.clone()); + let mut op_signer = keychain.generate_op_signer(); + + let (_, network_id) = conf.burnchain.get_bitcoin_network(); + let hash160 = Hash160::from_data(&op_signer.get_public_key().to_bytes()); + let address = BitcoinAddress::from_bytes_legacy( + network_id, + LegacyBitcoinAddressType::PublicKeyHash, + &hash160.0, + ) + .expect("Public key incorrect"); + + let filter_addresses = vec![addr2str(&address)]; + + let res = BitcoinRPCRequest::list_unspent(&conf, filter_addresses, false, 1, &None, 0); + let utxos = res.expect("Failed to get utxos"); + assert_eq!(utxos.num_utxos(), 10); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); +} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b3a78fcf6fc..4ec7c2f98c2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4796,7 +4796,6 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // Induce block N+2 to get mined let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - sender_nonce += 1; let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+2"); From a66e3b606e4579924f771414338ede9aea39f74a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 16:05:24 -0400 Subject: [PATCH 609/910] test: add new test to bitcoin-tests.yml --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a7a483665ee..e618eedebe6 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -74,6 +74,7 @@ jobs: - tests::neon_integrations::min_txs - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - tests::neon_integrations::mock_miner_replay + - tests::neon_integrations::listunspent_max_utxos - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration From 2b3b0d01e6f5c9a2fb65a1d33214f7dba7a6a004 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein 
Date: Tue, 17 Sep 2024 15:28:36 -0500 Subject: [PATCH 610/910] chore: fix two flaky tests Fixes: * signer::v0::locally_rejected_blocks_overriden_by_global_acceptance * signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds These tests used to depend on the `blocks_mined` counter and then immediately check a `v2/info` assertion -- this is a race condition: the stacks-node may not have processed the mined block yet. This caused test flake in CI (but usually not in local runs where machines are fast enough to never experience this condition). --- testnet/stacks-node/src/tests/signer/v0.rs | 98 ++++++++++------------ 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 04f848670aa..13bc664d2a9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4128,25 +4128,24 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); sender_nonce += 1; let info_after = signer_test @@ -4196,13 +4195,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); loop { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events @@ -4274,13 +4274,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = 
signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4348,31 +4349,31 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); + assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height @@ -4400,13 +4401,12 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let start_time = Instant::now(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - loop { + wait_for(short_timeout.as_secs(), || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4423,15 +4423,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { } }) .collect::>(); - if ignored_signers.len() + ignoring_signers.len() == num_signers { - break; - } - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block proposal acceptance", - ); - sleep_ms(1000); - } + Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4464,25 +4458,23 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .lock() .unwrap() .replace(Vec::new()); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, 
send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); let info_after = signer_test .stacks_client From fdbf170ff977a3e35673e778a78a25a90edd6d0b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 16:51:35 -0400 Subject: [PATCH 611/910] test: don't stop bitcoind at end of test This causes a problem in CI. --- testnet/stacks-node/src/tests/neon_integrations.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5494f41302e..967947cc569 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12840,8 +12840,4 @@ fn listunspent_max_utxos() { let res = BitcoinRPCRequest::list_unspent(&conf, filter_addresses, false, 1, &None, 0); let utxos = res.expect("Failed to get utxos"); assert_eq!(utxos.num_utxos(), 10); - - btcd_controller - .stop_bitcoind() - .expect("Failed to stop bitcoind"); } From c44954d96f65df54497d081f7b8b993d998fab55 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 16:34:59 -0500 Subject: [PATCH 612/910] chore: signer tests should wait for networking to come back up after 3.0 boundary --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 13bc664d2a9..a47422431b8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -264,6 +264,9 @@ impl SignerTest { info!("Signers initialized"); self.run_until_epoch_3_boundary(); + std::thread::sleep(Duration::from_secs(1)); + wait_for(60, || Ok(get_chain_info_opt(&self.running_nodes.conf).is_some())) + .expect("Timed out waiting for network to restart after 3.0 boundary reached"); // Wait until we see the first block of epoch 3.0. 
// Note, we don't use `nakamoto_blocks_mined` counter, because there From 6388ed703eff73a81164c2cbf0f4c332e1025993 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 16:37:40 -0500 Subject: [PATCH 613/910] chore: cargo fmt --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a47422431b8..3ac091b0e29 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -265,8 +265,10 @@ impl SignerTest { self.run_until_epoch_3_boundary(); std::thread::sleep(Duration::from_secs(1)); - wait_for(60, || Ok(get_chain_info_opt(&self.running_nodes.conf).is_some())) - .expect("Timed out waiting for network to restart after 3.0 boundary reached"); + wait_for(60, || { + Ok(get_chain_info_opt(&self.running_nodes.conf).is_some()) + }) + .expect("Timed out waiting for network to restart after 3.0 boundary reached"); // Wait until we see the first block of epoch 3.0. // Note, we don't use `nakamoto_blocks_mined` counter, because there From 5b330cfa044b94704d5f47d0568d40260c22c9e9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 17 Sep 2024 22:00:22 -0400 Subject: [PATCH 614/910] fix: run nakamoto inv, downloader, and natpunch state machines once per PeerNetwork::run() --- stackslib/src/net/p2p.rs | 117 ++++++++++++--------------------------- 1 file changed, 35 insertions(+), 82 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 45183cdf1b7..7b36dc3c334 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -3560,8 +3560,8 @@ impl PeerNetwork { let prune = if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { debug!("{:?}: run Nakamoto work loop", self.get_local_peer()); - // in Nakamoto epoch, so do Nakamoto things - let prune = self.do_network_work_nakamoto( + // in Nakamoto epoch, so we can always prune + self.do_network_work_nakamoto( burnchain_height, sortdb, chainstate, @@ -3593,9 +3593,10 @@ impl PeerNetwork { "{:?}: ran Epoch 2.x work loop in Nakamoto epoch", self.get_local_peer() ); - prune || epoch2_prune + epoch2_prune } else { - prune + // we can always prune in Nakamoto, since all state machines pin their connections + true } } else { // in epoch 2.x, so do epoch 2.x things @@ -3623,89 +3624,41 @@ impl PeerNetwork { chainstate: &mut StacksChainState, ibd: bool, network_result: &mut NetworkResult, - ) -> bool { - // do some Actual Work(tm) - let mut do_prune = false; - let mut did_cycle = false; - - while !did_cycle { - // always do an inv sync - let learned = self.do_network_inv_sync_nakamoto(sortdb, ibd); - debug!( - "{:?}: network work state is {:?}", - self.get_local_peer(), - &self.nakamoto_work_state; - "learned_new_blocks?" 
=> learned - ); - - // always do block download - let new_blocks = self - .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) - .map_err(|e| { - warn!( - "{:?}: Failed to perform Nakamoto block sync: {:?}", - &self.get_local_peer(), - &e - ); - e - }) - .unwrap_or(HashMap::new()); - - network_result.consume_nakamoto_blocks(new_blocks); - - let cur_state = self.nakamoto_work_state; - match self.nakamoto_work_state { - PeerNetworkWorkState::GetPublicIP => { - if cfg!(test) && self.connection_opts.disable_natpunch { - self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; - } else { - // (re)determine our public IP address - let done = self.do_get_public_ip(); - if done { - self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; - } - } - } - PeerNetworkWorkState::BlockInvSync => { - // this state is useless in Nakamoto since we're always doing inv-syncs - self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; - } - PeerNetworkWorkState::BlockDownload => { - // this state is useless in Nakamoto since we're always doing download-syncs - self.nakamoto_work_state = PeerNetworkWorkState::AntiEntropy; - } - PeerNetworkWorkState::AntiEntropy => { - debug!( - "{:?}: Block anti-entropy for Nakamoto is not yet implemented", - self.get_local_peer() - ); - self.nakamoto_work_state = PeerNetworkWorkState::Prune; - } - PeerNetworkWorkState::Prune => { - // did one pass - did_cycle = true; - do_prune = true; + ) { + // always do an inv sync + let learned = self.do_network_inv_sync_nakamoto(sortdb, ibd); + debug!( + "{:?}: network work state is {:?}", + self.get_local_peer(), + &self.nakamoto_work_state; + "learned_new_blocks?" => learned + ); - // restart - self.nakamoto_work_state = PeerNetworkWorkState::GetPublicIP; - } - } + // always do block download + let new_blocks = self + .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) + .map_err(|e| { + warn!( + "{:?}: Failed to perform Nakamoto block sync: {:?}", + &self.get_local_peer(), + &e + ); + e + }) + .unwrap_or(HashMap::new()); - if self.nakamoto_work_state == cur_state { - // only break early if we can't make progress - break; - } - } + network_result.consume_nakamoto_blocks(new_blocks); - if did_cycle { - self.num_state_machine_passes += 1; - debug!( - "{:?}: Finished full p2p state-machine pass for Nakamoto ({})", - &self.local_peer, self.num_state_machine_passes - ); + // make sure our public IP is fresh (this self-throttles if we recently learned it). + if !self.connection_opts.disable_natpunch { + self.do_get_public_ip(); } - do_prune + self.num_state_machine_passes += 1; + debug!( + "{:?}: Finished full p2p state-machine pass for Nakamoto ({})", + &self.local_peer, self.num_state_machine_passes + ); } /// Do the actual work in the state machine. 
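The refactored `do_network_work_nakamoto` above reduces to a single straight-line pass: run the inventory-sync and block-download state machines exactly once, refresh the public IP when NAT punching is enabled, and let the caller prune unconditionally. A minimal, self-contained sketch of that control flow follows (stub types and methods stand in for `PeerNetwork`'s internals; this is an illustration under those assumptions, not the actual stackslib API):

```rust
// Sketch: each Nakamoto state machine runs exactly once per network pass,
// with no internal work-state loop that must be driven to completion.
#[derive(Default)]
struct NakamotoWorkPass {
    num_passes: u64,
}

impl NakamotoWorkPass {
    /// One full pass: inv sync, block download, optional NAT punch.
    fn run_once(&mut self, natpunch_enabled: bool) -> bool {
        // always do an inv sync
        let learned = self.inv_sync();
        // always do block download; an error degrades to "no new blocks"
        let new_blocks = self.block_sync().unwrap_or_default();
        println!("pass: learned={learned}, new_blocks={}", new_blocks.len());
        // keep the public IP fresh (the real method self-throttles)
        if natpunch_enabled {
            self.natpunch();
        }
        self.num_passes += 1;
        // in Nakamoto the caller may always prune, since the state machines
        // pin the connections they still need
        true
    }

    fn inv_sync(&mut self) -> bool {
        false // stub
    }
    fn block_sync(&mut self) -> Result<Vec<String>, String> {
        Ok(Vec::new()) // stub
    }
    fn natpunch(&mut self) {}
}

fn main() {
    let mut pass = NakamotoWorkPass::default();
    assert!(pass.run_once(true));
    assert_eq!(pass.num_passes, 1);
}
```

This mirrors the commit's rationale: because inv sync and block download now run unconditionally on every pass, the `GetPublicIP`/`BlockInvSync`/`BlockDownload`/`AntiEntropy`/`Prune` work-state transitions no longer gate any behavior and can be dropped.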
From 3359f1e590f882ce73775514b82ea1242dd317e1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 22:02:01 -0400 Subject: [PATCH 615/910] test: try stopping bitcoind within Rust, rather than spawning the CLI --- .github/workflows/bitcoin-tests.yml | 6 +-- .../burnchains/bitcoin_regtest_controller.rs | 2 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 35 ++++++++-------- .../src/tests/neon_integrations.rs | 41 +++++++++++++++++++ 4 files changed, 64 insertions(+), 20 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a7a483665ee..bab888e2e16 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -74,6 +74,9 @@ jobs: - tests::neon_integrations::min_txs - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - tests::neon_integrations::mock_miner_replay + - tests::neon_integrations::bitcoin_reorg_flap + - tests::neon_integrations::bitcoin_reorg_flap_with_follower + - tests::neon_integrations::start_stop_bitcoind - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration @@ -121,9 +124,6 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - # Do not run this one until we figure out why it fails in CI - # - tests::neon_integrations::bitcoin_reorg_flap - # - tests::neon_integrations::bitcoin_reorg_flap_with_follower # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index e342b452fcd..6af31c83a8d 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2815,7 +2815,7 @@ impl BitcoinRPCRequest { Ok(()) } - fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { + pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 6619152f9ff..621f92aa476 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -11,6 +11,7 @@ use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; +use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; @@ -19,12 +20,14 @@ use crate::Config; #[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), + StopFailed(String), } impl std::fmt::Display for BitcoinCoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), + Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), } } } @@ -109,25 +112,25 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { - let mut command = 
Command::new("bitcoin-cli"); - command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); - - self.add_rpc_cli_args(&mut command); - - command.arg("stop"); - - let mut process = match command.spawn() { - Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), + let payload = BitcoinRPCRequest { + method: "stop".to_string(), + params: vec![], + id: "stacks".to_string(), + jsonrpc: "2.0".to_string(), }; - let mut out_reader = BufReader::new(process.stdout.take().unwrap()); - let mut line = String::new(); - while let Ok(bytes_read) = out_reader.read_line(&mut line) { - if bytes_read == 0 { - break; + let res = BitcoinRPCRequest::send(&self.config, payload) + .map_err(|e| BitcoinCoreError::StopFailed(format!("{e:?}")))?; + + if let Some(err) = res.get("error") { + if !err.is_null() { + return Err(BitcoinCoreError::StopFailed(format!("{err}"))); } - eprintln!("{line}"); + } else { + return Err(BitcoinCoreError::StopFailed(format!( + "Invalid response: {:?}", + res + ))); } } Ok(()) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 6b02a3fac8a..84181fdc636 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12794,3 +12794,44 @@ fn mock_miner_replay() { miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +#[test] +#[ignore] +/// Test out stopping bitcoind and restarting it +fn start_stop_bitcoind() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = neon_integration_test_conf(); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + conf.node.prometheus_bind = Some(prom_bind.clone()); + + conf.burnchain.max_rbf = 1000000; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); + + thread::sleep(Duration::from_secs(5)); + + btcd_controller + .start_bitcoind() + .expect("Failed to start bitcoind"); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); +} From dec98a9d0cf37cd6110a3eb9fb95f3cf22ec7bda Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 17 Sep 2024 23:26:05 -0400 Subject: [PATCH 616/910] fix: Remove NakamotoDownloadStateMachine::load_tenure_start_blocks(), and all code paths that depended on it --- .../nakamoto/download_state_machine.rs | 65 +--------- .../download/nakamoto/tenure_downloader.rs | 111 +----------------- .../nakamoto/tenure_downloader_set.rs | 106 +---------------- stackslib/src/net/tests/download/nakamoto.rs | 42 +------ 4 files changed, 11 insertions(+), 313 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 8cef43a9aa3..a2f4fe5dc52 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -115,8 +115,6 @@ pub struct NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap, /// Ongoing confirmed tenure downloads for when we know the start and end block hashes. 
tenure_downloads: NakamotoTenureDownloaderSet, - /// resolved tenure-start blocks - tenure_start_blocks: HashMap, /// comms to remote neighbors pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip @@ -140,7 +138,6 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_download_schedule: VecDeque::new(), tenure_downloads: NakamotoTenureDownloaderSet::new(), unconfirmed_tenure_downloads: HashMap::new(), - tenure_start_blocks: HashMap::new(), neighbor_rpc: NeighborRPC::new(), nakamoto_tip, last_unconfirmed_download_run_ms: 0, @@ -367,48 +364,6 @@ impl NakamotoDownloadStateMachine { ) } - /// Find all stored (but not necessarily processed) tenure-start blocks for a list - /// of wanted tenures that this node has locally. NOTE: these tenure-start blocks - /// do not correspond to the tenure; they correspond to the _parent_ tenure (since a - /// `WantedTenure` captures the tenure-start block hash of the parent tenure; the same data - /// captured by a sortition). - /// - /// This method is static to ease testing. - /// - /// Returns Ok(()) on success and fills in newly-discovered blocks into `tenure_start_blocks`. - /// Returns Err(..) on DB error. - pub(crate) fn load_tenure_start_blocks( - wanted_tenures: &[WantedTenure], - chainstate: &mut StacksChainState, - tenure_start_blocks: &mut HashMap, - ) -> Result<(), NetError> { - for wt in wanted_tenures { - let candidate_tenure_start_blocks = chainstate - .nakamoto_blocks_db() - .get_nakamoto_tenure_start_blocks(&wt.tenure_id_consensus_hash)?; - - for candidate_tenure_start_block in candidate_tenure_start_blocks.into_iter() { - tenure_start_blocks.insert( - candidate_tenure_start_block.block_id(), - candidate_tenure_start_block, - ); - } - } - Ok(()) - } - - /// Update our local tenure start block data - fn update_tenure_start_blocks( - &mut self, - chainstate: &mut StacksChainState, - ) -> Result<(), NetError> { - Self::load_tenure_start_blocks( - &self.wanted_tenures, - chainstate, - &mut self.tenure_start_blocks, - ) - } - /// Update `self.wanted_tenures` with newly-discovered sortition data. 
fn extend_wanted_tenures( &mut self, @@ -670,7 +625,6 @@ impl NakamotoDownloadStateMachine { &mut self, network: &PeerNetwork, sortdb: &SortitionDB, - chainstate: &mut StacksChainState, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; @@ -688,7 +642,6 @@ impl NakamotoDownloadStateMachine { // not at a reward cycle boundary, so just extend self.wanted_tenures debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); self.extend_wanted_tenures(network, sortdb)?; - self.update_tenure_start_blocks(chainstate)?; return Ok(()); } @@ -728,7 +681,6 @@ impl NakamotoDownloadStateMachine { self.wanted_tenures = new_wanted_tenures; self.reward_cycle = sort_rc; - self.update_tenure_start_blocks(chainstate)?; Ok(()) } @@ -1485,21 +1437,6 @@ impl NakamotoDownloadStateMachine { // run all downloaders let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); - // give blocked downloaders their tenure-end blocks from other downloaders that have - // obtained their tenure-start blocks - let new_tenure_starts = self.tenure_downloads.find_new_tenure_start_blocks(); - self.tenure_start_blocks - .extend(new_tenure_starts.into_iter()); - - let dead = self - .tenure_downloads - .handle_tenure_end_blocks(&self.tenure_start_blocks); - - // bookkeeping - for naddr in dead.into_iter() { - self.neighbor_rpc.add_dead(network, &naddr); - } - new_blocks } @@ -1729,7 +1666,7 @@ impl NakamotoDownloadStateMachine { ) -> Result>, NetError> { self.nakamoto_tip = network.stacks_tip.block_id(); debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); - self.update_wanted_tenures(&network, sortdb, chainstate)?; + self.update_wanted_tenures(&network, sortdb)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); self.last_sort_tip = Some(network.burnchain_tip.clone()); diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 95d97f67d51..92e032fa383 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -68,31 +68,7 @@ use crate::util_lib::db::{DBConn, Error as DBError}; pub enum NakamotoTenureDownloadState { /// Getting the tenure-start block (the given StacksBlockId is it's block ID). GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. 
- /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. + /// Getting the tenure-end block. /// /// The field here is the block ID of the tenure end block. GetTenureEndBlock(StacksBlockId), @@ -163,8 +139,7 @@ pub struct NakamotoTenureDownloader { pub tenure_start_block: Option, /// Pre-stored tenure end block. /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + /// the start-block for the current tenure is downloaded. pub tenure_end_block: Option, /// Tenure blocks pub tenure_blocks: Option>, @@ -205,16 +180,6 @@ impl NakamotoTenureDownloader { self } - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - /// Validate and accept a given tenure-start block. If accepted, then advance the state. /// Returns Ok(()) if the start-block is valid. /// Returns Err(..) if it is not valid. @@ -266,66 +231,15 @@ impl NakamotoTenureDownloader { tenure_end_block.block_id(), &self.tenure_id_consensus_hash ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); self.try_accept_tenure_end_block(&tenure_end_block)?; } else { - // need to get tenure_end_block. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); + // need to get tenure_end_block. + self.state = + NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone()); } Ok(()) } - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. 
But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. - pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - /// Validate and accept a tenure-end block. If accepted, then advance the state. /// Once accepted, this function extracts the tenure-change transaction and block header from /// this block (it does not need the entire block). @@ -338,8 +252,7 @@ impl NakamotoTenureDownloader { ) -> Result<(), NetError> { if !matches!( &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) + NakamotoTenureDownloadState::GetTenureEndBlock(_) ) { warn!("Invalid state for this method"; "state" => %self.state); @@ -577,14 +490,6 @@ impl NakamotoTenureDownloader { debug!("Request tenure-start block {}", &start_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { debug!("Request tenure-end block {}", &end_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) @@ -665,10 +570,6 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_start_block(block)?; Ok(None) } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) 
=> { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { debug!("Got download response to tenure-end block {}", &_block_id); let block = response.decode_nakamoto_block().map_err(|e| { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 28a40e7eb50..74ff83460d6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -172,9 +172,6 @@ impl NakamotoTenureDownloaderSet { if downloader.idle { continue; } - if downloader.is_waiting() { - continue; - } if downloader.is_done() { continue; } @@ -233,9 +230,6 @@ impl NakamotoTenureDownloaderSet { if !downloader.idle { continue; } - if downloader.is_waiting() { - continue; - } if downloader.naddr != naddr { continue; } @@ -264,7 +258,7 @@ impl NakamotoTenureDownloaderSet { idled.push(naddr.clone()); continue; }; - if downloader.idle || downloader.is_waiting() { + if downloader.idle { debug!( "Remove idled peer {} for tenure download {}", &naddr, &downloader.tenure_id_consensus_hash @@ -306,43 +300,6 @@ impl NakamotoTenureDownloaderSet { ret } - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - /// Does there exist a downloader (possibly unscheduled) for the given tenure? pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { for downloader_opt in self.downloaders.iter() { @@ -351,11 +308,8 @@ impl NakamotoTenureDownloaderSet { }; if &downloader.tenure_id_consensus_hash == tenure_id { debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state + "Have downloader for tenure {} already (idle={}, state={})", + tenure_id, downloader.idle, &downloader.state ); return true; } @@ -363,59 +317,6 @@ impl NakamotoTenureDownloaderSet { false } - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. 
- pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - /// Create a given number of downloads from a schedule and availability set. /// Removes items from the schedule, and neighbors from the availability set. /// A neighbor will be issued at most one request. @@ -438,7 +339,6 @@ impl NakamotoTenureDownloaderSet { self.clear_finished_downloaders(); self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); while self.inflight() < count { let Some(ch) = schedule.front() else { break; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 44bbaed7d29..a6307b324b0 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -255,7 +255,7 @@ fn test_nakamoto_tenure_downloader() { .try_accept_tenure_start_block(blocks.first().unwrap().clone()) .is_ok()); - let NakamotoTenureDownloadState::WaitForTenureEndBlock(block_id, _) = td.state else { + let NakamotoTenureDownloadState::GetTenureEndBlock(block_id) = td.state else { panic!("wrong state"); }; assert_eq!(block_id, next_tenure_start_block.header.block_id()); @@ -1456,46 +1456,6 @@ fn test_make_tenure_downloaders() { } } - // test load_tenure_start_blocks - { - let sortdb = peer.sortdb(); - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - nakamoto_start, - tip.block_height + 1, - ) - .unwrap(); - - // the first block loaded won't have data, since the blocks are loaded by consensus hash - // but the resulting map is keyed by block ID (and we don't have the first block ID) - let wanted_tenures_with_blocks = wanted_tenures[1..].to_vec(); - - let nakamoto_tip = peer.network.stacks_tip.block_id(); - let chainstate = peer.chainstate(); - let mut tenure_start_blocks = HashMap::new(); - NakamotoDownloadStateMachine::load_tenure_start_blocks( - &wanted_tenures, - chainstate, - &mut tenure_start_blocks, - ) - .unwrap(); - - // remove malleablized blocks - tenure_start_blocks.retain(|_, block| block.header.version == 0); - - assert_eq!(tenure_start_blocks.len(), wanted_tenures.len()); - - for wt in wanted_tenures_with_blocks { - if 
tenure_start_blocks.get(&wt.winning_block_id).is_none() { - warn!("No tenure start block for wanted tenure {:?}", &wt); - } - - let block = tenure_start_blocks.get(&wt.winning_block_id).unwrap(); - assert!(block.is_wellformed_tenure_start_block().unwrap()); - } - } - // test find_available_tenures { // test for reward cycle From a3d4d42dad79437f32439698a0538055f6b6f16d Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 18 Sep 2024 17:03:24 +0300 Subject: [PATCH 617/910] add integration test to run on CI --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a7a483665ee..04b4a51f701 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -77,6 +77,7 @@ jobs: - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration + - tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3 - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb From 7c073faaa2051a903347a9e7cc642a2d60d9db07 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 18 Sep 2024 11:12:14 -0700 Subject: [PATCH 618/910] Cannot assume stacks transaction will get mined AFTER the burn block is mined Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 44 +++---------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 32924ab7b78..7a71725f6c3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5185,51 +5185,21 @@ fn clarity_burn_state() { vec![&Value::UInt(burn_block_height)], ); result.expect_result_ok().expect("Read-only call failed"); - - // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) - let call_tx = tests::make_contract_call( - &sender_sk, - sender_nonce, - tx_fee, - &sender_addr, - contract_name, - "bar", - &[Value::UInt(burn_block_height + 1)], - ); - sender_nonce += 1; - submit_tx(&http_origin, &call_tx); } let commits_before = commits_submitted.load(Ordering::SeqCst); - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) - .unwrap(); + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; info!("Expecting burn block height to be {}", burn_block_height); - // Assert that the contract call was successful - test_observer::get_mined_nakamoto_blocks() - .last() - .unwrap() - .tx_events - .iter() - .for_each(|event| match event { - TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. 
}) => { // Ignore coinbase and tenure transactions if *fee == 0 { return; } info!("Contract call result: {}", result); result.clone().expect_result_ok().expect("Ok result"); } _ => { info!("Unsuccessful event: {:?}", event); panic!("Expected a successful transaction"); } }); - // mine the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { info!("Mining interim block {interim_block_ix}"); From d9c002c70f56a825ba318c2e4731914d360ca6e0 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 18 Sep 2024 12:42:53 -0700 Subject: [PATCH 619/910] CRC: ensure that the tenure change transaction and contract call get mined in the same stacks block Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 11 ++++ .../src/tests/nakamoto_integrations.rs | 63 ++++++++++++++++--- 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index da1c75c7087..1a5f4aa3c20 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -60,6 +60,8 @@ use crate::neon_node; use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; +#[cfg(test)] +pub static TEST_MINE_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); #[cfg(test)] pub static TEST_BROADCAST_STALL: std::sync::Mutex<Option<bool>> = std::sync::Mutex::new(None); #[cfg(test)] @@ -291,6 +293,15 @@ impl BlockMinerThread { let mut attempts = 0; // now, actually run this tenure loop { + #[cfg(test)] + if *TEST_MINE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Mining is stalled due to testing directive"); + while *TEST_MINE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Mining is no longer stalled due to testing directive. Continuing..."); + } let new_block = loop { // If we're mock mining, we may not have processed the block that the // actual tenure winner committed to yet. 
So, before attempting to diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7a71725f6c3..6fa0dafc88e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -94,7 +94,9 @@ use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; -use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::miner::{ + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, +}; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; @@ -5185,21 +5187,68 @@ fn clarity_burn_state() { vec![&Value::UInt(burn_block_height)], ); result.expect_result_ok().expect("Read-only call failed"); + + // Pause mining to prevent the stacks block from being mined before the tenure change is processed + TEST_MINE_STALL.lock().unwrap().replace(true); + // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[Value::UInt(burn_block_height + 1)], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); } let commits_before = commits_submitted.load(Ordering::SeqCst); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and(&mut btc_regtest_controller, 60, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + TEST_MINE_STALL.lock().unwrap().replace(false); + wait_for(20, || { + Ok(coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed() + > blocks_processed_before) + }) .unwrap(); let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; info!("Expecting burn block height to be {}", burn_block_height); + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. 
}) => { + // Ignore coinbase and tenure transactions + if *fee == 0 { + return; + } + + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + // mine the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { info!("Mining interim block {interim_block_ix}"); From 27410626b0b75d60030e7c550cef149456f13685 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 18 Sep 2024 22:55:15 +0300 Subject: [PATCH 620/910] update flashblocks integration test for CI --- .../src/tests/nakamoto_integrations.rs | 35 ++++++++++++++----- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 54d82df92c8..85bbf9120d3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1676,7 +1676,6 @@ fn simple_neon_integration() { } #[test] -#[ignore] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, /// having flash blocks when epoch updates and expects everything to work normally, @@ -1900,15 +1899,35 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); // Check that we have the expected burn blocks - // We expect to have blocks 220-230 and 234 onwards, with a gap for the flash blocks + // We expect to have around the blocks 220-230 and 234 onwards, with a gap of 3 blocks for the flash blocks let bhh = u64::from(tip.burn_header_height); - test_observer::contains_burn_block_range(220..=230).unwrap(); - test_observer::contains_burn_block_range(234..=bhh).unwrap(); - // Verify that we're missing the expected flash blocks - assert!( - test_observer::contains_burn_block_range(231..=233).is_err(), - "Expected to be missing burn blocks 231-233 due to flash blocks" + // Find the gap in burn blocks + let mut gap_start = 0; + let mut gap_end = 0; + for i in 220..=bhh { + if test_observer::contains_burn_block_range(i..=i).is_err() { + if gap_start == 0 { + gap_start = i; + } + gap_end = i; + } else if gap_start != 0 { + break; + } + } + + // Verify that there's a gap of exactly 3 blocks + assert_eq!( + gap_end - gap_start + 1, + 3, + "Expected a gap of exactly 3 burn blocks due to flash blocks, found gap from {} to {}", + gap_start, + gap_end ); + + // Verify blocks before and after the gap + test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); + test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); + info!("Verified burn block ranges, including expected gap for flash blocks"); coord_channel From 33ff38a2d25182ff107bd3bd333c354c60e73c4b Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 19 Sep 2024 00:01:11 +0300 Subject: [PATCH 621/910] add ignore to flashblocks test header --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 85bbf9120d3..9b89e048568 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1676,6 +1676,7 @@ fn simple_neon_integration() { } #[test] +#[ignore] /// This test spins up a nakamoto-neon node. 
/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, From 08a62a25329e7ab2855d4b28dbcce5a5da95251e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 18 Sep 2024 16:32:18 -0700 Subject: [PATCH 622/910] CRC: add unit tests and tighten regex restriction and update change log with full path Signed-off-by: Jacinta Ferrant --- CHANGELOG.md | 2 +- stackslib/src/net/api/getsigner.rs | 19 ++++- stackslib/src/net/api/tests/getsigner.rs | 96 ++++++++++++++++++++++++ stackslib/src/net/api/tests/mod.rs | 1 + 4 files changed, 113 insertions(+), 5 deletions(-) create mode 100644 stackslib/src/net/api/tests/getsigner.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 05d3ada08db..f5c84db9a60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-stacks-block-info?` added - `get-tenure-info?` added - `get-block-info?` removed -- Added `/v3/signer/` endpoint +- Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint ## [2.5.0.0.7] diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs index 1231e195c1b..90bcc796bfb 100644 --- a/stackslib/src/net/api/getsigner.rs +++ b/stackslib/src/net/api/getsigner.rs @@ -45,8 +45,17 @@ use crate::util_lib::db::Error as DBError; #[derive(Clone, Default)] pub struct GetSignerRequestHandler { - signer_pubkey: Option<StacksPublicKey>, - reward_cycle: Option<u64>, + pub signer_pubkey: Option<StacksPublicKey>, + pub reward_cycle: Option<u64>, +} + +impl GetSignerRequestHandler { + pub fn new() -> Self { + Self { + signer_pubkey: None, + reward_cycle: None, + } + } } #[derive(Debug, Serialize, Deserialize)] @@ -61,8 +70,10 @@ impl HttpRequest for GetSignerRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/signer/(?P<signer_pubkey>[0-9a-f]{66})/(?P<cycle_num>[0-9]{1,10})$"#) - .unwrap() + Regex::new( + r#"^/v3/signer/(?P<signer_pubkey>0[23][0-9a-f]{64})/(?P<cycle_num>[0-9]{1,10})$"#, + ) + .unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs new file mode 100644 index 00000000000..92e30057d7e --- /dev/null +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -0,0 +1,96 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::getsigner::{self, GetSignerRequestHandler}; +use crate::net::api::tests::{test_rpc, TestRPC}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{ + RPCRequestHandler, StacksHttp, StacksHttpPreamble, StacksHttpRequest, TipRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{Error as NetError, ProtocolFamily}; + +fn make_preamble(query: &str) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/signer{query}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let private_key = StacksPrivateKey::new(); + let signer_pubkey = StacksPublicKey::from_private(&private_key); + let signer_pubkey_hex = signer_pubkey.to_hex(); + let cycle_num = thread_rng().next_u32() as u64; + + let mut handler = getsigner::GetSignerRequestHandler::new(); + let mut bad_content_length_preamble = + make_preamble(&format!("/{signer_pubkey_hex}/{cycle_num}")); + bad_content_length_preamble.content_length = Some(1); + let tests = vec![ + ( + make_preamble(&format!("/{signer_pubkey_hex}/{cycle_num}")), + Ok((Some(signer_pubkey), Some(cycle_num))), + ), + ( + make_preamble(&format!("/foo/{cycle_num}")), + Err(NetError::NotFoundError), + ), + ( + make_preamble(&format!("/{signer_pubkey_hex}/bar")), + Err(NetError::NotFoundError), + ), + ( + bad_content_length_preamble, + Err( + HttpError::DecodeError("Invalid Http request: expected 0-length body".into()) + .into(), + ), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok((key, cycle)) => { + assert!(parsed_request.is_ok()); + assert_eq!(handler.signer_pubkey, key); + assert_eq!(handler.reward_cycle, cycle); + } + Err(e) => { + assert_eq!(e, parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ded0360555a..d19854bf02e 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -73,6 +73,7 @@ mod getmicroblocks_indexed; mod getmicroblocks_unconfirmed; mod getneighbors; mod getpoxinfo; +mod getsigner; mod getsortition; mod getstackerdbchunk; mod getstackerdbmetadata; From 84c836902722466e6d3cd98ad9644466ddc121b5 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 18 Sep 2024 16:50:15 -0700 Subject: [PATCH 623/910] fix: apply needed sortdb migrations before 8 --- stackslib/src/chainstate/burn/db/sortdb.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index f446e98a666..53dc2d0547b 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ 
b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2883,7 +2883,7 @@ impl SortitionDB { sql_pragma(self.conn(), "journal_mode", &"WAL")?; sql_pragma(self.conn(), "foreign_keys", &true)?; - let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; // create first (sentinel) snapshot debug!("Make first snapshot"); @@ -2909,13 +2909,6 @@ impl SortitionDB { SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; SortitionDB::apply_schema_8_tables(&db_tx, epochs_ref)?; - // `apply_schema_8_migration` creates new transactions, so - // commit this first. - db_tx.commit()?; - // NOTE: we don't need to provide a migrator here because we're not migrating - self.apply_schema_8_migration(None)?; - let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; - SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2933,6 +2926,14 @@ impl SortitionDB { db_tx.commit()?; + // NOTE: we don't need to provide a migrator here because we're not migrating + self.apply_schema_8_migration(None)?; + + let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; + + db_tx.commit()?; + self.add_indexes()?; debug!("Instantiated SortDB"); From 4d0e3330e3eb501c591beb4eb1f32c72a46b8ca2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 18 Sep 2024 17:08:38 -0700 Subject: [PATCH 624/910] fix: move signer skip broadcast injection logic to own function --- stacks-signer/src/v0/signer.rs | 43 ++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a5f635cf16a..654a00dc66a 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -944,24 +944,8 @@ impl Signer { block.header.signer_signature = signatures; #[cfg(any(test, feature = "testing"))] - { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return; - } + if self.test_skip_block_broadcast(&block) { + return; } debug!( "{self}: Broadcasting Stacks block {} to node", @@ -986,6 +970,29 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + let block_hash = block.header.signer_signature_hash(); + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self.signer_db.set_block_broadcasted( + self.reward_cycle, + &block_hash, + get_epoch_time_secs(), + ) { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); From 
3296f9865ea8dae40c3a7e1418afd75533d903d7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 18 Sep 2024 17:11:02 -0700 Subject: [PATCH 625/910] fix: check if db_version exists to determine staging_blocks schema --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0fcdaffad81..c0b364eea88 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -35,7 +35,7 @@ use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlo use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, - tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, + table_exists, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, }; /// The means by which a block is obtained. @@ -666,13 +666,17 @@ impl StacksChainState { pub fn get_nakamoto_staging_blocks_db_version( conn: &Connection, ) -> Result { + let db_version_exists = table_exists(&conn, "db_version")?; + if !db_version_exists { + return Ok(1); + } let qry = "SELECT version FROM db_version ORDER BY version DESC LIMIT 1"; let args = NO_PARAMS; let version: Option = match query_row(&conn, qry, args) { Ok(x) => x, Err(e) => { debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); - return Ok(1); + return Err(ChainstateError::DBError(DBError::Corruption)); } }; @@ -684,7 +688,7 @@ impl StacksChainState { } None => { debug!("No version present in Nakamoto staging blocks DB; defaulting to 1"); - Ok(1) + Err(ChainstateError::DBError(DBError::Corruption)) } } } From 56ae16867c4d7a581e7f4e4c0f7bced1df9f56e0 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 19 Sep 2024 14:31:05 +0300 Subject: [PATCH 626/910] check that epoch3 start burn block is in the missing blocks --- .../src/tests/nakamoto_integrations.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9b89e048568..b50f9a459e5 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1902,6 +1902,12 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { // Check that we have the expected burn blocks // We expect to have around the blocks 220-230 and 234 onwards, with a gap of 3 blocks for the flash blocks let bhh = u64::from(tip.burn_header_height); + + // Get the Epoch 3.0 activation height (in terms of Bitcoin block height) + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + // Find the gap in burn blocks let mut gap_start = 0; let mut gap_end = 0; @@ -1925,11 +1931,21 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { gap_end ); + // Verify that the gap includes the Epoch 3.0 activation height + assert!( + gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, + "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", + gap_start, + gap_end, + epoch_3_start_height + ); + // Verify blocks before and after the gap test_observer::contains_burn_block_range(220..=(gap_start - 
1)).unwrap(); test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); info!("Verified burn block ranges, including expected gap for flash blocks"); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); coord_channel .lock() From 95b01c17a26e620a1af4077368d9d486d98a24dd Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 11:38:17 -0500 Subject: [PATCH 627/910] fix: ongoing commit logic + better error messages * correct the ongoing commit logic (and RBF handling) in bitcoin tx submissions * better error messages from send_http_request --- stackslib/src/net/httpcore.rs | 13 +++++++------ .../src/burnchains/bitcoin_regtest_controller.rs | 7 +++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 804add6f331..3b4bf8c9b98 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1953,15 +1953,16 @@ pub fn send_http_request( // Step 5: decode the HTTP message and return it if it's not an error. let response_data = match response { StacksHttpMessage::Response(response_data) => response_data, - StacksHttpMessage::Error(path, response) => { + StacksHttpMessage::Error(_path, response) => { + let verb = &request.preamble().verb; + let path = &request.preamble().path_and_query_str; + let resp_status_code = response.preamble().status_code; + let resp_body = response.body(); return Err(io::Error::new( io::ErrorKind::Other, format!( - "Request did not succeed ({} != 200). Path: '{}'", - response.preamble().status_code, - &path - ) - .as_str(), + "HTTP '{verb} {path}' did not succeed ({resp_status_code} != 200). Response body = {resp_body:?}" + ), )); } _ => { diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 3338e3cf5f6..568e9559c3d 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1662,7 +1662,10 @@ impl BitcoinRegtestController { ) } else { // Case 2) ii): Attempt to RBF - info!("Attempt to replace by fee an outdated leader block commit"); + info!( + "Attempt to replace by fee an outdated leader block commit"; + "ongoing_txids" => ?ongoing_op.txids + ); self.send_block_commit_operation( epoch_id, payload, @@ -1674,7 +1677,7 @@ impl BitcoinRegtestController { ) }; - if res.is_ok() { + if res.is_err() { self.ongoing_block_commit = Some(ongoing_op); } From 56141129f09d858582a454c81a0d5fe6b9439eb5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 11:11:48 -0700 Subject: [PATCH 628/910] Do not count received valid signatures towards threshold weight when ignore flag set Signed-off-by: Jacinta Ferrant --- .../src/nakamoto_node/sign_coordinator.rs | 12 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 60 ++++++++++--------- 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 29a64cfb27f..1ac2618a537 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -886,11 +886,6 @@ impl SignCoordinator { ); continue; } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total 
weight signed exceeds u32::MAX");
-            }
 
             if Self::fault_injection_ignore_signatures() {
                 warn!("SignCoordinator: fault injection: ignoring well-formed signature for block";
@@ -906,6 +901,12 @@ impl SignCoordinator {
                 continue;
             }
 
+            if !gathered_signatures.contains_key(&slot_id) {
+                total_weight_signed = total_weight_signed
+                    .checked_add(signer_entry.weight)
+                    .expect("FATAL: total weight signed exceeds u32::MAX");
+            }
+
             info!("SignCoordinator: Signature Added to block";
                 "block_signer_sighash" => %block_sighash,
                 "signer_pubkey" => signer_pubkey.to_hex(),
@@ -986,7 +987,6 @@ impl SignCoordinator {
                 }
             };
         }
-
         // After gathering all signatures, return them if we've hit the threshold
         if total_weight_signed >= self.weight_threshold {
             info!("SignCoordinator: Received enough signatures. Continuing.";
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index f14d9624043..c123217ce0c 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -2227,26 +2227,39 @@ fn signers_broadcast_signed_blocks() {
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
 
     signer_test.boot_to_epoch_3();
-    sleep_ms(10_000);
-
+    let info_before = get_chain_info(&signer_test.running_nodes.conf);
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
     signer_test.mine_nakamoto_block(Duration::from_secs(30));
-    sleep_ms(10_000);
-    TEST_IGNORE_SIGNERS.lock().unwrap().replace(true);
+    wait_for(30, || {
+        let blocks_mined = signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst);
+        let info = get_chain_info(&signer_test.running_nodes.conf);
+        debug!(
+            "blocks_mined: {},{}, stacks_tip_height: {},{}",
+            blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height
+        );
+        Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height)
+    })
+    .expect("Timed out waiting for first nakamoto block to be mined");
 
+    TEST_IGNORE_SIGNERS.lock().unwrap().replace(true);
     let blocks_before = signer_test
         .running_nodes
         .nakamoto_blocks_mined
         .load(Ordering::SeqCst);
-
     let signer_pushed_before = signer_test
         .running_nodes
         .nakamoto_blocks_signer_pushed
         .load(Ordering::SeqCst);
-
     let info_before = get_chain_info(&signer_test.running_nodes.conf);
-
     // submit a tx so that the miner will mine a block
     let sender_nonce = 0;
     let transfer_tx =
         make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
@@ -2254,26 +2267,16 @@ fn signers_broadcast_signed_blocks() {
 
     debug!("Transaction sent; waiting for block-mining");
 
-    let start = Instant::now();
-    let duration = 60;
-    loop {
-        let blocks_mined = signer_test
-            .running_nodes
-            .nakamoto_blocks_mined
-            .load(Ordering::SeqCst);
+    wait_for(30, || {
         let signer_pushed = signer_test
             .running_nodes
             .nakamoto_blocks_signer_pushed
             .load(Ordering::SeqCst);
-
+        let blocks_mined = signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst);
         let info = get_chain_info(&signer_test.running_nodes.conf);
-        if blocks_mined > blocks_before
-            && signer_pushed > signer_pushed_before
-            && info.stacks_tip_height > info_before.stacks_tip_height
-        {
-            break;
-        }
-
         debug!(
             "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}",
             blocks_mined,
@@ -2283,12 +2286,11 @@ fn signers_broadcast_signed_blocks() {
             info.stacks_tip_height,
             info_before.stacks_tip_height
         );
-
-
std::thread::sleep(Duration::from_millis(100)); - if start.elapsed() >= Duration::from_secs(duration) { - panic!("Timed out"); - } - } + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height + && signer_pushed > signer_pushed_before) + }) + .expect("Timed out waiting for second nakamoto block to be mined"); signer_test.shutdown(); } @@ -4754,7 +4756,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing singers to broadcast block N+1 to the miner"); + info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); // Assert the N+1' block was rejected From a531b50ce8c1f9160aa07b9dbbafc3b8caf672aa Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 11:49:25 -0700 Subject: [PATCH 629/910] CRC: fix mainnet flag Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index cb1d4f8a6de..a0426009771 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -608,7 +608,7 @@ impl Signer { // authenticate the signature -- it must be signed by one of the stacking set let is_valid_sig = self.signer_addresses.iter().any(|addr| { - let stacker_address = StacksAddress::p2pkh(true, &public_key); + let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); // it only matters that the address hash bytes match stacker_address.bytes == addr.bytes From 065df11f9e8d2928a0ec1ec1163d172b33461312 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 12:48:40 -0700 Subject: [PATCH 630/910] Print a warning if failed to parse the stackers response Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6e3bab341e0..c4ea485406c 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -590,9 +590,10 @@ impl StacksClient { .map_err(|e| backoff::Error::transient(e.into()))?; let status = response.status(); if status.is_success() { - return response - .json() - .map_err(|e| backoff::Error::permanent(e.into())); + return response.json().map_err(|e| { + warn!("Failed to parse the GetStackers response: {e}"); + backoff::Error::permanent(e.into()) + }); } let error_data = response.json::().map_err(|e| { warn!("Failed to parse the GetStackers error response: {e}"); From b2acfd72bde0378a94b24d36124f20426ddceaff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 13:26:17 -0700 Subject: [PATCH 631/910] Do not assume every signers signature makes it before miner quits waiting for unnecessary signatures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++++++-------- 1 file changed, 95 insertions(+), 61 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c123217ce0c..8d8ff07ac0d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4130,31 +4130,34 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { vec![(sender_addr.clone(), (send_amt + send_fee) * 
nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); + let long_timeout = 60; + let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + + // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for N to be mined and processed"); - sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4173,13 +4176,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers); - // Ensure that the block was accepted globally so the stacks tip has not advanced to N + // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block to ensure it is marked globally accepted + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted let rejecting_signers: Vec<_> = signer_test .signer_stacks_private_keys .iter() @@ -4191,18 +4194,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); - let start_time = Instant::now(); + + // submit a tx so that the miner will mine a stacks block N+1 let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4210,7 +4215,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for block to be mined and processed"); - loop { + wait_for(long_timeout, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = 
stackerdb_events .into_iter() @@ -4235,14 +4240,10 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { } }) .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + Ok(block_rejections.len() == rejecting_signers.len()) + }) + .expect("Timed out waiting for block proposal rejections"); + // Assert the block was mined let info_after = signer_test .stacks_client @@ -4263,13 +4264,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4277,11 +4279,12 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(Vec::new()); + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4297,20 +4300,35 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_2 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_2.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] @@ -4351,7 +4369,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let 
http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); @@ -4360,13 +4378,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4375,17 +4395,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }) .expect("Timed out waiting for block to be mined and processed"); - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); @@ -4404,16 +4422,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .replace(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4433,6 +4454,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4440,13 +4462,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); assert_eq!(info_after, info_before); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let 
commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -4458,23 +4481,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4491,15 +4510,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4509,6 +4519,30 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_1_prime.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] From 2cdd31b6bc831b4bb7fc888d18a842b8cf1e15a4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 16:58:37 -0400 Subject: [PATCH 632/910] test: move the 2.5 and 3.0 activation heights earlier for this test This allows us to avoid hitting block 240, which is when the stackers get unstacked and the chain stalls, making `partial_tenure_fork` less flaky --- testnet/stacks-node/src/tests/signer/mod.rs | 19 ++++++++++++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 11 +++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0b38a792346..95321664926 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -748,9 +748,22 @@ fn setup_stx_btc_node ()>( info!("Make new BitcoinRegtestController"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - info!("Bootstraping..."); - // Should be 201 for other tests? 
-    btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys);
+    let epoch_2_5_start = usize::try_from(
+        naka_conf
+            .burnchain
+            .epochs
+            .as_ref()
+            .unwrap()
+            .iter()
+            .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25)
+            .unwrap()
+            .start_height,
+    )
+    .expect("Failed to get epoch 2.5 start height");
+    let bootstrap_block = epoch_2_5_start - 6;
+
+    info!("Bootstrapping to block {bootstrap_block}...");
+    btc_regtest_controller.bootstrap_chain_to_pks(bootstrap_block, btc_miner_pubkeys);
 
     info!("Chain bootstrapped...");
 
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 8d8ff07ac0d..27f48b69175 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -3610,6 +3610,17 @@ fn partial_tenure_fork() {
             config.node.local_peer_seed = btc_miner_1_seed.clone();
             config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex());
             config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1]));
+
+            // Move epoch 2.5 and 3.0 earlier, so we have more time for the
+            // test before re-stacking is required.
+            if let Some(epochs) = config.burnchain.epochs.as_mut() {
+                epochs[6].end_height = 121;
+                epochs[7].start_height = 121;
+                epochs[7].end_height = 151;
+                epochs[8].start_height = 151;
+            } else {
+                panic!("Expected epochs to be set");
+            }
         },
         Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]),
         None,

From fd4c2379986a6abbd8cb0aa920a512a613619df9 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Thu, 19 Sep 2024 16:12:38 -0500
Subject: [PATCH 633/910] feat: add a consolidated endpoint for current and prior sortitions

---
 stacks-signer/src/chainstate.rs           |  35 +--
 stacks-signer/src/client/stacks_client.rs |  50 ++++
 stackslib/src/net/api/getsortition.rs     | 229 ++++++++++++------
 .../src/tests/nakamoto_integrations.rs    |   2 +-
 4 files changed, 207 insertions(+), 109 deletions(-)

diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs
index 4012fd48a08..4bbb9741a54 100644
--- a/stacks-signer/src/chainstate.rs
+++ b/stacks-signer/src/chainstate.rs
@@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey};
 use stacks_common::util::hash::Hash160;
 use stacks_common::{info, warn};
 
-use crate::client::{ClientError, StacksClient};
+use crate::client::{ClientError, CurrentAndLastSortition, StacksClient};
 use crate::config::SignerConfig;
 use crate::signerdb::{BlockState, SignerDb};
 
@@ -138,8 +138,6 @@ pub struct SortitionsView {
     pub last_sortition: Option<SortitionState>,
     /// the current successful sortition (this corresponds to the "current" miner slot)
     pub cur_sortition: SortitionState,
-    /// the hash at which the sortitions view was fetched
-    pub latest_consensus_hash: ConsensusHash,
    /// configuration settings for evaluating proposals
    pub config: ProposalEvalConfig,
 }
@@ -608,42 +606,21 @@ impl SortitionsView {
         config: ProposalEvalConfig,
         client: &StacksClient,
     ) -> Result<Self, ClientError> {
-        let latest_state = client.get_latest_sortition()?;
-        let latest_ch = latest_state.consensus_hash;
-
-        // figure out what cur_sortition will be set to.
-        // if the latest sortition wasn't successful, query the last one that was.
-        let latest_success = if latest_state.was_sortition {
-            latest_state
-        } else {
-            info!("Latest state wasn't a sortition: {latest_state:?}");
-            let last_sortition_ch = latest_state
-                .last_sortition_ch
-                .as_ref()
-                .ok_or_else(|| ClientError::NoSortitionOnChain)?;
-            client.get_sortition(last_sortition_ch)?
- }; - - // now, figure out what `last_sortition` will be set to. - let last_sortition = latest_success - .last_sortition_ch - .as_ref() - .map(|ch| client.get_sortition(ch)) - .transpose()?; + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; - let cur_sortition = SortitionState::try_from(latest_success)?; + let cur_sortition = SortitionState::try_from(current_sortition)?; let last_sortition = last_sortition .map(SortitionState::try_from) .transpose() .ok() .flatten(); - let latest_consensus_hash = latest_ch; - Ok(Self { cur_sortition, last_sortition, - latest_consensus_hash, config, }) } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6e3bab341e0..e59438db9fa 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -88,6 +88,15 @@ struct GetStackersErrorResp { err_msg: String, } +/// Result from fetching current and last sortition: +/// two sortition infos +pub struct CurrentAndLastSortition { + /// the latest winning sortition in the current burnchain fork + pub current_sortition: SortitionInfo, + /// the last winning sortition prior to `current_sortition`, if there was one + pub last_sortition: Option, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -484,6 +493,47 @@ impl StacksClient { Ok(tenures) } + /// Get the current winning sortition and the last winning sortition + pub fn get_current_and_last_sortition(&self) -> Result { + debug!("stacks_node_client: Getting current and prior sortition..."); + let path = format!("{}/latest_and_last", self.sortition_info_path()); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let send_request = || { + self.stacks_node_client.get(&path).send().map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) + }; + let response = send_request()?; + timer.stop_and_record(); + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let mut info_list: VecDeque = response.json()?; + let Some(current_sortition) = info_list.pop_front() else { + return Err(ClientError::UnexpectedResponseFormat( + "Empty SortitionInfo returned".into(), + )); + }; + if !current_sortition.was_sortition { + return Err(ClientError::UnexpectedResponseFormat( + "'Current' SortitionInfo returned which was not a winning sortition".into(), + )); + } + let last_sortition = if current_sortition.last_sortition_ch.is_some() { + let Some(last_sortition) = info_list.pop_back() else { + return Err(ClientError::UnexpectedResponseFormat("'Current' SortitionInfo has `last_sortition_ch` field, but corresponding data not returned".into())); + }; + Some(last_sortition) + } else { + None + }; + Ok(CurrentAndLastSortition { + current_sortition, + last_sortition, + }) + } + /// Get the sortition information for the latest sortition pub fn get_latest_sortition(&self) -> Result { debug!("stacks_node_client: Getting latest sortition..."); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5e0557ca26b..7b594530c26 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -28,6 +28,7 @@ use stacks_common::util::HexError; use {serde, serde_json}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{NakamotoBlock, 
NakamotoChainState, NakamotoStagingBlocksConn}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; @@ -51,10 +52,13 @@ pub enum QuerySpecifier { BurnchainHeaderHash(BurnchainHeaderHash), BlockHeight(u64), Latest, + /// Fetch the latest sortition *which was a winning sortition* and that sortition's + /// last sortition, returning two SortitionInfo structs. + LatestAndLast, } pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions"; -static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; +static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})(/(?P[0-9a-f]{1,64}))?)?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -112,6 +116,7 @@ impl TryFrom<(&str, &str)> for QuerySpecifier { value.1 }; match value.0 { + "latest_and_last" => Ok(Self::LatestAndLast), "consensus" => Ok(Self::ConsensusHash( ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, )), @@ -141,6 +146,74 @@ impl GetSortitionHandler { query: QuerySpecifier::Latest, } } + + fn get_sortition_info( + sortition_sn: BlockSnapshot, + sortdb: &SortitionDB, + ) -> Result { + let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = + if !sortition_sn.sortition { + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = + handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) + } else { + let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? + .ok_or_else(|| { + error!( + "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let stacks_parent_sn = handle + .get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? + .ok_or_else(|| { + warn!( + "Failed to load the snapshot of the winning block commits parent"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + + // try to figure out what the last snapshot in this fork was with a successful + // sortition. 
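+                // (`get_last_snapshot_with_sortition` performs a MARF lookup that walks back to
+                // the most recent winning snapshot at or below a given height, so it is costly.)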
+ // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` + let last_sortition_ch = if stacks_parent_sn.sortition { + stacks_parent_sn.consensus_hash.clone() + } else { + // we actually need to perform the marf lookup + let last_sortition = handle.get_last_snapshot_with_sortition( + sortition_sn.block_height.saturating_sub(1), + )?; + last_sortition.consensus_hash + }; + + ( + sortition_sn.miner_pk_hash.clone(), + Some(stacks_parent_sn.consensus_hash), + Some(block_commit.block_header_hash), + Some(last_sortition_ch), + ) + }; + + Ok(SortitionInfo { + burn_block_hash: sortition_sn.burn_header_hash, + burn_block_height: sortition_sn.block_height, + burn_header_timestamp: sortition_sn.burn_header_timestamp, + sortition_id: sortition_sn.sortition_id, + parent_sortition_id: sortition_sn.parent_sortition_id, + consensus_hash: sortition_sn.consensus_hash, + was_sortition: sortition_sn.sortition, + miner_pk_hash160, + stacks_parent_ch, + last_sortition_ch, + committed_block_hash, + }) + } } /// Decode the HTTP request impl HttpRequest for GetSortitionHandler { @@ -169,9 +242,15 @@ impl HttpRequest for GetSortitionHandler { let req_contents = HttpRequestContents::new().query_string(query); self.query = QuerySpecifier::Latest; - if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { - self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; - } + match (captures.name("key"), captures.name("value")) { + (Some(key), None) => { + self.query = QuerySpecifier::try_from((key.as_str(), ""))?; + } + (Some(key), Some(value)) => { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; + } + _ => {} + }; Ok(req_contents) } @@ -194,81 +273,37 @@ impl RPCRequestHandler for GetSortitionHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = - node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { - let query_result = match self.query { - QuerySpecifier::Latest => { + let result = node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => Ok(Some(network.burnchain_tip.clone())), + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + } + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + } + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + } + QuerySpecifier::LatestAndLast => { + if network.burnchain_tip.sortition { + // optimization: if the burn chain tip had a sortition, just return that Ok(Some(network.burnchain_tip.clone())) - }, - QuerySpecifier::ConsensusHash(ref consensus_hash) => { - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) - }, - QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot(burn_hash) - }, - QuerySpecifier::BlockHeight(burn_height) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot_by_height(burn_height) - }, - }; - let sortition_sn = query_result? 
- .ok_or_else(|| ChainError::NoSuchBlockError)?; - - let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; - (None, None, None, Some(last_sortition.consensus_hash)) - } else { - let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? - .ok_or_else(|| { - error!( - "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? - .ok_or_else(|| { - warn!( - "Failed to load the snapshot of the winning block commits parent"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - - // try to figure out what the last snapshot in this fork was with a successful - // sortition. - // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` - let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 { - stacks_parent_sn.consensus_hash.clone() } else { - // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height.saturating_sub(1))?; - last_sortition.consensus_hash - }; - - (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash), - Some(last_sortition_ch)) - }; - - Ok(SortitionInfo { - burn_block_hash: sortition_sn.burn_header_hash, - burn_block_height: sortition_sn.block_height, - burn_header_timestamp: sortition_sn.burn_header_timestamp, - sortition_id: sortition_sn.sortition_id, - parent_sortition_id: sortition_sn.parent_sortition_id, - consensus_hash: sortition_sn.consensus_hash, - was_sortition: sortition_sn.sortition, - miner_pk_hash160, - stacks_parent_ch, - last_sortition_ch, - committed_block_hash, - }) - }); + // we actually need to perform a marf lookup to find that last snapshot + // with a sortition + let handle = sortdb.index_handle_at_tip(); + let last_sortition = handle + .get_last_snapshot_with_sortition(network.burnchain_tip.block_height)?; + Ok(Some(last_sortition)) + } + } + }; + let sortition_sn = query_result?.ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(sortition_sn, sortdb) + }); let block = match result { Ok(block) => block, @@ -290,8 +325,44 @@ impl RPCRequestHandler for GetSortitionHandler { } }; + let last_sortition_ch = block.last_sortition_ch.clone(); + let mut info_list = vec![block]; + if self.query == QuerySpecifier::LatestAndLast { + // if latest **and** last are requested, lookup the sortition info for last_sortition_ch + if let Some(last_sortition_ch) = last_sortition_ch { + let result = node.with_node_state(|_, sortdb, _, _, _| { + let last_sortition_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &last_sortition_ch, + )? 
+ .ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(last_sortition_sn, sortdb) + }); + let last_block = match result { + Ok(block) => block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Could not find snapshot for the `last_sortition_ch`({last_sortition_ch})\n")), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load snapshot for `last_sortition_ch`({last_sortition_ch}): {:?}\n", &e); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + info_list.push(last_block); + } + } + let preamble = HttpResponsePreamble::ok_json(&preamble); - let result = HttpResponseContents::try_from_json(&block)?; + let result = HttpResponseContents::try_from_json(&info_list)?; Ok((preamble, result)) } } @@ -302,7 +373,7 @@ impl HttpResponse for GetSortitionHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let sortition_info: SortitionInfo = parse_json(preamble, body)?; + let sortition_info: Vec = parse_json(preamble, body)?; Ok(HttpResponsePayload::try_from_json(sortition_info)?) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 32924ab7b78..4057852c41d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5506,7 +5506,7 @@ fn signer_chainstate() { let time_start = Instant::now(); let proposal = loop { let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); - if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + if proposal.0.header.consensus_hash == sortitions_view.cur_sortition.consensus_hash { break proposal; } if time_start.elapsed() > Duration::from_secs(20) { From 77c1036bf96494a95198d5c7381dc74c8e5887b7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 20:45:14 -0500 Subject: [PATCH 634/910] chore: remove dead code, fix unit test build --- stacks-signer/src/client/stacks_client.rs | 43 +---------------------- stacks-signer/src/tests/chainstate.rs | 1 - 2 files changed, 1 insertion(+), 43 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index e59438db9fa..09c0040aeab 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,4 +1,3 @@ -use std::collections::VecDeque; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -14,6 +13,7 @@ use std::collections::VecDeque; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
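+// `VecDeque` is used by `get_current_and_last_sortition` below to pop the current
+// sortition off the front of the node's response and the prior one off the back.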
+use std::collections::VecDeque; use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; @@ -534,47 +534,6 @@ impl StacksClient { }) } - /// Get the sortition information for the latest sortition - pub fn get_latest_sortition(&self) -> Result { - debug!("stacks_node_client: Getting latest sortition..."); - let path = self.sortition_info_path(); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); - let send_request = || { - self.stacks_node_client.get(&path).send().map_err(|e| { - warn!("Signer failed to request latest sortition"; "err" => ?e); - e - }) - }; - let response = send_request()?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let sortition_info = response.json()?; - Ok(sortition_info) - } - - /// Get the sortition information for a given sortition - pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { - debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); - let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); - let timer_label = format!("{}/consensus/:consensus_hash", self.sortition_info_path()); - let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); - let send_request = || { - self.stacks_node_client.get(&path).send().map_err(|e| { - warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); - e - }) - }; - let response = send_request()?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let sortition_info = response.json()?; - Ok(sortition_info) - } - /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { debug!("stacks_node_client: Getting peer info..."); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index a13ab24a59d..53f60e9cfe3 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -82,7 +82,6 @@ fn setup_test_environment( }); let view = SortitionsView { - latest_consensus_hash: cur_sortition.consensus_hash, cur_sortition, last_sortition, config: ProposalEvalConfig { From b396798c489361516d96bc432e390a415842fe88 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 21:03:20 -0500 Subject: [PATCH 635/910] add /v3/sortitions to openapi.yaml --- .../api/core-node/get_sortitions.example.json | 15 +++++++ ...t_sortitions_latest_and_prior.example.json | 28 +++++++++++++ docs/rpc/openapi.yaml | 40 +++++++++++++++++++ 3 files changed, 83 insertions(+) create mode 100644 docs/rpc/api/core-node/get_sortitions.example.json create mode 100644 docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json diff --git a/docs/rpc/api/core-node/get_sortitions.example.json b/docs/rpc/api/core-node/get_sortitions.example.json new file mode 100644 index 00000000000..a56fd887b1d --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions.example.json @@ -0,0 +1,15 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": 
"0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + } +] diff --git a/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json new file mode 100644 index 00000000000..db970637ed3 --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json @@ -0,0 +1,28 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + }, + { + "burn_block_hash": "0x496ff02cb63a4850d0bdee5fab69284b6eb0392b4538e1c462f82362c5becfa4", + "burn_block_height": 230, + "burn_header_timestamp": 1726797570, + "sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "parent_sortition_id": "0xf9058692055cbd879d7f71e566e44b905a887b2b182407ed596b5d6499ceae2a", + "consensus_hash": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "last_sortition_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "committed_block_hash": "0x36ee5f7f7271de1c1d4cd830e36320b51e01605547621267ae6e9b4e9b10f95e" + } +] diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 3d4249329e1..e01a0956d10 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -675,3 +675,43 @@ paths: schema: type: string + /v3/sortitions/{lookup_kind}/{lookup}: + get: + summary: Fetch information about evaluated burnchain blocks (i.e., sortitions). + tags: + - Blocks + operationId: get_sortitions + description: + Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. + responses: + "200": + description: Information for the given reward cycle + content: + application/json: + example: + $ref: ./api/core-node/get_sortitions.example.json + "200": + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + content: + application/json: + example: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + parameters: + - name: lookup_kind + in: path + description: |- + The style of lookup that should be performed. If not given, the most recent burn block processed will be returned. + Otherwise, the `lookup_kind` should be one of the following strings: + * `consensus` - find the burn block using the consensus hash supplied in the `lookup` field. + * `burn_height` - find the burn block using the burn block height supplied in the `lookup` field. 
+ * `burn` - find the burn block using the burn block hash supplied in the `lookup` field. + * `latest_and_last` - return information about the latest burn block with a winning miner *and* the previous such burn block + required: false + schema: + type: string + - name: lookup + in: path + description: The value to use for the lookup if `lookup_kind` is `consensus`, `burn_height`, or `burn` + required: false + schema: + type: string From e857672a8924e76a540b96f0dc6722847274e6b0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 21:29:30 -0500 Subject: [PATCH 636/910] docs: correct multi-example openapi.yaml --- docs/rpc/openapi.yaml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index e01a0956d10..5547d3bcb66 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -685,17 +685,18 @@ paths: Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. responses: "200": - description: Information for the given reward cycle + description: Information for the burn block or in the case of `latest_and_last`, multiple burn blocks content: application/json: - example: - $ref: ./api/core-node/get_sortitions.example.json - "200": - description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. - content: - application/json: - example: - $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + examples: + Latest: + description: A single element list is returned when just one sortition is requested + value: + $ref: ./api/core-node/get_sortitions.example.json + LatestAndLast: + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + value: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json parameters: - name: lookup_kind in: path From 7ef8809ad0e54166a5a87527e29ecbd15c50acb1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 22:32:36 -0400 Subject: [PATCH 637/910] test: reduce flakiness in `partial_tenure_fork` integration test --- testnet/stacks-node/src/tests/signer/v0.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27f48b69175..034daa9e2d2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3567,7 +3567,7 @@ fn partial_tenure_fork() { } let num_signers = 5; - let max_nakamoto_tenures = 20; + let max_nakamoto_tenures = 30; let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer @@ -3611,13 +3611,18 @@ fn partial_tenure_fork() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + // Move epoch 2.5 and 3.0 earlier, so we have more time for the // test before re-stacking is required. 
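             // (These indices assume the default epoch schedule, in which
             // epochs[6] is 2.4, epochs[7] is 2.5, and epochs[8] is 3.0.)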
if let Some(epochs) = config.burnchain.epochs.as_mut() { - epochs[6].end_height = 121; - epochs[7].start_height = 121; - epochs[7].end_height = 151; - epochs[8].start_height = 151; + epochs[6].end_height = 131; + epochs[7].start_height = 131; + epochs[7].end_height = 166; + epochs[8].start_height = 166; } else { panic!("Expected epochs to be set"); } @@ -3694,8 +3699,8 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { - if btc_blocks_mined > max_nakamoto_tenures { + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3851,7 +3856,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); - continue; + break; } else { panic!("Failed to submit tx: {}", e); } From cb1a47cef615fc480f1eb7abdafc4a16a4c6a9a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 20 Sep 2024 10:50:04 -0500 Subject: [PATCH 638/910] feat: optimize mempool iteration by skipping repeated invocation after mempool exhausted --- stackslib/src/chainstate/stacks/miner.rs | 21 +++++++++++++----- .../stacks/tests/block_construction.rs | 1 + stackslib/src/core/mempool.rs | 22 +++++++++++++------ 3 files changed, 32 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 0195385d3b0..78d6a477819 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2228,10 +2228,10 @@ impl StacksBlockBuilder { debug!("Block transaction selection begins (parent height = {tip_height})"); let result = { - let mut intermediate_result: Result<_, Error> = Ok(0); + let mut loop_result = Ok(()); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; - intermediate_result = mempool.iterate_candidates( + let intermediate_result = mempool.iterate_candidates( epoch_tx, &mut tx_events, mempool_settings.clone(), @@ -2390,8 +2390,19 @@ impl StacksBlockBuilder { let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist); } - if intermediate_result.is_err() { - break; + match intermediate_result { + Err(e) => { + loop_result = Err(e); + break; + } + Ok((_txs_considered, stop_reason)) => { + match stop_reason { + MempoolIterationStopReason::NoMoreCandidates => break, + MempoolIterationStopReason::DeadlineReached => break, + // if the iterator function exited, let the loop tick: it checks the block limits + MempoolIterationStopReason::IteratorExited => {} + } + } } if num_considered == 0 { @@ -2399,7 +2410,7 @@ impl StacksBlockBuilder { } } debug!("Block transaction selection finished (parent height {}): {} transactions selected ({} considered)", &tip_height, num_txs, considered.len()); - intermediate_result + loop_result }; mempool.drop_txs(&invalidated_txs)?; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 36997105356..352679c2095 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5072,6 +5072,7 @@ fn paramaterized_mempool_walk_test( }, ) .unwrap() + .0 == 0 { break; diff --git a/stackslib/src/core/mempool.rs 
b/stackslib/src/core/mempool.rs index fe75d62bd2c..0dff4796dcb 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -144,6 +144,14 @@ pub enum MemPoolSyncData { TxTags([u8; 32], Vec<Txid>), } +pub enum MempoolIterationStopReason { + NoMoreCandidates, + DeadlineReached, + /// If the iteration function supplied to mempool iteration exited + /// (i.e., the transaction evaluator returned an early exit command) + IteratorExited, +} + impl StacksMessageCodec for MemPoolSyncData { fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> { match *self { @@ -1592,7 +1600,7 @@ impl MemPoolDB { output_events: &mut Vec<TransactionEvent>, settings: MemPoolWalkSettings, mut todo: F, - ) -> Result<u64, E> + ) -> Result<(u64, MempoolIterationStopReason), E> where C: ClarityConnection, F: FnMut( @@ -1643,11 +1651,11 @@ impl MemPoolDB { .query(NO_PARAMS) .map_err(|err| Error::SqliteError(err))?; - loop { + let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { debug!("Mempool iteration deadline exceeded"; "deadline_ms" => settings.max_walk_time_ms); - break; + break MempoolIterationStopReason::DeadlineReached; } let start_with_no_estimate = @@ -1687,7 +1695,7 @@ impl MemPoolDB { ), None => { debug!("No more transactions to consider in mempool"); - break; + break MempoolIterationStopReason::NoMoreCandidates; } } } @@ -1875,7 +1883,7 @@ impl MemPoolDB { } None => { debug!("Mempool iteration early exit from iterator"); - break; + break MempoolIterationStopReason::IteratorExited; } } @@ -1885,7 +1893,7 @@ impl MemPoolDB { candidate_cache.len() ); candidate_cache.reset(); - } + }; // drop these rusqlite statements and queries, since their existence as immutable borrows on the // connection prevents us from beginning a transaction below (which requires a mutable @@ -1908,7 +1916,7 @@ impl MemPoolDB { "considered_txs" => u128::from(total_considered), "elapsed_ms" => start_time.elapsed().as_millis() ); - Ok(total_considered) + Ok((total_considered, stop_reason)) } pub fn conn(&self) -> &DBConn { From 908c40b3e3f009c3cc95038f86fa75eace8931bc Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 20 Sep 2024 10:19:27 -0700 Subject: [PATCH 639/910] fix: update log levels with signerDB write errors --- stacks-signer/src/v0/signer.rs | 2 +- stacks-signer/src/v1/signer.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 654a00dc66a..fa34cc4b429 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -782,7 +782,7 @@ impl Signer { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } if let Err(e) = self.signer_db.insert_block(&block_info) { - warn!("{self}: Failed to update block state: {e:?}",); + error!("{self}: Failed to update block state: {e:?}",); panic!("{self} Failed to update block state: {e}"); } } diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 08ccde5a920..aa8fcfb0d2f 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -239,12 +239,13 @@ impl SignerTrait<SignerMessage> for Signer { self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) { - warn!( + error!( "Failed to write burn block event to signerdb"; "err" => ?e, "burn_header_hash" => %burn_header_hash, "burn_height" => burn_height ); + panic!("Failed to write burn block event to signerdb"); } } } From 85e41f7d602a5bb0c054f4b448410894cdfeb70b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date:
Fri, 20 Sep 2024 10:19:50 -0700 Subject: [PATCH 640/910] Fix: update error messages when looking up staging_blocks version --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index c0b364eea88..91aad5a3253 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -675,7 +675,7 @@ impl StacksChainState { let version: Option<String> = match query_row(&conn, qry, args) { Ok(x) => x, Err(e) => { - debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); + error!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); return Err(ChainstateError::DBError(DBError::Corruption)); } }; @@ -687,7 +687,7 @@ impl StacksChainState { Ok(ver) } None => { - debug!("No version present in Nakamoto staging blocks DB; defaulting to 1"); + error!("No version present in Nakamoto staging blocks `db_version` table"); Err(ChainstateError::DBError(DBError::Corruption)) } } From 89fdacb4d06065b81de898b70c4f7da813fa2dda Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 20 Sep 2024 10:23:54 -0700 Subject: [PATCH 641/910] fix: include pox_treatment in miner_signature_hash --- stackslib/src/chainstate/nakamoto/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 756212ee54c..24f92ad02b3 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -734,6 +734,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.timestamp)?; + write_next(fd, &self.pox_treatment)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -1876,7 +1877,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)?
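To see why the one-line `write_next(fd, &self.pox_treatment)?` addition above matters, recall what the miner signature hash protects: only the fields folded into the hasher are covered by the miner's signature, and any field left out can be altered without invalidating that signature. The sketch below mimics the pattern with stand-in types; the `sha2` crate is used here in place of `Sha512Trunc256Sum`, so treat it as an illustration rather than the node's actual code.

```
// Illustrative only: a header field that is not hashed is not signed.
use sha2::{Digest, Sha512_256};

struct Header {
    tx_merkle_root: [u8; 32],
    state_index_root: [u8; 32],
    timestamp: u64,
    pox_treatment: Vec<u8>, // serialized reward-set punishment bitvec
}

fn miner_signature_hash(h: &Header) -> [u8; 32] {
    let mut hasher = Sha512_256::new();
    hasher.update(h.tx_merkle_root);
    hasher.update(h.state_index_root);
    hasher.update(h.timestamp.to_be_bytes());
    // The fix above adds the equivalent of this line: without it, two headers
    // differing only in pox_treatment hash (and therefore sign) identically.
    hasher.update(&h.pox_treatment);
    hasher.finalize().into()
}
```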
@@ -1888,7 +1889,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( From 218bd0b40afd4417b889695619007b464f505e67 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 11:11:48 -0700 Subject: [PATCH 642/910] Do not count received valid signatures towards threshold weight when ignore flag set Signed-off-by: Jacinta Ferrant --- .../src/nakamoto_node/sign_coordinator.rs | 12 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 60 ++++++++++--------- 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 29a64cfb27f..1ac2618a537 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -886,11 +886,6 @@ impl SignCoordinator { ); continue; } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } if Self::fault_injection_ignore_signatures() { warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; @@ -906,6 +901,12 @@ impl SignCoordinator { continue; } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), @@ -986,7 +987,6 @@ impl SignCoordinator { } }; } - // After gathering all signatures, return them if we've hit the threshold if total_weight_signed >= self.weight_threshold { info!("SignCoordinator: Received enough signatures. 
Continuing."; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f14d9624043..c123217ce0c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2227,26 +2227,39 @@ fn signers_broadcast_signed_blocks() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - sleep_ms(10_000); - + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - sleep_ms(10_000); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + ); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - let signer_pushed_before = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - // submit a tx so that the miner will mine a block + // submit a tx so that the miner will mine a blockn let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); @@ -2254,26 +2267,16 @@ fn signers_broadcast_signed_blocks() { debug!("Transaction sent; waiting for block-mining"); - let start = Instant::now(); - let duration = 60; - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + wait_for(30, || { let signer_pushed = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && signer_pushed > signer_pushed_before - && info.stacks_tip_height > info_before.stacks_tip_height - { - break; - } - debug!( "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", blocks_mined, @@ -2283,12 +2286,11 @@ fn signers_broadcast_signed_blocks() { info.stacks_tip_height, info_before.stacks_tip_height ); - - std::thread::sleep(Duration::from_millis(100)); - if start.elapsed() >= Duration::from_secs(duration) { - panic!("Timed out"); - } - } + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height + && signer_pushed > signer_pushed_before) + }) + .expect("Timed out waiting for second nakamoto block to be mined"); signer_test.shutdown(); } @@ -4754,7 +4756,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. 
"); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing singers to broadcast block N+1 to the miner"); + info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); // Assert the N+1' block was rejected From 351f9e6251503f0544a8fee4f06f46eeb06e1cb4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 13:26:17 -0700 Subject: [PATCH 643/910] Do not assume every signers signature makes it before miner quits waiting for unnecessary signatures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++++++-------- 1 file changed, 95 insertions(+), 61 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c123217ce0c..8d8ff07ac0d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4130,31 +4130,34 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); + let long_timeout = 60; + let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + + // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for N to be mined and processed"); - sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4173,13 +4176,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers); - // Ensure that the block was accepted globally so the stacks tip has not advanced to N + // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block to ensure it is marked globally accepted + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted let rejecting_signers: Vec<_> = signer_test .signer_stacks_private_keys .iter() @@ -4191,18 +4194,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = 
submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); - let start_time = Instant::now(); + + // submit a tx so that the miner will mine a stacks block N+1 let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4210,7 +4215,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for block to be mined and processed"); - loop { + wait_for(long_timeout, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events .into_iter() @@ -4235,14 +4240,10 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { } }) .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + Ok(block_rejections.len() == rejecting_signers.len()) + }) + .expect("Timed out waiting for block proposal rejections"); + // Assert the block was mined let info_after = signer_test .stacks_client @@ -4263,13 +4264,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4277,11 +4279,12 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(Vec::new()); + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4297,20 +4300,35 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = 
test_observer::get_mined_nakamoto_blocks(); let block_n_2 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_2.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] @@ -4351,7 +4369,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); @@ -4360,13 +4378,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4375,17 +4395,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }) .expect("Timed out waiting for block to be mined and processed"); - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); @@ -4404,16 +4422,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .replace(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4433,6 +4454,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { Ok(ignored_signers.len() + 
ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4440,13 +4462,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); assert_eq!(info_after, info_before); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -4458,23 +4481,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4491,15 +4510,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4509,6 +4519,30 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_1_prime.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] From 
804f6f3a719cf0c74a34e704d1118c060c7ec254 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 16:58:37 -0400 Subject: [PATCH 644/910] test: move the 2.5 and 3.0 activation heights earlier for this test This allows us to avoid hitting block 240, which is when the stackers get unstacked and the chain stalls, making `partial_tenure_fork` less flaky --- testnet/stacks-node/src/tests/signer/mod.rs | 19 ++++++++++++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 11 +++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0b38a792346..95321664926 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -748,9 +748,22 @@ fn setup_stx_btc_node<G: FnMut(&mut NeonConfig) -> ()>( info!("Make new BitcoinRegtestController"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - info!("Bootstraping..."); - // Should be 201 for other tests? - btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys); + let epoch_2_5_start = usize::try_from( + naka_conf + .burnchain + .epochs + .as_ref() + .unwrap() + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height, + ) + .expect("Failed to get epoch 2.5 start height"); + let bootstrap_block = epoch_2_5_start - 6; + + info!("Bootstraping to block {bootstrap_block}..."); + btc_regtest_controller.bootstrap_chain_to_pks(bootstrap_block, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8d8ff07ac0d..27f48b69175 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3610,6 +3610,17 @@ fn partial_tenure_fork() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required.
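Both the bootstrap computation above and the epoch tweak this hunk is adding read the configured epoch schedule, so the arithmetic is worth spelling out once. A compact sketch with stand-in types follows; the real `StacksEpoch` carries more fields, and the headroom of 6 mirrors `epoch_2_5_start - 6` in the patch.

```
// Derive the burnchain bootstrap height from an epoch schedule: stop a few
// blocks short of Epoch 2.5 so the test controls the boundary crossing.
#[derive(Clone, Copy, PartialEq)]
enum EpochId { Epoch24, Epoch25, Epoch30 }

struct Epoch { id: EpochId, start_height: u64 }

fn bootstrap_height(epochs: &[Epoch], headroom: u64) -> Option<u64> {
    let start = epochs.iter().find(|e| e.id == EpochId::Epoch25)?.start_height;
    start.checked_sub(headroom)
}

fn main() {
    let schedule = [
        Epoch { id: EpochId::Epoch24, start_height: 0 },
        Epoch { id: EpochId::Epoch25, start_height: 131 },
        Epoch { id: EpochId::Epoch30, start_height: 166 },
    ];
    assert_eq!(bootstrap_height(&schedule, 6), Some(125));
}
```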
+ if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[6].end_height = 121; + epochs[7].start_height = 121; + epochs[7].end_height = 151; + epochs[8].start_height = 151; + } else { + panic!("Expected epochs to be set"); + } }, Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), None, From 6ed7d4edd9bdc5c60522e4bfcc512d1a280c1734 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 22:32:36 -0400 Subject: [PATCH 645/910] test: reduce flakiness in `partial_tenure_fork` integration test --- testnet/stacks-node/src/tests/signer/v0.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27f48b69175..034daa9e2d2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3567,7 +3567,7 @@ fn partial_tenure_fork() { } let num_signers = 5; - let max_nakamoto_tenures = 20; + let max_nakamoto_tenures = 30; let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer @@ -3611,13 +3611,18 @@ fn partial_tenure_fork() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + // Move epoch 2.5 and 3.0 earlier, so we have more time for the // test before re-stacking is required. if let Some(epochs) = config.burnchain.epochs.as_mut() { - epochs[6].end_height = 121; - epochs[7].start_height = 121; - epochs[7].end_height = 151; - epochs[8].start_height = 151; + epochs[6].end_height = 131; + epochs[7].start_height = 131; + epochs[7].end_height = 166; + epochs[8].start_height = 166; } else { panic!("Expected epochs to be set"); } @@ -3694,8 +3699,8 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { - if btc_blocks_mined > max_nakamoto_tenures { + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3851,7 +3856,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); - continue; + break; } else { panic!("Failed to submit tx: {}", e); } From 054033c0b6aba27b5caaa4ab849b44e2dce2ad64 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 11:11:32 -0700 Subject: [PATCH 646/910] Empty sortition needed a longer wait time before ignoring a block and should wait for a block more definitively Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 48 +++++++--------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f14d9624043..2c5c8484caf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -231,27 +231,20 @@ impl SignerTest { Some(self.num_stacking_cycles), ); info!("Waiting for signer set calculation."); - let mut reward_set_calculated = false; - let short_timeout = 
Duration::from_secs(60); - let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event let reward_cycle = self.get_current_reward_cycle() + 1; - while !reward_set_calculated { - let reward_set = self + wait_for(30, || { + Ok(self .stacks_client .get_reward_set_signers(reward_cycle) - .expect("Failed to check if reward set is calculated"); - reward_set_calculated = reward_set.is_some(); - if reward_set_calculated { - debug!("Signer set: {:?}", reward_set.unwrap()); - } - std::thread::sleep(Duration::from_secs(1)); - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for reward set calculation" - ); - } + .expect("Failed to check if reward set is calculated") + .map(|reward_set| { + debug!("Signer set: {:?}", reward_set); + }) + .is_some()) + }) + .expect("Timed out waiting for reward set calculation"); info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state @@ -264,8 +257,7 @@ impl SignerTest { info!("Signers initialized"); self.run_until_epoch_3_boundary(); - std::thread::sleep(Duration::from_secs(1)); - wait_for(60, || { + wait_for(30, || { Ok(get_chain_info_opt(&self.running_nodes.conf).is_some()) }) .expect("Timed out waiting for network to restart after 3.0 boundary reached"); @@ -275,11 +267,11 @@ impl SignerTest { // could be other miners mining blocks. let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let height = get_chain_info(&self.running_nodes.conf).stacks_tip_height; - Ok(height > height_before) + self.mine_nakamoto_block(Duration::from_secs(30)); + wait_for(30, || { + Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) - .unwrap(); + .expect("Timed out waiting for first Nakamoto block after 3.0 boundary"); info!("Ready to mine Nakamoto blocks!"); } @@ -553,19 +545,9 @@ fn miner_gather_signatures() { let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); - // give the system a chance to reach the Nakamoto start tip - // mine a Nakamoto block - wait_for(30, || { - let blocks_mined = mined_blocks.load(Ordering::SeqCst); - Ok(blocks_mined > blocks_mined_before) - }) - .unwrap(); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -2317,7 +2299,7 @@ fn empty_sortition() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(5); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], From 72a56cfa60d2522e9dc3e8da78100f8b2101a605 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 11:11:32 -0700 Subject: [PATCH 647/910] Empty sortition needed a longer wait time before ignoring a block and should wait for a block more definitively Signed-off-by: 
Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 48 +++++++--------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 034daa9e2d2..12302e5e6bb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -231,27 +231,20 @@ impl SignerTest { Some(self.num_stacking_cycles), ); info!("Waiting for signer set calculation."); - let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(60); - let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event let reward_cycle = self.get_current_reward_cycle() + 1; - while !reward_set_calculated { - let reward_set = self + wait_for(30, || { + Ok(self .stacks_client .get_reward_set_signers(reward_cycle) - .expect("Failed to check if reward set is calculated"); - reward_set_calculated = reward_set.is_some(); - if reward_set_calculated { - debug!("Signer set: {:?}", reward_set.unwrap()); - } - std::thread::sleep(Duration::from_secs(1)); - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for reward set calculation" - ); - } + .expect("Failed to check if reward set is calculated") + .map(|reward_set| { + debug!("Signer set: {:?}", reward_set); + }) + .is_some()) + }) + .expect("Timed out waiting for reward set calculation"); info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state @@ -264,8 +257,7 @@ impl SignerTest { info!("Signers initialized"); self.run_until_epoch_3_boundary(); - std::thread::sleep(Duration::from_secs(1)); - wait_for(60, || { + wait_for(30, || { Ok(get_chain_info_opt(&self.running_nodes.conf).is_some()) }) .expect("Timed out waiting for network to restart after 3.0 boundary reached"); @@ -275,11 +267,11 @@ impl SignerTest { // could be other miners mining blocks. 
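A recurring theme in these test patches is swapping hand-rolled sleep-and-assert loops for the `wait_for` helper seen throughout the hunks above. The real helper lives in the stacks-node test utilities; the reconstruction below is an assumption about its shape, included only so the call sites (`wait_for(30, || Ok(..)).expect(..)`) read on their own.

```
use std::thread::sleep;
use std::time::{Duration, Instant};

// Poll `check` until it yields Ok(true), propagating its errors and timing
// out with an Err so callers can attach a message via `.expect(...)`.
fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Err(format!("timed out after {timeout_secs}s waiting for condition"))
}
```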
let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let height = get_chain_info(&self.running_nodes.conf).stacks_tip_height; - Ok(height > height_before) + self.mine_nakamoto_block(Duration::from_secs(30)); + wait_for(30, || { + Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) - .unwrap(); + .expect("Timed out waiting for first Nakamoto block after 3.0 boundary"); info!("Ready to mine Nakamoto blocks!"); } @@ -553,19 +545,9 @@ fn miner_gather_signatures() { let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); - // give the system a chance to reach the Nakamoto start tip - // mine a Nakamoto block - wait_for(30, || { - let blocks_mined = mined_blocks.load(Ordering::SeqCst); - Ok(blocks_mined > blocks_mined_before) - }) - .unwrap(); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -2319,7 +2301,7 @@ fn empty_sortition() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(5); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], From 63fa7fa2b1fce294897bcd20ef2be5d28a6881eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 11:42:41 -0700 Subject: [PATCH 648/910] Fix locally_rejected_blocks_overriden_by_global_acceptance Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 33 ++++++++++++++++------ 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 12302e5e6bb..4238f1c615d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4164,21 +4164,36 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + // Make sure that ALL signers accepted the block proposal + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == 
block_n.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted let rejecting_signers: Vec<_> = signer_test From 0f8c4b83c31133f565329a4037d05f1bedcea663 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 12:36:04 -0700 Subject: [PATCH 649/910] Cleanup locally and globally rejected tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 81 +++++- testnet/stacks-node/src/tests/signer/v0.rs | 300 +++++++------------- 2 files changed, 188 insertions(+), 193 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 95321664926..a0c8041401d 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -37,6 +37,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -46,7 +47,9 @@ use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use stacks::types::chainstate::StacksAddress; +use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -678,6 +681,82 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { + // Make sure that ALL signers accepted the block proposal + wait_for(timeout_secs, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(( + hash, + signature, + ))) => { + if hash == *signer_signature_hash + && expected_signers.iter().any(|pk| { + pk.verify(hash.bits(), &signature) + .expect("Failed to verify signature") + }) + { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == expected_signers.len()) + }) + } + + pub fn wait_for_block_rejections( + &self, + timeout_secs: u64, + expected_signers: &[StacksPublicKey], + reject_code: Option, + ) -> Result<(), String> { + wait_for(timeout_secs, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + 
.expect("Failed to recover public key from rejection"); + if expected_signers.contains(&rejected_pubkey) { + if let Some(reject_code) = reject_code.as_ref() { + if reject_code != &rejection.reason_code { + return None; + } + } + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == expected_signers.len()) + }) + } } fn setup_stx_btc_node ()>( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4238f1c615d..83b1dd3df78 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3956,29 +3956,36 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(20); signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N to be mined"); sender_nonce += 1; let info_after = signer_test.stacks_client.get_peer_info().unwrap(); assert_eq!( @@ -3988,15 +3995,13 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + signer_test + .wait_for_block_acceptance(30, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .take(num_signers / 2) - .collect(); + let rejecting_signers: Vec<_> = all_signers.iter().cloned().take(num_signers / 2).collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -4006,42 +4011,12 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1"); - let start_time = 
Instant::now(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - loop { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + signer_test + .wait_for_block_rejections(60, &rejecting_signers, Some(RejectCode::TestingDirective)) + .expect("Timed out waiting for block rejection of N+1"); assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); let info_after = signer_test.stacks_client.get_peer_info().unwrap(); @@ -4057,13 +4032,17 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) + }) + .expect("Timed out waiting for stacks block N+1' to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4073,14 +4052,6 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info_before.stacks_tip_height + 1 ); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let start_time = Instant::now(); - while test_observer::get_mined_nakamoto_blocks().last().unwrap() == block_n_1 { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1_prime = nakamoto_blocks.last().unwrap(); assert_eq!( @@ -4088,6 +4059,10 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n_1); + // Verify that all signers accepted the new block proposal + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] @@ -4122,13 +4097,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let send_amt = 100; let send_fee = 180; let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = 
signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = 60; let short_timeout = 30; signer_test.boot_to_epoch_3(); @@ -4148,11 +4130,12 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info!("Submitted tx {tx} in to mine block N"); wait_for(short_timeout, || { - let info_after = signer_test + Ok(signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) }) .expect("Timed out waiting for N to be mined and processed"); @@ -4171,35 +4154,15 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); // Make sure that ALL signers accepted the block proposal - wait_for(short_timeout, || { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - if hash == block_n.signer_signature_hash { - Some(signature) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(signatures.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys + let rejecting_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 3 / 10) .collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4220,42 +4183,24 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { sender_nonce += 1; info!("Submitted tx {tx} in to mine block N+1"); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for block to be mined and processed"); - wait_for(long_timeout, || { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(block_rejections.len() == rejecting_signers.len()) + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before 
+ && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block proposal rejections"); + .expect("Timed out waiting for stacks block N+1 to be mined"); + + signer_test + .wait_for_block_rejections( + short_timeout, + &rejecting_signers, + Some(RejectCode::TestingDirective), + ) + .expect("Timed out waiting for block rejection of N+1"); // Assert the block was mined let info_after = signer_test @@ -4267,15 +4212,6 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4283,6 +4219,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_1.signer_signature_hash, + &all_signers[num_signers * 3 / 10 + 1..], + ) + .expect("Timed out waiting for block acceptance of N+1"); + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); @@ -4297,14 +4241,16 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for stacks block N+2 to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4320,28 +4266,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { assert_ne!(block_n_2, block_n_1); // Make sure that ALL signers accepted the block proposal - wait_for(short_timeout, || { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - if hash == block_n_2.signer_signature_hash { - Some(signature) - } else { - None - } - } - _ => None, - } - }) - .collect::<Vec<_>>(); - Ok(signatures.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_2.signer_signature_hash, +
&all_signers, + ) + .expect("Timed out waiting for block acceptance of N+2"); } #[test] @@ -4381,6 +4312,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::<Vec<_>>(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = 30; signer_test.boot_to_epoch_3(); @@ -4423,10 +4359,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make more than 70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected - let ignoring_signers: Vec<_> = signer_test - .signer_stacks_private_keys + let ignoring_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 7 / 10) .collect(); TEST_IGNORE_ALL_BLOCK_PROPOSALS @@ -4534,28 +4469,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { assert_ne!(block_n_1_prime, block_n); // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure - wait_for(short_timeout, || { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - if hash == block_n_1_prime.signer_signature_hash { - Some(signature) - } else { - None - } - } - _ => None, - } - }) - .collect::<Vec<_>>(); - Ok(signatures.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] From 5f54144c9ce4e5a926ba497355fdc150ee4f3b53 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 13:55:15 -0700 Subject: [PATCH 650/910] Do not assume every signer rejects for testing directive reasons as they may hit the threshold rejection first Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 6 ----- testnet/stacks-node/src/tests/signer/v0.rs | 26 +++++++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a0c8041401d..a48677e0f64 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -724,7 +724,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, ) -> Result<(), String> { wait_for(timeout_secs, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); @@ -740,11 +739,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], @@ -3976,7 +3977,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - wait_for(30, || {
Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test .stacks_client @@ -3996,7 +3997,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); signer_test - .wait_for_block_acceptance(30, &block_n.signer_signature_hash, &all_signers) + .wait_for_block_acceptance( + short_timeout_secs, + &block_n.signer_signature_hash, + &all_signers, + ) .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); @@ -4014,8 +4019,9 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + // We cannot guarantee that ALL signers will reject due to the testing directive, as we may hit the majority threshold first. So ensure that we only assert that up to the threshold number rejected signer_test - .wait_for_block_rejections(60, &rejecting_signers, Some(RejectCode::TestingDirective)) + .wait_for_block_rejections(short_timeout_secs, &rejecting_signers) .expect("Timed out waiting for block rejection of N+1"); assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); @@ -4032,7 +4038,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); - wait_for(30, || { + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test .stacks_client @@ -4061,7 +4067,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { assert_ne!(block_n_1_prime, block_n_1); // Verify that all signers accepted the new block proposal signer_test - .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .wait_for_block_acceptance( + short_timeout_secs, + &block_n_1_prime.signer_signature_hash, + &all_signers, + ) .expect("Timed out waiting for block acceptance of N+1'"); } @@ -4195,11 +4205,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .expect("Timed out waiting for stacks block N+1 to be mined"); signer_test - .wait_for_block_rejections( - short_timeout, - &rejecting_signers, - Some(RejectCode::TestingDirective), - ) + .wait_for_block_rejections(short_timeout, &rejecting_signers) .expect("Timed out waiting for block rejection of N+1"); // Assert the block was mined From 550cd52156dd62919b8503cb9a19ed25004630ea Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 20 Sep 2024 17:26:15 -0400 Subject: [PATCH 651/910] test: resolve issues with `bitcoin_reorg_flap_with_follower` --- .../src/tests/neon_integrations.rs | 31 +++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8043c2032e9..ae661c5f114 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12398,6 +12398,10 @@ fn bitcoin_reorg_flap() { channel.stop_chains_coordinator(); } +/// Advance the bitcoin chain and wait for the miner and any followers to +/// process the next block. +/// NOTE: This only works if the followers are mock-mining, or else the counter +/// will not be updated.
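The mock-mining caveat matters because a follower's blocks-processed counter is only incremented by a run loop that actually processes blocks. A rough sketch of the counter polling such a helper builds on, assuming each run loop exposes an `Arc<AtomicU64>` that it bumps once per processed block (names here are illustrative, not the node's API):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;
    use std::thread;
    use std::time::{Duration, Instant};

    /// Poll `counter` until it exceeds `before`, or give up after `timeout`.
    fn wait_for_counter_bump(counter: &Arc<AtomicU64>, before: u64, timeout: Duration) -> bool {
        let deadline = Instant::now() + timeout;
        while Instant::now() < deadline {
            if counter.load(Ordering::SeqCst) > before {
                return true;
            }
            thread::sleep(Duration::from_millis(100));
        }
        false
    }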
fn next_block_and_wait_all( btc_controller: &mut BitcoinRegtestController, miner_blocks_processed: &Arc<AtomicU64>, @@ -12447,7 +12451,7 @@ fn bitcoin_reorg_flap_with_follower() { } let (conf, _miner_account) = neon_integration_test_conf(); - let timeout = None; + let timeout = Some(Duration::from_secs(60)); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -12461,10 +12465,12 @@ fn bitcoin_reorg_flap_with_follower() { eprintln!("Chain bootstrapped..."); let mut miner_run_loop = neon::RunLoop::new(conf.clone()); + let run_loop_stopper = miner_run_loop.get_termination_switch(); let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); let miner_channel = miner_run_loop.get_coordinator_channel().unwrap(); let mut follower_conf = conf.clone(); + follower_conf.node.mock_mining = true; follower_conf.events_observers.clear(); follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; @@ -12483,7 +12489,7 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); - thread::spawn(move || miner_run_loop.start(None, 0)); + let run_loop_thread = thread::spawn(move || miner_run_loop.start(None, 0)); wait_for_runloop(&miner_blocks_processed); // figure out the started node's port @@ -12499,23 +12505,20 @@ fn bitcoin_reorg_flap_with_follower() { ); let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone()); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || follower_run_loop.start(None, 0)); + let follower_thread = thread::spawn(move || follower_run_loop.start(None, 0)); wait_for_runloop(&follower_blocks_processed); eprintln!("Follower bootup complete!"); // first block wakes up the run loop - next_block_and_wait_all( - &mut btc_regtest_controller, - &miner_blocks_processed, - &[], - timeout, - ); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &miner_blocks_processed, 60); - // first block will hold our VRF registration + // next block will hold our VRF registration + // Note that the follower will not see its block processed counter bumped here next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, @@ -12609,9 +12612,11 @@ fn bitcoin_reorg_flap_with_follower() { assert_eq!(miner_channel.get_sortitions_processed(), 225); assert_eq!(follower_channel.get_sortitions_processed(), 225); - btcd_controller.stop_bitcoind().unwrap(); - miner_channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); } /// Tests the following: From dc28e8b620ac7a79144c9310e301e4006ca3c084 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:05:56 -0400 Subject: [PATCH 652/910] feat: mine a single block tenure off of a designated chain tip --- .../chainstate/nakamoto/coordinator/tests.rs | 62 +++++++++++++++++-- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 6a2a484790c..e56e55754cc 100644 ---
a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -538,6 +538,36 @@ impl<'a> TestPeer<'a> { miner_setup: F, after_block: G, ) -> NakamotoBlock + where + F: FnMut(&mut NakamotoBlockBuilder), + G: FnMut(&mut NakamotoBlock) -> bool, + { + let nakamoto_tip = { + let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + self.mine_single_block_tenure_at_tip( + &nakamoto_tip.index_block_hash(), + sender_key, + tenure_change_tx, + coinbase_tx, + miner_setup, + after_block, + ) + } + + pub fn mine_single_block_tenure_at_tip( + &mut self, + nakamoto_tip: &StacksBlockId, + sender_key: &StacksPrivateKey, + tenure_change_tx: &StacksTransaction, + coinbase_tx: &StacksTransaction, + miner_setup: F, + after_block: G, + ) -> NakamotoBlock where F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -547,6 +577,8 @@ impl<'a> TestPeer<'a> { let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let sender_acct = self.get_account(nakamoto_tip, &sender_addr.to_account_principal()); + // do a stx transfer in each block to a given recipient let mut blocks_and_sizes = self.make_nakamoto_tenure_and( tenure_change_tx.clone(), @@ -555,12 +587,11 @@ impl<'a> TestPeer<'a> { miner_setup, |_miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 1 { - let account = get_account(chainstate, sortdb, &sender_addr); let stx_transfer = make_token_transfer( chainstate, sortdb, &sender_key, - account.nonce, + sender_acct.nonce, 100, 1, &recipient_addr, @@ -607,12 +638,33 @@ impl<'a> TestPeer<'a> { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let nakamoto_tip = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + nakamoto_parent_tenure.last().as_ref().unwrap().block_id() + } else { + let tip = { + let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + tip.index_block_hash() + }; + + let miner_addr = self.miner.origin_address().unwrap(); + let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); + let tenure_change_tx = self .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = self.miner.make_nakamoto_coinbase(None, vrf_proof); + .make_nakamoto_tenure_change_with_nonce(tenure_change.clone(), miner_acct.nonce); + + let coinbase_tx = + self.miner + .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); - let block = self.mine_single_block_tenure( + let block = self.mine_single_block_tenure_at_tip( + &nakamoto_tip, sender_key, &tenure_change_tx, &coinbase_tx, From 639ab057ebeb4843332d90da12698440c75bee1f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:06:27 -0400 Subject: [PATCH 653/910] feat: get the parent ID of a nakamoto block --- stackslib/src/chainstate/nakamoto/mod.rs | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 756212ee54c..9e30e4bcb3b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ 
b/stackslib/src/chainstate/nakamoto/mod.rs @@ -110,8 +110,8 @@ use crate::net::Error as net_error; use crate::util_lib::boot; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{ - query_int, query_row, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, - DBConn, Error as DBError, FromRow, + query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, + tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, }; use crate::{chainstate, monitoring}; @@ -2479,6 +2479,26 @@ impl NakamotoChainState { Ok(None) } + /// Load the parent block ID of a Nakamoto block + pub fn get_nakamoto_parent_block_id( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result<Option<StacksBlockId>, ChainstateError> { + let sql = "SELECT parent_block_id FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let mut result = query_row_columns( + chainstate_conn, + sql, + &[&index_block_hash], + "parent_block_id", + )?; + if result.len() > 1 { + // even though `(consensus_hash,block_hash)` is the primary key, these are hashed to + // produce `index_block_hash`. So, `index_block_hash` is also unique w.h.p. + unreachable!("FATAL: multiple instances of index_block_hash"); + } + Ok(result.pop()) + } + /// Load a Nakamoto header pub fn get_block_header_nakamoto( chainstate_conn: &Connection, From 47171ce51fa0574351d5105fd68558fe5a059415 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:06:44 -0400 Subject: [PATCH 654/910] feat: allow the TestPeer user to specify a nakamoto tenure to build off of when mining --- .../src/chainstate/nakamoto/tests/node.rs | 167 ++++++++++-------- 1 file changed, 97 insertions(+), 70 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index aa00430f891..d23d608ec7d 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -214,6 +214,15 @@ impl TestMiner { &mut self, recipient: Option<PrincipalData>, vrf_proof: VRFProof, + ) -> StacksTransaction { + self.make_nakamoto_coinbase_with_nonce(recipient, vrf_proof, self.nonce) + } + + pub fn make_nakamoto_coinbase_with_nonce( + &mut self, + recipient: Option<PrincipalData>, + vrf_proof: VRFProof, + nonce: u64, ) -> StacksTransaction { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; - tx_coinbase.auth.set_origin_nonce(self.nonce); + tx_coinbase.auth.set_origin_nonce(nonce); let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); self.sign_as_origin(&mut tx_signer); @@ -237,6 +246,14 @@ impl TestMiner { pub fn make_nakamoto_tenure_change( &mut self, tenure_change: TenureChangePayload, + ) -> StacksTransaction { + self.make_nakamoto_tenure_change_with_nonce(tenure_change, self.nonce) + } + + pub fn make_nakamoto_tenure_change_with_nonce( + &mut self, + tenure_change: TenureChangePayload, + nonce: u64, ) -> StacksTransaction { let mut tx_tenure_change = StacksTransaction::new( TransactionVersion::Testnet, ); tx_tenure_change.chain_id = 0x80000000; tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; - tx_tenure_change.auth.set_origin_nonce(self.nonce); + tx_tenure_change.auth.set_origin_nonce(nonce); let mut tx_signer = StacksTransactionSigner::new(&tx_tenure_change); self.sign_as_origin(&mut tx_signer); @@ -504,38 +521,50 @@ impl
TestStacksNode { }; // the tenure-change contains a pointer to the end of the last tenure, which is currently - // the canonical tip - let (previous_tenure_end, previous_tenure_consensus_hash, previous_tenure_blocks) = { - let hdr = NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - if hdr.anchored_header.as_stacks_nakamoto().is_some() { - // building atop nakamoto - let tenure_len = NakamotoChainState::get_nakamoto_tenure_length( - self.chainstate.db(), - &hdr.index_block_hash(), - ) - .unwrap(); - debug!( - "Tenure length of Nakamoto tenure {} is {}; tipped at {}", - &hdr.consensus_hash, - tenure_len, - &hdr.index_block_hash() - ); - (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) - } else { - // building atop epoch2 - debug!( - "Tenure length of epoch2 tenure {} is {}; tipped at {}", - &parent_block_snapshot.consensus_hash, 1, &last_tenure_id - ); + // the canonical tip unless overridden + let (previous_tenure_end, previous_tenure_consensus_hash, previous_tenure_blocks) = + if let Some(nakamoto_parent_tenure) = parent_nakamoto_tenure.as_ref() { + let start_block = nakamoto_parent_tenure.first().clone().unwrap(); + let end_block = nakamoto_parent_tenure.last().clone().unwrap(); + let tenure_len = + end_block.header.chain_length + 1 - start_block.header.chain_length; ( - last_tenure_id, - parent_block_snapshot.consensus_hash.clone(), - 1, + end_block.block_id(), + end_block.header.consensus_hash, + tenure_len as u32, ) - } - }; + } else { + let hdr = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + if hdr.anchored_header.as_stacks_nakamoto().is_some() { + // building atop nakamoto + let tenure_len = NakamotoChainState::get_nakamoto_tenure_length( + self.chainstate.db(), + &hdr.index_block_hash(), + ) + .unwrap(); + debug!( + "Tenure length of Nakamoto tenure {} is {}; tipped at {}", + &hdr.consensus_hash, + tenure_len, + &hdr.index_block_hash() + ); + (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) + } else { + // building atop epoch2 + debug!( + "Tenure length of epoch2 tenure {} is {}; tipped at {}", + &parent_block_snapshot.consensus_hash, 1, &last_tenure_id + ); + ( + last_tenure_id, + parent_block_snapshot.consensus_hash.clone(), + 1, + ) + } + }; let tenure_change_payload = TenureChangePayload { tenure_consensus_hash: ConsensusHash([0x00; 20]), // will be overwritten @@ -576,7 +605,7 @@ impl TestStacksNode { /// * the block /// * its size /// * its execution cost - /// * a list of malleablized blocks with the same sighash + /// * a list of malleablized blocks with the same contents, if desired pub fn make_nakamoto_tenure_blocks<'a, S, F, G>( chainstate: &mut StacksChainState, sortdb: &mut SortitionDB, @@ -597,6 +626,8 @@ impl TestStacksNode { mut miner_setup: S, mut block_builder: F, mut after_block: G, + malleablize: bool, + mined_canonical: bool, ) -> Vec<(NakamotoBlock, u64, ExecutionCost, Vec<NakamotoBlock>)> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -829,8 +860,9 @@ impl TestStacksNode { coord.handle_new_nakamoto_stacks_block().unwrap(); processed_blocks.push(block_to_store.clone()); - if block_to_store.block_id() == block_id { - // confirm that the chain tip advanced + if block_to_store.block_id() == block_id && mined_canonical { + // confirm that the chain tip advanced -- we intended to mine on the + // canonical tip let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( chainstate.db(), &sortdb, @@ -858,6 +890,11 @@ impl TestStacksNode { } }
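// When `malleablize` is set, the loop falls through below and produces additional copies of this block with different signer signatures; when it is not, block production for the tenure ends with the canonical block just processed.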
+ if !malleablize { + debug!("Will not produce malleablized blocks"); + break; + } + let num_sigs = block_to_store.header.signer_signature.len(); // force this block to have a different sighash, in addition to different @@ -977,7 +1014,6 @@ impl<'a> TestPeer<'a> { StacksBlockId, Option<StacksBlock>, Option<Vec<NakamotoBlock>>, - Option<BlockSnapshot>, ) { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { @@ -987,43 +1023,22 @@ impl<'a> TestPeer<'a> { let first_parent = parent_blocks.first().unwrap(); debug!("First parent is {:?}", first_parent); - let first_parent_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &first_parent.header.consensus_hash, - ) - .unwrap() - .unwrap(); - - assert!(first_parent_sn.sortition); - - let parent_sortition_id = SortitionDB::get_block_commit_parent_sortition_id( - sortdb.conn(), - &first_parent_sn.winning_block_txid, - &first_parent_sn.sortition_id, - ) - .unwrap() - .unwrap(); - let parent_sortition = - SortitionDB::get_block_snapshot(sortdb.conn(), &parent_sortition_id) - .unwrap() - .unwrap(); - - debug!( - "First parent Nakamoto block sortition: {:?}", - &parent_sortition + // sanity check -- this parent must correspond to a sortition + assert!( + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &first_parent.header.consensus_hash, + ) + .unwrap() + .unwrap() + .sortition ); - let parent_sortition_opt = Some(parent_sortition); let last_tenure_id = StacksBlockId::new( &first_parent.header.consensus_hash, &first_parent.header.block_hash(), ); - ( - last_tenure_id, - None, - Some(parent_blocks), - parent_sortition_opt, - ) + (last_tenure_id, None, Some(parent_blocks)) } else { // parent may be an epoch 2.x block let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = @@ -1059,7 +1074,7 @@ impl<'a> TestPeer<'a> { // must be a genesis block (testing only!) StacksBlockId(BOOT_BLOCK_HASH.0.clone()) }; - (last_tenure_id, parent_opt, None, parent_sortition_opt) + (last_tenure_id, parent_opt, None) } } @@ -1080,8 +1095,16 @@ impl<'a> TestPeer<'a> { let mut burn_block = TestBurnchainBlock::new(&tip, 0); let mut stacks_node = self.stacks_node.take().unwrap(); - let (last_tenure_id, parent_block_opt, parent_tenure_opt, parent_sortition_opt) = - Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + let (last_tenure_id, parent_block_opt, parent_tenure_opt) = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + ( + nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), + None, + Some(nakamoto_parent_tenure.clone()), + ) + } else { + Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + }; // find the VRF leader key register tx to use.
// it's the one pointed to by the parent tenure @@ -1345,6 +1368,8 @@ impl<'a> TestPeer<'a> { miner_setup, block_builder, after_block, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), ); let just_blocks = blocks @@ -1435,6 +1460,8 @@ impl<'a> TestPeer<'a> { |_| {}, block_builder, |_| true, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), ); let just_blocks = blocks From 2d21bffb83c8d1ddf07daf03d11cd54ba0fa171d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:07:05 -0400 Subject: [PATCH 655/910] chore: expect() --> unwrap_or_else() --- .../src/chainstate/stacks/db/accounts.rs | 52 ++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 58ffdaeb603..105d3ed516e 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -267,14 +267,26 @@ impl StacksChainState { }) }) .map_err(Error::ClarityError) - .unwrap + .unwrap_or_else(|e| { + error!( + "FATAL: Failed to query account for {:?}: {:?}", + principal, &e + ); + panic!(); + }) } pub fn get_nonce<T: ClarityConnection>(clarity_tx: &mut T, principal: &PrincipalData) -> u64 { clarity_tx .with_clarity_db_readonly(|ref mut db| db.get_account_nonce(principal)) .map_err(|x| Error::ClarityError(x.into())) - .unwrap + .unwrap_or_else(|e| { + error!( + "FATAL: Failed to query account nonce for {:?}: {:?}", + principal, &e + ); + panic!(); + }) } pub fn get_account_ft( @@ -337,7 +349,13 @@ impl StacksChainState { snapshot.save()?; Ok(()) }) - .expect("FATAL: failed to debit account") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to debit account {:?} for {} uSTX: {:?}", + principal, amount, &e + ); + panic!(); + }) } /// Called each time a transaction sends STX to this principal. @@ -358,7 +376,13 @@ impl StacksChainState { info!("{} credited: {} uSTX", principal, new_balance); Ok(()) }) - .expect("FATAL: failed to credit account") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to credit account {:?} for {} uSTX: {:?}", + principal, amount, &e + ); + panic!(); + }) } /// Called during the genesis / boot sequence. @@ -374,7 +398,13 @@ impl StacksChainState { snapshot.save()?; Ok(()) }) - .expect("FATAL: failed to credit account") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to credit genesis account {:?} for {} uSTX: {:?}", + principal, amount, &e + ); + panic!(); + }) } /// Increment an account's nonce @@ -385,11 +415,19 @@ impl StacksChainState { ) { clarity_tx .with_clarity_db(|ref mut db| { - let next_nonce = cur_nonce.checked_add(1).expect("OUT OF NONCES"); + let next_nonce = cur_nonce + .checked_add(1) + .unwrap_or_else(|| panic!("OUT OF NONCES")); db.set_account_nonce(&principal, next_nonce)?; Ok(()) }) - .expect("FATAL: failed to set account nonce") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to update account nonce for account {:?} from {}: {:?}", + principal, cur_nonce, &e + ); + panic!(); + }) } /// Schedule a miner payment in the future.
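Every call site in this commit follows the same shape: a bare `expect()` becomes an `unwrap_or_else()` that logs the failing principal and the error before panicking, so a crash leaves enough context to debug. A minimal sketch of the pattern outside the chainstate (the map and names below are stand-ins, not code from this repository):

    use std::collections::HashMap;

    fn balance_of(balances: &HashMap<String, u64>, principal: &str) -> u64 {
        balances.get(principal).copied().unwrap_or_else(|| {
            // Log the failing principal before aborting; a bare expect() would lose it.
            eprintln!("FATAL: no balance entry for {principal}");
            panic!();
        })
    }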
From cde0547c57f5620ec41029e3195a0d8e699115fb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:07:35 -0400 Subject: [PATCH 656/910] feat: address #5044 by tracking loaded tenure data by stacks tip, and making it so that a descendant of a stacks tip can inherit its cached data --- stackslib/src/net/inv/nakamoto.rs | 157 +++++++++++++++++++++++++----- 1 file changed, 132 insertions(+), 25 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index f24ad1a87ce..9be77949b11 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -35,12 +35,13 @@ use crate::net::{ }; use crate::util_lib::db::Error as DBError; +const TIP_ANCESTOR_SEARCH_DEPTH: u64 = 10; + /// Cached data for a sortition in the sortition DB. /// Caching this allows us to avoid calls to `SortitionDB::get_block_snapshot_consensus()`. #[derive(Clone, Debug, PartialEq)] pub(crate) struct InvSortitionInfo { parent_consensus_hash: ConsensusHash, - block_height: u64, } impl InvSortitionInfo { @@ -57,7 +58,6 @@ impl InvSortitionInfo { Ok(Self { parent_consensus_hash: parent_sn.consensus_hash, - block_height: sn.block_height, }) } } @@ -105,8 +105,14 @@ impl InvTenureInfo { /// in sync. By caching (immutable) tenure data in this struct, we can ensure that this happens /// all the time except for during node bootup. pub struct InvGenerator { - processed_tenures: HashMap<ConsensusHash, Option<InvTenureInfo>>, + /// Map stacks tips to a table of (tenure ID, optional tenure info) + processed_tenures: HashMap<StacksBlockId, HashMap<ConsensusHash, Option<InvTenureInfo>>>, + /// Map consensus hashes to sortition data about them sortitions: HashMap<ConsensusHash, InvSortitionInfo>, + /// how far back to search for ancestor Stacks blocks when processing a new tip + tip_ancestor_search_depth: u64, + /// count cache misses for `processed_tenures` + cache_misses: u128, } impl InvGenerator { @@ -114,24 +120,134 @@ impl InvGenerator { Self { processed_tenures: HashMap::new(), sortitions: HashMap::new(), + tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, + cache_misses: 0, + } + } + + pub fn with_tip_ancestor_search_depth(mut self, depth: u64) -> Self { + self.tip_ancestor_search_depth = depth; + self + } + + #[cfg(test)] + pub(crate) fn cache_misses(&self) -> u128 { + self.cache_misses + } + + /// Find the highest ancestor of `tip_block_id` that has an entry in `processed_tenures`. + /// Search up to `self.tip_ancestor_search_depth` ancestors back. + /// + /// The intuition here is that `tip_block_id` is the highest block known to the node, and it + /// can advance when new blocks are processed. We associate a set of cached processed tenures with + /// each tip, but if the tip advances, we simply move the cached processed tenures "up to" the + /// new tip instead of reloading them from disk each time. + /// + /// However, searching for an ancestor tip incurs a sqlite DB read, so we want to bound the + /// search depth. In practice, the bound on this depth would be derived from how often the + /// chain tip changes relative to how often we serve up inventory data. The depth should be + /// the maximum expected number of blocks to be processed in-between handling `GetNakamotoInv` + /// messages. + /// + /// If found, then return the ancestor block ID represented in `self.processed_tenures`. + /// If not, then return None.
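The cache hand-off described above can be pictured with plain standard-library types (a toy sketch only; `TipId` and `TenureId` stand in for the real `StacksBlockId` and `ConsensusHash` keys):

    use std::collections::HashMap;

    type TipId = u64;
    type TenureId = u32;

    /// Re-key the cached tenure table from an ancestor tip to the tip that
    /// replaced it, so the cache follows the advancing fork instead of being
    /// rebuilt from disk.
    fn move_cache_to_descendant(
        caches: &mut HashMap<TipId, HashMap<TenureId, Option<String>>>,
        ancestor: TipId,
        new_tip: TipId,
    ) {
        let table = caches.remove(&ancestor).unwrap_or_default();
        caches.insert(new_tip, table);
    }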
+ pub(crate) fn find_ancestor_processed_tenures( + &self, + chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, + ) -> Result<Option<StacksBlockId>, NetError> { + let mut cursor = tip_block_id.clone(); + for _ in 0..self.tip_ancestor_search_depth { + let parent_id_opt = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor)?; + let Some(parent_id) = parent_id_opt else { + return Ok(None); + }; + if self.processed_tenures.contains_key(&parent_id) { + return Ok(Some(parent_id)); + } + cursor = parent_id; } + Ok(None) } - /// Get a processed tenure. If it's not cached, then load it. - /// Returns Some(..) if there existed a tenure-change tx for this given consensus hash - fn get_processed_tenure( + /// Get a processed tenure. If it's not cached, then load it from disk. + /// + /// Loading it is expensive, so once loaded, store it with the cached processed tenure map + /// associated with `tip_block_id`. + /// + /// If there is no such map, then see if a recent ancestor of `tip_block_id` is represented. If + /// so, then remove that map and associate it with `tip_block_id`. This way, as the blockchain + /// advances, cached tenure information for the same Stacks fork stays associated with that + /// fork's chain tip (assuming this code gets run sufficiently often relative to the + /// advancement of the `tip_block_id` tip value). + /// + /// Returns Ok(Some(..)) if there existed a tenure-change tx for this given consensus hash + /// Returns Ok(None) if not + /// Returns Err(..) on DB error + pub(crate) fn get_processed_tenure( &mut self, chainstate: &StacksChainState, tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result<Option<InvTenureInfo>, NetError> { - // TODO: MARF-aware cache - // not cached so go load it - let loaded_info_opt = - InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; - self.processed_tenures - .insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); - Ok(loaded_info_opt) + if self.processed_tenures.get(tip_block_id).is_none() { + // this tip has no known table. + // does it have an ancestor with a table? If so, then move its ancestor's table to this + // tip. Otherwise, make a new table. + if let Some(ancestor_tip_id) = + self.find_ancestor_processed_tenures(chainstate, tip_block_id)?
+ { + let ancestor_tenures = self + .processed_tenures + .remove(&ancestor_tip_id) + .unwrap_or_else(|| { + panic!("FATAL: did not have ancestor tip reported by search"); + }); + + self.processed_tenures + .insert(tip_block_id.clone(), ancestor_tenures); + } else { + self.processed_tenures + .insert(tip_block_id.clone(), HashMap::new()); + } + } + + let Some(tenure_infos) = self.processed_tenures.get_mut(tip_block_id) else { + unreachable!("FATAL: inserted table for chain tip, but didn't get it back"); + }; + + // this tip has a known table + if let Some(loaded_tenure_info) = tenure_infos.get_mut(tenure_id_consensus_hash) { + // we've loaded this tenure info before for this tip + return Ok(loaded_tenure_info.clone()); + } else { + // we have not loaded the tenure info for this tip, so go get it + let loaded_info_opt = + InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; + tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); + + self.cache_misses = self.cache_misses.saturating_add(1); + return Ok(loaded_info_opt); + } + } + + /// Get sortition info, loading it from our cache if needed + pub(crate) fn get_sortition_info( + &mut self, + sortdb: &SortitionDB, + cur_consensus_hash: &ConsensusHash, + ) -> Result<&InvSortitionInfo, NetError> { + if !self.sortitions.contains_key(cur_consensus_hash) { + let loaded_info = InvSortitionInfo::load(sortdb, cur_consensus_hash)?; + self.sortitions + .insert(cur_consensus_hash.clone(), loaded_info); + }; + + Ok(self + .sortitions + .get(cur_consensus_hash) + .expect("infallible: just inserted this data")) } /// Generate an block inventory bit vector for a reward cycle. @@ -210,19 +326,10 @@ impl InvGenerator { // done scanning this reward cycle break; } - let cur_sortition_info = if let Some(info) = self.sortitions.get(&cur_consensus_hash) { - info - } else { - let loaded_info = InvSortitionInfo::load(sortdb, &cur_consensus_hash)?; - self.sortitions - .insert(cur_consensus_hash.clone(), loaded_info); - self.sortitions - .get(&cur_consensus_hash) - .expect("infallible: just inserted this data") - }; - let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash.clone(); + let cur_sortition_info = self.get_sortition_info(sortdb, &cur_consensus_hash)?; + let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash; - debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, cur_sortition_info = {:?}", cur_height, &cur_consensus_hash, &cur_tenure_opt, cur_sortition_info); + debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, parent_sortition_consensus_hash = {}", cur_height, &cur_consensus_hash, &cur_tenure_opt, &parent_sortition_consensus_hash); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... From 927a099e226a3bf70e7720dbc73ecff24888953f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:08:11 -0400 Subject: [PATCH 657/910] feat: API for choosing which Nakamoto tenure to mine on --- stackslib/src/net/mod.rs | 47 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 6ad3c5501ca..df3560f3b15 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2275,6 +2275,10 @@ pub mod test { >, /// list of malleablized blocks produced when mining. 
pub malleablized_blocks: Vec<NakamotoBlock>, + pub mine_malleablized_blocks: bool, + /// tenure-start block of tenure to mine on. + /// gets consumed on the call to begin_nakamoto_tenure + pub nakamoto_parent_tenure_opt: Option<Vec<NakamotoBlock>>, } impl<'a> TestPeer<'a> { @@ -2689,6 +2693,8 @@ pub mod test { coord: coord, indexer: Some(indexer), malleablized_blocks: vec![], + mine_malleablized_blocks: true, + nakamoto_parent_tenure_opt: None, } } @@ -3509,6 +3515,10 @@ impl<'a> TestPeer<'a> { self.sortdb.as_mut().unwrap() } + pub fn sortdb_ref(&mut self) -> &SortitionDB { + self.sortdb.as_ref().unwrap() + } + pub fn with_db_state<F, R>(&mut self, f: F) -> Result<R, net_error> where F: FnOnce( @@ -4198,6 +4208,43 @@ pub mod test { } } } + + /// Set the nakamoto tenure to mine on + pub fn mine_nakamoto_on(&mut self, parent_tenure: Vec<NakamotoBlock>) { + self.nakamoto_parent_tenure_opt = Some(parent_tenure); + } + + /// Clear the tenure to mine on. This causes the miner to build on the canonical tip + pub fn mine_nakamoto_on_canonical_tip(&mut self) { + self.nakamoto_parent_tenure_opt = None; + } + + /// Get an account off of a tip + pub fn get_account( + &mut self, + tip: &StacksBlockId, + account: &PrincipalData, + ) -> StacksAccount { + let sortdb = self.sortdb.take().expect("FATAL: sortdb not restored"); + let mut node = self + .stacks_node + .take() + .expect("FATAL: chainstate not restored"); + + let acct = node + .chainstate + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(&node.chainstate, tip).unwrap(), + tip, + |clarity_tx| StacksChainState::get_account(clarity_tx, account), + ) + .unwrap() + .unwrap(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + acct + } } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { From 64a272bfebf646d1cf6d1c3690c5ba725a8bf602 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:08:22 -0400 Subject: [PATCH 658/910] feat: unit tests for inv generator with caching behavior --- stackslib/src/net/tests/inv/nakamoto.rs | 598 +++++++++++++++++++++++- 1 file changed, 595 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index fd9f1dcc1f6..2f600272070 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -46,7 +46,7 @@ use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::comms::NeighborComms; -use crate::net::test::{TestEventObserver, TestPeer}; +use crate::net::test::{to_addr, TestEventObserver, TestPeer}; use crate::net::tests::{NakamotoBootPlan, NakamotoBootStep, NakamotoBootTenure}; use crate::net::{ Error as NetError, GetNakamotoInvData, HandshakeData, NakamotoInvData, NeighborAddress, @@ -806,7 +806,7 @@ fn test_nakamoto_inv_sync_state_machine() { vec![ true, false, false, false, false, false, false, true, true, true, ], - // atlernating rc + // alternating rc vec![ false, true, false, true, false, true, false, true, true, true, ], @@ -938,7 +938,7 @@ fn test_nakamoto_inv_sync_across_epoch_change() { vec![ true, false, false, false, false, false, false, true, true, true, ], - // atlernating rc + // alternating rc vec![ false, true, false, true, false, true, false, true, true, true, ], @@ -1077,3 +1077,595 @@
to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + peer.refresh_burnchain_view(); + peer.mine_malleablized_blocks = false; + + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + + // + // ---------------------- basic operations ---------------------- + // + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let first_burn_block_height = sortdb.first_block_height; + + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + let naka_tip = peer.network.stacks_tip.block_id(); + let first_naka_tip = naka_tip.clone(); + let first_sort_tip = sort_tip.clone(); + + // find the first block in this tenure + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + let naka_tenure_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + &naka_tip, + &naka_tip_header.consensus_hash, + ) + .unwrap() + .unwrap(); + let (naka_tenure_start_block, _) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&naka_tenure_start_header.index_block_hash()) + .unwrap() + .unwrap(); + + assert_eq!(invgen.cache_misses(), 0); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, vec![true, true]); + assert_eq!(invgen.cache_misses(), 3); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, vec![true, true]); + assert_eq!(invgen.cache_misses(), 3); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + vec![false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 13); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + vec![false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 13); + + // + // ---------------------- the inv generator can keep up with new blocks ---------------------- + // + + let mut expected_bits = vec![true, true]; + let mut expected_cache_misses = 13; + let mut naka_tip_block = None; + + for i in 0..3 { + let (naka_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced {}: {:?}", + &naka_block.block_id(), + &naka_block + ); + + peer.refresh_burnchain_view(); + let naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + // only one additional cache miss + expected_bits.push(true); + expected_cache_misses += 1; + + assert_eq!(bits, expected_bits); + assert_eq!(invgen.cache_misses(), expected_cache_misses); + + naka_tip_block = Some(naka_block); + } + + let naka_tip_block = naka_tip_block.unwrap(); + + peer.refresh_burnchain_view(); + let naka_tip = peer.network.stacks_tip.block_id(); + + // + // ---------------------- the inv generator can track multiple forks at once ---------------------- + // + + peer.mine_nakamoto_on(vec![naka_tenure_start_block.clone()]); + let (fork_naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced fork {}: {:?}", + &fork_naka_block.block_id(), + &fork_naka_block + ); + + peer.refresh_burnchain_view(); + let new_naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + // this will not have reorged + assert_eq!(naka_tip, new_naka_tip); + + // load inv off of the canonical tip. + // It should show a missed sortition. + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &naka_tip, tip_rc, &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + assert_eq!(bits, [true, true, true, true, true, false]); + assert_eq!(invgen.cache_misses(), 17); + + // load inv off of the non-canonical tip. + // it should show the last 3 canonical tenures as missing, and this forked block as present + let bits = invgen + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.block_id(), + tip_rc, + ) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &fork_naka_block.block_id(), + tip_rc, + &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + assert_eq!(bits, [true, true, false, false, false, true]); + assert_eq!(invgen.cache_misses(), 21); + + // add more to the fork + peer.mine_nakamoto_on(vec![fork_naka_block.clone()]); + + let (fork_naka_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced fork {}: {:?}", + &fork_naka_block.block_id(), + &fork_naka_block + ); + + peer.refresh_burnchain_view(); + let new_naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + // this will not have reorged (yet) + assert_eq!(naka_tip, new_naka_tip); + + // load inv off of the canonical tip. + // It should show two missed sortitions, for each fork. + // only one additional cache miss + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &naka_tip, tip_rc, &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + assert_eq!(bits, [true, true, true, true, true, false, false]); + assert_eq!(invgen.cache_misses(), 22); + + // load inv off of the non-canonical tip again. + // it should show the last 3 last canonical tenures as missing, and this forked block as + // present. Only one additional cache miss should manifest. + let bits = invgen + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.block_id(), + tip_rc, + ) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &fork_naka_block.block_id(), + tip_rc, + &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + // only one more cache miss + assert_eq!(bits, [true, true, false, false, false, true, true]); + assert_eq!(invgen.cache_misses(), 23); + + // load inv off of the canonical tip again. + // It should show two missed sortitions. + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &naka_tip, tip_rc, &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + // no new cache misses + assert_eq!(bits, [true, true, true, true, true, false, false]); + assert_eq!(invgen.cache_misses(), 23); + + // + // ---------------------- the inv generator will search only a maximum depth before giving up ---------------------- + // + + // advance the canonical chain by 3 more blocks, so the delta between `first_naka_tip` and + // `naka_tip` is now 6 blocks + peer.mine_nakamoto_on(vec![naka_tip_block.clone()]); + for i in 0..3 { + let (naka_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced {}: {:?}", + &naka_block.block_id(), + &naka_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![naka_block.clone()]); + } + let naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // new inv generator with a search depth of 3 + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(3); + + // load an old tip on the canonical chain + let bits = invgen + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 3); + + // load a descendant that is 6 blocks higher + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + assert_eq!( + bits, + [true, true, true, true, true, false, false, true, true, true] + ); + + // all 10 tenures were loaded, because we had to search more than 5 blocks back + assert_eq!(invgen.cache_misses(), 12); + + // new inv generator with a search depth of 10 + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(10); + + // load an old tip on the canonical chain + let bits = invgen + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 3); + + // load a descendant that is 6 blocks higher + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + assert_eq!( + bits, + [true, true, true, true, true, false, false, true, true, true] + ); + + // reused old canonical tip information + assert_eq!(invgen.cache_misses(), 9); +} + +#[test] +fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + // sparse rc + vec![ + true, false, false, false, false, false, false, true, true, true, + ], + // alternating rc + vec![ + false, true, false, true, false, true, false, true, true, true, + ], + // sparse rc + vec![ + false, false, false, false, false, false, true, true, true, true, + ], + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + peer.refresh_burnchain_view(); + peer.mine_malleablized_blocks = false; + + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let first_burn_block_height = sortdb.first_block_height; + + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + let naka_tip = peer.network.stacks_tip.block_id(); + let first_naka_tip = naka_tip.clone(); + let first_sort_tip = sort_tip.clone(); + + // find the first block in this tenure + let naka_tip_header =
NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + let naka_tenure_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + &naka_tip, + &naka_tip_header.consensus_hash, + ) + .unwrap() + .unwrap(); + let (naka_tenure_start_block, _) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&naka_tenure_start_header.index_block_hash()) + .unwrap() + .unwrap(); + + assert_eq!(invgen.cache_misses(), 0); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 3); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 13); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, false, false, false, false, false, true, true] + ); + assert_eq!(invgen.cache_misses(), 17); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, true, false, true, false, true, false, true] + ); + assert_eq!(invgen.cache_misses(), 23); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, false, false, false, false, false, false, true] + ); + assert_eq!(invgen.cache_misses(), 27); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + // load them all again. cache misses should remain the same. 
+ let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, false, false, false, false, false, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, true, false, true, false, true, false, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, false, false, false, false, false, false, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); +} From a2c912db208e4df6c71ecdb3c52a24f801d8bcec Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 15:09:37 -0700 Subject: [PATCH 659/910] CRC: move monitor_signers to its own mod Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 4 +- stacks-signer/src/client/stacks_client.rs | 22 +- stacks-signer/src/lib.rs | 4 + stacks-signer/src/main.rs | 319 +-------------------- stacks-signer/src/monitor_signers.rs | 331 ++++++++++++++++++++++ stacks-signer/src/utils.rs | 24 ++ 6 files changed, 379 insertions(+), 325 deletions(-) create mode 100644 stacks-signer/src/monitor_signers.rs create mode 100644 stacks-signer/src/utils.rs diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 37e9218a9d3..c691e7bb697 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -269,10 +269,10 @@ pub struct MonitorSignersArgs { /// Whether the node is mainnet. Default is false. #[arg(long, default_value = "false")] pub mainnet: bool, - /// Set the polling interval in seconds. Default is 60 seconds. + /// Set the polling interval in seconds. #[arg(long, short, default_value = "60")] pub interval: u64, - /// Max age in seconds before a signer message is considered stale. Default is 1200 seconds. + /// Max age in seconds before a signer message is considered stale. 
#[arg(long, short, default_value = "1200")] pub max_age: u64, } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 25431164fc9..5898258f608 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -228,16 +228,18 @@ impl StacksClient { // Get the signer writers from the stacker-db to find the signer slot id let stackerdb_signer_slots = self.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; - let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); - for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { - signer_slot_ids.insert( - address, - SignerSlotID( - u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), - ), - ); - } - Ok(signer_slot_ids) + Ok(stackerdb_signer_slots + .into_iter() + .enumerate() + .map(|(index, (address, _))| { + ( + address, + SignerSlotID( + u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), + ), + ) + }) + .collect()) } /// Get the vote for a given round, reward cycle, and signer address diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index c61ae397312..9d8a22a3206 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -29,12 +29,16 @@ pub mod cli; pub mod client; /// The configuration module for the signer pub mod config; +/// The signer monitor for observing signer behaviours in the network +pub mod monitor_signers; /// The monitoring server for the signer pub mod monitoring; /// The primary runloop for the signer pub mod runloop; /// The signer state module pub mod signerdb; +/// The util module for the signer +pub mod utils; /// The v0 implementation of the signer. This does not include WSTS support pub mod v0; /// The v1 implementation of the signer. 
This includes WSTS support diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 513382e843b..5b118db646f 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -26,41 +26,30 @@ extern crate serde; extern crate serde_json; extern crate toml; -use std::collections::HashMap; use std::io::{self, Write}; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; -use clarity::codec::read_next; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use clarity::types::StacksEpochId; +use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; -use clarity::vm::types::QualifiedContractIdentifier; -use libsigner::v0::messages::{MessageSlotID, SignerMessage}; -use libsigner::{SignerSession, StackerDBSession}; +use libsigner::SignerSession; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; +use slog::{slog_debug, slog_error}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{debug, error, info, warn}; +use stacks_common::{debug, error}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::GlobalConfig; +use stacks_signer::monitor_signers::SignerMonitor; +use stacks_signer::utils::stackerdb_session; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; -/// Create a new stacker db session -fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> StackerDBSession { - let mut session = StackerDBSession::new(host, contract.clone()); - session.connect(host.to_string(), contract).unwrap(); - session -} - /// Write the chunk to stdout fn write_chunk_to_stdout(chunk_opt: Option>) { if let Some(chunk) = chunk_opt.as_ref() { @@ -209,302 +198,6 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } -struct SignerMonitor { - stacks_client: StacksClient, - cycle_state: RewardCycleState, - args: MonitorSignersArgs, -} - -#[derive(Debug, Default, Clone)] -struct RewardCycleState { - signers_slots: HashMap, - signers_keys: HashMap, - signers_addresses: HashMap, - signers_weights: HashMap, - slot_ids: Vec, - /// Reward cycle is not known until the first successful call to the node - reward_cycle: Option, -} - -impl SignerMonitor { - fn new(args: MonitorSignersArgs) -> Self { - url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); - let stacks_client = StacksClient::new( - StacksPrivateKey::new(), // We don't need a private key to read - args.host.clone(), - "FOO".to_string(), // We don't care about authorized paths. Just accessing public info - args.mainnet, - ); - Self { - stacks_client, - cycle_state: RewardCycleState::default(), - args, - } - } - - fn refresh_state(&mut self) -> Result { - let reward_cycle = self - .stacks_client - .get_current_reward_cycle_info()? - .reward_cycle; - if Some(reward_cycle) == self.cycle_state.reward_cycle { - // The reward cycle has not changed. Nothing to refresh. 
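The unchanged-cycle check above is the crux of `refresh_state`: per-cycle signer tables are rebuilt only when the node reports a new reward cycle, and the boolean result tells the caller whether its cached sessions are now stale. A minimal sketch of that refresh-on-change shape, with hypothetical names:

```rust
/// Sketch of per-reward-cycle state that is only rebuilt on a cycle change.
#[derive(Default)]
struct CycleView {
    reward_cycle: Option<u64>,
    // per-cycle lookup tables (signer slots, keys, weights) would live here
}

impl CycleView {
    /// Returns true when the cycle advanced and state was rebuilt.
    fn refresh(&mut self, node_cycle: u64) -> bool {
        if self.reward_cycle == Some(node_cycle) {
            // Same cycle: cached tables are still valid.
            return false;
        }
        // New cycle: drop stale state and rebuild from the node.
        self.reward_cycle = Some(node_cycle);
        true
    }
}

fn main() {
    let mut view = CycleView::default();
    assert!(view.refresh(42)); // first sight of cycle 42: rebuild
    assert!(!view.refresh(42)); // unchanged: nothing to do
    assert!(view.refresh(43)); // cycle advanced: rebuild again
}
```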
- return Ok(false); - } - self.cycle_state.reward_cycle = Some(reward_cycle); - - self.cycle_state.signers_keys.clear(); - self.cycle_state.signers_addresses.clear(); - - self.cycle_state.signers_slots = - self.stacks_client.get_parsed_signer_slots(reward_cycle)?; - - let entries = self - .stacks_client - .get_reward_set_signers(reward_cycle)? - .unwrap_or_else(|| { - panic!("No signers found for the current reward cycle {reward_cycle}") - }); - for entry in entries { - let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); - self.cycle_state - .signers_keys - .insert(stacks_address, public_key); - self.cycle_state - .signers_weights - .insert(stacks_address, entry.weight); - } - for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { - self.cycle_state - .signers_addresses - .insert(*slot_id, *signer_address); - } - - for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { - self.cycle_state - .signers_addresses - .insert(*slot_id, *signer_address); - self.cycle_state.slot_ids.push(slot_id.0); - } - Ok(true) - } - - fn print_missing_signers(&self, missing_signers: &[StacksAddress]) { - if missing_signers.is_empty() { - return; - } - let formatted_signers = missing_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = self - .cycle_state - .signers_keys - .iter() - .filter_map(|(addr, key)| { - if missing_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - let missing_weight = missing_signers - .iter() - .map(|addr| self.cycle_state.signers_weights.get(addr).unwrap()) - .sum::(); - let total_weight = self.cycle_state.signers_weights.values().sum::(); - let percentage_missing = missing_weight as f64 / total_weight as f64 * 100.00; - warn!( - "Missing messages for {} of {} signer(s). 
Missing {percentage_missing:.2}% of signing weight ({missing_weight}/{total_weight})", missing_signers.len(), self.cycle_state.signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); - } - - fn print_stale_signers(&self, stale_signers: &[StacksAddress]) { - if stale_signers.is_empty() { - return; - } - let formatted_signers = stale_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = self - .cycle_state - .signers_keys - .iter() - .filter_map(|(addr, key)| { - if stale_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "No new updates from {} of {} signer(s) in over {} seconds", - stale_signers.len(), - self.cycle_state.signers_addresses.len(), - self.args.max_age; - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); - } - - fn print_unexpected_messages( - &self, - unexpected_messages: &HashMap, - ) { - if unexpected_messages.is_empty() { - return; - } - let formatted_signers = unexpected_messages - .iter() - .map(|(addr, (msg, slot))| { - format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") - }) - .collect::>() - .join(", "); - let formatted_keys = self - .cycle_state - .signers_keys - .iter() - .filter_map(|(addr, key)| { - if unexpected_messages.contains_key(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "Unexpected messages from {} of {} signer(s).", - unexpected_messages.len(), - self.cycle_state.signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); - } - - /// Start monitoring the signers stackerdb slots for expected new messages - pub fn start(&mut self) -> Result<(), ClientError> { - self.refresh_state()?; - let nmb_signers = self.cycle_state.signers_keys.len(); - let interval_ms = self.args.interval * 1000; - let reward_cycle = self - .cycle_state - .reward_cycle - .expect("BUG: reward cycle not set"); - let contract = - MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); - info!( - "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", - self.args.interval, self.args.max_age - ); - let mut session = stackerdb_session(&self.args.host, contract); - info!("Confirming messages for {nmb_signers} registered signers"; - "signer_addresses" => self.cycle_state.signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") - ); - let mut last_messages = HashMap::with_capacity(nmb_signers); - let mut last_updates = HashMap::with_capacity(nmb_signers); - loop { - info!("Polling signers stackerdb for new messages..."); - let mut missing_signers = Vec::with_capacity(nmb_signers); - let mut stale_signers = Vec::with_capacity(nmb_signers); - let mut unexpected_messages = HashMap::new(); - - if self.refresh_state()? { - let reward_cycle = self - .cycle_state - .reward_cycle - .expect("BUG: reward cycle not set"); - let contract = MessageSlotID::BlockResponse - .stacker_db_contract(self.args.mainnet, reward_cycle); - info!( - "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", - ); - session = stackerdb_session(&self.args.host, contract); - // Clear the last messages and signer last update times. 
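Clearing `last_messages` and `last_updates` on a cycle change feeds the staleness check later in the loop: each accepted message stamps its slot with `Instant::now()`, and any slot whose stamp is older than `max_age` is flagged as stale. A minimal sketch of that check (bare `u32` slot ids for brevity, not the real `SignerSlotID` type):

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Return the slot ids whose last update is older than `max_age`.
fn stale_slots(last_updates: &HashMap<u32, Instant>, max_age: Duration) -> Vec<u32> {
    last_updates
        .iter()
        .filter(|(_, stamp)| stamp.elapsed() > max_age)
        .map(|(slot, _)| *slot)
        .collect()
}

fn main() {
    let mut last_updates = HashMap::new();
    last_updates.insert(7u32, Instant::now()); // a message just arrived on slot 7
    // Immediately after an update, nothing exceeds a 1200s max age.
    assert!(stale_slots(&last_updates, Duration::from_secs(1200)).is_empty());
}
```

Note that in the loop above a slot only enters `last_updates` after at least one valid message has been accepted for it; before that, the signer is reported through the `missing_signers` path instead.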
- last_messages.clear(); - last_updates.clear(); - } - let new_messages: Vec<_> = session - .get_latest_chunks(&self.cycle_state.slot_ids)? - .into_iter() - .map(|chunk_opt| { - chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) - }) - .collect(); - - for (signer_message_opt, slot_id) in - new_messages.into_iter().zip(&self.cycle_state.slot_ids) - { - let signer_slot_id = SignerSlotID(*slot_id); - let signer_address = *self - .cycle_state - .signers_addresses - .get(&signer_slot_id) - .expect("BUG: missing signer address for given slot id"); - let Some(signer_message) = signer_message_opt else { - missing_signers.push(signer_address); - continue; - }; - if let Some(last_message) = last_messages.get(&signer_slot_id) { - if last_message == &signer_message { - continue; - } - } - let epoch = self.stacks_client.get_node_epoch()?; - if epoch < StacksEpochId::Epoch25 { - return Err(ClientError::UnsupportedStacksFeature(format!("Monitoring signers is only supported for Epoch 2.5 and later. Current epoch: {epoch:?}"))); - } - if (epoch == StacksEpochId::Epoch25 - && !matches!(signer_message, SignerMessage::MockSignature(_))) - || (epoch > StacksEpochId::Epoch25 - && !matches!(signer_message, SignerMessage::BlockResponse(_))) - { - unexpected_messages.insert(signer_address, (signer_message, signer_slot_id)); - continue; - } - last_messages.insert(signer_slot_id, signer_message); - last_updates.insert(signer_slot_id, std::time::Instant::now()); - } - for (slot_id, last_update_time) in last_updates.iter() { - if last_update_time.elapsed().as_secs() > self.args.max_age { - let address = self - .cycle_state - .signers_addresses - .get(slot_id) - .expect("BUG: missing signer address for given slot id"); - stale_signers.push(*address); - } - } - if missing_signers.is_empty() - && stale_signers.is_empty() - && unexpected_messages.is_empty() - { - info!( - "All {} signers are sending messages as expected.", - nmb_signers - ); - } else { - self.print_missing_signers(&missing_signers); - self.print_stale_signers(&stale_signers); - self.print_unexpected_messages(&unexpected_messages); - } - sleep_ms(interval_ms); - } - } -} - fn main() { let cli = Cli::parse(); diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs new file mode 100644 index 00000000000..fdddef64ea4 --- /dev/null +++ b/stacks-signer/src/monitor_signers.rs @@ -0,0 +1,331 @@ +use std::collections::HashMap; + +use clarity::codec::read_next; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use clarity::types::StacksEpochId; +use clarity::util::sleep_ms; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; +use libsigner::SignerSession; +use slog::{slog_info, slog_warn}; +use stacks_common::{info, warn}; + +use crate::cli::MonitorSignersArgs; +use crate::client::{ClientError, SignerSlotID, StacksClient}; +use crate::utils::stackerdb_session; + +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// The `SignerMonitor` struct is used to monitor the signers stackerdb slots for expected new messages +pub struct SignerMonitor { + /// The client being used to monitor stackerdb messages + stacks_client: StacksClient, + /// The current view of the reward cycle + cycle_state: RewardCycleState, + /// The arguments used to configure the monitor + args: MonitorSignersArgs, +} + +#[derive(Debug, Default, Clone)] +/// The `RewardCycleState` struct is used to store the current reward cycle view +pub struct RewardCycleState { + signers_slots: HashMap, + signers_keys: HashMap, + signers_addresses: HashMap, + signers_weights: HashMap, + slot_ids: Vec, + /// Reward cycle is not known until the first successful call to the node + reward_cycle: Option, +} + +impl SignerMonitor { + /// Create a new `SignerMonitor` instance from the given command line args + pub fn new(args: MonitorSignersArgs) -> Self { + url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), // We don't need a private key to read + args.host.clone(), + "FOO".to_string(), // We don't care about authorized paths. Just accessing public info + args.mainnet, + ); + Self { + stacks_client, + cycle_state: RewardCycleState::default(), + args, + } + } + + fn refresh_state(&mut self) -> Result { + let reward_cycle = self + .stacks_client + .get_current_reward_cycle_info()? + .reward_cycle; + if Some(reward_cycle) == self.cycle_state.reward_cycle { + // The reward cycle has not changed. Nothing to refresh. + return Ok(false); + } + self.cycle_state.reward_cycle = Some(reward_cycle); + + self.cycle_state.signers_keys.clear(); + self.cycle_state.signers_addresses.clear(); + + self.cycle_state.signers_slots = + self.stacks_client.get_parsed_signer_slots(reward_cycle)?; + + let entries = self + .stacks_client + .get_reward_set_signers(reward_cycle)? 
+ .unwrap_or_else(|| { + panic!("No signers found for the current reward cycle {reward_cycle}") + }); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); + self.cycle_state + .signers_keys + .insert(stacks_address, public_key); + self.cycle_state + .signers_weights + .insert(stacks_address, entry.weight); + } + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + } + + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + self.cycle_state.slot_ids.push(slot_id.0); + } + Ok(true) + } + + fn print_missing_signers(&self, missing_signers: &[StacksAddress]) { + if missing_signers.is_empty() { + return; + } + let formatted_signers = missing_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if missing_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + let missing_weight = missing_signers + .iter() + .map(|addr| self.cycle_state.signers_weights.get(addr).unwrap()) + .sum::(); + let total_weight = self.cycle_state.signers_weights.values().sum::(); + let percentage_missing = missing_weight as f64 / total_weight as f64 * 100.00; + warn!( + "Missing messages for {} of {} signer(s). Missing {percentage_missing:.2}% of signing weight ({missing_weight}/{total_weight})", missing_signers.len(), self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + fn print_stale_signers(&self, stale_signers: &[StacksAddress]) { + if stale_signers.is_empty() { + return; + } + let formatted_signers = stale_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if stale_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "No new updates from {} of {} signer(s) in over {} seconds", + stale_signers.len(), + self.cycle_state.signers_addresses.len(), + self.args.max_age; + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + fn print_unexpected_messages( + &self, + unexpected_messages: &HashMap, + ) { + if unexpected_messages.is_empty() { + return; + } + let formatted_signers = unexpected_messages + .iter() + .map(|(addr, (msg, slot))| { + format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") + }) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if unexpected_messages.contains_key(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "Unexpected messages from {} of {} signer(s).", + unexpected_messages.len(), + self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + /// Start monitoring the signers stackerdb slots for expected new messages + pub fn start(&mut self) -> Result<(), ClientError> { + 
self.refresh_state()?; + let nmb_signers = self.cycle_state.signers_keys.len(); + let interval_ms = self.args.interval * 1000; + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = + MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", + self.args.interval, self.args.max_age + ); + let mut session = stackerdb_session(&self.args.host, contract); + info!("Confirming messages for {nmb_signers} registered signers"; + "signer_addresses" => self.cycle_state.signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); + let mut last_messages = HashMap::with_capacity(nmb_signers); + let mut last_updates = HashMap::with_capacity(nmb_signers); + loop { + info!("Polling signers stackerdb for new messages..."); + let mut missing_signers = Vec::with_capacity(nmb_signers); + let mut stale_signers = Vec::with_capacity(nmb_signers); + let mut unexpected_messages = HashMap::new(); + + if self.refresh_state()? { + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = MessageSlotID::BlockResponse + .stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", + ); + session = stackerdb_session(&self.args.host, contract); + // Clear the last messages and signer last update times. + last_messages.clear(); + last_updates.clear(); + } + let new_messages: Vec<_> = session + .get_latest_chunks(&self.cycle_state.slot_ids)? + .into_iter() + .map(|chunk_opt| { + chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + }) + .collect(); + + for (signer_message_opt, slot_id) in + new_messages.into_iter().zip(&self.cycle_state.slot_ids) + { + let signer_slot_id = SignerSlotID(*slot_id); + let signer_address = *self + .cycle_state + .signers_addresses + .get(&signer_slot_id) + .expect("BUG: missing signer address for given slot id"); + let Some(signer_message) = signer_message_opt else { + missing_signers.push(signer_address); + continue; + }; + if let Some(last_message) = last_messages.get(&signer_slot_id) { + if last_message == &signer_message { + continue; + } + } + let epoch = self.stacks_client.get_node_epoch()?; + if epoch < StacksEpochId::Epoch25 { + return Err(ClientError::UnsupportedStacksFeature(format!("Monitoring signers is only supported for Epoch 2.5 and later. 
Current epoch: {epoch:?}"))); + } + if (epoch == StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::MockSignature(_))) + || (epoch > StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::BlockResponse(_))) + { + unexpected_messages.insert(signer_address, (signer_message, signer_slot_id)); + continue; + } + last_messages.insert(signer_slot_id, signer_message); + last_updates.insert(signer_slot_id, std::time::Instant::now()); + } + for (slot_id, last_update_time) in last_updates.iter() { + if last_update_time.elapsed().as_secs() > self.args.max_age { + let address = self + .cycle_state + .signers_addresses + .get(slot_id) + .expect("BUG: missing signer address for given slot id"); + stale_signers.push(*address); + } + } + if missing_signers.is_empty() + && stale_signers.is_empty() + && unexpected_messages.is_empty() + { + info!( + "All {} signers are sending messages as expected.", + nmb_signers + ); + } else { + self.print_missing_signers(&missing_signers); + self.print_stale_signers(&stale_signers); + self.print_unexpected_messages(&unexpected_messages); + } + sleep_ms(interval_ms); + } + } +} diff --git a/stacks-signer/src/utils.rs b/stacks-signer/src/utils.rs new file mode 100644 index 00000000000..955177e02d7 --- /dev/null +++ b/stacks-signer/src/utils.rs @@ -0,0 +1,24 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
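For context on the extraction: the helper that follows this license header is the same connect-on-construction function that `main.rs` previously owned, now shared between the CLI commands and the new monitor. A hypothetical caller might look like this (the node address and contract identifier below are made up for illustration):

```rust
use clarity::vm::types::QualifiedContractIdentifier;
use stacks_signer::utils::stackerdb_session;

fn main() {
    // Hypothetical StackerDB contract id; any valid `address.name` string parses.
    let contract = QualifiedContractIdentifier::parse(
        "ST000000000000000000002AMW42H.signers-0-0",
    )
    .expect("valid contract identifier");
    // Connects immediately; per the helper's `unwrap()`, an unreachable node panics.
    let _session = stackerdb_session("127.0.0.1:20443", contract);
}
```

Keeping the `unwrap()` preserves the prior behavior from `main.rs`; callers that need graceful failure handling would have to wrap this themselves.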
+ +use clarity::vm::types::QualifiedContractIdentifier; +use libsigner::{SignerSession, StackerDBSession}; + +/// Create a new stacker db session +pub fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> StackerDBSession { + let mut session = StackerDBSession::new(host, contract.clone()); + session.connect(host.to_string(), contract).unwrap(); + session +} From 970b8c431334ae1bef5de9ea02a832433d95e5d4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 15:11:54 -0700 Subject: [PATCH 660/910] CRC: get mainnet flag from stacks_client directly Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 33 ++++++++++++----------- stacks-signer/src/cli.rs | 3 --- stacks-signer/src/client/stacks_client.rs | 24 ++++++++++++++++- stacks-signer/src/monitor_signers.rs | 16 +++++------ 4 files changed, 48 insertions(+), 28 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index ae565207a73..7209398c1ce 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -275,6 +275,8 @@ pub struct PeerInfo { pub pox_consensus: ConsensusHash, /// The server version pub server_version: String, + /// The network id + pub network_id: u32, } impl StacksMessageCodec for PeerInfo { @@ -287,6 +289,7 @@ impl StacksMessageCodec for PeerInfo { fd.write_all(self.server_version.as_bytes()) .map_err(CodecError::WriteError)?; write_next(fd, &self.pox_consensus)?; + write_next(fd, &self.network_id)?; Ok(()) } @@ -305,6 +308,7 @@ impl StacksMessageCodec for PeerInfo { ) })?; let pox_consensus = read_next::(fd)?; + let network_id = read_next(fd)?; Ok(Self { burn_block_height, stacks_tip_consensus_hash, @@ -312,6 +316,7 @@ impl StacksMessageCodec for PeerInfo { stacks_tip_height, server_version, pox_consensus, + network_id, }) } } @@ -321,18 +326,15 @@ impl StacksMessageCodec for PeerInfo { pub struct MockProposal { /// The view of the stacks node peer information at the time of the mock proposal pub peer_info: PeerInfo, - /// The chain id for the mock proposal - pub chain_id: u32, /// The miner's signature across the peer info signature: MessageSignature, } impl MockProposal { /// Create a new mock proposal data struct from the provided peer info, chain id, and private key. - pub fn new(peer_info: PeerInfo, chain_id: u32, stacks_private_key: &StacksPrivateKey) -> Self { + pub fn new(peer_info: PeerInfo, stacks_private_key: &StacksPrivateKey) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - chain_id, peer_info, }; sig.sign(stacks_private_key) @@ -342,7 +344,8 @@ impl MockProposal { /// The signature hash for the mock proposal pub fn miner_signature_hash(&self) -> Sha256Sum { - let domain_tuple = make_structured_data_domain("mock-miner", "1.0.0", self.chain_id); + let domain_tuple = + make_structured_data_domain("mock-miner", "1.0.0", self.peer_info.network_id); let data_tuple = Value::Tuple( TupleData::from_data(vec![ ( @@ -375,7 +378,8 @@ impl MockProposal { /// The signature hash including the miner's signature. Used by signers. 
fn signer_signature_hash(&self) -> Sha256Sum { - let domain_tuple = make_structured_data_domain("mock-signer", "1.0.0", self.chain_id); + let domain_tuple = + make_structured_data_domain("mock-signer", "1.0.0", self.peer_info.network_id); let data_tuple = Value::Tuple( TupleData::from_data(vec![ ( @@ -413,18 +417,15 @@ impl MockProposal { impl StacksMessageCodec for MockProposal { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let peer_info = PeerInfo::consensus_deserialize(fd)?; - let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; Ok(Self { peer_info, - chain_id, signature, }) } @@ -1024,6 +1025,12 @@ mod test { let stacks_tip_height = thread_rng().next_u64(); let server_version = "0.0.0".to_string(); let pox_consensus_byte: u8 = thread_rng().gen(); + let network_byte: u8 = thread_rng().gen_range(0..=1); + let network_id = if network_byte == 1 { + CHAIN_ID_TESTNET + } else { + CHAIN_ID_MAINNET + }; PeerInfo { burn_block_height, stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), @@ -1031,19 +1038,13 @@ mod test { stacks_tip_height, server_version, pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + network_id, } } fn random_mock_proposal() -> MockProposal { - let chain_byte: u8 = thread_rng().gen_range(0..=1); - let chain_id = if chain_byte == 1 { - CHAIN_ID_TESTNET - } else { - CHAIN_ID_MAINNET - }; let peer_info = random_peer_data(); MockProposal { peer_info, - chain_id, signature: MessageSignature::empty(), } } diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index c691e7bb697..3b74635cbcb 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -266,9 +266,6 @@ pub struct MonitorSignersArgs { /// The Stacks node to connect to #[arg(long)] pub host: String, - /// Whether the node is mainnet. Default is false. - #[arg(long, default_value = "false")] - pub mainnet: bool, /// Set the polling interval in seconds. 
#[arg(long, short, default_value = "60")] pub interval: u64, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 5898258f608..31301e8b0ae 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -75,7 +75,7 @@ pub struct StacksClient { /// The chain we are interacting with chain_id: u32, /// Whether we are mainnet or not - mainnet: bool, + pub mainnet: bool, /// The Client used to make HTTP connects stacks_node_client: reqwest::blocking::Client, /// the auth password for the stacks node @@ -135,6 +135,28 @@ impl StacksClient { } } + /// Create a new signer StacksClient and attempt to connect to the stacks node to determine the version + pub fn try_from_host( + stacks_private_key: StacksPrivateKey, + node_host: String, + auth_password: String, + ) -> Result { + let mut stacks_client = Self::new(stacks_private_key, node_host, auth_password, true); + let pubkey = StacksPublicKey::from_private(&stacks_private_key); + let info = stacks_client.get_peer_info()?; + if info.network_id == CHAIN_ID_MAINNET { + stacks_client.mainnet = true; + stacks_client.chain_id = CHAIN_ID_MAINNET; + stacks_client.tx_version = TransactionVersion::Mainnet; + } else { + stacks_client.mainnet = false; + stacks_client.chain_id = CHAIN_ID_TESTNET; + stacks_client.tx_version = TransactionVersion::Testnet; + } + stacks_client.stacks_address = StacksAddress::p2pkh(stacks_client.mainnet, &pubkey); + Ok(stacks_client) + } + /// Get our signer address pub const fn get_signer_address(&self) -> &StacksAddress { &self.stacks_address diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index fdddef64ea4..7f03e96b5da 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -51,15 +51,15 @@ pub struct RewardCycleState { } impl SignerMonitor { - /// Create a new `SignerMonitor` instance from the given command line args + /// Create a new `SignerMonitor` instance pub fn new(args: MonitorSignersArgs) -> Self { url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); - let stacks_client = StacksClient::new( + let stacks_client = StacksClient::try_from_host( StacksPrivateKey::new(), // We don't need a private key to read args.host.clone(), "FOO".to_string(), // We don't care about authorized paths. Just accessing public info - args.mainnet, - ); + ) + .expect("Failed to connect to provided host."); Self { stacks_client, cycle_state: RewardCycleState::default(), @@ -93,7 +93,7 @@ impl SignerMonitor { for entry in entries { let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); + let stacks_address = StacksAddress::p2pkh(self.stacks_client.mainnet, &public_key); self.cycle_state .signers_keys .insert(stacks_address, public_key); @@ -228,8 +228,8 @@ impl SignerMonitor { .cycle_state .reward_cycle .expect("BUG: reward cycle not set"); - let contract = - MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); + let contract = MessageSlotID::BlockResponse + .stacker_db_contract(self.stacks_client.mainnet, reward_cycle); info!( "Monitoring signers stackerdb. 
Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", self.args.interval, self.args.max_age @@ -252,7 +252,7 @@ impl SignerMonitor { .reward_cycle .expect("BUG: reward cycle not set"); let contract = MessageSlotID::BlockResponse - .stacker_db_contract(self.args.mainnet, reward_cycle); + .stacker_db_contract(self.stacks_client.mainnet, reward_cycle); info!( "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", ); From 5d860c60b7b2cdb6c69c9e1466028904d240394a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sun, 22 Sep 2024 07:03:22 -0500 Subject: [PATCH 661/910] test: add response unit test for getsortition --- stackslib/src/net/api/tests/getsortition.rs | 72 ++++++++++++++++++++- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 8541b73eb6d..e112fde4a03 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -19,10 +19,14 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; use stacks_common::types::net::PeerHost; -use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier, SortitionInfo}; +use crate::net::api::tests::test_rpc; use crate::net::connection::ConnectionOptions; -use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; -use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::http::{ + Error as HttpError, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponsePayload, HttpVersion, +}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble, StacksHttpRequest}; use crate::net::Error as NetError; fn make_preamble(query: &str) -> HttpRequestPreamble { @@ -99,3 +103,65 @@ fn test_parse_request() { } } } + +#[test] +fn response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 1, + "/v3/sortitions should return a single entry" + ); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions/latest_and_last".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 2, + "/v3/sortitions/latest_and_last should return 2 entries" + 
); + let first_entry: SortitionInfo = serde_json::from_value(info_array[0].clone()) + .expect("Response array elements should parse to SortitionInfo"); + let second_entry: SortitionInfo = serde_json::from_value(info_array[1].clone()) + .expect("Response array elements should parse to SortitionInfo"); + assert!(first_entry.was_sortition); + assert!(second_entry.was_sortition); + assert_eq!( + first_entry.last_sortition_ch.as_ref().unwrap(), + &second_entry.consensus_hash, + ); +} From 24bb08c71e09c1af3a1bf9f0d1b0afb2cc776936 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 22 Sep 2024 20:51:16 -0400 Subject: [PATCH 662/910] fix: use `max_unspent_utxos` in `get_all_utxos` --- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 98f504cf99b..a3a6201813c 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -668,7 +668,7 @@ impl BitcoinRegtestController { max_conf.into(), filter_addresses.clone().into(), true.into(), - json!({ "minimumAmount": minimum_amount }), + json!({ "minimumAmount": minimum_amount, "maximumCount": self.config.burnchain.max_unspent_utxos }), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), From 2140b4433aebc75ab099e3044c6d1b6fbce22a68 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 14:46:57 +0000 Subject: [PATCH 663/910] fix: typo Co-authored-by: Brice Dobry --- stackslib/src/net/neighbors/comms.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index f3e160ff578..ed0e03f5c69 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -520,7 +520,7 @@ impl NeighborComms for PeerNetworkComms { .map(|event_ref| *event_ref) } - /// Remove a connecting neighbor because it conected + /// Remove a connecting neighbor because it connected fn remove_connecting(&mut self, network: &PeerNetwork, nk: &NK) { self.connecting.remove(&nk.to_neighbor_key(network)); } From 70c8656ef2308881a7f89952a2dc445a1d3662ec Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 23 Sep 2024 08:18:27 -0700 Subject: [PATCH 664/910] CRC: fix build error and move copyright text to top of monitor_signers file Signed-off-by: Jacinta Ferrant --- stacks-signer/src/monitor_signers.rs | 30 ++++++++++++++-------------- testnet/stacks-node/src/neon_node.rs | 4 ++-- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index 7f03e96b5da..4bc017fa278 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -1,18 +1,3 @@ -use std::collections::HashMap; - -use clarity::codec::read_next; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use clarity::types::StacksEpochId; -use clarity::util::sleep_ms; -use libsigner::v0::messages::{MessageSlotID, SignerMessage}; -use libsigner::SignerSession; -use slog::{slog_info, slog_warn}; -use stacks_common::{info, warn}; - -use crate::cli::MonitorSignersArgs; -use crate::client::{ClientError, SignerSlotID, StacksClient}; -use crate::utils::stackerdb_session; - // Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: 
you can redistribute it and/or modify @@ -28,6 +13,21 @@ use crate::utils::stackerdb_session; // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::HashMap; + +use clarity::codec::read_next; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use clarity::types::StacksEpochId; +use clarity::util::sleep_ms; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; +use libsigner::SignerSession; +use slog::{slog_info, slog_warn}; +use stacks_common::{info, warn}; + +use crate::cli::MonitorSignersArgs; +use crate::client::{ClientError, SignerSlotID, StacksClient}; +use crate::utils::stackerdb_session; + /// The `SignerMonitor` struct is used to monitor the signers stackerdb slots for expected new messages pub struct SignerMonitor { /// The client being used to monitor stackerdb messages diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 793605d1c45..dcfa855c9b3 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2268,6 +2268,7 @@ impl BlockMinerThread { stacks_tip_height, pox_consensus, server_version, + network_id: self.config.get_burnchain_config().chain_id, } } @@ -2392,8 +2393,7 @@ impl BlockMinerThread { return Ok(()); } let election_sortition = last_winner_snapshot.consensus_hash; - let mock_proposal = - MockProposal::new(peer_info, self.config.burnchain.chain_id, &mining_key); + let mock_proposal = MockProposal::new(peer_info, &mining_key); info!("Sending mock proposal to stackerdb: {mock_proposal:?}"); From 21defc635e94ecac03583fcef9437fec15a4e533 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 16:32:45 +0000 Subject: [PATCH 665/910] fix: typo Co-authored-by: Brice Dobry --- stackslib/src/net/inv/nakamoto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 9be77949b11..68fd7500776 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -150,7 +150,7 @@ impl InvGenerator { /// messages. /// /// If found, then return the ancestor block ID represented in `self.processed_tenures`. - /// If not, then reutrn None. + /// If not, then return None. 
pub(crate) fn find_ancestor_processed_tenures( &self, chainstate: &StacksChainState, From 896cae0b0a47fc50162ce88143e1200c8d6d769d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 12:46:33 -0400 Subject: [PATCH 666/910] fix: error and then panic --- stackslib/src/chainstate/stacks/db/accounts.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 105d3ed516e..7c81410e873 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -415,9 +415,11 @@ impl StacksChainState { ) { clarity_tx .with_clarity_db(|ref mut db| { - let next_nonce = cur_nonce - .checked_add(1) - .unwrap_or_else(|| panic!("OUT OF NONCES")); + let next_nonce = cur_nonce.checked_add(1).unwrap_or_else(|| { + error!("OUT OF NONCES"); + panic!(); + }); + db.set_account_nonce(&principal, next_nonce)?; Ok(()) }) From 89b9d84c7f6283f92fee828f292258547e0f35f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 13:07:28 -0400 Subject: [PATCH 667/910] fix: get_mut() --> get() --- stackslib/src/net/inv/nakamoto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 68fd7500776..15940e23be3 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -218,7 +218,7 @@ impl InvGenerator { }; // this tip has a known table - if let Some(loaded_tenure_info) = tenure_infos.get_mut(tenure_id_consensus_hash) { + if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { // we've loaded this tenure info before for this tip return Ok(loaded_tenure_info.clone()); } else { From 091afbfab03d659020ec4b50db3ca40b3850bfba Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 23 Sep 2024 10:24:14 -0700 Subject: [PATCH 668/910] CRC: update v3_signer_endpoint test to confirm exact number of blocks Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/mod.rs | 23 +++-- .../src/tests/nakamoto_integrations.rs | 85 ++++++++++++++----- 2 files changed, 79 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c111f4f212e..d006ae2184f 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3316,7 +3316,7 @@ impl NakamotoChainState { StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature) .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?; let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1"; - let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + let params = params![signer_pubkey.to_hex(), reward_cycle]; tx.execute(sql, params)?; } Ok(()) @@ -3331,7 +3331,7 @@ impl NakamotoChainState { ) -> Result { let sql = "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2"; - let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + let params = params![signer_pubkey.to_hex(), reward_cycle]; chainstate_db .query_row(sql, params, |row| row.get("blocks_signed")) .optional() @@ -4135,12 +4135,19 @@ impl NakamotoChainState { if let Some(signer_calculation) = signer_set_calc { Self::write_reward_set(chainstate_tx, &new_block_id, 
&signer_calculation.reward_set)?; - let cycle_number = pox_constants - .reward_cycle_of_prepare_phase( - first_block_height.into(), - chain_tip_burn_header_height.into(), - ) - .or_else(|| reward_cycle.map(|cycle| cycle + 1)); + let cycle_number = if let Some(cycle) = pox_constants.reward_cycle_of_prepare_phase( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ) { + Some(cycle) + } else { + pox_constants + .block_height_to_reward_cycle( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ) + .map(|cycle| cycle + 1) + }; if let Some(cycle) = cycle_number { reward_set_data = Some(RewardSetData::new( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5981660e506..50856087028 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8025,11 +8025,23 @@ fn v3_signer_api_endpoint() { let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); let signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); // only subscribe to the block proposal events test_observer::spawn(); @@ -8070,16 +8082,13 @@ fn v3_signer_api_endpoint() { ); info!("------------------------- Reached Epoch 3.0 -------------------------"); - blind_signer(&conf, &signers, proposals_submitted); - wait_for_first_naka_block_commit(60, &commits_submitted); - // TODO (hack) instantiate the sortdb in the burnchain _ = btc_regtest_controller.sortdb_mut(); info!("------------------------- Setup finished, run test -------------------------"); - let naka_tenures = 20; + let naka_tenures = conf.burnchain.pox_reward_length.unwrap().into(); let pre_naka_reward_cycle = 1; let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -8102,11 +8111,23 @@ fn v3_signer_api_endpoint() { let blocks_signed_pre_naka = get_v3_signer(&signer_pubkey, pre_naka_reward_cycle); assert_eq!(blocks_signed_pre_naka, 0); - // Keep track of reward cycles encountered - let mut reward_cycles = HashSet::new(); + let block_height = btc_regtest_controller.get_headers_height(); + let first_reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + + let second_reward_cycle = first_reward_cycle.saturating_add(1); + let second_reward_cycle_start = btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(second_reward_cycle) + .saturating_sub(1); + + let nmb_naka_blocks_in_first_cycle = second_reward_cycle_start - block_height; + let nmb_naka_blocks_in_second_cycle = naka_tenures - nmb_naka_blocks_in_first_cycle; // Mine some nakamoto tenures - for _ in 0..naka_tenures { + for 
_i in 0..naka_tenures { next_block_and_mine_commit( &mut btc_regtest_controller, 60, @@ -8114,23 +8135,45 @@ fn v3_signer_api_endpoint() { &commits_submitted, ) .unwrap(); - let block_height = btc_regtest_controller.get_headers_height(); - let reward_cycle = btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap(); - reward_cycles.insert(reward_cycle); } + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); - // Make sure we got a couple cycles - assert!(reward_cycles.len() > 1); - assert!(!reward_cycles.contains(&pre_naka_reward_cycle)); + assert_eq!(reward_cycle, second_reward_cycle); - // Since we have only one signer, it must be signing at least 1 block per reward cycle - for reward_cycle in reward_cycles.into_iter() { - let blocks_signed = get_v3_signer(&signer_pubkey, reward_cycle); - assert_ne!(blocks_signed, 0); - } + // Assert that we mined a single block (the commit op) per tenure + let nmb_signed_first_cycle = get_v3_signer(&signer_pubkey, first_reward_cycle); + let nmb_signed_second_cycle = get_v3_signer(&signer_pubkey, second_reward_cycle); + + assert_eq!(nmb_signed_first_cycle, nmb_naka_blocks_in_first_cycle); + assert_eq!(nmb_signed_second_cycle, nmb_naka_blocks_in_second_cycle); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra stacks block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + Ok(coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed() + > blocks_processed_before) + }) + .unwrap(); + // Assert that we mined an additional block in the second cycle + assert_eq!( + get_v3_signer(&signer_pubkey, second_reward_cycle), + nmb_naka_blocks_in_second_cycle + 1 + ); info!("------------------------- Test finished, clean up -------------------------"); From 5bf852e3e7b86f2d5eb9c5b254c3bf5f7f03a994 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 23 Sep 2024 10:26:27 -0700 Subject: [PATCH 669/910] Merge remote-tracking branch 'origin/develop' into feat/monitor-signers-cli-command --- .github/workflows/bitcoin-tests.yml | 7 +- .../api/core-node/get_sortitions.example.json | 15 + ...t_sortitions_latest_and_prior.example.json | 28 ++ docs/rpc/openapi.yaml | 41 ++ stacks-signer/src/chainstate.rs | 35 +- stacks-signer/src/client/stacks_client.rs | 63 +-- stacks-signer/src/tests/chainstate.rs | 1 - stackslib/src/chainstate/nakamoto/mod.rs | 5 +- stackslib/src/net/api/getsortition.rs | 229 +++++---- stackslib/src/net/api/tests/getsortition.rs | 72 ++- .../burnchains/bitcoin_regtest_controller.rs | 2 +- .../src/nakamoto_node/sign_coordinator.rs | 12 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 35 +- .../src/tests/nakamoto_integrations.rs | 438 +++++++++++++++++- .../src/tests/neon_integrations.rs | 72 ++- testnet/stacks-node/src/tests/signer/mod.rs | 94 +++- testnet/stacks-node/src/tests/signer/v0.rs | 433 +++++++++-------- 17 files changed, 1175 insertions(+), 407 deletions(-) create mode 100644 docs/rpc/api/core-node/get_sortitions.example.json create mode 100644 docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json diff --git a/.github/workflows/bitcoin-tests.yml 
b/.github/workflows/bitcoin-tests.yml index e618eedebe6..2d02b176690 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -75,9 +75,13 @@ jobs: - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - tests::neon_integrations::mock_miner_replay - tests::neon_integrations::listunspent_max_utxos + - tests::neon_integrations::bitcoin_reorg_flap + - tests::neon_integrations::bitcoin_reorg_flap_with_follower + - tests::neon_integrations::start_stop_bitcoind - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration + - tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3 - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb @@ -122,9 +126,6 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - # Do not run this one until we figure out why it fails in CI - # - tests::neon_integrations::bitcoin_reorg_flap - # - tests::neon_integrations::bitcoin_reorg_flap_with_follower # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/docs/rpc/api/core-node/get_sortitions.example.json b/docs/rpc/api/core-node/get_sortitions.example.json new file mode 100644 index 00000000000..a56fd887b1d --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions.example.json @@ -0,0 +1,15 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + } +] diff --git a/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json new file mode 100644 index 00000000000..db970637ed3 --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json @@ -0,0 +1,28 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + }, + { + 
"burn_block_hash": "0x496ff02cb63a4850d0bdee5fab69284b6eb0392b4538e1c462f82362c5becfa4", + "burn_block_height": 230, + "burn_header_timestamp": 1726797570, + "sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "parent_sortition_id": "0xf9058692055cbd879d7f71e566e44b905a887b2b182407ed596b5d6499ceae2a", + "consensus_hash": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "last_sortition_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "committed_block_hash": "0x36ee5f7f7271de1c1d4cd830e36320b51e01605547621267ae6e9b4e9b10f95e" + } +] diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 3d4249329e1..5547d3bcb66 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -675,3 +675,44 @@ paths: schema: type: string + /v3/sortitions/{lookup_kind}/{lookup}: + get: + summary: Fetch information about evaluated burnchain blocks (i.e., sortitions). + tags: + - Blocks + operationId: get_sortitions + description: + Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. + responses: + "200": + description: Information for the burn block or in the case of `latest_and_last`, multiple burn blocks + content: + application/json: + examples: + Latest: + description: A single element list is returned when just one sortition is requested + value: + $ref: ./api/core-node/get_sortitions.example.json + LatestAndLast: + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + value: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + parameters: + - name: lookup_kind + in: path + description: |- + The style of lookup that should be performed. If not given, the most recent burn block processed will be returned. + Otherwise, the `lookup_kind` should be one of the following strings: + * `consensus` - find the burn block using the consensus hash supplied in the `lookup` field. + * `burn_height` - find the burn block using the burn block height supplied in the `lookup` field. + * `burn` - find the burn block using the burn block hash supplied in the `lookup` field. 
+ * `latest_and_last` - return information about the latest burn block with a winning miner *and* the previous such burn block + required: false + schema: + type: string + - name: lookup + in: path + description: The value to use for the lookup if `lookup_kind` is `consensus`, `burn_height`, or `burn` + required: false + schema: + type: string diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4012fd48a08..4bbb9741a54 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; -use crate::client::{ClientError, StacksClient}; +use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; use crate::config::SignerConfig; use crate::signerdb::{BlockState, SignerDb}; @@ -138,8 +138,6 @@ pub struct SortitionsView { pub last_sortition: Option, /// the current successful sortition (this corresponds to the "current" miner slot) pub cur_sortition: SortitionState, - /// the hash at which the sortitions view was fetched - pub latest_consensus_hash: ConsensusHash, /// configuration settings for evaluating proposals pub config: ProposalEvalConfig, } @@ -608,42 +606,21 @@ impl SortitionsView { config: ProposalEvalConfig, client: &StacksClient, ) -> Result { - let latest_state = client.get_latest_sortition()?; - let latest_ch = latest_state.consensus_hash; - - // figure out what cur_sortition will be set to. - // if the latest sortition wasn't successful, query the last one that was. - let latest_success = if latest_state.was_sortition { - latest_state - } else { - info!("Latest state wasn't a sortition: {latest_state:?}"); - let last_sortition_ch = latest_state - .last_sortition_ch - .as_ref() - .ok_or_else(|| ClientError::NoSortitionOnChain)?; - client.get_sortition(last_sortition_ch)? - }; - - // now, figure out what `last_sortition` will be set to. - let last_sortition = latest_success - .last_sortition_ch - .as_ref() - .map(|ch| client.get_sortition(ch)) - .transpose()?; + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; - let cur_sortition = SortitionState::try_from(latest_success)?; + let cur_sortition = SortitionState::try_from(current_sortition)?; let last_sortition = last_sortition .map(SortitionState::try_from) .transpose() .ok() .flatten(); - let latest_consensus_hash = latest_ch; - Ok(Self { cur_sortition, last_sortition, - latest_consensus_hash, config, }) } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 31301e8b0ae..7b490144fce 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,5 +1,3 @@ -use std::collections::{HashMap, VecDeque}; - // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -15,6 +13,8 @@ use std::collections::{HashMap, VecDeque}; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::{HashMap, VecDeque}; + use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ @@ -88,6 +88,15 @@ struct GetStackersErrorResp { err_msg: String, } +/// Result from fetching current and last sortition: +/// two sortition infos +pub struct CurrentAndLastSortition { + /// the latest winning sortition in the current burnchain fork + pub current_sortition: SortitionInfo, + /// the last winning sortition prior to `current_sortition`, if there was one + pub last_sortition: Option, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -531,10 +540,10 @@ impl StacksClient { Ok(tenures) } - /// Get the sortition information for the latest sortition - pub fn get_latest_sortition(&self) -> Result { - debug!("stacks_node_client: Getting latest sortition..."); - let path = self.sortition_info_path(); + /// Get the current winning sortition and the last winning sortition + pub fn get_current_and_last_sortition(&self) -> Result { + debug!("stacks_node_client: Getting current and prior sortition..."); + let path = format!("{}/latest_and_last", self.sortition_info_path()); let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { self.stacks_node_client.get(&path).send().map_err(|e| { @@ -547,29 +556,29 @@ impl StacksClient { if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } - let sortition_info = response.json()?; - Ok(sortition_info) - } - - /// Get the sortition information for a given sortition - pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { - debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); - let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); - let timer_label = format!("{}/consensus/:consensus_hash", self.sortition_info_path()); - let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); - let send_request = || { - self.stacks_node_client.get(&path).send().map_err(|e| { - warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); - e - }) + let mut info_list: VecDeque = response.json()?; + let Some(current_sortition) = info_list.pop_front() else { + return Err(ClientError::UnexpectedResponseFormat( + "Empty SortitionInfo returned".into(), + )); }; - let response = send_request()?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); + if !current_sortition.was_sortition { + return Err(ClientError::UnexpectedResponseFormat( + "'Current' SortitionInfo returned which was not a winning sortition".into(), + )); } - let sortition_info = response.json()?; - Ok(sortition_info) + let last_sortition = if current_sortition.last_sortition_ch.is_some() { + let Some(last_sortition) = info_list.pop_back() else { + return Err(ClientError::UnexpectedResponseFormat("'Current' SortitionInfo has `last_sortition_ch` field, but corresponding data not returned".into())); + }; + Some(last_sortition) + } else { + None + }; + Ok(CurrentAndLastSortition { + current_sortition, + last_sortition, + }) } /// Get the current peer info data from the stacks node diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index f79bcadc3f4..a390c27edcb 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -82,7 +82,6 @@ fn 
setup_test_environment( }); let view = SortitionsView { - latest_consensus_hash: cur_sortition.consensus_hash, cur_sortition, last_sortition, config: ProposalEvalConfig { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 756212ee54c..24f92ad02b3 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -734,6 +734,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.timestamp)?; + write_next(fd, &self.pox_treatment)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -1876,7 +1877,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? @@ -1888,7 +1889,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5e0557ca26b..7b594530c26 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -28,6 +28,7 @@ use stacks_common::util::HexError; use {serde, serde_json}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; @@ -51,10 +52,13 @@ pub enum QuerySpecifier { BurnchainHeaderHash(BurnchainHeaderHash), BlockHeight(u64), Latest, + /// Fetch the latest sortition *which was a winning sortition* and that sortition's + /// last sortition, returning two SortitionInfo structs. 
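+    /// For example, a `GET /v3/sortitions/latest_and_last` request should return a
+    /// two-element JSON array: the latest winning sortition first, followed by the
+    /// winning sortition before it (or a single element when no prior winning
+    /// sortition exists).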
+ LatestAndLast, } pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions"; -static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; +static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})(/(?P[0-9a-f]{1,64}))?)?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -112,6 +116,7 @@ impl TryFrom<(&str, &str)> for QuerySpecifier { value.1 }; match value.0 { + "latest_and_last" => Ok(Self::LatestAndLast), "consensus" => Ok(Self::ConsensusHash( ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, )), @@ -141,6 +146,74 @@ impl GetSortitionHandler { query: QuerySpecifier::Latest, } } + + fn get_sortition_info( + sortition_sn: BlockSnapshot, + sortdb: &SortitionDB, + ) -> Result { + let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = + if !sortition_sn.sortition { + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = + handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) + } else { + let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? + .ok_or_else(|| { + error!( + "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let stacks_parent_sn = handle + .get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? + .ok_or_else(|| { + warn!( + "Failed to load the snapshot of the winning block commits parent"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + + // try to figure out what the last snapshot in this fork was with a successful + // sortition. 
+ // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` + let last_sortition_ch = if stacks_parent_sn.sortition { + stacks_parent_sn.consensus_hash.clone() + } else { + // we actually need to perform the marf lookup + let last_sortition = handle.get_last_snapshot_with_sortition( + sortition_sn.block_height.saturating_sub(1), + )?; + last_sortition.consensus_hash + }; + + ( + sortition_sn.miner_pk_hash.clone(), + Some(stacks_parent_sn.consensus_hash), + Some(block_commit.block_header_hash), + Some(last_sortition_ch), + ) + }; + + Ok(SortitionInfo { + burn_block_hash: sortition_sn.burn_header_hash, + burn_block_height: sortition_sn.block_height, + burn_header_timestamp: sortition_sn.burn_header_timestamp, + sortition_id: sortition_sn.sortition_id, + parent_sortition_id: sortition_sn.parent_sortition_id, + consensus_hash: sortition_sn.consensus_hash, + was_sortition: sortition_sn.sortition, + miner_pk_hash160, + stacks_parent_ch, + last_sortition_ch, + committed_block_hash, + }) + } } /// Decode the HTTP request impl HttpRequest for GetSortitionHandler { @@ -169,9 +242,15 @@ impl HttpRequest for GetSortitionHandler { let req_contents = HttpRequestContents::new().query_string(query); self.query = QuerySpecifier::Latest; - if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { - self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; - } + match (captures.name("key"), captures.name("value")) { + (Some(key), None) => { + self.query = QuerySpecifier::try_from((key.as_str(), ""))?; + } + (Some(key), Some(value)) => { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; + } + _ => {} + }; Ok(req_contents) } @@ -194,81 +273,37 @@ impl RPCRequestHandler for GetSortitionHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = - node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { - let query_result = match self.query { - QuerySpecifier::Latest => { + let result = node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => Ok(Some(network.burnchain_tip.clone())), + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + } + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + } + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + } + QuerySpecifier::LatestAndLast => { + if network.burnchain_tip.sortition { + // optimization: if the burn chain tip had a sortition, just return that Ok(Some(network.burnchain_tip.clone())) - }, - QuerySpecifier::ConsensusHash(ref consensus_hash) => { - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) - }, - QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot(burn_hash) - }, - QuerySpecifier::BlockHeight(burn_height) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot_by_height(burn_height) - }, - }; - let sortition_sn = query_result? 
- .ok_or_else(|| ChainError::NoSuchBlockError)?; - - let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; - (None, None, None, Some(last_sortition.consensus_hash)) - } else { - let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? - .ok_or_else(|| { - error!( - "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? - .ok_or_else(|| { - warn!( - "Failed to load the snapshot of the winning block commits parent"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - - // try to figure out what the last snapshot in this fork was with a successful - // sortition. - // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` - let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 { - stacks_parent_sn.consensus_hash.clone() } else { - // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height.saturating_sub(1))?; - last_sortition.consensus_hash - }; - - (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash), - Some(last_sortition_ch)) - }; - - Ok(SortitionInfo { - burn_block_hash: sortition_sn.burn_header_hash, - burn_block_height: sortition_sn.block_height, - burn_header_timestamp: sortition_sn.burn_header_timestamp, - sortition_id: sortition_sn.sortition_id, - parent_sortition_id: sortition_sn.parent_sortition_id, - consensus_hash: sortition_sn.consensus_hash, - was_sortition: sortition_sn.sortition, - miner_pk_hash160, - stacks_parent_ch, - last_sortition_ch, - committed_block_hash, - }) - }); + // we actually need to perform a marf lookup to find that last snapshot + // with a sortition + let handle = sortdb.index_handle_at_tip(); + let last_sortition = handle + .get_last_snapshot_with_sortition(network.burnchain_tip.block_height)?; + Ok(Some(last_sortition)) + } + } + }; + let sortition_sn = query_result?.ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(sortition_sn, sortdb) + }); let block = match result { Ok(block) => block, @@ -290,8 +325,44 @@ impl RPCRequestHandler for GetSortitionHandler { } }; + let last_sortition_ch = block.last_sortition_ch.clone(); + let mut info_list = vec![block]; + if self.query == QuerySpecifier::LatestAndLast { + // if latest **and** last are requested, lookup the sortition info for last_sortition_ch + if let Some(last_sortition_ch) = last_sortition_ch { + let result = node.with_node_state(|_, sortdb, _, _, _| { + let last_sortition_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &last_sortition_ch, + )? 
+ .ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(last_sortition_sn, sortdb) + }); + let last_block = match result { + Ok(block) => block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Could not find snapshot for the `last_sortition_ch`({last_sortition_ch})\n")), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load snapshot for `last_sortition_ch`({last_sortition_ch}): {:?}\n", &e); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + info_list.push(last_block); + } + } + let preamble = HttpResponsePreamble::ok_json(&preamble); - let result = HttpResponseContents::try_from_json(&block)?; + let result = HttpResponseContents::try_from_json(&info_list)?; Ok((preamble, result)) } } @@ -302,7 +373,7 @@ impl HttpResponse for GetSortitionHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let sortition_info: SortitionInfo = parse_json(preamble, body)?; + let sortition_info: Vec = parse_json(preamble, body)?; Ok(HttpResponsePayload::try_from_json(sortition_info)?) } } diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 8541b73eb6d..e112fde4a03 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -19,10 +19,14 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; use stacks_common::types::net::PeerHost; -use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier, SortitionInfo}; +use crate::net::api::tests::test_rpc; use crate::net::connection::ConnectionOptions; -use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; -use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::http::{ + Error as HttpError, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponsePayload, HttpVersion, +}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble, StacksHttpRequest}; use crate::net::Error as NetError; fn make_preamble(query: &str) -> HttpRequestPreamble { @@ -99,3 +103,65 @@ fn test_parse_request() { } } } + +#[test] +fn response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 1, + "/v3/sortitions should return a single entry" + ); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions/latest_and_last".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + 
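+    // The latest_and_last lookup should return exactly two winning sortitions,
+    // chained by consensus hash: the first entry's `last_sortition_ch` must match
+    // the second entry's `consensus_hash`, as the assertions below verify.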
let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 2, + "/v3/sortitions/latest_and_last should return 2 entries" + ); + let first_entry: SortitionInfo = serde_json::from_value(info_array[0].clone()) + .expect("Response array elements should parse to SortitionInfo"); + let second_entry: SortitionInfo = serde_json::from_value(info_array[1].clone()) + .expect("Response array elements should parse to SortitionInfo"); + assert!(first_entry.was_sortition); + assert!(second_entry.was_sortition); + assert_eq!( + first_entry.last_sortition_ch.as_ref().unwrap(), + &second_entry.consensus_hash, + ); +} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 98f504cf99b..994636c97c3 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2831,7 +2831,7 @@ impl BitcoinRPCRequest { Ok(()) } - fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { + pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 29a64cfb27f..1ac2618a537 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -886,11 +886,6 @@ impl SignCoordinator { ); continue; } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } if Self::fault_injection_ignore_signatures() { warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; @@ -906,6 +901,12 @@ impl SignCoordinator { continue; } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), @@ -986,7 +987,6 @@ impl SignCoordinator { } }; } - // After gathering all signatures, return them if we've hit the threshold if total_weight_signed >= self.weight_threshold { info!("SignCoordinator: Received enough signatures. 
Continuing."; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 6619152f9ff..621f92aa476 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -11,6 +11,7 @@ use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; +use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; @@ -19,12 +20,14 @@ use crate::Config; #[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), + StopFailed(String), } impl std::fmt::Display for BitcoinCoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), + Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), } } } @@ -109,25 +112,25 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { - let mut command = Command::new("bitcoin-cli"); - command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); - - self.add_rpc_cli_args(&mut command); - - command.arg("stop"); - - let mut process = match command.spawn() { - Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), + let payload = BitcoinRPCRequest { + method: "stop".to_string(), + params: vec![], + id: "stacks".to_string(), + jsonrpc: "2.0".to_string(), }; - let mut out_reader = BufReader::new(process.stdout.take().unwrap()); - let mut line = String::new(); - while let Ok(bytes_read) = out_reader.read_line(&mut line) { - if bytes_read == 0 { - break; + let res = BitcoinRPCRequest::send(&self.config, payload) + .map_err(|e| BitcoinCoreError::StopFailed(format!("{e:?}")))?; + + if let Some(err) = res.get("error") { + if !err.is_null() { + return Err(BitcoinCoreError::StopFailed(format!("{err}"))); } - eprintln!("{line}"); + } else { + return Err(BitcoinCoreError::StopFailed(format!( + "Invalid response: {:?}", + res + ))); } } Ok(()) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4cd5ceb9974..314200e7482 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -930,6 +930,161 @@ pub fn boot_to_epoch_3( info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } +/// Boot the chain to just before the Epoch 3.0 boundary to allow for flash blocks +/// This function is similar to `boot_to_epoch_3`, but it stops at epoch 3 start height - 2, +/// allowing for flash blocks to occur when the epoch changes. 
+/// +/// * `stacker_sks` - private keys for sending large `stack-stx` transactions to activate pox-4 +/// * `signer_sks` - corresponding signer keys for the stackers +pub fn boot_to_pre_epoch_3_boundary( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + self_signing: &mut Option<&mut TestSigners>, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + assert_eq!(stacker_sks.len(), signer_sks.len()); + + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let current_height = btc_regtest_controller.get_headers_height(); + info!( + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(btc_regtest_controller, &blocks_processed); + + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } + // stack enough to activate pox-4 + + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + + for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &signer_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 12_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(signer_sk); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(12), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + + let prepare_phase_start = btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_phase_start( + btc_regtest_controller.get_burnchain().first_block_height, + reward_cycle, + ); + + // Run until the prepare phase + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + prepare_phase_start, + &naka_conf, + ); + + // We need to vote on the aggregate public key if this test is self signing + if let Some(signers) = self_signing { + // Get the aggregate key + let 
aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); + let aggregate_public_key = + clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"); + let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + // Vote on the aggregate public key + for signer_sk in signer_sks_unique.values() { + let signer_index = + get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) + .unwrap(); + let voting_tx = tests::make_contract_call( + signer_sk, + 0, + 300, + &StacksAddress::burn_address(false), + SIGNERS_VOTING_NAME, + SIGNERS_VOTING_FUNCTION_NAME, + &[ + clarity::vm::Value::UInt(u128::try_from(signer_index).unwrap()), + aggregate_public_key.clone(), + clarity::vm::Value::UInt(0), + clarity::vm::Value::UInt(reward_cycle as u128 + 1), + ], + ); + submit_tx(&http_origin, &voting_tx); + } + } + + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3.start_height - 2, + &naka_conf, + ); + + info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); +} + fn get_signer_index( stacker_set: &GetStackersResponse, signer_key: &Secp256k1PublicKey, @@ -1521,6 +1676,287 @@ fn simple_neon_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, +/// having flash blocks when epoch updates and expects everything to work normally, +/// then switches to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration_with_flash_blocks_on_epoch_3() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_pre_epoch_3_boundary( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let block_height_before_mining = tip.block_height; + + // Mine 3 Bitcoin blocks rapidly without waiting for Stacks blocks to be processed. + // These blocks won't be considered "mined" until the next_block_and_wait call. + for _i in 0..3 { + btc_regtest_controller.build_next_block(1); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Verify that the canonical burn chain tip hasn't advanced yet + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + assert_eq!(tip.block_height, block_height_before_mining); + } + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + // Mine a new block and wait for it to be processed. + // This should update the canonical burn chain tip to include all 4 new blocks. 
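+    // (Three blocks were built above via `build_next_block`, and this call adds a
+    // fourth; hence the `block_height_before_mining + 4` assertion below.)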
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // Verify that the burn chain tip has advanced by 4 blocks + assert_eq!( + tip.block_height, + block_height_before_mining + 4, + "Burn chain tip should have advanced by 4 blocks" + ); + + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + + // Check that we have the expected burn blocks + // We expect to have around the blocks 220-230 and 234 onwards, with a gap of 3 blocks for the flash blocks + let bhh = u64::from(tip.burn_header_height); + + // Get the Epoch 3.0 activation height (in terms of Bitcoin block height) + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let 
epoch_3_start_height = epoch_3.start_height; + + // Find the gap in burn blocks + let mut gap_start = 0; + let mut gap_end = 0; + for i in 220..=bhh { + if test_observer::contains_burn_block_range(i..=i).is_err() { + if gap_start == 0 { + gap_start = i; + } + gap_end = i; + } else if gap_start != 0 { + break; + } + } + + // Verify that there's a gap of exactly 3 blocks + assert_eq!( + gap_end - gap_start + 1, + 3, + "Expected a gap of exactly 3 burn blocks due to flash blocks, found gap from {} to {}", + gap_start, + gap_end + ); + + // Verify that the gap includes the Epoch 3.0 activation height + assert!( + gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, + "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", + gap_start, + gap_end, + epoch_3_start_height + ); + + // Verify blocks before and after the gap + test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); + test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); + + info!("Verified burn block ranges, including expected gap for flash blocks"); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. @@ -5517,7 +5953,7 @@ fn signer_chainstate() { let time_start = Instant::now(); let proposal = loop { let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); - if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + if proposal.0.header.consensus_hash == sortitions_view.cur_sortition.consensus_hash { break proposal; } if time_start.elapsed() > Duration::from_secs(20) { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d5098915eff..3dd299c8616 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12398,6 +12398,10 @@ fn bitcoin_reorg_flap() { channel.stop_chains_coordinator(); } +/// Advance the bitcoin chain and wait for the miner and any followers to +/// process the next block. +/// NOTE: This only works if the followers are mock-mining, or else the counter +/// will not be updated. 
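+/// (This is presumably why `bitcoin_reorg_flap_with_follower` below now sets
+/// `follower_conf.node.mock_mining = true` before spawning its follower.)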
fn next_block_and_wait_all( btc_controller: &mut BitcoinRegtestController, miner_blocks_processed: &Arc, @@ -12447,7 +12451,7 @@ fn bitcoin_reorg_flap_with_follower() { } let (conf, _miner_account) = neon_integration_test_conf(); - let timeout = None; + let timeout = Some(Duration::from_secs(60)); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -12461,10 +12465,12 @@ fn bitcoin_reorg_flap_with_follower() { eprintln!("Chain bootstrapped..."); let mut miner_run_loop = neon::RunLoop::new(conf.clone()); + let run_loop_stopper = miner_run_loop.get_termination_switch(); let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); let miner_channel = miner_run_loop.get_coordinator_channel().unwrap(); let mut follower_conf = conf.clone(); + follower_conf.node.mock_mining = true; follower_conf.events_observers.clear(); follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; @@ -12483,7 +12489,7 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); - thread::spawn(move || miner_run_loop.start(None, 0)); + let run_loop_thread = thread::spawn(move || miner_run_loop.start(None, 0)); wait_for_runloop(&miner_blocks_processed); // figure out the started node's port @@ -12499,23 +12505,20 @@ fn bitcoin_reorg_flap_with_follower() { ); let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone()); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || follower_run_loop.start(None, 0)); + let follower_thread = thread::spawn(move || follower_run_loop.start(None, 0)); wait_for_runloop(&follower_blocks_processed); eprintln!("Follower bootup complete!"); // first block wakes up the run loop - next_block_and_wait_all( - &mut btc_regtest_controller, - &miner_blocks_processed, - &[], - timeout, - ); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &miner_blocks_processed, 60); - // first block will hold our VRF registration + // next block will hold our VRF registration + // Note that the follower will not see its block processed counter bumped here next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, @@ -12609,9 +12612,11 @@ fn bitcoin_reorg_flap_with_follower() { assert_eq!(miner_channel.get_sortitions_processed(), 225); assert_eq!(follower_channel.get_sortitions_processed(), 225); - btcd_controller.stop_bitcoind().unwrap(); - miner_channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); } /// Tests the following: @@ -12841,3 +12846,44 @@ fn listunspent_max_utxos() { let utxos = res.expect("Failed to get utxos"); assert_eq!(utxos.num_utxos(), 10); } + +#[test] +#[ignore] +/// Test out stopping bitcoind and restarting it +fn start_stop_bitcoind() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = neon_integration_test_conf(); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + conf.node.prometheus_bind = Some(prom_bind.clone()); + + conf.burnchain.max_rbf = 
1000000; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); + + thread::sleep(Duration::from_secs(5)); + + btcd_controller + .start_bitcoind() + .expect("Failed to start bitcoind"); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); +} diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0b38a792346..a48677e0f64 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -37,6 +37,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -46,7 +47,9 @@ use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use stacks::types::chainstate::StacksAddress; +use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -678,6 +681,76 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { + // Make sure that ALL signers accepted the block proposal + wait_for(timeout_secs, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(( + hash, + signature, + ))) => { + if hash == *signer_signature_hash + && expected_signers.iter().any(|pk| { + pk.verify(hash.bits(), &signature) + .expect("Failed to verify signature") + }) + { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == expected_signers.len()) + }) + } + + pub fn wait_for_block_rejections( + &self, + timeout_secs: u64, + expected_signers: &[StacksPublicKey], + ) -> Result<(), String> { + wait_for(timeout_secs, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + if expected_signers.contains(&rejected_pubkey) { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == expected_signers.len()) + }) + } } fn setup_stx_btc_node 
()>( @@ -748,9 +821,22 @@ fn setup_stx_btc_node ()>( info!("Make new BitcoinRegtestController"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - info!("Bootstraping..."); - // Should be 201 for other tests? - btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys); + let epoch_2_5_start = usize::try_from( + naka_conf + .burnchain + .epochs + .as_ref() + .unwrap() + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height, + ) + .expect("Failed to get epoch 2.5 start height"); + let bootstrap_block = epoch_2_5_start - 6; + + info!("Bootstraping to block {bootstrap_block}..."); + btc_regtest_controller.bootstrap_chain_to_pks(bootstrap_block, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2c5c8484caf..9f9f8d1a41e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2209,26 +2209,39 @@ fn signers_broadcast_signed_blocks() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - sleep_ms(10_000); - + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - sleep_ms(10_000); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + ); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - let signer_pushed_before = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - // submit a tx so that the miner will mine a block + // submit a tx so that the miner will mine a blockn let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); @@ -2236,26 +2249,16 @@ fn signers_broadcast_signed_blocks() { debug!("Transaction sent; waiting for block-mining"); - let start = Instant::now(); - let duration = 60; - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + wait_for(30, || { let signer_pushed = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && signer_pushed > signer_pushed_before - && info.stacks_tip_height > info_before.stacks_tip_height - { - break; - } - debug!( "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", blocks_mined, @@ -2265,12 +2268,11 @@ fn signers_broadcast_signed_blocks() { info.stacks_tip_height, info_before.stacks_tip_height ); - - 
std::thread::sleep(Duration::from_millis(100)); - if start.elapsed() >= Duration::from_secs(duration) { - panic!("Timed out"); - } - } + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height + && signer_pushed > signer_pushed_before) + }) + .expect("Timed out waiting for second nakamoto block to be mined"); signer_test.shutdown(); } @@ -3547,7 +3549,7 @@ fn partial_tenure_fork() { } let num_signers = 5; - let max_nakamoto_tenures = 20; + let max_nakamoto_tenures = 30; let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer @@ -3590,6 +3592,22 @@ fn partial_tenure_fork() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required. + if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[6].end_height = 131; + epochs[7].start_height = 131; + epochs[7].end_height = 166; + epochs[8].start_height = 166; + } else { + panic!("Expected epochs to be set"); + } }, Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), None, @@ -3663,8 +3681,8 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { - if btc_blocks_mined > max_nakamoto_tenures { + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3820,7 +3838,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); - continue; + break; } else { panic!("Failed to submit tx: {}", e); } @@ -3934,33 +3952,41 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let send_fee = 180; let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let short_timeout_secs = 20; let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(20); signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= 
blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout_secs, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N to be mined"); sender_nonce += 1; let info_after = signer_test.stacks_client.get_peer_info().unwrap(); assert_eq!( @@ -3970,15 +3996,17 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + signer_test + .wait_for_block_acceptance( + short_timeout_secs, + &block_n.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure it's marked globally rejected - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .take(num_signers / 2) - .collect(); + let rejecting_signers: Vec<_> = all_signers.iter().cloned().take(num_signers / 2).collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -3988,42 +4016,13 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1"); - let start_time = Instant::now(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - loop { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::<Vec<_>>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + // We cannot guarantee that ALL signers will reject due to the testing directive, as we may hit the majority first. So only assert that up to the threshold number rejected + signer_test + .wait_for_block_rejections(short_timeout_secs, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); let info_after = signer_test.stacks_client.get_peer_info().unwrap(); @@ -4039,13 +4038,17 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < 
short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout_secs, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) + }) + .expect("Timed out waiting for stacks block N+1' to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4055,14 +4058,6 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info_before.stacks_tip_height + 1 ); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let start_time = Instant::now(); - while test_observer::get_mined_nakamoto_blocks().last().unwrap() == block_n_1 { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1_prime = nakamoto_blocks.last().unwrap(); assert_eq!( @@ -4070,6 +4065,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n_1); + // Verify that all signers accepted the new block proposal + signer_test + .wait_for_block_acceptance( + short_timeout_secs, + &block_n_1_prime.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] @@ -4104,37 +4107,48 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let send_amt = 100; let send_fee = 180; let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + + // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { - let info_after = signer_test + + wait_for(short_timeout, || { + Ok(signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for N to be mined and processed"); - sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4143,27 
+4157,22 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); - // Ensure that the block was accepted globally so the stacks tip has not advanced to N + // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block to ensure it is marked globally accepted - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted + let rejecting_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 3 / 10) .collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4171,58 +4180,34 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); - let start_time = Instant::now(); + + // submit a tx so that the miner will mine a stacks block N+1 let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); - loop { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None 
- } - } - _ => None, - } - }) - .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + .expect("Timed out waiting for stacks block N+1 to be mined"); + + signer_test + .wait_for_block_rejections(short_timeout, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); + // Assert the block was mined let info_after = signer_test .stacks_client @@ -4233,23 +4218,23 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_1.signer_signature_hash, + &all_signers[num_signers * 3 / 10 + 1..], + ) + .expect("Timed out waiting for block acceptance of N+1"); + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4257,18 +4242,21 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(Vec::new()); + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout.as_secs(), || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for stacks block N+2 to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4277,20 +4265,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_2 = 
nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_2.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+2"); } #[test] @@ -4330,8 +4318,13 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::<Vec<_>>(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); @@ -4340,13 +4333,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4355,27 +4350,24 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }) .expect("Timed out waiting for block to be mined and processed"); - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make more than 70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected - let ignoring_signers: Vec<_> = signer_test - .signer_stacks_private_keys + let ignoring_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 7 / 10) .collect(); TEST_IGNORE_ALL_BLOCK_PROPOSALS @@ -4384,16 +4376,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .replace(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4413,6 +4408,7 @@ fn 
reorg_locally_accepted_blocks_across_tenures_succeeds() { Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4420,13 +4416,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); assert_eq!(info_after, info_before); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -4438,23 +4435,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4471,15 +4464,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4489,6 +4473,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] @@ -4736,7 +4725,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. 
"); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing singers to broadcast block N+1 to the miner"); + info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); // Assert the N+1' block was rejected From b734407d50cbbad6f41a128a2d0ae766cb6dac6a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 23 Sep 2024 13:26:26 -0700 Subject: [PATCH 670/910] CRC: add response testing and fix corresponding bug Signed-off-by: Jacinta Ferrant --- stackslib/src/net/api/getsigner.rs | 4 +- stackslib/src/net/api/tests/getsigner.rs | 53 +++++++++++++++++++ .../src/tests/nakamoto_integrations.rs | 9 ++-- 3 files changed, 60 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs index 90bcc796bfb..a09c051b24c 100644 --- a/stackslib/src/net/api/getsigner.rs +++ b/stackslib/src/net/api/getsigner.rs @@ -151,7 +151,7 @@ impl RPCRequestHandler for GetSignerRequestHandler { ) }); - let response = match result { + let blocks_signed = match result { Ok(response) => response, Err(error) => { return StacksHttpResponse::new_error( @@ -163,6 +163,8 @@ impl RPCRequestHandler for GetSignerRequestHandler { } }; + let response = GetSignerResponse { blocks_signed }; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); let body = HttpResponseContents::try_from_json(&response)?; diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index 92e30057d7e..ffaa486f27c 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -94,3 +94,56 @@ fn test_try_parse_request() { } } } + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_observer = TestEventObserver::new(); + let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + // Copy pasta of the test setup values + let cycle_num = 5; + let public_key = StacksPublicKey::from_hex( + "0243311589af63c2adda04fcd7792c038a05c12a4fe40351b3eb1612ff6b2e5a0e", + ) + .unwrap(); + + let random_private_key = StacksPrivateKey::new(); + let random_public_key = StacksPublicKey::from_private(&random_private_key); + + let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); + + let mut requests = vec![]; + + // Query existing signer + let info = StacksHttpRequest::new_getsigner( + addr.into(), + &public_key, + cycle_num, + TipRequest::SpecificTip(nakamoto_chain_tip), + ); + requests.push(info); + + // query random signer that doesn't exist + let request = StacksHttpRequest::new_getsigner( + addr.into(), + &random_public_key, + cycle_num, + TipRequest::SpecificTip(nakamoto_chain_tip), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // Existing signer + let response = responses.remove(0); + info!("response: {:?}", &response); + let signer_response = response.decode_signer().unwrap(); + assert_eq!(signer_response.blocks_signed, 40); + + // Signer doesn't exist so it should not have signed anything + let response = responses.remove(0); + info!("response: {:?}", &response); + let signer_response = response.decode_signer().unwrap(); + assert_eq!(signer_response.blocks_signed, 0); +} diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 51e73751eb1..4964c199cf8 100644 --- 
a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -67,6 +67,7 @@ use stacks::core::{ use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; use stacks::net::api::get_tenures_fork_info::TenureForkingInfo; +use stacks::net::api::getsigner::GetSignerResponse; use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, @@ -8555,11 +8556,9 @@ fn v3_signer_api_endpoint() { info!("Send request: GET {url}"); reqwest::blocking::get(url) .unwrap_or_else(|e| panic!("GET request failed: {e}")) - .text() - .inspect(|response| info!("Recieved response: GET {url} -> {response}")) - .expect("Empty response") - .parse::() - .unwrap_or_else(|e| panic!("Failed to parse response as `u64`: {e}")) + .json::() + .unwrap() + .blocks_signed }; // Check reward cycle 1, should be 0 (pre-nakamoto) From 3d9e13ab9d7fb625275105175f69cc9df7f1ca6d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 23 Sep 2024 19:21:52 -0400 Subject: [PATCH 671/910] docs: fix openapi.yaml --- docs/rpc/openapi.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 611ec1cb7b1..c4dd06721ce 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -731,7 +731,6 @@ paths: description: Hex-encoded compressed Secp256k1 public key of signer schema: type: string - parameters: - name: cycle_number in: path required: true From c8cda2945aa1dcb34f0669278c0cd7c73006cba8 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 24 Sep 2024 15:52:47 +0200 Subject: [PATCH 672/910] fix: change pox-set-offset behavior to match the correct order --- pox-locking/src/events.rs | 5 +++-- stackslib/src/chainstate/stacks/boot/pox_4_tests.rs | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 0a1dc9d3c47..f757b549132 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -117,9 +117,10 @@ fn create_event_info_data_code( // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not // // "is-in-next-pox-set" == effective-height <= (reward-length - prepare-length) - // "<=" since the txs of the first block of the prepare phase are considered in the pox-set + // "<" since the txs of the first block of the prepare phase are NOT considered in the pox-set, + // the pox-set is locked in the first block of the prepare phase, before the transactions of that block are run. 
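To make the boundary concrete, here is a minimal Rust sketch of the check that the Clarity expression below encodes (the cycle constants in the example are illustrative, not values taken from pox-4):

fn is_in_next_pox_set(
    burn_height: u64,
    first_block_height: u64,
    cycle_length: u64,
    prepare_length: u64,
) -> bool {
    // Position of the burn height within its reward cycle.
    let effective_height = (burn_height - first_block_height) % cycle_length;
    // Strict "<": the pox-set is locked in the first prepare-phase block,
    // before that block's transactions run, so a stack-stx mined exactly at
    // the prepare-phase start is too late for the next set.
    effective_height < cycle_length - prepare_length
}

fn main() {
    // Example: 20-block cycles with a 5-block prepare phase, starting at height 0.
    assert!(is_in_next_pox_set(14, 0, 20, 5)); // last reward-phase block: in time
    assert!(!is_in_next_pox_set(15, 0, 20, 5)); // first prepare-phase block: too late
}

Under the old "<=" comparison the second case would have been accepted, which is exactly the off-by-one this commit removes.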
let pox_set_offset = r#" - (pox-set-offset (if (<= + (pox-set-offset (if (< (mod (- %height% (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length)) (- (var-get pox-reward-cycle-length) (var-get pox-prepare-cycle-length)) ) u0 u1)) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 0968cc4de3f..8fee5bd5b39 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -2462,7 +2462,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } - // produce blocks until the we're 1 before the prepare phase (first block of prepare-phase not yet mined) + // produce blocks until we're 1 before the prepare phase (first block of prepare-phase not yet mined, whatever txs we create now won't be included in the reward set) while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height + 1) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2519,7 +2519,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { let steph_stacking_receipt = txs.get(&steph_stacking.txid()).unwrap().clone(); assert_eq!(steph_stacking_receipt.events.len(), 2); let steph_stacking_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_cycle)), + ("start-cycle-id", Value::UInt(next_cycle + 1)), // +1 because steph stacked in the block before the prepare phase (too late) ( "end-cycle-id", Value::some(Value::UInt(next_cycle + steph_lock_period)).unwrap(), From 476f47f2383c11454596ed670f93d3832e005317 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 24 Sep 2024 16:24:13 +0200 Subject: [PATCH 673/910] chore: update comment --- pox-locking/src/events.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index f757b549132..04e3955dad4 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -116,7 +116,7 @@ fn create_event_info_data_code( // If a given burn block height is in a prepare phase, then the stacker will be in the _next_ reward cycle, so bump the cycle by 1 // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not // - // "is-in-next-pox-set" == effective-height <= (reward-length - prepare-length) + // "is-in-next-pox-set" == effective-height < (reward-length - prepare-length) // "<" since the txs of the first block of the prepare phase are NOT considered in the pox-set, // the pox-set is locked in the first block of the prepare phase, before the transactions of that block are run. 
let pox_set_offset = r#" From 2f64e1fa7fe19bd8035fc1c548b9f1451bb51f62 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 24 Sep 2024 16:24:51 +0200 Subject: [PATCH 674/910] chore: update comment --- pox-locking/src/events.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 04e3955dad4..2e80ff87618 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -116,7 +116,7 @@ fn create_event_info_data_code( // If a given burn block height is in a prepare phase, then the stacker will be in the _next_ reward cycle, so bump the cycle by 1 // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not // - // "is-in-next-pox-set" == effective-height < (reward-length - prepare-length) + // "is-in-next-pox-set" == effective-height < (cycle-length - prepare-length) // "<" since the txs of the first block of the prepare phase are NOT considered in the pox-set, // the pox-set is locked in the first block of the prepare phase, before the transactions of that block are run. let pox_set_offset = r#" From d1281115b8bf7523543206b3711eae59a48fe015 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 24 Sep 2024 11:35:51 -0400 Subject: [PATCH 675/910] test: add `BITCOIND_TEST=1` for running tests in VSCode --- .vscode/settings.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index e648ed3e542..ab8db95f5d9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,7 @@ { "lldb.adapterType": "native", - "lldb.launch.sourceLanguages": ["rust"] + "lldb.launch.sourceLanguages": ["rust"], + "rust-analyzer.runnables.extraEnv": { + "BITCOIND_TEST": "1" + } } \ No newline at end of file From 3260a2ccc32d229db36bf62c09e55c7658769e0e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 24 Sep 2024 09:18:08 -0700 Subject: [PATCH 676/910] Add signing_in_0th_tenure_of_reward_cycle test Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 113 +++++++++++++++++++++ 2 files changed, 114 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a6d4dff460c..8986594e0ea 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -114,6 +114,7 @@ jobs: - tests::signer::v0::partial_tenure_fork - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover + - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9f9f8d1a41e..9dd8b961654 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -35,6 +35,7 @@ use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, S use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; +use stacks::net::api::getsigner::GetSignerResponse; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; 
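The test added below drives the /v3/signer/{pubkey}/{reward_cycle} endpoint introduced earlier in this series. As a rough standalone sketch of the same query (the path and the blocks_signed field come from the patch; the helper name and the use of serde_json::Value instead of GetSignerResponse are illustrative assumptions, and the reqwest "blocking" and "json" features are required):

fn get_blocks_signed(http_origin: &str, pubkey_hex: &str, reward_cycle: u64) -> u64 {
    // GET /v3/signer/{pubkey}/{reward_cycle} and extract `blocks_signed`.
    let url = format!("{http_origin}/v3/signer/{pubkey_hex}/{reward_cycle}");
    reqwest::blocking::get(&url)
        .expect("GET request failed")
        .json::<serde_json::Value>()
        .expect("response was not valid JSON")
        .get("blocks_signed")
        .and_then(|v| v.as_u64())
        .expect("response had no numeric blocks_signed field")
}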
@@ -4813,3 +4814,115 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n); } + +#[test] +#[ignore] +/// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle. +/// This ensures there is no race condition in the /v2/pox endpoint which could prevent it from updating +/// on time, possibly triggering an "off-by-one"-like behaviour in the 0th tenure. +/// +fn signing_in_0th_tenure_of_reward_cycle() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + ); + let signer_public_keys = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::<Vec<_>>(); + let long_timeout = Duration::from_secs(200); + signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + let next_reward_cycle = curr_reward_cycle + 1; + // Mine until the boundary of the first full Nakamoto reward cycle (epoch 3 starts in the middle of one) + let next_reward_cycle_height_boundary = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_sub(1); + + info!("------------------------- Advancing to {next_reward_cycle} Boundary at Block {next_reward_cycle_height_boundary} -------------------------"); + signer_test.run_until_burnchain_height_nakamoto( + long_timeout, + next_reward_cycle_height_boundary, + num_signers, + ); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { + let url = &format!( + "{http_origin}/v3/signer/{pk}/{reward_cycle}", + pk = pubkey.to_hex() + ); + info!("Send request: GET {url}"); + reqwest::blocking::get(url) + .unwrap_or_else(|e| panic!("GET request failed: {e}")) + .json::<GetSignerResponse>() + .unwrap() + .blocks_signed + }; + + assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + assert_eq!(blocks_signed, 0); + } + + info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + assert_eq!(blocks_signed, 0); + } + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a stacks block in the 0th block of the new reward cycle + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let _tx = submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + Ok(signer_test + .running_nodes + .nakamoto_blocks_mined 
.load(Ordering::SeqCst) + > blocks_before) + }) + .unwrap(); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + assert_eq!(blocks_signed, 1); + } + assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); +} From ca392482a17b9e9183ec4dbc09352f0f43e5bec3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Sep 2024 13:45:59 -0400 Subject: [PATCH 677/910] fix: query nakamoto burnchain operations by tenure-start block ID, and only do so on tenure-start (not tenure-extend) --- stackslib/src/chainstate/nakamoto/mod.rs | 174 +++++++++++++++++++++-- 1 file changed, 160 insertions(+), 14 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 53b1c485551..e97fefafffb 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; @@ -104,7 +104,9 @@ use crate::clarity_vm::clarity::{ ClarityInstance, ClarityTransactionConnection, Error as ClarityError, PreCommitClarityBlock, }; use crate::clarity_vm::database::SortitionDBRef; -use crate::core::{BOOT_BLOCK_HASH, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD}; +use crate::core::{ + BOOT_BLOCK_HASH, BURNCHAIN_TX_SEARCH_WINDOW, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD, +}; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; use crate::util_lib::boot; @@ -3251,14 +3253,18 @@ impl NakamotoChainState { if let Some(block_reward) = block_reward { StacksChainState::insert_miner_payment_schedule(headers_tx.deref_mut(), block_reward)?; } - StacksChainState::store_burnchain_txids( - headers_tx.deref(), - &index_block_hash, - burn_stack_stx_ops, - burn_transfer_stx_ops, - burn_delegate_stx_ops, - burn_vote_for_aggregate_key_ops, - )?; + + // NOTE: this is a no-op if the block isn't a tenure-start block + if new_tenure { + StacksChainState::store_burnchain_txids( + headers_tx.deref(), + &index_block_hash, + burn_stack_stx_ops, + burn_transfer_stx_ops, + burn_delegate_stx_ops, + burn_vote_for_aggregate_key_ops, + )?; + } if let Some(matured_miner_payouts) = mature_miner_payouts_opt { let rewarded_miner_block_id = StacksBlockId::new( @@ -3360,6 +3366,145 @@ impl NakamotoChainState { .map_err(ChainstateError::from) } + /// Find all of the TXIDs of Stacks-on-burnchain operations processed in the given Stacks fork. + /// In Nakamoto, we index these TXIDs by the tenure-start block ID + pub(crate) fn get_burnchain_txids_in_ancestor_tenures( + conn: &mut SDBI, + tip_consensus_hash: &ConsensusHash, + tip_block_hash: &BlockHeaderHash, + search_window: u64, + ) -> Result, ChainstateError> { + let tip = StacksBlockId::new(tip_consensus_hash, tip_block_hash); + let mut cursor = tip_consensus_hash.clone(); + let mut ret = HashSet::new(); + for _ in 0..search_window { + let Some(tenure_start_block_id) = conn.get_tenure_start_block_id(&tip, &cursor)? else { + break; + }; + let txids = StacksChainState::get_burnchain_txids_for_block( + conn.sqlite(), + &tenure_start_block_id, + )?; + ret.extend(txids.into_iter()); + + let Some(parent_tenure_id) = conn.get_parent_tenure_consensus_hash(&tip, &cursor)? 
+ else { + break; + }; + + cursor = parent_tenure_id; + } + Ok(ret) + } + + /// Get all Stacks-on-burnchain operations that we haven't processed yet + pub(crate) fn get_stacks_on_burnchain_operations( + conn: &mut SDBI, + parent_consensus_hash: &ConsensusHash, + parent_block_hash: &BlockHeaderHash, + sortdb_conn: &Connection, + burn_tip: &BurnchainHeaderHash, + burn_tip_height: u64, + ) -> Result< + ( + Vec, + Vec, + Vec, + Vec, + ), + ChainstateError, + > { + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb_conn, burn_tip_height)? + .expect("FATAL: no epoch defined for current burnchain tip height"); + + // only consider transactions in Stacks 3.0 + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { + return Ok((vec![], vec![], vec![], vec![])); + } + + let epoch_start_height = cur_epoch.start_height; + + let search_window: u8 = + if epoch_start_height + u64::from(BURNCHAIN_TX_SEARCH_WINDOW) > burn_tip_height { + burn_tip_height + .saturating_sub(epoch_start_height) + .try_into() + .expect("FATAL: search window exceeds u8") + } else { + BURNCHAIN_TX_SEARCH_WINDOW + }; + + debug!( + "Search the last {} sortitions for burnchain-hosted stacks operations before {} ({})", + search_window, burn_tip, burn_tip_height + ); + let ancestor_burnchain_header_hashes = SortitionDB::get_ancestor_burnchain_header_hashes( + sortdb_conn, + burn_tip, + search_window.into(), + )?; + let processed_burnchain_txids = + NakamotoChainState::get_burnchain_txids_in_ancestor_tenures( + conn, + parent_consensus_hash, + parent_block_hash, + search_window.into(), + )?; + + // Find the *new* transactions -- the ones that we *haven't* seen in this Stacks + // fork yet. Note that we search for the ones that we have seen by searching back + // `BURNCHAIN_TX_SEARCH_WINDOW` tenures, whose sortitions may span more + // than `BURNCHAIN_TX_SEARCH_WINDOW` burnchain blocks. The inclusion of txids for + // burnchain transactions in the latter query is not a problem, because these txids + // are used to *exclude* transactions from the last `BURNCHAIN_TX_SEARCH_WINDOW` + // burnchain blocks. These excluded txids, if they were mined outside of this + // window, are *already* excluded. 
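Stripped of the database plumbing, the exclusion step described above is a plain set difference keyed on txid. A condensed sketch of the pattern (txids reduced to strings and ops to a generic type, not the actual op structs):

use std::collections::HashSet;

// Keep only the candidate ops whose txid has not already been applied in
// one of the recent tenures of the current Stacks fork.
fn filter_new_ops<T>(candidates: Vec<(String, T)>, processed: &HashSet<String>) -> Vec<T> {
    candidates
        .into_iter()
        .filter(|(txid, _)| !processed.contains(txid))
        .map(|(_, op)| op)
        .collect()
}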
+ + let mut all_stacking_burn_ops = vec![]; + let mut all_transfer_burn_ops = vec![]; + let mut all_delegate_burn_ops = vec![]; + let mut all_vote_for_aggregate_key_ops = vec![]; + + // go from oldest burn header hash to newest + for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { + let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh)?; + let transfer_ops = SortitionDB::get_transfer_stx_ops(sortdb_conn, ancestor_bhh)?; + let delegate_ops = SortitionDB::get_delegate_stx_ops(sortdb_conn, ancestor_bhh)?; + let vote_for_aggregate_key_ops = + SortitionDB::get_vote_for_aggregate_key_ops(sortdb_conn, ancestor_bhh)?; + + for stacking_op in stacking_ops.into_iter() { + if !processed_burnchain_txids.contains(&stacking_op.txid) { + all_stacking_burn_ops.push(stacking_op); + } + } + + for transfer_op in transfer_ops.into_iter() { + if !processed_burnchain_txids.contains(&transfer_op.txid) { + all_transfer_burn_ops.push(transfer_op); + } + } + + for delegate_op in delegate_ops.into_iter() { + if !processed_burnchain_txids.contains(&delegate_op.txid) { + all_delegate_burn_ops.push(delegate_op); + } + } + + for vote_op in vote_for_aggregate_key_ops.into_iter() { + if !processed_burnchain_txids.contains(&vote_op.txid) { + all_vote_for_aggregate_key_ops.push(vote_op); + } + } + } + Ok(( + all_stacking_burn_ops, + all_transfer_burn_ops, + all_delegate_burn_ops, + all_vote_for_aggregate_key_ops, + )) + } + /// Begin block-processing and return all of the pre-processed state within a /// `SetupBlockResult`. /// @@ -3432,10 +3577,11 @@ impl NakamotoChainState { }; let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_ops) = - if new_tenure || tenure_extend { - StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( - chainstate_tx, - &parent_index_hash, + if new_tenure { + NakamotoChainState::get_stacks_on_burnchain_operations( + chainstate_tx.as_tx(), + &parent_consensus_hash, + &parent_header_hash, sortition_dbconn.sqlite_conn(), &burn_header_hash, burn_header_height.into(), From 4e5518114e39cff6b0dcaf08b42587d0e8f9555d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Sep 2024 13:46:33 -0400 Subject: [PATCH 678/910] chore: document the 2.x-nature of loading burnchain operations for Stacks --- stackslib/src/chainstate/stacks/db/mod.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index dfba727a3ec..857bfaead42 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -867,6 +867,8 @@ const CHAINSTATE_SCHEMA_3: &'static [&'static str] = &[ // proessed r#" CREATE TABLE burnchain_txids( + -- in epoch 2.x, this is the index block hash of the Stacks block. + -- in epoch 3.x, this is the index block hash of the tenure-start block. index_block_hash TEXT PRIMARY KEY, -- this is a JSON-encoded list of txids txids TEXT NOT NULL @@ -2494,7 +2496,7 @@ impl StacksChainState { } /// Get the burnchain txids for a given index block hash - fn get_burnchain_txids_for_block( + pub(crate) fn get_burnchain_txids_for_block( conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result, Error> { @@ -2516,6 +2518,7 @@ impl StacksChainState { } /// Get the txids of the burnchain operations applied in the past N Stacks blocks. 
+ /// Only works for epoch 2.x pub fn get_burnchain_txids_in_ancestors( conn: &Connection, index_block_hash: &StacksBlockId, @@ -2532,7 +2535,10 @@ impl StacksChainState { Ok(ret) } - /// Store all on-burnchain STX operations' txids by index block hash + /// Store all on-burnchain STX operations' txids by index block hash. + /// `index_block_hash` is the tenure-start block. + /// * For epoch 2.x, this is simply the block ID + /// * for epoch 3.x and later, this is the first block in the tenure. pub fn store_burnchain_txids( tx: &DBTx, index_block_hash: &StacksBlockId, From 8ddddae02251dbc6ad5e8a6ddc6cb1e2cbab046a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 24 Sep 2024 11:11:36 -0700 Subject: [PATCH 679/910] Add positive integer for pox_sync_sample_secs and wait_on_interim_blocks for multiple_miners* tests Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 3 -- stacks-signer/src/client/stacks_client.rs | 16 +++--- .../src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/mod.rs | 12 ++--- testnet/stacks-node/src/tests/signer/v0.rs | 49 ++++++++++--------- 5 files changed, 37 insertions(+), 45 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 5dedfe82e3b..dd8b1527cc6 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -89,9 +89,6 @@ pub enum ClientError { /// Invalid response from the stacks node #[error("Invalid response from the stacks node: {0}")] InvalidResponse(String), - /// A successful sortition has not occurred yet - #[error("The Stacks chain has not processed any successful sortitions yet")] - NoSortitionOnChain, /// A successful sortition's info response should be parseable into a SortitionState #[error("A successful sortition's info response should be parseable into a SortitionState")] UnexpectedSortitionInfo, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7b490144fce..ea848352923 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -33,7 +33,7 @@ use blockstack_lib::net::api::get_tenures_fork_info::{ use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; -use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersResponse}; +use blockstack_lib::net::api::getstackers::GetStackersResponse; use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postblock_v3; @@ -84,6 +84,7 @@ pub struct StacksClient { #[derive(Deserialize)] struct GetStackersErrorResp { + #[allow(dead_code)] err_type: String, err_msg: String, } @@ -655,14 +656,11 @@ impl StacksClient { warn!("Failed to parse the GetStackers error response: {e}"); backoff::Error::permanent(e.into()) })?; - if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)) - } else { - warn!("Got error response ({status}): {}", error_data.err_msg); - Err(backoff::Error::permanent(ClientError::RequestFailure( - status, - ))) - } + + warn!("Got error response ({status}): {}", error_data.err_msg); + Err(backoff::Error::permanent(ClientError::RequestFailure( + status, + ))) }; let stackers_response = retry_with_exponential_backoff::<_, ClientError, 
GetStackersResponse>(send_request)?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b95eed7fa5f..665d8804579 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -584,7 +584,7 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); conf.burnchain.poll_time_secs = 1; - conf.node.pox_sync_sample_secs = 0; + conf.node.pox_sync_sample_secs = 5; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465e..421ffbb53f3 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -375,14 +375,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Tue, 24 Sep 2024 13:38:17 -0700 Subject: [PATCH 680/910] feat: add integration test for stx-transfer and delegate burn ops in Nakamoto --- .github/workflows/bitcoin-tests.yml | 2 +- .../src/tests/nakamoto_integrations.rs | 170 +++++++++++++++++- 2 files changed, 167 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a6d4dff460c..839ada1ef91 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -114,7 +114,7 @@ jobs: - tests::signer::v0::partial_tenure_fork - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover - - tests::nakamoto_integrations::stack_stx_burn_op_integration_test + - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state - tests::nakamoto_integrations::check_block_times diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b95eed7fa5f..bd669c7180c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -34,7 +34,8 @@ use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, PreStxOp, StackStxOp, VoteForAggregateKeyOp, + BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp, + VoteForAggregateKeyOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::OnChainRewardSetProvider; @@ -4012,7 +4013,17 @@ #[test] #[ignore] -fn stack_stx_burn_op_integration_test() { +/// Test out various burn operations being processed in Nakamoto. +/// +/// There are 4 burn ops submitted: +/// +/// - stx-transfer +/// - delegate-stx +/// - stack-stx (with a signer key) +/// +/// Additionally, a stack-stx without a signer key is submitted, which should +/// not be processed in Nakamoto. 
+fn burn_ops_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -4027,10 +4038,26 @@ fn stack_stx_burn_op_integration_test() { let signer_sk_2 = Secp256k1PrivateKey::new(); let signer_addr_2 = tests::to_addr(&signer_sk_2); + let stacker_sk_1 = Secp256k1PrivateKey::new(); + let stacker_addr_1 = tests::to_addr(&stacker_sk_1); + + let stacker_sk_2 = Secp256k1PrivateKey::new(); + let stacker_addr_2 = tests::to_addr(&stacker_sk_2); + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); let stacker_sk = setup_stacker(&mut naka_conf); + // Add the initial balances to the other accounts + naka_conf.add_initial_balance( + PrincipalData::from(stacker_addr_1.clone()).to_string(), + 1000000, + ); + naka_conf.add_initial_balance( + PrincipalData::from(stacker_addr_2.clone()).to_string(), + 1000000, + ); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -4135,7 +4162,49 @@ fn stack_stx_burn_op_integration_test() { .is_ok(), "Pre-stx operation should submit successfully" ); - info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); + + let mut miner_signer_3 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); + info!("Submitting third pre-stx op"); + let pre_stx_op_3 = PreStxOp { + output: stacker_addr_1.clone(), + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + }; + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::PreStx(pre_stx_op_3), + &mut miner_signer_3, + 1 + ) + .is_ok(), + "Pre-stx operation should submit successfully" + ); + + info!("Submitting fourth pre-stx op"); + let mut miner_signer_4 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); + let pre_stx_op_4 = PreStxOp { + output: stacker_addr_2.clone(), + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + }; + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::PreStx(pre_stx_op_4), + &mut miner_signer_4, + 1 + ) + .is_ok(), + "Pre-stx operation should submit successfully" + ); + info!("Submitted 4 pre-stx ops at block {block_height}, mining a few blocks..."); // Mine until the next prepare phase let block_height = btc_regtest_controller.get_headers_height(); @@ -4216,6 +4285,8 @@ fn stack_stx_burn_op_integration_test() { let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); + let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1.clone(), false); + let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2.clone(), false); info!( "Before stack-stx op, signer 1 total: {}", @@ -4247,6 +4318,55 @@ fn stack_stx_burn_op_integration_test() { info!("Signer 1 addr: {}", signer_addr_1.to_b58()); info!("Signer 2 addr: {}", signer_addr_2.to_b58()); + info!("Submitting transfer STX op"); + let transfer_stx_op = TransferStxOp { + sender: stacker_addr_1.clone(), + recipient: stacker_addr_2.clone(), + transfered_ustx: 10000, + memo: vec![], + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + }; + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::TransferStx(transfer_stx_op), 
+ &mut stacker_burnop_signer_1, + 1 + ) + .is_ok(), + "Transfer STX operation should submit successfully" + ); + + info!("Submitting delegate STX op"); + let del_stx_op = DelegateStxOp { + sender: stacker_addr_2.clone(), + delegate_to: stacker_addr_1.clone(), + reward_addr: None, + delegated_ustx: 100_000, + // to be filled in + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + until_burn_height: None, + }; + + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::DelegateStx(del_stx_op), + &mut stacker_burnop_signer_2, + 1 + ) + .is_ok(), + "Delegate STX operation should submit successfully" + ); + let pox_info = get_pox_info(&http_origin).unwrap(); let min_stx = pox_info.next_cycle.min_threshold_ustx; @@ -4318,6 +4438,8 @@ fn stack_stx_burn_op_integration_test() { } let mut stack_stx_found = false; + let mut transfer_stx_found = false; + let mut delegate_stx_found = false; let mut stack_stx_burn_op_tx_count = 0; let blocks = test_observer::get_blocks(); info!("stack event observer num blocks: {:?}", blocks.len()); @@ -4332,6 +4454,45 @@ fn stack_stx_burn_op_integration_test() { if raw_tx == "0x00" { info!("Found a burn op: {:?}", tx); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); + if burnchain_op.contains_key("transfer_stx") { + let transfer_stx_obj = burnchain_op.get("transfer_stx").unwrap(); + let sender_obj = transfer_stx_obj.get("sender").unwrap(); + let sender = sender_obj.get("address").unwrap().as_str().unwrap(); + let recipient_obj = transfer_stx_obj.get("recipient").unwrap(); + let recipient = recipient_obj.get("address").unwrap().as_str().unwrap(); + let transfered_ustx = transfer_stx_obj + .get("transfered_ustx") + .unwrap() + .as_u64() + .unwrap(); + assert_eq!(sender, stacker_addr_1.to_string()); + assert_eq!(recipient, stacker_addr_2.to_string()); + assert_eq!(transfered_ustx, 10000); + info!( + "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", + sender, recipient, transfered_ustx + ); + transfer_stx_found = true; + continue; + } + if burnchain_op.contains_key("delegate_stx") { + info!("Got delegate STX op: {:?}", burnchain_op); + let delegate_stx_obj = burnchain_op.get("delegate_stx").unwrap(); + let sender_obj = delegate_stx_obj.get("sender").unwrap(); + let sender = sender_obj.get("address").unwrap().as_str().unwrap(); + let delegate_to_obj = delegate_stx_obj.get("delegate_to").unwrap(); + let delegate_to = delegate_to_obj.get("address").unwrap().as_str().unwrap(); + let delegated_ustx = delegate_stx_obj + .get("delegated_ustx") + .unwrap() + .as_u64() + .unwrap(); + assert_eq!(sender, stacker_addr_2.to_string()); + assert_eq!(delegate_to, stacker_addr_1.to_string()); + assert_eq!(delegated_ustx, 100_000); + delegate_stx_found = true; + continue; + } if !burnchain_op.contains_key("stack_stx") { warn!("Got unexpected burnchain op: {:?}", burnchain_op); panic!("unexpected btc transaction type"); @@ -4378,7 +4539,8 @@ fn stack_stx_burn_op_integration_test() { stack_stx_burn_op_tx_count, 1, "Stack-stx tx without a signer_key shouldn't have been submitted" ); - + assert!(transfer_stx_found, "Expected transfer STX op"); + assert!(delegate_stx_found, "Expected delegate STX op"); let sortdb = btc_regtest_controller.sortdb_mut(); let sortdb_conn = sortdb.conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb_conn).unwrap(); From ed09847f9727b4d6de467eabfb575b624b7b3ca7 Mon Sep 17 00:00:00 2001 From: Jude Nelson 
Date: Tue, 24 Sep 2024 17:21:37 -0400 Subject: [PATCH 681/910] chore: add unit tests for all burn ops in nakamoto --- .../chainstate/nakamoto/coordinator/tests.rs | 377 +++++++++++++++++- 1 file changed, 373 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index e56e55754cc..cf016adb7d0 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -28,17 +28,20 @@ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, SIGNER_SLOTS_PER_USER, }; use stacks_common::types::chainstate::{ - StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::{Address, StacksEpoch, StacksEpochId}; +use stacks_common::types::{Address, StacksEpoch, StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; -use crate::burnchains::PoxConstants; +use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; +use crate::chainstate::burn::operations::{ + BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, StackStxOp, TransferStxOp, + VoteForAggregateKeyOp, +}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::fault_injection::*; @@ -58,6 +61,7 @@ use crate::chainstate::stacks::boot::test::{ }; use crate::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; +use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, @@ -2595,3 +2599,368 @@ fn process_next_nakamoto_block_deadlock() { // Wait for the blocker and miner threads to finish miner_thread.join().unwrap(); } + +/// Test stacks-on-burnchain op discovery and usage +#[test] +fn test_stacks_on_burnchain_ops() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let recipient_private_key = StacksPrivateKey::from_seed(&[3]); + let recipient_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&recipient_private_key)], + ) + .unwrap(); + + let agg_private_key = StacksPrivateKey::from_seed(&[4]); + let agg_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&agg_private_key)], + ) + .unwrap(); + + // make enough signers and signing keys so we can create a block and a malleablized block that + // are both valid + let (mut test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 
1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); + let observer = TestEventObserver::new(); + let mut peer = boot_nakamoto( + function_name!(), + vec![(addr.into(), 100_000_000)], + &mut test_signers, + &test_stackers, + Some(&observer), + ); + + let mut all_blocks: Vec = vec![]; + let mut all_burn_ops = vec![]; + let mut consensus_hashes = vec![]; + let mut fee_counts = vec![]; + let stx_miner_key = peer.miner.nakamoto_miner_key(); + + let mut extra_burn_ops = vec![]; + let mut bitpatterns = HashMap::new(); // map consensus hash to txid bit pattern + + let cur_reward_cycle = peer + .config + .burnchain + .block_height_to_reward_cycle(peer.get_burn_block_height()) + .unwrap(); + + peer.refresh_burnchain_view(); + let first_stacks_height = peer.network.stacks_tip.height; + + for i in 0..10 { + peer.refresh_burnchain_view(); + let block_height = peer.get_burn_block_height(); + + // parent tip + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); + + let (mut burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + + let mut new_burn_ops = vec![]; + new_burn_ops.push(BlockstackOperationType::DelegateStx(DelegateStxOp { + sender: addr.clone(), + delegate_to: recipient_addr.clone(), + reward_addr: None, + delegated_ustx: 1, + until_burn_height: None, + + // mocked + txid: Txid([i as u8; 32]), + vtxindex: 1, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + })); + new_burn_ops.push(BlockstackOperationType::StackStx(StackStxOp { + sender: addr.clone(), + reward_addr: PoxAddress::Standard( + recipient_addr.clone(), + Some(AddressHashMode::SerializeP2PKH), + ), + stacked_ustx: 1, + num_cycles: 1, + signer_key: Some(StacksPublicKeyBuffer::from_public_key( + &StacksPublicKey::from_private(&recipient_private_key), + )), + max_amount: Some(1), + auth_id: Some(i as u32), + + // mocked + txid: Txid([(i as u8) | 0x80; 32]), + vtxindex: 2, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + })); + new_burn_ops.push(BlockstackOperationType::TransferStx(TransferStxOp { + sender: addr.clone(), + recipient: recipient_addr.clone(), + transfered_ustx: 1, + memo: vec![0x2], + + // mocked + txid: Txid([(i as u8) | 0x40; 32]), + vtxindex: 3, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + })); + new_burn_ops.push(BlockstackOperationType::VoteForAggregateKey( + VoteForAggregateKeyOp { + sender: addr.clone(), + aggregate_key: StacksPublicKeyBuffer::from_public_key( + &StacksPublicKey::from_private(&agg_private_key), + ), + round: i as u32, + reward_cycle: cur_reward_cycle + 1, + signer_index: 1, + signer_key: StacksPublicKeyBuffer::from_public_key(&StacksPublicKey::from_private( + &recipient_private_key, + )), + + // mocked + txid: Txid([(i as u8) | 0xc0; 32]), + vtxindex: 4, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + }, + )); + + extra_burn_ops.push(new_burn_ops.clone()); + burn_ops.append(&mut new_burn_ops); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + bitpatterns.insert(consensus_hash.clone(), i as u8); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let 
coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + // make sure all our burnchain ops are processed and stored. + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let ancestor_burnchain_header_hashes = SortitionDB::get_ancestor_burnchain_header_hashes( + peer.sortdb().conn(), + &burn_tip.burn_header_hash, + 6, + ) + .unwrap(); + let processed_burnchain_txids = + NakamotoChainState::get_burnchain_txids_in_ancestor_tenures( + &mut peer.chainstate().index_conn(), + &stacks_tip_ch, + &stacks_tip_bh, + 6, + ) + .unwrap(); + + let mut expected_burnchain_txids = HashSet::new(); + for j in (i as u64).saturating_sub(6)..i { + expected_burnchain_txids.insert(Txid([j as u8; 32])); + expected_burnchain_txids.insert(Txid([(j as u8) | 0x80; 32])); + expected_burnchain_txids.insert(Txid([(j as u8) | 0x40; 32])); + expected_burnchain_txids.insert(Txid([(j as u8) | 0xc0; 32])); + } + assert_eq!(processed_burnchain_txids, expected_burnchain_txids); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 10 { + let mut txs = vec![]; + + debug!("\n\nProduce block {}\n\n", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + txs.push(stx_transfer); + + let last_block_opt = blocks_so_far + .last() + .as_ref() + .map(|(block, _size, _cost)| block.header.block_id()); + + let mut final_txs = vec![]; + if let Some(last_block) = last_block_opt.as_ref() { + let tenure_extension = tenure_change.extend( + consensus_hash.clone(), + last_block.clone(), + blocks_so_far.len() as u32, + ); + let tenure_extension_tx = + miner.make_nakamoto_tenure_change(tenure_extension.clone()); + final_txs.push(tenure_extension_tx); + } + final_txs.append(&mut txs); + final_txs + } else { + vec![] + } + }, + ); + + let fees = blocks_and_sizes + .iter() + .map(|(block, _, _)| { + block + .txs + .iter() + .map(|tx| tx.get_tx_fee() as u128) + .sum::() + }) + .sum::(); + + consensus_hashes.push(consensus_hash); + fee_counts.push(fees); + let mut blocks: Vec = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // check that our tenure-extends have been getting applied + let (highest_tenure, sort_tip) = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), + ) + .unwrap() + .unwrap(); + (tenure, tip) + }; + + let last_block = blocks.last().as_ref().cloned().unwrap(); + assert_eq!( + highest_tenure.tenure_id_consensus_hash, + last_block.header.consensus_hash + ); + assert_eq!( + highest_tenure.burn_view_consensus_hash, + sort_tip.consensus_hash + ); + assert!(last_block.header.consensus_hash == sort_tip.consensus_hash); + assert_eq!(highest_tenure.coinbase_height, 12 + i); + assert_eq!(highest_tenure.cause, 
TenureChangeCause::Extended); + assert_eq!( + highest_tenure.num_blocks_confirmed, + (blocks.len() as u32) - 1 + ); + + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + // check receipts for burn ops + let mut observed_burn_txids = HashSet::new(); + let observed_blocks = observer.get_blocks(); + for block in observed_blocks.into_iter() { + let block_height = block.metadata.anchored_header.height(); + if block_height < first_stacks_height { + continue; + } + + let mut is_tenure_start = false; + let mut block_burn_txids = HashSet::new(); + for receipt in block.receipts.into_iter() { + match receipt.transaction { + TransactionOrigin::Burn(op) => { + block_burn_txids.insert(op.txid().clone()); + } + TransactionOrigin::Stacks(tx) => { + if let TransactionPayload::TenureChange(txp) = &tx.payload { + if txp.cause == TenureChangeCause::BlockFound { + is_tenure_start = true; + } + } + } + } + } + + // no burnchain blocks processed for non-tenure-start blocks + if !is_tenure_start { + assert_eq!(block_burn_txids.len(), 0); + continue; + } + + // this tenure-start block only processed "new" burnchain ops + let mut expected_burnchain_txids = HashSet::new(); + let bitpattern = *bitpatterns.get(&block.metadata.consensus_hash).unwrap(); + expected_burnchain_txids.insert(Txid([bitpattern; 32])); + expected_burnchain_txids.insert(Txid([bitpattern | 0x80; 32])); + expected_burnchain_txids.insert(Txid([bitpattern | 0x40; 32])); + expected_burnchain_txids.insert(Txid([bitpattern | 0xc0; 32])); + + debug!("At block {}: {:?}", block_height, &block_burn_txids); + debug!("Expected: {:?}", &expected_burnchain_txids); + assert_eq!(block_burn_txids, expected_burnchain_txids); + + observed_burn_txids.extend(block_burn_txids.into_iter()); + } + + // all extra burn ops are represented + for extra_burn_ops_per_block in extra_burn_ops.into_iter() { + for extra_burn_op in extra_burn_ops_per_block.into_iter() { + assert!(observed_burn_txids.contains(&extra_burn_op.txid())); + } + } + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 111 + ); + + peer.check_nakamoto_migration(); + peer.check_malleablized_blocks(all_blocks, 2); +} From 425ba9b6d3cb6f7511e29a9088cff08464db96bd Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 24 Sep 2024 14:46:12 -0700 Subject: [PATCH 682/910] feat: ensure burn ops are included in tenure_change block --- .../src/tests/nakamoto_integrations.rs | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index bd669c7180c..17b829557fc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4044,6 +4044,10 @@ fn burn_ops_integration_test() { let stacker_sk_2 = Secp256k1PrivateKey::new(); let stacker_addr_2 = tests::to_addr(&stacker_sk_2); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let mut sender_nonce = 0; + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); let stacker_sk = setup_stacker(&mut naka_conf); @@ -4057,6 +4061,10 @@ fn burn_ops_integration_test() { PrincipalData::from(stacker_addr_2.clone()).to_string(),
1000000, ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 100_000_000, + ); test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; @@ -4426,7 +4434,8 @@ fn burn_ops_integration_test() { info!("Submitted 2 stack STX ops at height {block_height}, mining a few blocks..."); - // the second block should process the vote, after which the balances should be unchanged + // the second block should process the ops + // Also mine 2 interim blocks to ensure the stack-stx ops are not processed in them for _i in 0..2 { next_block_and_mine_commit( &mut btc_regtest_controller, @@ -4435,6 +4444,29 @@ fn burn_ops_integration_test() { &commits_submitted, ) .unwrap(); + for interim_block_ix in 0..2 { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, 200, &stacker_addr_1.into(), 10000); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + } } let mut stack_stx_found = false; @@ -4449,10 +4481,12 @@ fn burn_ops_integration_test() { "stack event observer num transactions: {:?}", transactions.len() ); - for tx in transactions.iter() { + let mut block_has_tenure_change = false; + for tx in transactions.iter().rev() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { info!("Found a burn op: {:?}", tx); + assert!(block_has_tenure_change, "Block should have a tenure change"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if burnchain_op.contains_key("transfer_stx") { let transfer_stx_obj = burnchain_op.get("transfer_stx").unwrap(); @@ -4472,6 +4506,7 @@ fn burn_ops_integration_test() { "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", sender, recipient, transfered_ustx ); + assert!(!transfer_stx_found, "Transfer STX op should be unique"); transfer_stx_found = true; continue; } @@ -4490,6 +4525,7 @@ fn burn_ops_integration_test() { assert_eq!(sender, stacker_addr_2.to_string()); assert_eq!(delegate_to, stacker_addr_1.to_string()); assert_eq!(delegated_ustx, 100_000); + assert!(!delegate_stx_found, "Delegate STX op should be unique"); delegate_stx_found = true; continue; } @@ -4529,8 +4565,16 @@ fn burn_ops_integration_test() { .expect_result_ok() .expect("Expected OK result for stack-stx op"); + assert!(!stack_stx_found, "Stack STX op should be unique"); stack_stx_found = true; stack_stx_burn_op_tx_count += 1; + } else { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { + block_has_tenure_change = true; + } } } } From f394e644aed108236764c8ab7733a2c5892f2daa Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 24 Sep 2024 17:12:30 -0500 Subject: [PATCH 683/910] feat: add 2 heuristics to miner for nakamoto * for the first block in a tenure, just mine an empty block * estimate the time it takes to eval a tx, and see if it will interfere with block deadline --- stackslib/src/chainstate/stacks/miner.rs | 52 
+++++++++++++++++++++++- stackslib/src/core/mempool.rs | 49 ++++++++++++++++++++-- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 78d6a477819..cf6e83b484c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -18,6 +18,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::thread::ThreadId; +use std::time::Instant; use std::{cmp, fs, mem}; use clarity::vm::analysis::{CheckError, CheckErrors}; @@ -2211,6 +2212,15 @@ impl StacksBlockBuilder { ); } + // nakamoto miner tenure start heuristic: + // mine an empty block so you can start your tenure quickly! + if let Some(tx) = initial_txs.first() { + if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { + debug!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); + return Ok((false, tx_events)); + } + } + mempool.reset_nonce_cache()?; mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?; @@ -2221,6 +2231,7 @@ impl StacksBlockBuilder { let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; + let mut update_timings = vec![]; let deadline = ts_start + u128::from(max_miner_time_ms); let mut num_txs = 0; @@ -2250,10 +2261,27 @@ impl StacksBlockBuilder { if block_limit_hit == BlockLimitFunction::LIMIT_REACHED { return Ok(None); } - if get_epoch_time_ms() >= deadline { + let time_now = get_epoch_time_ms(); + if time_now >= deadline { debug!("Miner mining time exceeded ({} ms)", max_miner_time_ms); return Ok(None); } + if let Some(time_estimate) = txinfo.metadata.time_estimate_ms { + if time_now.saturating_add(time_estimate.into()) > deadline { + debug!("Mining tx would cause us to exceed our deadline, skipping"; + "txid" => %txinfo.tx.txid(), + "deadline" => deadline, + "now" => time_now, + "estimate" => time_estimate); + return Ok(Some( + TransactionResult::skipped( + &txinfo.tx, + "Transaction would exceed deadline.".into(), + ) + .convert_to_event(), + )); + } + } // skip transactions early if we can if considered.contains(&txinfo.tx.txid()) { @@ -2303,6 +2331,7 @@ impl StacksBlockBuilder { considered.insert(txinfo.tx.txid()); num_considered += 1; + let tx_start = Instant::now(); let tx_result = builder.try_mine_tx_with_len( epoch_tx, &txinfo.tx, @@ -2314,6 +2343,21 @@ impl StacksBlockBuilder { let result_event = tx_result.convert_to_event(); match tx_result { TransactionResult::Success(TransactionSuccess { receipt, .. }) => { + if txinfo.metadata.time_estimate_ms.is_none() { + // use i64 to avoid running into issues when storing in + // rusqlite. 
+ let time_estimate_ms: i64 = tx_start + .elapsed() + .as_millis() + .try_into() + .unwrap_or_else(|_| i64::MAX); + let time_estimate_ms: u64 = time_estimate_ms + .try_into() + // should be unreachable + .unwrap_or_else(|_| 0); + update_timings.push((txinfo.tx.txid(), time_estimate_ms)); + } + num_txs += 1; if update_estimator { if let Err(e) = estimator.notify_event( @@ -2386,6 +2430,12 @@ impl StacksBlockBuilder { }, ); + if !update_timings.is_empty() { + if let Err(e) = mempool.update_tx_time_estimates(&update_timings) { + warn!("Error while updating time estimates for mempool"; "err" => ?e); + } + } + if to_drop_and_blacklist.len() > 0 { let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist); } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 0dff4796dcb..9b917b608ab 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -460,6 +460,7 @@ pub struct MemPoolTxMetadata { pub last_known_origin_nonce: Option, pub last_known_sponsor_nonce: Option, pub accept_time: u64, + pub time_estimate_ms: Option, } impl MemPoolTxMetadata { @@ -594,6 +595,7 @@ impl FromRow for MemPoolTxMetadata { let sponsor_nonce = u64::from_column(row, "sponsor_nonce")?; let last_known_sponsor_nonce = u64::from_column(row, "last_known_sponsor_nonce")?; let last_known_origin_nonce = u64::from_column(row, "last_known_origin_nonce")?; + let time_estimate_ms: Option = row.get("time_estimate_ms")?; Ok(MemPoolTxMetadata { txid, @@ -609,6 +611,7 @@ impl FromRow for MemPoolTxMetadata { last_known_origin_nonce, last_known_sponsor_nonce, accept_time, + time_estimate_ms, }) } } @@ -624,10 +627,7 @@ impl FromRow for MemPoolTxInfo { return Err(db_error::ParseError); } - Ok(MemPoolTxInfo { - tx: tx, - metadata: md, - }) + Ok(MemPoolTxInfo { tx, metadata: md }) } } @@ -803,6 +803,16 @@ const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[ "#, ]; +const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ + r#" + -- ALLOW NULL + ALTER TABLE mempool ADD COLUMN time_estimate_ms NUMBER; + "#, + r#" + INSERT INTO schema_version (version) VALUES (7) + "#, +]; + const MEMPOOL_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);", "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);", @@ -1287,6 +1297,9 @@ impl MemPoolDB { MemPoolDB::instantiate_nonces(tx)?; } 6 => { + MemPoolDB::instantiate_schema_7(tx)?; + } + 7 => { break; } _ => { @@ -1363,6 +1376,16 @@ impl MemPoolDB { Ok(()) } + /// Add the time_estimate_ms column to the mempool table + #[cfg_attr(test, mutants::skip)] + fn instantiate_schema_7(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in MEMPOOL_SCHEMA_7_TIME_ESTIMATES { + tx.execute_batch(sql_exec)?; + } + + Ok(()) + } + #[cfg_attr(test, mutants::skip)] pub fn db_path(chainstate_root_path: &str) -> Result { let mut path = PathBuf::from(chainstate_root_path); @@ -2650,6 +2673,24 @@ impl MemPoolDB { Ok(()) } + /// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them. + /// Do *NOT* remove them from the bloom filter. This will cause them to continue to be + /// reported as present, which is exactly what we want because we don't want these transactions + /// to be seen again (so we don't want anyone accidentally "helpfully" pushing them to us, nor + /// do we want the mempool sync logic to "helpfully" re-discover and re-download them). + pub fn update_tx_time_estimates(&mut self, txs: &[(Txid, u64)]) -> Result<(), db_error> { + let sql = "UPDATE mempool SET time_estimate_ms = ? 
WHERE txid = ?"; + let mempool_tx = self.tx_begin()?; + for (txid, time_estimate_ms) in txs.iter() { + mempool_tx + .tx + .execute(sql, params![time_estimate_ms, txid])?; + } + mempool_tx.commit()?; + + Ok(()) + } + /// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them. /// Do *NOT* remove them from the bloom filter. This will cause them to continue to be /// reported as present, which is exactly what we want because we don't want these transactions From cdb539446ef3b4234c0bd74dbd78552cd300a918 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 24 Sep 2024 20:31:12 -0500 Subject: [PATCH 684/910] use * in mempool SELECT --- stackslib/src/core/mempool.rs | 16 +--------------- stackslib/src/core/tests/mod.rs | 2 +- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 9b917b608ab..28560d9a806 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -2015,21 +2015,7 @@ impl MemPoolDB { nonce: u64, ) -> Result, db_error> { let sql = format!( - "SELECT - txid, - origin_address, - origin_nonce, - sponsor_address, - sponsor_nonce, - tx_fee, - length, - consensus_hash, - block_header_hash, - height, - accept_time, - last_known_sponsor_nonce, - last_known_origin_nonce - FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", + "SELECT * FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", if is_origin { "origin" } else { "sponsor" } ); let args = params![addr.to_string(), u64_to_sql(nonce)?]; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 72b29cc0979..01fcac9e897 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1381,7 +1381,7 @@ fn mempool_do_not_replace_tx() { .unwrap_err(); assert!(match err_resp { MemPoolRejection::ConflictingNonceInMempool => true, - _ => false, + e => panic!("Failed: {e:?}"), }); assert!(MemPoolDB::db_has_tx(&mempool_tx, &prior_txid).unwrap()); From b995e274f9e50f477269bdba6a2c0cf3c7463a5b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 25 Sep 2024 08:38:50 -0500 Subject: [PATCH 685/910] chore: fix signer_set_rollover test --- stackslib/src/core/mempool.rs | 6 +----- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 28560d9a806..61306a9764a 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -2659,11 +2659,7 @@ impl MemPoolDB { Ok(()) } - /// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them. - /// Do *NOT* remove them from the bloom filter. This will cause them to continue to be - /// reported as present, which is exactly what we want because we don't want these transactions - /// to be seen again (so we don't want anyone accidentally "helpfully" pushing them to us, nor - /// do we want the mempool sync logic to "helpfully" re-discover and re-download them). + /// Update the time estimates for the supplied txs in the mempool db pub fn update_tx_time_estimates(&mut self, txs: &[(Txid, u64)]) -> Result<(), db_error> { let sql = "UPDATE mempool SET time_estimate_ms = ? 
WHERE txid = ?"; let mempool_tx = self.tx_begin()?; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9f9f8d1a41e..13c3acce443 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2974,6 +2974,7 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_headers_height(); + let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); for stacker_sk in new_signer_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, @@ -3017,6 +3018,13 @@ fn signer_set_rollover() { submit_tx(&http_origin, &stacking_tx); } + wait_for(60, || { + Ok(accounts_to_check + .iter() + .all(|acct| get_account(&http_origin, acct).nonce >= 1)) + }) + .expect("Timed out waiting for stacking txs to be mined"); + signer_test.mine_nakamoto_block(short_timeout); let next_reward_cycle = reward_cycle.saturating_add(1); From 8dd4771fd87cd4aceebd7b9641ba5bb729ee32db Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 08:08:03 -0700 Subject: [PATCH 686/910] CRC: remove potential race condition in signing_in_0th_tenure_of_reward_cycle Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 134 +++++++-------------- 1 file changed, 43 insertions(+), 91 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9dd8b961654..2ec72082a65 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1992,35 +1992,29 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - while signer_test.get_current_reward_cycle() != final_reward_cycle { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 10, - || Ok(true), - ) - .unwrap(); - assert!( - start_time.elapsed() <= short_timeout, - "Timed out waiting to enter the next reward cycle" - ); - std::thread::sleep(Duration::from_millis(100)); - } + wait_for(short_timeout.as_secs(), || { + let result = signer_test.get_current_reward_cycle() == final_reward_cycle; + if !result { + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + } + Ok(result) + }) + .expect("Timed out waiting to enter the next reward cycle"); - while test_observer::get_burn_blocks() - .last() - .unwrap() - .get("burn_block_height") - .unwrap() - .as_u64() - .unwrap() - < final_reward_cycle_height_boundary + 1 - { - assert!( - start_time.elapsed() <= short_timeout, - "Timed out waiting for burn block events" - ); - std::thread::sleep(Duration::from_millis(100)); - } + wait_for(short_timeout.as_secs(), || { + let blocks = test_observer::get_burn_blocks() + .last() + .unwrap() + .get("burn_block_height") + .unwrap() + .as_u64() + .unwrap(); + Ok(blocks > final_reward_cycle_height_boundary) + }) + .expect("Timed out waiting for burn block events"); signer_test.wait_for_cycle(30, final_reward_cycle); @@ -2078,21 +2072,11 @@ fn retry_on_rejection() { let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - loop { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - - sleep_ms(10_000); - + wait_for(30, || { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if tip.sortition { - break; - } - } + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); 
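These test fixes repeatedly swap open-coded `loop`/`sleep`/deadline checks for the `wait_for` polling helper. For readers unfamiliar with it, a minimal sketch of the shape such a helper takes is below; the actual implementation lives in `testnet/stacks-node/src/tests/nakamoto_integrations.rs` and may differ in its exact signature, error handling, and polling interval.

```rust
use std::thread;
use std::time::{Duration, Instant};

/// Minimal sketch of a `wait_for`-style helper: poll `check` until it returns
/// Ok(true), propagate any error it reports, or fail once the timeout elapses.
pub fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let start = Instant::now();
    while start.elapsed() < Duration::from_secs(timeout_secs) {
        if check()? {
            return Ok(());
        }
        // coarse polling interval; the closure does the real work
        thread::sleep(Duration::from_millis(100));
    }
    Err("timed out waiting for condition".into())
}
```

Call sites then state the condition directly, as in the `wait_for(30, || { ... Ok(tip.sortition) })` hunks above, rather than sleeping and re-checking by hand.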
// mine a nakamoto block let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -2534,12 +2518,10 @@ fn mock_sign_epoch_25() { { let mut mock_block_mesage = None; let mock_poll_time = Instant::now(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); let current_burn_block_height = signer_test .running_nodes .btc_regtest_controller @@ -2747,12 +2729,10 @@ fn multiple_miners_mock_sign_epoch_25() { { let mut mock_block_mesage = None; let mock_poll_time = Instant::now(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); let current_burn_block_height = signer_test .running_nodes .btc_regtest_controller @@ -4539,21 +4519,11 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - loop { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - - sleep_ms(10_000); - + wait_for(30, || { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if tip.sortition { - break; - } - } + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; @@ -4833,15 +4803,7 @@ fn signing_in_0th_tenure_of_reward_cycle() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let signer_public_keys = signer_test .signer_stacks_private_keys .iter() @@ -4888,28 +4850,18 @@ fn signing_in_0th_tenure_of_reward_cycle() { } info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - for signer in &signer_public_keys { let blocks_signed = get_v3_signer(&signer, next_reward_cycle); assert_eq!(blocks_signed, 0); } - let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - - // submit a tx so that the miner will mine a stacks block in the 0th block of the new reward cycle - let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let _tx = submit_tx(&http_origin, &transfer_tx); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); wait_for(30, || { Ok(signer_test From 541d13b4cc69a15ae3767499e19e99455064506e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 25 Sep 2024 10:11:10 -0500 Subject: [PATCH 687/910] fix: bug in the tenure extending logic -- only include tenure change tx in the first block after extension --- stackslib/src/chainstate/stacks/miner.rs | 2 +- testnet/stacks-node/src/nakamoto_node/miner.rs | 14 ++++++++++---- 
.../stacks-node/src/tests/nakamoto_integrations.rs | 8 +++++--- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index cf6e83b484c..d3298855da5 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2216,7 +2216,7 @@ impl StacksBlockBuilder { // mine an empty block so you can start your tenure quickly! if let Some(tx) = initial_txs.first() { if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { - debug!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); + info!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); return Ok((false, tx_events)); } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19d..5e3f72ee20e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1260,6 +1260,12 @@ impl BlockMinerThread { tenure_change_tx: None, }); }; + if self.last_block_mined.is_some() { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { @@ -1289,10 +1295,10 @@ impl BlockMinerThread { &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; - "burn_view_consensus_hash" => %burn_view_consensus_hash, - "parent_block_id" => %parent_block_id, - "num_blocks_so_far" => num_blocks_so_far, + info!("Miner: Extending tenure"; + "burn_view_consensus_hash" => %burn_view_consensus_hash, + "parent_block_id" => %parent_block_id, + "num_blocks_so_far" => num_blocks_so_far, ); payload = payload.extend( *burn_view_consensus_hash, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b95eed7fa5f..870a00719a5 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6382,7 +6382,8 @@ fn continue_tenure_extend() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let http_origin = naka_conf.node.data_url.clone(); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); @@ -6571,12 +6572,13 @@ fn continue_tenure_extend() { &signers, ); - wait_for(5, || { + wait_for(25, || { let blocks_processed = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(blocks_processed > blocks_processed_before && sender_nonce >= 1) }) .unwrap(); From 93603978cdb8a3f64239e436fe37685a016145d7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 08:19:50 -0700 Subject: [PATCH 688/910] Do not use a test_observer in boot_to_epoch_3 to enable use with multi node tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 8 ++++---- 
testnet/stacks-node/src/tests/signer/v0.rs | 6 ++++-- testnet/stacks-node/src/tests/signer/v1.rs | 6 ++++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 421ffbb53f3..b7f39feba67 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -63,7 +63,6 @@ use wsts::state_machine::PublicKeys; use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::neon::{Counters, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; @@ -364,9 +363,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest MinedNakamotoBlockEvent { + fn mine_nakamoto_block(&mut self, timeout: Duration) { let commits_submitted = self.running_nodes.commits_submitted.clone(); let mined_block_time = Instant::now(); + let info_before = self.stacks_client.get_peer_info().unwrap(); next_block_and_mine_commit( &mut self.running_nodes.btc_regtest_controller, timeout.as_secs(), @@ -376,7 +376,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); @@ -384,7 +385,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { agg_key: &Point, timeout: Duration, ) -> MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); + self.mine_nakamoto_block(timeout); + let new_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let signer_sighash = new_block.signer_signature_hash.clone(); let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); @@ -1130,7 +1131,8 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Mine Block after restart -------------------------"); - let last_block = signer_test.mine_nakamoto_block(timeout); + signer_test.mine_nakamoto_block(timeout); + let last_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let proposed_signer_signature_hash = signer_test .wait_for_validate_ok_response(short_timeout) .signer_signature_hash; From cb2f4909344b21c14b0b5ec3a761f73c8eb246a3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 25 Sep 2024 11:30:02 -0400 Subject: [PATCH 689/910] fix: don't ban peers for sending us nakamoto blocks we can't yet handle --- stackslib/src/net/relay.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 94b63d0382f..d262d07c425 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1725,9 +1725,6 @@ impl Relayer { "Failed to validate Nakamoto blocks pushed from {:?}: {:?}", neighbor_key, &e ); - - // punish this peer - bad_neighbors.push((*neighbor_key).clone()); break; } From 82be35a8be28a66d3b46c08405bdd659fbc1c2a1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 10:02:48 -0700 Subject: [PATCH 690/910] Remove wsts from testnet/stacks-node Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/src/signer_set.rs | 132 +- stacks-signer/src/client/mod.rs | 134 +- stacks-signer/src/client/stackerdb.rs | 2 +- stacks-signer/src/config.rs | 17 - stacks-signer/src/lib.rs | 4 +- stacks-signer/src/runloop.rs | 16 +- stacks-signer/src/v0/signer.rs | 48 
+- stacks-signer/src/v1/coordinator.rs | 230 --- stacks-signer/src/v1/mod.rs | 29 - stacks-signer/src/v1/signer.rs | 1764 ----------------- stacks-signer/src/v1/stackerdb_manager.rs | 326 --- testnet/stacks-node/Cargo.toml | 2 - .../stacks-node/src/nakamoto_node/miner.rs | 16 +- .../src/nakamoto_node/sign_coordinator.rs | 428 +--- .../src/tests/nakamoto_integrations.rs | 25 +- testnet/stacks-node/src/tests/signer/mod.rs | 123 +- testnet/stacks-node/src/tests/signer/v0.rs | 6 +- testnet/stacks-node/src/tests/signer/v1.rs | 1155 ----------- 19 files changed, 121 insertions(+), 4337 deletions(-) delete mode 100644 stacks-signer/src/v1/coordinator.rs delete mode 100644 stacks-signer/src/v1/mod.rs delete mode 100644 stacks-signer/src/v1/signer.rs delete mode 100644 stacks-signer/src/v1/stackerdb_manager.rs delete mode 100644 testnet/stacks-node/src/tests/signer/v1.rs diff --git a/Cargo.lock b/Cargo.lock index b9b45849d31..e1d78fec15d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3437,7 +3437,6 @@ dependencies = [ "tracing-subscriber", "url", "warp", - "wsts", ] [[package]] diff --git a/libsigner/src/signer_set.rs b/libsigner/src/signer_set.rs index fdcb857faf8..f47ac454aa5 100644 --- a/libsigner/src/signer_set.rs +++ b/libsigner/src/signer_set.rs @@ -13,125 +13,77 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::{BTreeMap, HashMap}; + use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; -use hashbrown::{HashMap, HashSet}; use stacks_common::types::chainstate::{StacksAddress, StacksPublicKey}; -use wsts::curve::ecdsa; -use wsts::curve::point::{Compressed, Point}; -use wsts::state_machine::PublicKeys; -/// A reward set parsed into the structures required by WSTS party members and coordinators. +/// A reward set parsed into relevant structures #[derive(Debug, Clone)] pub struct SignerEntries { - /// The signer addresses mapped to signer id - pub signer_ids: HashMap, - /// The signer ids mapped to public key and key ids mapped to public keys - pub public_keys: PublicKeys, - /// The signer ids mapped to key ids - pub signer_key_ids: HashMap>, - /// The signer ids mapped to wsts public keys - pub signer_public_keys: HashMap, - /// The signer ids mapped to a hash set of key ids - /// The wsts coordinator uses a hash set for each signer since it needs to do lots of lookups - pub coordinator_key_ids: HashMap>, + /// The signer addresses mapped to signer ID + pub signer_addr_to_id: HashMap, + /// The signer IDs mapped to addresses. Uses a BTreeMap to ensure *reward cycle order* + pub signer_id_to_addr: BTreeMap, + /// signer ID mapped to public key + pub signer_id_to_pk: HashMap, + /// public_key mapped to signer ID + pub signer_pk_to_id: HashMap, + /// The signer public keys + pub signer_pks: Vec, + /// The signer addresses + pub signer_addresses: Vec, + /// The signer address mapped to signing weight + pub signer_addr_to_weight: HashMap, } /// Parsing errors for `SignerEntries` #[derive(Debug)] pub enum Error { /// A member of the signing set has a signing key buffer - /// which does not represent a ecdsa public key. + /// which does not represent a valid Stacks public key BadSignerPublicKey(String), /// The number of signers was greater than u32::MAX SignerCountOverflow, } impl SignerEntries { - /// Try to parse the reward set defined by `NakamotoSignEntry` into the structures required - /// by WSTS party members and coordinators. 
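For context on how the maps in the reworked `SignerEntries` get consumed downstream, here is a hypothetical usage sketch (not code from this patch; `String` stands in for `StacksAddress` so the snippet is self-contained):

```rust
use std::collections::HashMap;

/// Resolve a signer's ID and weight from its address using the parsed maps.
/// Unknown addresses get weight 0, mirroring how the v0 signer treats them.
fn lookup_signer(
    addr: &str,
    signer_addr_to_id: &HashMap<String, u32>,
    signer_addr_to_weight: &HashMap<String, u32>,
) -> (Option<u32>, u32) {
    let id = signer_addr_to_id.get(addr).copied();
    let weight = signer_addr_to_weight.get(addr).copied().unwrap_or(0);
    (id, weight)
}
```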
+ /// Try to parse the reward set defined by `NakamotoSignEntry` into the SignerEntries struct pub fn parse(is_mainnet: bool, reward_set: &[NakamotoSignerEntry]) -> Result { - let mut weight_end = 1; - let mut signer_key_ids = HashMap::with_capacity(reward_set.len()); - let mut signer_public_keys = HashMap::with_capacity(reward_set.len()); - let mut coordinator_key_ids = HashMap::with_capacity(4000); - let mut signer_ids = HashMap::with_capacity(reward_set.len()); - let mut wsts_signers = HashMap::new(); - let mut wsts_key_ids = HashMap::new(); + let mut signer_pk_to_id = HashMap::with_capacity(reward_set.len()); + let mut signer_id_to_pk = HashMap::with_capacity(reward_set.len()); + let mut signer_addr_to_id = HashMap::with_capacity(reward_set.len()); + let mut signer_pks = Vec::with_capacity(reward_set.len()); + let mut signer_id_to_addr = BTreeMap::new(); + let mut signer_addr_to_weight = HashMap::new(); + let mut signer_addresses = Vec::with_capacity(reward_set.len()); for (i, entry) in reward_set.iter().enumerate() { let signer_id = u32::try_from(i).map_err(|_| Error::SignerCountOverflow)?; - let ecdsa_pk = - ecdsa::PublicKey::try_from(entry.signing_key.as_slice()).map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to ecdsa::PublicKey: {e}" - )) - })?; - let signer_public_key = Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())) - .map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to wsts::Point: {e}" - )) - })?; - let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + let signer_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) .map_err(|e| { Error::BadSignerPublicKey(format!( "Failed to convert signing key to StacksPublicKey: {e}" )) })?; - let stacks_address = StacksAddress::p2pkh(is_mainnet, &stacks_public_key); - signer_ids.insert(stacks_address, signer_id); - - signer_public_keys.insert(signer_id, signer_public_key); - let weight_start = weight_end; - weight_end = weight_start + entry.weight; - let key_ids: HashSet = (weight_start..weight_end).collect(); - for key_id in key_ids.iter() { - wsts_key_ids.insert(*key_id, ecdsa_pk); - } - signer_key_ids.insert(signer_id, (weight_start..weight_end).collect()); - coordinator_key_ids.insert(signer_id, key_ids); - wsts_signers.insert(signer_id, ecdsa_pk); + let stacks_address = StacksAddress::p2pkh(is_mainnet, &signer_public_key); + signer_addr_to_id.insert(stacks_address, signer_id); + signer_id_to_pk.insert(signer_id, signer_public_key); + signer_pk_to_id.insert(signer_public_key, signer_id); + signer_pks.push(signer_public_key); + signer_id_to_addr.insert(signer_id, stacks_address); + signer_addr_to_weight.insert(stacks_address, entry.weight); + signer_addresses.push(stacks_address); } Ok(Self { - signer_ids, - public_keys: PublicKeys { - signers: wsts_signers, - key_ids: wsts_key_ids, - }, - signer_key_ids, - signer_public_keys, - coordinator_key_ids, + signer_addr_to_id, + signer_id_to_pk, + signer_pk_to_id, + signer_pks, + signer_id_to_addr, + signer_addr_to_weight, + signer_addresses, }) } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_keys(&self) -> Result { - self.public_keys - .key_ids - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_signers(&self) -> Result { - self.public_keys - .signers - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the 
number of Key IDs required to sign a message with the WSTS group signature - pub fn get_signing_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 7_f64 / 10_f64).ceil() as u32) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group signature - pub fn get_dkg_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 9_f64 / 10_f64).ceil() as u32) - } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 5dedfe82e3b..c36f73a3f9c 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -123,6 +123,7 @@ where #[cfg(test)] pub(crate) mod tests { + use std::collections::{BTreeMap, HashMap}; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener}; @@ -138,20 +139,16 @@ pub(crate) mod tests { use clarity::vm::costs::ExecutionCost; use clarity::vm::types::TupleData; use clarity::vm::Value as ClarityValue; - use hashbrown::{HashMap, HashSet}; use libsigner::SignerEntries; use rand::distributions::Standard; use rand::{thread_rng, Rng}; - use rand_core::{OsRng, RngCore}; + use rand_core::RngCore; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{Hash160, Sha256Sum}; - use wsts::curve::ecdsa; - use wsts::curve::point::{Compressed, Point}; - use wsts::curve::scalar::Scalar; - use wsts::state_machine::PublicKeys; + use wsts::curve::point::Point; use super::*; use crate::config::{GlobalConfig, SignerConfig}; @@ -456,112 +453,59 @@ pub(crate) mod tests { /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config - pub fn generate_signer_config( - config: &GlobalConfig, - num_signers: u32, - num_keys: u32, - ) -> SignerConfig { + pub fn generate_signer_config(config: &GlobalConfig, num_signers: u32) -> SignerConfig { assert!( num_signers > 0, "Cannot generate 0 signers...Specify at least 1 signer." ); - assert!( - num_keys > 0, - "Cannot generate 0 keys for the provided signers...Specify at least 1 key." 
- ); - let mut public_keys = PublicKeys { - signers: HashMap::new(), - key_ids: HashMap::new(), - }; + + let weight_per_signer = 100 / num_signers; + let mut remaining_weight = 100 % num_signers; + let reward_cycle = thread_rng().next_u64(); - let rng = &mut OsRng; - let num_keys = num_keys / num_signers; - let remaining_keys = num_keys % num_signers; - let mut coordinator_key_ids = HashMap::new(); - let mut signer_key_ids = HashMap::new(); - let mut signer_ids = HashMap::new(); - let mut start_key_id = 1u32; - let mut end_key_id = start_key_id; - let mut signer_public_keys = HashMap::new(); - let mut signer_slot_ids = vec![]; - let ecdsa_private_key = config.ecdsa_private_key; - let ecdsa_public_key = - ecdsa::PublicKey::new(&ecdsa_private_key).expect("Failed to create ecdsa public key"); - // Key ids start from 1 hence the wrapping adds everywhere + + let mut signer_pk_to_id = HashMap::new(); + let mut signer_id_to_pk = HashMap::new(); + let mut signer_addr_to_id = HashMap::new(); + let mut signer_pks = Vec::new(); + let mut signer_slot_ids = Vec::new(); + let mut signer_id_to_addr = BTreeMap::new(); + let mut signer_addr_to_weight = HashMap::new(); + let mut signer_addresses = Vec::new(); + for signer_id in 0..num_signers { - end_key_id = if signer_id.wrapping_add(1) == num_signers { - end_key_id.wrapping_add(remaining_keys) + let private_key = if signer_id == 0 { + config.stacks_private_key } else { - end_key_id.wrapping_add(num_keys) + StacksPrivateKey::new() }; - if signer_id == 0 { - public_keys.signers.insert(signer_id, ecdsa_public_key); - let signer_public_key = - Point::try_from(&Compressed::from(ecdsa_public_key.to_bytes())).unwrap(); - signer_public_keys.insert(signer_id, signer_public_key); - public_keys.signers.insert(signer_id, ecdsa_public_key); - for k in start_key_id..end_key_id { - public_keys.key_ids.insert(k, ecdsa_public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::new()) - .insert(k); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::new()) - .push(k); - } - start_key_id = end_key_id; - let address = StacksAddress::p2pkh( - false, - &StacksPublicKey::from_slice(ecdsa_public_key.to_bytes().as_slice()) - .expect("Failed to create stacks public key"), - ); - signer_slot_ids.push(SignerSlotID(signer_id)); - signer_ids.insert(address, signer_id); - - continue; - } - let private_key = Scalar::random(rng); - let public_key = ecdsa::PublicKey::new(&private_key).unwrap(); - let signer_public_key = - Point::try_from(&Compressed::from(public_key.to_bytes())).unwrap(); - signer_public_keys.insert(signer_id, signer_public_key); - public_keys.signers.insert(signer_id, public_key); - for k in start_key_id..end_key_id { - public_keys.key_ids.insert(k, public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::new()) - .insert(k); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::new()) - .push(k); - } - let address = StacksAddress::p2pkh( - false, - &StacksPublicKey::from_slice(public_key.to_bytes().as_slice()) - .expect("Failed to create stacks public key"), - ); + let public_key = StacksPublicKey::from_private(&private_key); + + signer_id_to_pk.insert(signer_id, public_key); + signer_pk_to_id.insert(public_key, signer_id); + let address = StacksAddress::p2pkh(false, &public_key); + signer_addr_to_id.insert(address, signer_id); + signer_pks.push(public_key); signer_slot_ids.push(SignerSlotID(signer_id)); - signer_ids.insert(address, signer_id); - start_key_id = end_key_id; + signer_id_to_addr.insert(signer_id, address); + 
signer_addr_to_weight.insert(address, weight_per_signer + remaining_weight); + signer_addresses.push(address); + remaining_weight = 0; // The first signer gets the extra weight if there is any. All other signers only get the weight_per_signer } SignerConfig { reward_cycle, signer_id: 0, signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers - key_ids: signer_key_ids.get(&0).cloned().unwrap_or_default(), signer_entries: SignerEntries { - public_keys, - coordinator_key_ids, - signer_key_ids, - signer_ids, - signer_public_keys, + signer_addr_to_id, + signer_id_to_pk, + signer_pk_to_id, + signer_pks, + signer_id_to_addr, + signer_addr_to_weight, + signer_addresses, }, signer_slot_ids, - ecdsa_private_key: config.ecdsa_private_key, stacks_private_key: config.stacks_private_key, node_host: config.node_host.to_string(), mainnet: config.network.is_mainnet(), diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index f2b574ef4fb..b3f6528232b 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -257,7 +257,7 @@ mod tests { Some(9000), ); let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); + let signer_config = generate_signer_config(&config, 5); let mut stackerdb = StackerDB::from(&signer_config); let header = NakamotoBlockHeader::empty(); diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 66cf5a5f7d5..802c362b86c 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -29,9 +29,7 @@ use stacks_common::address::{ }; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use stacks_common::types::PrivateKey; use stacks_common::util::hash::Hash160; -use wsts::curve::scalar::Scalar; use crate::client::SignerSlotID; @@ -122,14 +120,10 @@ pub struct SignerConfig { pub signer_id: u32, /// The signer stackerdb slot id (may be different from signer_id) pub signer_slot_id: SignerSlotID, - /// This signer's key ids - pub key_ids: Vec, /// The registered signers for this reward cycle pub signer_entries: SignerEntries, /// The signer slot ids of all signers registered for this reward cycle pub signer_slot_ids: Vec, - /// The Scalar representation of the private key for signer communication - pub ecdsa_private_key: Scalar, /// The private key for this signer pub stacks_private_key: StacksPrivateKey, /// The node host for this signer @@ -166,8 +160,6 @@ pub struct GlobalConfig { pub node_host: String, /// endpoint to the event receiver pub endpoint: SocketAddr, - /// The Scalar representation of the private key for signer communication - pub ecdsa_private_key: Scalar, /// The signer's Stacks private key pub stacks_private_key: StacksPrivateKey, /// The signer's Stacks address @@ -295,14 +287,6 @@ impl TryFrom for GlobalConfig { raw_data.stacks_private_key.clone(), ) })?; - - let ecdsa_private_key = - Scalar::try_from(&stacks_private_key.to_bytes()[..32]).map_err(|_| { - ConfigError::BadField( - "stacks_private_key".to_string(), - raw_data.stacks_private_key.clone(), - ) - })?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); let stacks_address = @@ -341,7 +325,6 @@ impl TryFrom for GlobalConfig { node_host: 
             node_host: raw_data.node_host,
             endpoint,
             stacks_private_key,
-            ecdsa_private_key,
             stacks_address,
             network: raw_data.network,
             event_timeout,
diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs
index 9d8a22a3206..8bac540e7a0 100644
--- a/stacks-signer/src/lib.rs
+++ b/stacks-signer/src/lib.rs
@@ -39,10 +39,8 @@ pub mod runloop;
 pub mod signerdb;
 /// The util module for the signer
 pub mod utils;
-/// The v0 implementation of the signer. This does not include WSTS support
+/// The v0 implementation of the signer.
 pub mod v0;
-/// The v1 implementation of the singer. This includes WSTS support
-pub mod v1;
 
 #[cfg(test)]
 mod tests;
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index 4a22c15bb78..d8d159a0868 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -301,7 +301,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLoop<Signer, T> {
             );
             return Ok(None);
         };
-        let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else {
+        let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else {
             warn!(
                 "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}."
             );
@@ -310,20 +310,13 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLoop<Signer, T> {
         info!(
             "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}."
         );
-        let key_ids = signer_entries
-            .signer_key_ids
-            .get(signer_id)
-            .cloned()
-            .unwrap_or_default();
         Ok(Some(SignerConfig {
             reward_cycle,
             signer_id: *signer_id,
             signer_slot_id: *signer_slot_id,
-            key_ids,
             signer_entries,
             signer_slot_ids: signer_slot_ids.into_values().collect(),
             first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing,
-            ecdsa_private_key: self.config.ecdsa_private_key,
             stacks_private_key: self.config.stacks_private_key,
             node_host: self.config.node_host.to_string(),
             mainnet: self.config.network.is_mainnet(),
@@ -608,8 +601,11 @@ mod tests {
         }
 
         let parsed_entries = SignerEntries::parse(false, &signer_entries).unwrap();
-        assert_eq!(parsed_entries.signer_ids.len(), nmb_signers);
-        let mut signer_ids = parsed_entries.signer_ids.into_values().collect::<Vec<_>>();
+        assert_eq!(parsed_entries.signer_id_to_pk.len(), nmb_signers);
+        let mut signer_ids = parsed_entries
+            .signer_id_to_pk
+            .into_keys()
+            .collect::<Vec<_>>();
         signer_ids.sort();
         assert_eq!(
             signer_ids,
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index fa34cc4b429..7c94ec908c2 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -12,7 +12,7 @@
 //
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
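
For context on the `generate_signer_config` changes above: per-signer key ids are gone, and each test signer instead receives a share of a fixed 100-point voting weight. A minimal standalone sketch of that split (illustrative only, not part of the patch):

```rust
/// Illustrative sketch: split 100 voting-weight points across `num_signers`,
/// giving the integer remainder to signer 0 so the weights always sum to 100.
fn split_weights(num_signers: u32) -> Vec<u32> {
    assert!(num_signers > 0);
    let per_signer = 100 / num_signers;
    let remainder = 100 % num_signers;
    (0..num_signers)
        .map(|id| if id == 0 { per_signer + remainder } else { per_signer })
        .collect()
}

fn main() {
    assert_eq!(split_weights(3), vec![34, 33, 33]);
    assert_eq!(split_weights(3).iter().sum::<u32>(), 100);
}
```
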
-use std::collections::{BTreeMap, HashMap};
+use std::collections::HashMap;
 use std::fmt::Debug;
 use std::sync::mpsc::Sender;
@@ -81,7 +81,7 @@ pub struct Signer {
     /// The reward cycle this signer belongs to
     pub reward_cycle: u64,
     /// Reward set signer addresses and their weights
-    pub signer_weights: HashMap<StacksAddress, usize>,
+    pub signer_weights: HashMap<StacksAddress, u32>,
     /// SignerDB for state management
     pub signer_db: SignerDb,
     /// Configuration for proposal evaluation
@@ -292,40 +292,13 @@ impl From<SignerConfig> for Signer {
             SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db");
         let proposal_config = ProposalEvalConfig::from(&signer_config);
 
-        // compute signer addresses *in reward cycle order*
-        let signer_ids_and_addrs: BTreeMap<_, _> = signer_config
-            .signer_entries
-            .signer_ids
-            .iter()
-            .map(|(addr, id)| (*id, *addr))
-            .collect();
-
-        let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect();
-
-        let signer_weights = signer_addresses
-            .iter()
-            .map(|addr| {
-                let Some(signer_id) = signer_config.signer_entries.signer_ids.get(addr) else {
-                    panic!("Malformed config: no signer ID for {}", addr);
-                };
-                let Some(key_ids) = signer_config.signer_entries.signer_key_ids.get(signer_id)
-                else {
-                    panic!(
-                        "Malformed config: no key IDs for signer ID {} ({})",
-                        signer_id, addr
-                    );
-                };
-                (*addr, key_ids.len())
-            })
-            .collect();
-
         Self {
             private_key: signer_config.stacks_private_key,
             stackerdb,
             mainnet: signer_config.mainnet,
             signer_id: signer_config.signer_id,
-            signer_addresses,
-            signer_weights,
+            signer_addresses: signer_config.signer_entries.signer_addresses.clone(),
+            signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(),
             signer_slot_ids: signer_config.signer_slot_ids.clone(),
             reward_cycle: signer_config.reward_cycle,
             signer_db,
@@ -679,22 +652,17 @@ impl Signer {
         &self,
         addrs: impl Iterator<Item = &StacksAddress>,
     ) -> u32 {
-        let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| {
+        addrs.fold(0u32, |signing_weight, stacker_address| {
             let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0);
             signing_weight.saturating_add(*stacker_weight)
-        });
-        u32::try_from(signing_weight)
-            .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX"))
+        })
     }
 
     /// Compute the total signing weight
     fn compute_signature_total_weight(&self) -> u32 {
-        let total_weight = self
-            .signer_weights
+        self.signer_weights
             .values()
-            .fold(0usize, |acc, val| acc.saturating_add(*val));
-        u32::try_from(total_weight)
-            .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX"))
+            .fold(0u32, |acc, val| acc.saturating_add(*val))
     }
 
     /// Handle an observed rejection from another signer
diff --git a/stacks-signer/src/v1/coordinator.rs b/stacks-signer/src/v1/coordinator.rs
deleted file mode 100644
index 7fc2d238c48..00000000000
--- a/stacks-signer/src/v1/coordinator.rs
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
-// Copyright (C) 2020-2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
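
With weights stored directly as `u32`, the folds above no longer need a `usize` accumulator or a fallible `u32::try_from`. A sketch of the same accumulation (hypothetical `&str` keys stand in for `StacksAddress`):

```rust
use std::collections::HashMap;

// Unknown addresses contribute zero weight, and saturating_add avoids
// overflow without panicking, mirroring compute_signature_signing_weight.
fn signing_weight(weights: &HashMap<&str, u32>, addrs: &[&str]) -> u32 {
    addrs
        .iter()
        .fold(0u32, |acc, addr| acc.saturating_add(*weights.get(addr).unwrap_or(&0)))
}

fn main() {
    let weights = HashMap::from([("alice", 34), ("bob", 33), ("carol", 33)]);
    assert_eq!(signing_weight(&weights, &["alice", "bob", "mallory"]), 67);
}
```
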
-// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::time::Instant; - -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use slog::slog_debug; -use stacks_common::debug; -use stacks_common::types::chainstate::ConsensusHash; -use stacks_common::util::hash::Sha256Sum; -use wsts::curve::ecdsa; -use wsts::state_machine::PublicKeys; - -/// TODO: test this value and adjust as necessary. Maybe make configurable? -pub const COORDINATOR_OPERATION_TIMEOUT_SECS: u64 = 300; - -/// TODO: test this value and adjust as necessary. Maybe make configurable? -pub const COORDINATOR_TENURE_TIMEOUT_SECS: u64 = 600; - -/// The coordinator selector -#[derive(Clone, Debug)] -pub struct CoordinatorSelector { - /// The ordered list of potential coordinators for a specific consensus hash - coordinator_ids: Vec, - /// The current coordinator id - coordinator_id: u32, - /// The current coordinator index into the coordinator ids list - coordinator_index: usize, - /// The last message received time for the current coordinator - pub last_message_time: Option, - /// The time the coordinator started its tenure - tenure_start: Instant, - /// The public keys of the coordinators - public_keys: PublicKeys, -} - -impl From for CoordinatorSelector { - /// Create a new Coordinator selector from the given list of public keys - fn from(public_keys: PublicKeys) -> Self { - let coordinator_ids = - Self::calculate_coordinator_ids(&public_keys, &ConsensusHash::empty()); - let coordinator_id = *coordinator_ids - .first() - .expect("FATAL: No registered signers"); - let coordinator_index = 0; - let last_message_time = None; - let tenure_start = Instant::now(); - Self { - coordinator_ids, - coordinator_id, - coordinator_index, - last_message_time, - tenure_start, - public_keys, - } - } -} - -/// Whether or not to rotate to new coordinators in `update_coordinator` -const ROTATE_COORDINATORS: bool = false; - -impl CoordinatorSelector { - /// Update the coordinator id - fn update_coordinator(&mut self, new_coordinator_ids: Vec) { - self.last_message_time = None; - self.coordinator_index = if new_coordinator_ids != self.coordinator_ids { - // We have advanced our block height and should select from the new list - let mut new_index: usize = 0; - self.coordinator_ids = new_coordinator_ids; - let new_coordinator_id = *self - .coordinator_ids - .first() - .expect("FATAL: No registered signers"); - if ROTATE_COORDINATORS && new_coordinator_id == self.coordinator_id { - // If the newly selected coordinator is the same as the current and we have more than one available, advance immediately to the next - if self.coordinator_ids.len() > 1 { - new_index = new_index.saturating_add(1); - } - } - new_index - } else if ROTATE_COORDINATORS { - self.coordinator_index.saturating_add(1) % self.coordinator_ids.len() - } else { - self.coordinator_index - }; - self.coordinator_id = *self - .coordinator_ids - .get(self.coordinator_index) - .expect("FATAL: Invalid number of registered signers"); - self.tenure_start = Instant::now(); - self.last_message_time = None; - } - - /// Check the coordinator timeouts and update the selected coordinator accordingly - /// Returns the resulting coordinator ID. 
(Note: it may be unchanged) - pub fn refresh_coordinator(&mut self, pox_consensus_hash: &ConsensusHash) -> u32 { - let new_coordinator_ids = - Self::calculate_coordinator_ids(&self.public_keys, pox_consensus_hash); - if let Some(time) = self.last_message_time { - if time.elapsed().as_secs() > COORDINATOR_OPERATION_TIMEOUT_SECS { - // We have not received a message in a while from this coordinator. - // We should consider the operation finished and use a new coordinator id. - self.update_coordinator(new_coordinator_ids); - } - } else if self.tenure_start.elapsed().as_secs() > COORDINATOR_TENURE_TIMEOUT_SECS - || new_coordinator_ids != self.coordinator_ids - { - // Our tenure has been exceeded or we have advanced our block height and should select from the new list - self.update_coordinator(new_coordinator_ids); - } - self.coordinator_id - } - - /// Get the current coordinator id and public key - pub fn get_coordinator(&self) -> (u32, ecdsa::PublicKey) { - ( - self.coordinator_id, - *self - .public_keys - .signers - .get(&self.coordinator_id) - .expect("FATAL: missing public key for selected coordinator id"), - ) - } - - /// Calculate the ordered list of coordinator ids by comparing the provided public keys - pub fn calculate_coordinator_ids( - public_keys: &PublicKeys, - pox_consensus_hash: &ConsensusHash, - ) -> Vec { - debug!("Using pox_consensus_hash {pox_consensus_hash:?} for selecting coordinator"); - // Create combined hash of each signer's public key with pox_consensus_hash - let mut selection_ids = public_keys - .signers - .iter() - .map(|(&id, pk)| { - let pk_bytes = pk.to_bytes(); - let mut buffer = - Vec::with_capacity(pk_bytes.len() + pox_consensus_hash.as_bytes().len()); - buffer.extend_from_slice(&pk_bytes[..]); - buffer.extend_from_slice(pox_consensus_hash.as_bytes()); - let digest = Sha256Sum::from_data(&buffer).as_bytes().to_vec(); - (id, digest) - }) - .collect::>(); - - // Sort the selection IDs based on the hash - selection_ids.sort_by_key(|(_, hash)| hash.clone()); - // Return only the ids - selection_ids.iter().map(|(id, _)| *id).collect() - } -} -#[cfg(test)] -mod tests { - use super::*; - use crate::client::tests::{generate_random_consensus_hash, generate_signer_config}; - use crate::config::GlobalConfig; - - #[test] - fn calculate_coordinator_different_consensus_hashes_produces_unique_results() { - let number_of_tests = 5; - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let public_keys = generate_signer_config(&config, 10, 4000) - .signer_entries - .public_keys; - let mut results = Vec::new(); - - for _ in 0..number_of_tests { - let result = CoordinatorSelector::calculate_coordinator_ids( - &public_keys, - &generate_random_consensus_hash(), - ); - results.push(result); - } - - // Check that not all coordinator IDs are the same - let all_ids_same = results.iter().all(|ids| ids == &results[0]); - assert!(!all_ids_same, "Not all coordinator IDs should be the same"); - } - - fn generate_calculate_coordinator_test_results( - random_consensus: bool, - count: usize, - ) -> Vec> { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let public_keys = generate_signer_config(&config, 10, 4000) - .signer_entries - .public_keys; - let mut results = Vec::new(); - let same_hash = generate_random_consensus_hash(); - for _ in 0..count { - let hash = if random_consensus { - generate_random_consensus_hash() - } else { - same_hash - }; - let result = 
CoordinatorSelector::calculate_coordinator_ids(&public_keys, &hash); - results.push(result); - } - results - } - - #[test] - fn calculate_coordinator_results_should_vary_or_match_based_on_hash() { - let results_with_random_hash = generate_calculate_coordinator_test_results(true, 5); - let all_ids_same = results_with_random_hash - .iter() - .all(|ids| ids == &results_with_random_hash[0]); - assert!(!all_ids_same, "Not all coordinator IDs should be the same"); - - let results_with_static_hash = generate_calculate_coordinator_test_results(false, 5); - let all_ids_same = results_with_static_hash - .iter() - .all(|ids| ids == &results_with_static_hash[0]); - assert!(all_ids_same, "All coordinator IDs should be the same"); - } -} diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs deleted file mode 100644 index ed1d9800165..00000000000 --- a/stacks-signer/src/v1/mod.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use libsigner::v1::messages::SignerMessage; - -use crate::v1::signer::Signer; - -/// The coordinator selector for the signer -pub mod coordinator; -/// The signer module for processing events -pub mod signer; -/// The stackerdb module for sending messages between signers and miners -pub mod stackerdb_manager; - -/// A v1 spawned signer -pub type SpawnedSigner = crate::SpawnedSigner; diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs deleted file mode 100644 index aa8fcfb0d2f..00000000000 --- a/stacks-signer/src/v1/signer.rs +++ /dev/null @@ -1,1764 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
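
The `CoordinatorSelector` deleted above derived a deterministic coordinator ordering by hashing each signer's public key together with the PoX consensus hash and sorting the digests. A self-contained sketch of that ordering (assumes the `sha2` crate in place of the codebase's `Sha256Sum`, and raw byte vectors in place of ECDSA keys):

```rust
use sha2::{Digest, Sha256};

// Rank signer ids by sha256(public_key_bytes || consensus_hash), lowest
// digest first, as in the removed calculate_coordinator_ids.
fn coordinator_order(signers: &[(u32, Vec<u8>)], consensus_hash: &[u8]) -> Vec<u32> {
    let mut ranked: Vec<(u32, [u8; 32])> = signers
        .iter()
        .map(|(id, pk)| {
            let mut hasher = Sha256::new();
            hasher.update(pk);
            hasher.update(consensus_hash);
            (*id, hasher.finalize().into())
        })
        .collect();
    ranked.sort_by(|a, b| a.1.cmp(&b.1));
    ranked.into_iter().map(|(id, _)| id).collect()
}

fn main() {
    let signers = vec![(0u32, vec![1, 2, 3]), (1, vec![4, 5, 6])];
    assert_eq!(coordinator_order(&signers, b"consensus-hash").len(), 2);
}
```

Because every signer computes the same digests, all of them agree on the coordinator without extra communication, and each new consensus hash reshuffles the order.
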
-use std::collections::VecDeque; -use std::fmt::Debug; -use std::path::PathBuf; -use std::sync::mpsc::Sender; -use std::time::Instant; - -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; -use blockstack_lib::chainstate::stacks::StacksTransaction; -use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; -use blockstack_lib::util_lib::db::Error as DBError; -use hashbrown::HashSet; -use libsigner::v1::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, -}; -use libsigner::{BlockProposal, SignerEvent}; -use rand_core::OsRng; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::codec::{read_next, StacksMessageCodec}; -use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::Sha512Trunc256Sum; -use stacks_common::{debug, error, info, warn}; -use wsts::common::Signature; -use wsts::curve::keys::PublicKey; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::net::{Message, NonceRequest, Packet, SignatureShareRequest}; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{ - Config as CoordinatorConfig, Coordinator, State as CoordinatorState, -}; -use wsts::state_machine::signer::Signer as SignerStateMachine; -use wsts::state_machine::{OperationResult, SignError}; -use wsts::traits::Signer as _; -use wsts::v2; - -use super::stackerdb_manager::StackerDBManager; -use crate::chainstate::SortitionsView; -use crate::client::{ClientError, SignerSlotID, StacksClient}; -use crate::config::SignerConfig; -use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use crate::signerdb::{BlockInfo, NakamotoBlockVote, SignerDb}; -use crate::v1::coordinator::CoordinatorSelector; -use crate::Signer as SignerTrait; - -/// The specific operations that a signer can perform -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum Operation { - /// A DKG operation - Dkg, - /// A Sign operation - Sign, -} - -/// The Signer state -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum State { - /// The signer is uninitialized and should read stackerdb to restore state - Uninitialized, - /// The signer is idle, waiting for messages and commands - Idle, - /// The signer is executing a DKG or Sign round - OperationInProgress(Operation), -} - -/// The stacks signer registered for the reward cycle -#[derive(Debug)] -pub struct Signer { - /// The coordinator for inbound messages for a specific reward cycle - pub coordinator: FireCoordinator, - /// The signing round used to sign messages for a specific reward cycle - pub state_machine: SignerStateMachine, - /// the state of the signer - pub state: State, - /// Received Commands that need to be processed - pub commands: VecDeque, - /// The stackerdb client session manager - pub stackerdb_manager: StackerDBManager, - /// Whether the signer is a mainnet signer or not - pub mainnet: bool, - /// The signer id - pub signer_id: u32, - /// The signer slot ids for the signers in the reward cycle - pub signer_slot_ids: Vec, - /// The addresses of other signers - pub signer_addresses: Vec, - /// The signer slot ids for the signers in the NEXT reward cycle - pub next_signer_slot_ids: Vec, - /// The addresses of the signers for 
the NEXT reward cycle - pub next_signer_addresses: Vec, - /// The reward cycle this signer belongs to - pub reward_cycle: u64, - /// The default tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0). - pub tx_fee_ustx: u64, - /// If estimating the tx fee, the max tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0) - /// If None, will not cap the fee. - pub max_tx_fee_ustx: Option, - /// The coordinator info for the signer - pub coordinator_selector: CoordinatorSelector, - /// The approved key registered to the contract - pub approved_aggregate_public_key: Option, - /// The current active miner's key (if we know it!) - pub miner_key: Option, - /// Signer DB path - pub db_path: PathBuf, - /// SignerDB for state management - pub signer_db: SignerDb, -} - -impl std::fmt::Display for Signer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Cycle #{} Signer #{}(C:{})", - self.reward_cycle, - self.signer_id, - self.coordinator_selector.get_coordinator().0, - ) - } -} - -impl SignerTrait for Signer { - /// Create a new signer from the given configuration - fn new(config: SignerConfig) -> Self { - Self::from(config) - } - - /// Return the reward cycle of the signer - fn reward_cycle(&self) -> u64 { - self.reward_cycle - } - - /// Process the event - fn process_event( - &mut self, - stacks_client: &StacksClient, - _sortition_state: &mut Option, - event: Option<&SignerEvent>, - res: &Sender>, - current_reward_cycle: u64, - ) { - let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), - // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogeneous, so, don't differentiate. - Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock { .. }) - | Some(SignerEvent::StatusCheck) - | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), - }; - let other_signer_parity = (self.reward_cycle + 1) % 2; - if event_parity == Some(other_signer_parity) { - return; - } - if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { - error!("{self}: failed to refresh DKG: {e}"); - } - } - self.refresh_coordinator(); - if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { - error!("{self}: failed to refresh DKG: {e}"); - } - } - self.refresh_coordinator(); - debug!("{self}: Processing event: {event:?}"); - let Some(event) = event else { - // No event. Do nothing. - debug!("{self}: No event received"); - return; - }; - match event { - SignerEvent::BlockValidationResponse(block_validate_response) => { - info!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - res, - current_reward_cycle, - ) - } - SignerEvent::SignerMessages(signer_set, messages) => { - if *signer_set != self.stackerdb_manager.get_signer_set() { - debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. 
Ignoring..."); - return; - } - debug!( - "{self}: Received {} messages from the other signers...", - messages.len() - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - SignerEvent::MinerMessages(messages, miner_key) => { - let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) - .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); - self.miner_key = Some(miner_key); - if current_reward_cycle != self.reward_cycle { - // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); - return; - } - debug!( - "{self}: Received {} messages from the miner", - messages.len(); - "miner_key" => ?miner_key, - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - SignerEvent::StatusCheck => { - debug!("{self}: Received a status check event.") - } - SignerEvent::NewBurnBlock { - burn_height, - burn_header_hash, - received_time, - } => { - info!("{self}: Received a new burn block event for block height {burn_height}"); - if let Err(e) = - self.signer_db - .insert_burn_block(burn_header_hash, *burn_height, received_time) - { - error!( - "Failed to write burn block event to signerdb"; - "err" => ?e, - "burn_header_hash" => %burn_header_hash, - "burn_height" => burn_height - ); - panic!("Failed to write burn block event to signerdb"); - } - } - } - } - - fn process_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - command: Option, - ) { - if let Some(command) = command { - let reward_cycle = command.reward_cycle; - if self.reward_cycle != reward_cycle { - warn!( - "{self}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}" - ); - } else { - info!( - "{self}: Queuing an external runloop command ({:?}): {command:?}", - self.state_machine.public_keys.signers.get(&self.signer_id) - ); - self.commands.push_back(command.command); - } - } - self.process_next_command(stacks_client, current_reward_cycle); - } - - fn has_unprocessed_blocks(&self) -> bool { - self.signer_db - .has_unprocessed_blocks(self.reward_cycle) - .unwrap_or_else(|e| { - error!("{self}: Failed to check if there are pending blocks: {e:?}"); - // Assume there are pending blocks to prevent premature cleanup - true - }) - } -} - -impl Signer { - /// Attempt to process the next command in the queue, and update state accordingly - fn process_next_command(&mut self, stacks_client: &StacksClient, current_reward_cycle: u64) { - match &self.state { - State::Uninitialized => { - // We cannot process any commands until we have restored our state - warn!("{self}: Cannot process commands until state is restored. Waiting..."); - } - State::Idle => { - let Some(command) = self.commands.front() else { - debug!("{self}: Nothing to process. Waiting for command..."); - return; - }; - let coordinator_id = if matches!(command, SignerCommand::Dkg) { - // We cannot execute a DKG command if we are not the coordinator - Some(self.get_coordinator_dkg().0) - } else { - self.get_coordinator_sign(current_reward_cycle).0 - }; - if coordinator_id != Some(self.signer_id) { - debug!( - "{self}: Coordinator is {coordinator_id:?}. 
Will not process any commands...", - ); - return; - } - let command = self - .commands - .pop_front() - .expect("BUG: Already asserted that the command queue was not empty"); - self.execute_command(stacks_client, &command); - } - State::OperationInProgress(op) => { - // We cannot execute the next command until the current one is finished... - debug!( - "{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}", - self.coordinator.state - ); - } - } - } - /// Return the current coordinator. - /// If the current reward cycle is the active reward cycle, this is the miner, - /// so the first element of the tuple will be None (because the miner does not have a signer index). - /// Otherwise, the coordinator is the signer with the index returned by the coordinator selector. - fn get_coordinator_sign(&self, current_reward_cycle: u64) -> (Option, PublicKey) { - if self.reward_cycle == current_reward_cycle { - let Some(ref cur_miner) = self.miner_key else { - error!( - "Signer #{}: Could not lookup current miner while in active reward cycle", - self.signer_id - ); - let selected = self.coordinator_selector.get_coordinator(); - return (Some(selected.0), selected.1); - }; - // coordinator is the current miner. - (None, *cur_miner) - } else { - let selected = self.coordinator_selector.get_coordinator(); - (Some(selected.0), selected.1) - } - } - - /// Refresh the next signer data from the given configuration data - #[allow(dead_code)] - fn update_signer(&mut self, new_signer_config: &SignerConfig) { - self.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); - } - - /// Get the current coordinator for executing DKG - /// This will always use the coordinator selector to determine the coordinator - fn get_coordinator_dkg(&self) -> (u32, PublicKey) { - self.coordinator_selector.get_coordinator() - } - - /// Read stackerdb messages in case the signer was started late or restarted and missed incoming DKG messages - pub fn read_dkg_stackerdb_messages( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - if self.state != State::Uninitialized { - // We should only read stackerdb if we are uninitialized - return Ok(()); - } - let ordered_packets = self - .stackerdb_manager - .get_dkg_packets(&self.signer_slot_ids)? - .iter() - .filter_map(|packet| { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - debug!( - "{self}: Received a non-DKG message in the DKG message queue. Ignoring it." 
- ); - return None; - }; - self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) - }) - .collect::>(); - // We successfully read stackerdb so we are no longer uninitialized - self.state = State::Idle; - debug!( - "{self}: Processing {} DKG messages from stackerdb: {ordered_packets:?}", - ordered_packets.len() - ); - self.handle_packets(stacks_client, res, &ordered_packets, current_reward_cycle); - Ok(()) - } -} - -impl From for Signer { - fn from(signer_config: SignerConfig) -> Self { - let mut stackerdb_manager = StackerDBManager::from(&signer_config); - - let num_signers = signer_config - .signer_entries - .count_signers() - .expect("FATAL: Too many registered signers to fit in a u32"); - let num_keys = signer_config - .signer_entries - .count_keys() - .expect("FATAL: Too many key ids to fit in a u32"); - let threshold = signer_config - .signer_entries - .get_signing_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - let dkg_threshold = signer_config - .signer_entries - .get_dkg_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - - let coordinator_config = CoordinatorConfig { - threshold, - dkg_threshold, - num_signers, - num_keys, - message_private_key: signer_config.ecdsa_private_key, - dkg_public_timeout: signer_config.dkg_public_timeout, - dkg_private_timeout: signer_config.dkg_private_timeout, - dkg_end_timeout: signer_config.dkg_end_timeout, - nonce_timeout: signer_config.nonce_timeout, - sign_timeout: signer_config.sign_timeout, - signer_key_ids: signer_config.signer_entries.coordinator_key_ids, - signer_public_keys: signer_config.signer_entries.signer_public_keys, - }; - - let coordinator = FireCoordinator::new(coordinator_config); - let coordinator_selector = - CoordinatorSelector::from(signer_config.signer_entries.public_keys.clone()); - - debug!( - "Reward cycle #{} Signer #{}: initial coordinator is signer {}", - signer_config.reward_cycle, - signer_config.signer_id, - coordinator_selector.get_coordinator().0 - ); - let signer_db = - SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - - let mut state_machine = SignerStateMachine::new( - threshold, - num_signers, - num_keys, - signer_config.signer_id, - signer_config.key_ids, - signer_config.ecdsa_private_key, - signer_config.signer_entries.public_keys, - ); - - if let Some(state) = load_encrypted_signer_state( - &mut stackerdb_manager, - signer_config.signer_slot_id, - &state_machine.network_private_key, - ).or_else(|err| { - warn!("Failed to load encrypted signer state from StackerDB, falling back to SignerDB: {err}"); - load_encrypted_signer_state( - &signer_db, - signer_config.reward_cycle, - &state_machine.network_private_key) - }).expect("Failed to load encrypted signer state from both StackerDB and SignerDB") { - state_machine.signer = state; - }; - - Self { - coordinator, - state_machine, - state: State::Uninitialized, - commands: VecDeque::new(), - stackerdb_manager, - mainnet: signer_config.mainnet, - signer_id: signer_config.signer_id, - signer_addresses: signer_config - .signer_entries - .signer_ids - .into_keys() - .collect(), - signer_slot_ids: signer_config.signer_slot_ids.clone(), - next_signer_slot_ids: vec![], - next_signer_addresses: vec![], - reward_cycle: signer_config.reward_cycle, - tx_fee_ustx: signer_config.tx_fee_ustx, - max_tx_fee_ustx: signer_config.max_tx_fee_ustx, - coordinator_selector, - approved_aggregate_public_key: None, - miner_key: None, - db_path: signer_config.db_path, - signer_db, - } - } -} - -impl Signer { 
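
Earlier in this removed v1 `Signer`, `process_event` dropped events tagged for the other concurrently running instance by comparing reward-cycle parity. A minimal sketch of that check (illustrative, with plain integers):

```rust
// A signer registered for `reward_cycle` ignores an event whose parity
// matches the *other* concurrently running instance (cycle + 1).
fn should_ignore(event_parity: Option<u64>, reward_cycle: u64) -> bool {
    let other_parity = (reward_cycle + 1) % 2;
    event_parity == Some(other_parity)
}

fn main() {
    assert!(should_ignore(Some(1), 10)); // cycle-10 signer drops odd-parity events
    assert!(!should_ignore(Some(0), 10));
    assert!(!should_ignore(None, 10)); // untagged events are always processed
}
```
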
- /// Refresh the coordinator selector - pub fn refresh_coordinator(&mut self) { - // TODO: do not use an empty consensus hash - let pox_consensus_hash = ConsensusHash::empty(); - let old_coordinator_id = self.coordinator_selector.get_coordinator().0; - let updated_coordinator_id = self - .coordinator_selector - .refresh_coordinator(&pox_consensus_hash); - if old_coordinator_id != updated_coordinator_id { - debug!( - "{self}: Coordinator updated. Resetting state to Idle."; - "old_coordinator_id" => {old_coordinator_id}, - "updated_coordinator_id" => {updated_coordinator_id}, - "pox_consensus_hash" => %pox_consensus_hash - ); - self.coordinator.state = CoordinatorState::Idle; - self.state = State::Idle; - } - } - - /// Finish an operation and update the coordinator selector accordingly - fn finish_operation(&mut self) { - self.state = State::Idle; - self.coordinator_selector.last_message_time = None; - } - - /// Update operation - fn update_operation(&mut self, operation: Operation) { - self.state = State::OperationInProgress(operation); - self.coordinator_selector.last_message_time = Some(Instant::now()); - } - - /// Execute the given command and update state accordingly - fn execute_command(&mut self, stacks_client: &StacksClient, command: &SignerCommand) { - match command { - SignerCommand::Dkg => { - crate::monitoring::increment_commands_processed("dkg"); - if self.approved_aggregate_public_key.is_some() { - debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id); - return; - } - let vote_round = match stacks_client.get_last_round(self.reward_cycle) { - Ok(last_round) => last_round, - Err(e) => { - error!("{self}: Unable to perform DKG. Failed to get last round from stacks node: {e:?}"); - return; - } - }; - // The dkg id will increment internally following "start_dkg_round" so do not increment it here - self.coordinator.current_dkg_id = vote_round.unwrap_or(0); - info!( - "{self}: Starting DKG vote"; - "round" => self.coordinator.current_dkg_id.wrapping_add(1), - "cycle" => self.reward_cycle, - ); - match self.coordinator.start_dkg_round() { - Ok(msg) => { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); - self.update_operation(Operation::Dkg); - } - Err(e) => { - error!("{self}: Failed to start DKG: {e:?}",); - return; - } - } - self.update_operation(Operation::Dkg); - } - SignerCommand::Sign { - block_proposal, - is_taproot, - merkle_root, - } => { - crate::monitoring::increment_commands_processed("sign"); - if self.approved_aggregate_public_key.is_none() { - debug!("{self}: Cannot sign a block without an approved aggregate public key. Ignore it."); - return; - } - let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let mut block_info = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) - .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); - if block_info.signed_over { - debug!("{self}: Received a sign command for a block we are already signing over. 
Ignore it."); - return; - } - info!("{self}: Signing block"; - "block_consensus_hash" => %block_proposal.block.header.consensus_hash, - "block_height" => block_proposal.block.header.chain_length, - "pre_sign_block_id" => %block_proposal.block.block_id(), - ); - match self.coordinator.start_signing_round( - &block_proposal.serialize_to_vec(), - *is_taproot, - *merkle_root, - ) { - Ok(msg) => { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); - block_info.signed_over = true; - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); - self.update_operation(Operation::Sign); - } - Err(e) => { - error!("{self}: Failed to start signing block: {e:?}",); - return; - } - } - self.update_operation(Operation::Sign); - } - } - } - - /// Handle the block validate response returned from our prior calls to submit a block for validation - fn handle_block_validate_response( - &mut self, - stacks_client: &StacksClient, - block_validate_response: &BlockValidateResponse, - res: &Sender>, - current_reward_cycle: u64, - ) { - let mut block_info = match block_validate_response { - BlockValidateResponse::Ok(block_validate_ok) => { - crate::monitoring::increment_block_validation_responses(true); - let signer_signature_hash = block_validate_ok.signer_signature_hash; - // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return; - } - }; - let is_valid = self.verify_block_transactions(stacks_client, &block_info.block); - block_info.valid = Some(is_valid); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - info!( - "{self}: Treating block validation for block {} as valid: {:?}", - &block_info.block.block_id(), - block_info.valid - ); - block_info - } - BlockValidateResponse::Reject(block_validate_reject) => { - crate::monitoring::increment_block_validation_responses(false); - let signer_signature_hash = block_validate_reject.signer_signature_hash; - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. 
Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return; - } - }; - block_info.valid = Some(false); - // Submit a rejection response to the .signers contract for miners - // to observe so they know to send another block and to prove signers are doing work); - warn!("{self}: Broadcasting a block rejection due to stacks node validation failure..."); - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_validate_reject.clone().into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - block_info - } - }; - if let Some(mut nonce_request) = block_info.ext.take_nonce_request() { - debug!("{self}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request..."); - // We have received validation from the stacks node. Determine our vote and update the request message - self.determine_vote(&mut block_info, &mut nonce_request); - // Send the nonce request through with our vote - let packet = Packet { - msg: Message::NonceRequest(nonce_request), - sig: vec![], - }; - self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); - } - info!( - "{self}: Received a block validate response"; - "block_hash" => block_info.block.header.block_hash(), - "valid" => block_info.valid, - "signed_over" => block_info.signed_over, - ); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - } - - /// Handle signer messages submitted to signers stackerdb - fn handle_signer_messages( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - messages: &[SignerMessage], - current_reward_cycle: u64, - ) { - let packets: Vec = messages - .iter() - .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::EncryptedSignerState(_) - | SignerMessage::Transactions(_) => None, - // TODO: if a signer tries to trigger DKG and we already have one set in the contract, ignore the request. 
- SignerMessage::Packet(packet) => { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - self.get_coordinator_sign(current_reward_cycle).1 - }; - self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) - } - }) - .collect(); - self.handle_packets(stacks_client, res, &packets, current_reward_cycle); - } - - /// Helper function for determining if the provided message is a DKG specific message - fn is_dkg_message(msg: &Message) -> bool { - matches!( - msg, - Message::DkgBegin(_) - | Message::DkgEnd(_) - | Message::DkgEndBegin(_) - | Message::DkgPrivateBegin(_) - | Message::DkgPrivateShares(_) - | Message::DkgPublicShares(_) - ) - } - - /// Process inbound packets as both a signer and a coordinator - /// Will send outbound packets and operation results as appropriate - fn handle_packets( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - packets: &[Packet], - current_reward_cycle: u64, - ) { - if let Ok(packets_len) = packets.len().try_into() { - crate::monitoring::increment_inbound_packets(packets_len); - } - let signer_outbound_messages = self - .state_machine - .process_inbound_messages(packets) - .unwrap_or_else(|e| { - error!("{self}: Failed to process inbound messages as a signer: {e:?}",); - vec![] - }); - - // Next process the message as the coordinator - let (coordinator_outbound_messages, operation_results) = if self.reward_cycle - != current_reward_cycle - { - self.coordinator - .process_inbound_messages(packets) - .unwrap_or_else(|e| { - error!("{self}: Failed to process inbound messages as a coordinator: {e:?}"); - (vec![], vec![]) - }) - } else { - (vec![], vec![]) - }; - - if !operation_results.is_empty() { - // We have finished a signing or DKG round, either successfully or due to error. - // Regardless of the why, update our state to Idle as we should not expect the operation to continue. - self.process_operation_results(stacks_client, &operation_results); - self.send_operation_results(res, operation_results); - self.finish_operation(); - } else if !packets.is_empty() { - // We have received a message. Update our state accordingly - // Let us be extra explicit in case a new state type gets added to wsts' state machine - match &self.coordinator.state { - CoordinatorState::Idle => {} - CoordinatorState::DkgPublicDistribute - | CoordinatorState::DkgPublicGather - | CoordinatorState::DkgPrivateDistribute - | CoordinatorState::DkgPrivateGather - | CoordinatorState::DkgEndDistribute - | CoordinatorState::DkgEndGather => { - self.update_operation(Operation::Dkg); - } - CoordinatorState::NonceRequest(_, _) - | CoordinatorState::NonceGather(_, _) - | CoordinatorState::SigShareRequest(_, _) - | CoordinatorState::SigShareGather(_, _) => { - self.update_operation(Operation::Sign); - } - } - } - - if packets - .iter() - .any(|packet| matches!(packet.msg, Message::DkgEnd(_))) - { - debug!("{self}: Saving signer state"); - self.save_signer_state() - .unwrap_or_else(|_| panic!("{self}: Failed to save signer state")); - } - self.send_outbound_messages(signer_outbound_messages); - self.send_outbound_messages(coordinator_outbound_messages); - } - - /// Validate a signature share request, updating its message where appropriate. - /// If the request is for a block it has already agreed to sign, it will overwrite the message with the agreed upon value - /// Returns whether the request is valid or not. 
- fn validate_signature_share_request(&self, request: &mut SignatureShareRequest) -> bool { - let Some(block_vote): Option = read_next(&mut &request.message[..]).ok() - else { - // We currently reject anything that is not a block vote - debug!( - "{self}: Received a signature share request for an unknown message stream. Reject it.", - ); - return false; - }; - - match self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to DB")) - .map(|b| b.vote) - { - Some(Some(vote)) => { - // Overwrite with our agreed upon value in case another message won majority or the coordinator is trying to cheat... - debug!( - "{self}: Set vote (rejected = {}) to {vote:?}", block_vote.rejected; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - request.message = vote.serialize_to_vec(); - true - } - Some(None) => { - // We never agreed to sign this block. Reject it. - // This can happen if the coordinator received enough votes to sign yes - // or no on a block before we received validation from the stacks node. - debug!( - "{self}: Received a signature share request for a block we never agreed to sign. Ignore it."; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - false - } - None => { - // We will only sign across block hashes or block hashes + b'n' byte for - // blocks we have seen a Nonce Request for (and subsequent validation) - // We are missing the context here necessary to make a decision. Reject the block - debug!( - "{self}: Received a signature share request from an unknown block. Reject it."; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - false - } - } - } - - /// Validate a nonce request, updating its message appropriately. - /// If the request is for a block, we will update the request message - /// as either a hash indicating a vote no or the signature hash indicating a vote yes - /// Returns whether the request is valid or not - fn validate_nonce_request( - &mut self, - stacks_client: &StacksClient, - nonce_request: &mut NonceRequest, - ) -> Option { - let Some(block_proposal) = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()).ok() - else { - // We currently reject anything that is not a valid block proposal - warn!("{self}: Received a nonce request for an unknown message stream. Reject it.",); - return None; - }; - if block_proposal.reward_cycle != self.reward_cycle { - // We are not signing for this reward cycle. Reject the block - warn!( - "{self}: Received a nonce request for a different reward cycle. Reject it."; - "requested_reward_cycle" => block_proposal.reward_cycle, - ); - return None; - } - // TODO: could add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. - let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let Some(mut block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .expect("Failed to connect to signer DB") - else { - debug!( - "{self}: received a nonce request for a new block. Submit block for validation. 
"; - "signer_sighash" => %signer_signature_hash, - ); - let block_info = BlockInfo::new_v1_with_request(block_proposal, nonce_request.clone()); - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}",); - }); - return Some(block_info); - }; - - if block_info.valid.is_none() { - // We have not yet received validation from the stacks node. Cache the request and wait for validation - debug!("{self}: We have yet to receive validation from the stacks node for a nonce request. Cache the nonce request and wait for block validation..."); - block_info - .ext - .set_nonce_request(nonce_request.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to set nonce_request: {e:?}",); - }); - return Some(block_info); - } - - self.determine_vote(&mut block_info, nonce_request); - Some(block_info) - } - - /// Verify the transactions in a block are as expected - fn verify_block_transactions( - &mut self, - stacks_client: &StacksClient, - block: &NakamotoBlock, - ) -> bool { - let next_reward_cycle = self.reward_cycle.wrapping_add(1); - let approved_aggregate_public_key = stacks_client - .get_approved_aggregate_key(next_reward_cycle) - .unwrap_or(None); - if approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set for the upcoming signers' reward cycle - // Otherwise it is a waste of block space and time to enforce as the desired outcome has been reached. - debug!("{self}: Already have an aggregate key for the next signer set's reward cycle ({}). Skipping transaction verification...", next_reward_cycle); - return true; - } - if let Ok(expected_transactions) = self.get_expected_transactions(stacks_client) { - //It might be worth building a hashset of the blocks' txids and checking that against the expected transaction's txid. - let block_tx_hashset = block.txs.iter().map(|tx| tx.txid()).collect::>(); - // Ensure the block contains the transactions we expect - let missing_transactions = expected_transactions - .into_iter() - .filter_map(|tx| { - if !block_tx_hashset.contains(&tx.txid()) { - debug!("{self}: expected txid {} is in the block", &tx.txid()); - Some(tx) - } else { - debug!("{self}: missing expected txid {}", &tx.txid()); - None - } - }) - .collect::>(); - let is_valid = missing_transactions.is_empty(); - if !is_valid { - debug!("{self}: Broadcasting a block rejection due to missing expected transactions..."); - let block_rejection = BlockRejection::new( - block.header.signer_signature_hash(), - RejectCode::MissingTransactions(missing_transactions), - ); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - } - is_valid - } else { - // Failed to connect to the stacks node to get transactions. Cannot validate the block. Reject it. 
- debug!("{self}: Broadcasting a block rejection due to signer connectivity issues...",); - let block_rejection = BlockRejection::new( - block.header.signer_signature_hash(), - RejectCode::ConnectivityIssues, - ); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - false - } - } - - /// Get transactions from stackerdb for the given addresses and account nonces, filtering out any malformed transactions - fn get_signer_transactions( - &mut self, - nonces: &std::collections::HashMap, - ) -> Result, ClientError> { - let transactions: Vec<_> = self - .stackerdb_manager - .get_current_transactions()? - .into_iter() - .filter_map(|tx| { - if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) { - return None; - } - Some(tx) - }) - .collect(); - Ok(transactions) - } - - /// Get the transactions that should be included in the block, filtering out any invalid transactions - fn get_expected_transactions( - &mut self, - stacks_client: &StacksClient, - ) -> Result, ClientError> { - if self.next_signer_slot_ids.is_empty() { - debug!("{self}: No next signers. Skipping transaction retrieval.",); - return Ok(vec![]); - } - // Get all the account nonces for the next signers - let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); - let transactions: Vec<_> = self - .stackerdb_manager - .get_next_transactions(&self.next_signer_slot_ids)?; - let mut filtered_transactions = std::collections::HashMap::new(); - NakamotoSigners::update_filtered_transactions( - &mut filtered_transactions, - &account_nonces, - self.mainnet, - transactions, - ); - // We only allow enforcement of one special cased transaction per signer address per block - Ok(filtered_transactions.into_values().collect()) - } - - /// Determine the vote for a block and update the block info and nonce request accordingly - fn determine_vote(&self, block_info: &mut BlockInfo, nonce_request: &mut NonceRequest) { - let rejected = !block_info.valid.unwrap_or(false); - if rejected { - debug!("{self}: Rejecting block {}", block_info.block.block_id()); - } else { - debug!("{self}: Accepting block {}", block_info.block.block_id()); - } - let block_vote = NakamotoBlockVote { - signer_signature_hash: block_info.block.header.signer_signature_hash(), - rejected: !block_info.valid.unwrap_or(false), - }; - let block_vote_bytes = block_vote.serialize_to_vec(); - // Cache our vote - block_info.vote = Some(block_vote); - nonce_request.message = block_vote_bytes; - } - - /// Verify a chunk is a valid wsts packet. Returns the packet if it is valid, else None. - /// NOTE: The packet will be updated if the signer wishes to respond to NonceRequest - /// and SignatureShareRequests with a different message than what the coordinator originally sent. - /// This is done to prevent a malicious coordinator from sending a different message than what was - /// agreed upon and to support the case where the signer wishes to reject a block by voting no - fn verify_packet( - &mut self, - stacks_client: &StacksClient, - mut packet: Packet, - coordinator_public_key: &PublicKey, - ) -> Option { - // We only care about verified wsts packets. Ignore anything else. 
- if packet.verify(&self.state_machine.public_keys, coordinator_public_key) { - match &mut packet.msg { - Message::SignatureShareRequest(request) => { - if !self.validate_signature_share_request(request) { - return None; - } - } - Message::NonceRequest(request) => { - let Some(updated_block_info) = - self.validate_nonce_request(stacks_client, request) - else { - warn!("Failed to validate and parse nonce request"); - return None; - }; - self.signer_db - .insert_block(&updated_block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - let process_request = updated_block_info.vote.is_some(); - if !process_request { - debug!("Failed to validate nonce request"); - return None; - } - } - _ => { - // Nothing to do for other message types - } - } - Some(packet) - } else { - debug!( - "{self}: Failed to verify wsts packet with {}: {packet:?}", - coordinator_public_key - ); - None - } - } - - /// Processes the operation results, broadcasting block acceptance or rejection messages - /// and DKG vote results accordingly - fn process_operation_results( - &mut self, - stacks_client: &StacksClient, - operation_results: &[OperationResult], - ) { - for operation_result in operation_results { - // Signers only every trigger non-taproot signing rounds over blocks. Ignore SignTaproot results - match operation_result { - OperationResult::Sign(signature) => { - crate::monitoring::increment_operation_results("sign"); - info!("{self}: Received signature result"); - self.process_signature(signature); - } - OperationResult::SignTaproot(_) => { - crate::monitoring::increment_operation_results("sign_taproot"); - debug!("{self}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature."); - } - OperationResult::Dkg(aggregate_key) => { - crate::monitoring::increment_operation_results("dkg"); - self.process_dkg(stacks_client, aggregate_key); - } - OperationResult::SignError(e) => { - crate::monitoring::increment_operation_results("sign_error"); - warn!("{self}: Received a Sign error: {e:?}"); - self.process_sign_error(e); - } - OperationResult::DkgError(e) => { - crate::monitoring::increment_operation_results("dkg_error"); - warn!("{self}: Received a DKG error: {e:?}"); - // TODO: process these errors and track malicious signers to report - } - } - } - } - - /// Process a dkg result by broadcasting a vote to the stacks node - fn process_dkg(&mut self, stacks_client: &StacksClient, dkg_public_key: &Point) { - let mut dkg_results_bytes = vec![]; - debug!( - "{self}: Received DKG result. 
Broadcasting vote to the stacks node..."; - "dkg_public_key" => %dkg_public_key - ); - if let Err(e) = SignerMessage::serialize_dkg_result( - &mut dkg_results_bytes, - dkg_public_key, - self.coordinator.party_polynomials.iter(), - ) { - error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id; - "error" => %e); - } else if let Err(e) = self - .stackerdb_manager - .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) - { - error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; - "error" => %e); - } - - // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance - let signer_address = stacks_client.get_signer_address(); - // Retreieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about - let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses); - let account_nonce = account_nonces.get(signer_address).unwrap_or(&0); - let signer_transactions = self - .get_signer_transactions(&account_nonces) - .map_err(|e| { - error!("{self}: Unable to get signer transactions: {e:?}."); - }) - .unwrap_or_default(); - // If we have a transaction in the stackerdb slot, we need to increment the nonce hence the +1, else should use the account nonce - let next_nonce = signer_transactions - .first() - .map(|tx| tx.get_origin_nonce().wrapping_add(1)) - .unwrap_or(*account_nonce); - let epoch = stacks_client - .get_node_epoch() - .unwrap_or(StacksEpochId::Epoch24); - match self.build_dkg_vote(stacks_client, &epoch, next_nonce, *dkg_public_key) { - Ok(new_transaction) => { - if let Err(e) = self.broadcast_dkg_vote( - stacks_client, - epoch, - signer_transactions, - new_transaction, - ) { - warn!( - "{self}: Failed to broadcast DKG public key vote ({dkg_public_key:?}): {e:?}" - ); - } - } - Err(e) => { - warn!( - "{self}: Failed to build DKG public key vote ({dkg_public_key:?}) transaction: {e:?}." 
- ); - } - } - } - - /// Build a signed DKG vote transaction - fn build_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: &StacksEpochId, - nonce: u64, - dkg_public_key: Point, - ) -> Result { - let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( - self.stackerdb_manager.get_signer_slot_id().0, - self.coordinator.current_dkg_id, - dkg_public_key, - self.reward_cycle, - nonce, - )?; - let tx_fee = if epoch < &StacksEpochId::Epoch30 { - info!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); - let fee = if let Some(max_fee) = self.max_tx_fee_ustx { - let estimated_fee = stacks_client - .get_medium_estimated_fee_ustx(&unsigned_tx) - .map_err(|e| { - warn!("{self}: unable to estimate fee for DKG vote transaction: {e:?}."); - e - }) - .unwrap_or(self.tx_fee_ustx); - std::cmp::min(estimated_fee, max_fee) - } else { - self.tx_fee_ustx - }; - debug!("{self}: Using a fee of {fee} uSTX for DKG vote transaction."); - fee - } else { - 0 - }; - unsigned_tx.set_tx_fee(tx_fee); - stacks_client.sign_transaction(unsigned_tx) - } - - // Get the account nonces for the provided list of signer addresses - fn get_account_nonces( - &self, - stacks_client: &StacksClient, - signer_addresses: &[StacksAddress], - ) -> std::collections::HashMap { - let mut account_nonces = std::collections::HashMap::with_capacity(signer_addresses.len()); - for address in signer_addresses { - let Ok(account_nonce) = stacks_client.get_account_nonce(address) else { - warn!("{self}: Unable to get account nonce for address: {address}."); - continue; - }; - account_nonces.insert(*address, account_nonce); - } - account_nonces - } - - /// broadcast the dkg vote transaction according to the current epoch - fn broadcast_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: StacksEpochId, - mut signer_transactions: Vec, - new_transaction: StacksTransaction, - ) -> Result<(), ClientError> { - let txid = new_transaction.txid(); - if self.approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set - info!( - "{self}: Already has an approved aggregate key. Do not broadcast the transaction ({txid:?})." - ); - return Ok(()); - } - if epoch >= StacksEpochId::Epoch30 { - debug!("{self}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB."); - } else if epoch == StacksEpochId::Epoch25 { - debug!("{self}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool."); - stacks_client.submit_transaction(&new_transaction)?; - info!("{self}: Submitted DKG vote transaction ({txid:?}) to the mempool"); - } else { - debug!("{self}: Received a DKG result, but are in an unsupported epoch. 
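The fee logic in `build_dkg_vote` above reduces to a small decision table: from Epoch 3.0 onward the vote travels over StackerDB only and pays nothing; before that, a configured cap bounds the estimated fee (falling back to the flat configured fee when estimation fails), and with no cap the flat fee is used as-is. A sketch of just that rule, with a `bool` standing in for the epoch comparison:

```rust
/// Fee for a DKG vote transaction. `estimate` is None when the node's fee
/// estimator errored; `cap` mirrors the optional max_tx_fee_ustx setting.
fn dkg_vote_fee(pre_epoch_3_0: bool, estimate: Option<u64>, flat_fee: u64, cap: Option<u64>) -> u64 {
    if !pre_epoch_3_0 {
        return 0; // Epoch 3.0+: the vote is broadcast via StackerDB only
    }
    match cap {
        Some(max) => estimate.unwrap_or(flat_fee).min(max),
        None => flat_fee,
    }
}

fn main() {
    assert_eq!(dkg_vote_fee(true, Some(180), 300, Some(200)), 180);
    assert_eq!(dkg_vote_fee(true, None, 300, Some(200)), 200);
    assert_eq!(dkg_vote_fee(false, Some(180), 300, None), 0);
}
```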
Do not broadcast the transaction ({}).", new_transaction.txid()); - return Ok(()); - } - // For all Pox-4 epochs onwards, broadcast the results also to stackerDB for other signers/miners to observe - signer_transactions.push(new_transaction); - let signer_message = SignerMessage::Transactions(signer_transactions); - self.stackerdb_manager - .send_message_with_retry(signer_message)?; - crate::monitoring::increment_dkg_votes_submitted(); - info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); - Ok(()) - } - - /// Process a signature from a signing round by deserializing the signature and - /// broadcasting an appropriate Reject or Approval message to stackerdb - fn process_signature(&mut self, signature: &Signature) { - // Deserialize the signature result and broadcast an appropriate Reject or Approval message to stackerdb - let message = self.coordinator.get_message(); - let Some(block_vote): Option = read_next(&mut &message[..]).ok() else { - debug!("{self}: Received a signature result for a non-block. Nothing to broadcast."); - return; - }; - - let block_submission = if block_vote.rejected { - crate::monitoring::increment_block_responses_sent(false); - // We signed a rejection message. Return a rejection message - BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()) - } else { - crate::monitoring::increment_block_responses_sent(true); - // we agreed to sign the block hash. Return an approval message - BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()) - }; - - // Submit signature result to miners to observe - info!("{self}: Submit block response: {block_submission}"); - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_submission.into()) - { - warn!("{self}: Failed to send block submission to stacker-db: {e:?}"); - } - } - - /// Process a sign error from a signing round, broadcasting a rejection message to stackerdb accordingly - fn process_sign_error(&mut self, e: &SignError) { - let message = self.coordinator.get_message(); - // We do not sign across blocks, but across their hashes. however, the first sign request is always across the block - // so we must handle this case first - - let block: NakamotoBlock = read_next(&mut &message[..]).ok().unwrap_or({ - // This is not a block so maybe its across its hash - let Some(block_vote): Option = read_next(&mut &message[..]).ok() - else { - // This is not a block vote either. We cannot process this error - debug!( - "{self}: Received a signature error for a non-block. Nothing to broadcast." - ); - return; - }; - let Some(block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to signer DB")) - else { - debug!( - "{self}: Received a signature result for a block we have not seen before. Ignoring..." 
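`process_signature` above is, at its core, a pure mapping from the signed block vote to the response the miner reads back: the vote's `rejected` flag picks the variant, and the same signer signature hash and group signature ride along either way. Schematically, with hypothetical minimal types in place of the libsigner ones:

```rust
struct BlockVote {
    rejected: bool,
    signer_signature_hash: [u8; 32],
}

enum BlockResponse {
    Accepted { hash: [u8; 32], signature: Vec<u8> },
    Rejected { hash: [u8; 32], signature: Vec<u8> },
}

/// Map a deserialized vote plus the group signature onto the response
/// variant that gets written to StackerDB for miners to observe.
fn to_response(vote: &BlockVote, signature: Vec<u8>) -> BlockResponse {
    if vote.rejected {
        BlockResponse::Rejected { hash: vote.signer_signature_hash, signature }
    } else {
        BlockResponse::Accepted { hash: vote.signer_signature_hash, signature }
    }
}

fn main() {
    let vote = BlockVote { rejected: false, signer_signature_hash: [0u8; 32] };
    assert!(matches!(to_response(&vote, vec![]), BlockResponse::Accepted { .. }));
}
```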
- ); - return; - }; - block_info.block - }); - let block_rejection = - BlockRejection::new(block.header.signer_signature_hash(), RejectCode::from(e)); - debug!("{self}: Broadcasting block rejection: {block_rejection:?}"); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection submission to stacker-db: {e:?}"); - } - } - - /// Persist signer state in both SignerDB and StackerDB - fn save_signer_state(&mut self) -> Result<(), PersistenceError> { - let rng = &mut OsRng; - - let state = self.state_machine.signer.save(); - let serialized_state = serde_json::to_vec(&state)?; - - let encrypted_state = encrypt( - &self.state_machine.network_private_key, - &serialized_state, - rng, - )?; - - let signerdb_result = self.save_signer_state_in_signerdb(&encrypted_state); - let stackerdb_result = self.save_signer_state_in_stackerdb(encrypted_state); - - if let Err(err) = &signerdb_result { - warn!("{self}: Failed to persist state in SignerDB: {err}"); - } - - if let Err(err) = &stackerdb_result { - warn!("{self}: Failed to persist state in StackerDB: {err}"); - - stackerdb_result - } else { - signerdb_result - } - } - - /// Persist signer state in SignerDB - fn save_signer_state_in_signerdb( - &self, - encrypted_state: &[u8], - ) -> Result<(), PersistenceError> { - self.signer_db - .insert_encrypted_signer_state(self.reward_cycle, encrypted_state)?; - Ok(()) - } - - /// Persist signer state in StackerDB - /// TODO: this is a no-op until the number of signer slots can be expanded - fn save_signer_state_in_stackerdb( - &mut self, - _encrypted_state: Vec, - ) -> Result<(), PersistenceError> { - /* - * This is a no-op until the number of signer slots can be expanded to 14 - * - let message = SignerMessage::EncryptedSignerState(encrypted_state); - self.stackerdb_manager.send_message_with_retry(message)?; - */ - Ok(()) - } - - /// Send any operation results across the provided channel - fn send_operation_results( - &mut self, - res: &Sender>, - operation_results: Vec, - ) { - let nmb_results = operation_results.len(); - match res.send(operation_results.into_iter().map(|r| r.into()).collect()) { - Ok(_) => { - debug!("{self}: Successfully sent {nmb_results} operation result(s)") - } - Err(e) => { - warn!("{self}: Failed to send {nmb_results} operation results: {e:?}"); - } - } - } - - /// Sending all provided packets through stackerdb with a retry - fn send_outbound_messages(&mut self, outbound_messages: Vec) { - debug!( - "{self}: Sending {} messages to other stacker-db instances.", - outbound_messages.len() - ); - for msg in outbound_messages { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - if let Ok(ack) = ack { - debug!("{self}: send outbound ACK: {ack:?}"); - } else { - warn!("{self}: Failed to send message to stacker-db instance: {ack:?}"); - } - } - } - - /// Refresh DKG and queue it if required - pub fn refresh_dkg( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - // First attempt to retrieve the aggregate key from the contract. - self.update_approved_aggregate_key(stacks_client)?; - if self.approved_aggregate_public_key.is_some() { - return Ok(()); - } - // Check stackerdb for any missed DKG messages to catch up our state. 
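Note that `save_signer_state` above never hands the network private key to the cipher directly: `derive_encryption_key` (defined further down this file) hashes it under a fixed domain prefix first, so the same scalar can serve other roles without key-reuse concerns. A standalone sketch of that derivation, assuming the `sha2` crate's SHA-512/256 is equivalent to the node's `Sha512Trunc256Sum`:

```rust
use sha2::{Digest, Sha512_256};

/// Derive the symmetric state-encryption key from a 32-byte private key,
/// domain-separated by a fixed prefix (mirrors `derive_encryption_key`).
fn derive_state_key(private_key_bytes: &[u8; 32]) -> [u8; 32] {
    let mut hasher = Sha512_256::new();
    hasher.update(b"SIGNER_STATE_ENCRYPTION_KEY/");
    hasher.update(private_key_bytes);
    let digest = hasher.finalize();
    let mut key = [0u8; 32];
    key.copy_from_slice(&digest);
    key
}

fn main() {
    // deterministic: same input key always yields the same cipher key,
    // while a different domain prefix would yield an unrelated one
    assert_eq!(derive_state_key(&[7u8; 32]), derive_state_key(&[7u8; 32]));
}
```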
- self.read_dkg_stackerdb_messages(stacks_client, res, current_reward_cycle)?; - // Check if we should still queue DKG - if !self.should_queue_dkg(stacks_client)? { - return Ok(()); - } - // Because there could be a slight delay in reading pending transactions and a key being approved by the contract, - // check one last time if the approved key was set since we finished the should queue dkg call - self.update_approved_aggregate_key(stacks_client)?; - if self.approved_aggregate_public_key.is_some() { - return Ok(()); - } - if self.commands.front() != Some(&SignerCommand::Dkg) { - info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); - self.commands.push_front(SignerCommand::Dkg); - } else { - debug!("{self}: DKG command already queued..."); - } - Ok(()) - } - - /// Overwrites the approved aggregate key to the value in the contract, updating state accordingly - pub fn update_approved_aggregate_key( - &mut self, - stacks_client: &StacksClient, - ) -> Result<(), ClientError> { - let old_dkg = self.approved_aggregate_public_key; - self.approved_aggregate_public_key = - stacks_client.get_approved_aggregate_key(self.reward_cycle)?; - if self.approved_aggregate_public_key.is_some() { - // TODO: this will never work as is. We need to have stored our party shares on the side etc for this particular aggregate key. - // Need to update state to store the necessary info, check against it to see if we have participated in the winning round and - // then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate. - let internal_dkg = self.coordinator.aggregate_public_key; - if internal_dkg != self.approved_aggregate_public_key { - warn!("{self}: we do not support changing the internal DKG key yet. Expected {internal_dkg:?} got {:?}", self.approved_aggregate_public_key); - } - self.coordinator - .set_aggregate_public_key(self.approved_aggregate_public_key); - if old_dkg != self.approved_aggregate_public_key { - warn!( - "{self}: updated DKG value from {old_dkg:?} to {:?}.", - self.approved_aggregate_public_key - ); - } - match self.state { - State::OperationInProgress(Operation::Dkg) => { - debug!( - "{self}: DKG has already been set. Aborting DKG operation {}.", - self.coordinator.current_dkg_id - ); - self.finish_operation(); - } - State::Uninitialized => { - // If we successfully load the DKG value, we are fully initialized - self.state = State::Idle; - } - _ => { - // do nothing - } - } - } - Ok(()) - } - - /// Should DKG be queued to the current signer's command queue - /// This assumes that no key has been approved by the contract yet - pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result { - if self.state != State::Idle - || self.signer_id != self.get_coordinator_dkg().0 - || self.commands.front() == Some(&SignerCommand::Dkg) - { - // We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. Do not attempt to queue DKG - return Ok(false); - } - let signer_address = stacks_client.get_signer_address(); - let account_nonces = self.get_account_nonces(stacks_client, &[*signer_address]); - let old_transactions = self.get_signer_transactions(&account_nonces).map_err(|e| { - warn!("{self}: Failed to get old signer transactions: {e:?}. 
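`refresh_dkg` above reads the approved key, runs the comparatively slow `should_queue_dkg` scan, and then reads the key again before queuing, because a vote can confirm while the scan is in flight. The shape of that check, scan, re-check pattern, with closures standing in for the client calls:

```rust
/// Returns true only if no key is approved both before and after the slow
/// pending-work scan, mirroring the double read in `refresh_dkg`.
fn should_run_dkg(
    read_approved_key: impl Fn() -> Option<u64>,
    slow_scan_says_queue: impl Fn() -> bool,
) -> bool {
    if read_approved_key().is_some() {
        return false; // already approved, nothing to do
    }
    if !slow_scan_says_queue() {
        return false;
    }
    // a vote may have confirmed while we scanned: check once more
    read_approved_key().is_none()
}

fn main() {
    assert!(should_run_dkg(|| None, || true));
    assert!(!should_run_dkg(|| Some(1), || true));
}
```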
May trigger DKG unnecessarily"); - }).unwrap_or_default(); - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - let params = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction).unwrap_or_else(|| panic!("BUG: {self}: Received an invalid {SIGNERS_VOTING_FUNCTION_NAME} transaction in an already filtered list: {transaction:?}")); - if Some(params.aggregate_key) == self.coordinator.aggregate_public_key - && params.voting_round == self.coordinator.current_dkg_id - { - debug!("{self}: Not triggering a DKG round. Already have a pending vote transaction."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round - ); - return Ok(false); - } - } - if let Some(aggregate_key) = stacks_client.get_vote_for_aggregate_public_key( - self.coordinator.current_dkg_id, - self.reward_cycle, - *signer_address, - )? { - let Some(round_weight) = stacks_client - .get_round_vote_weight(self.reward_cycle, self.coordinator.current_dkg_id)? - else { - // This only will happen if somehow we registered as a signer and were granted no weight which should not really ever happen. - error!("{self}: already voted for DKG, but no round vote weight found. We either have no voting power or the contract is corrupted."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key - ); - return Ok(false); - }; - let threshold_weight = stacks_client.get_vote_threshold_weight(self.reward_cycle)?; - if round_weight < threshold_weight { - // The threshold weight has not been met yet. We should wait for more votes to arrive. - // TODO: this should be on a timeout of some kind. We should not wait forever for the threshold to be met. - // See https://github.com/stacks-network/stacks-core/issues/4568 - debug!("{self}: Not triggering a DKG round. Weight threshold has not been met yet. Waiting for more votes to arrive."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight - ); - return Ok(false); - } - } else { - // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction - // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes - let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); - let old_transactions = self.stackerdb_manager.get_current_transactions()?; - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - // We should not consider other signer transactions and should ignore invalid transaction versions - if transaction.origin_address() != *signer_address - || transaction.is_mainnet() != self.mainnet - { - continue; - } - let Some(params) = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction) - else { - continue; - }; - let Some(dkg_public_key) = self.coordinator.aggregate_public_key else { - break; - }; - if params.aggregate_key == dkg_public_key - && params.voting_round == self.coordinator.current_dkg_id - && params.reward_cycle == self.reward_cycle - { - let origin_nonce = transaction.get_origin_nonce(); - if origin_nonce < account_nonce { - // We have already voted, but our vote nonce is outdated. 
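In the StackerDB branch of `should_queue_dkg` (continuing below), a pending vote that matches the current key, round, and reward cycle never triggers a fresh DKG round; the only question is whether its nonce is still spendable, which decides between resubmitting the vote and waiting for confirmation. Condensed into a sketch (the key/round/cycle match checks are elided):

```rust
enum Action {
    QueueDkg,       // no matching pending vote found
    Resubmit,       // our vote exists but its nonce was overtaken on chain
    WaitForConfirm, // our vote exists and is still valid
}

/// Decide what to do given the nonce of a matching pending StackerDB vote,
/// if any, and the signer's current account nonce.
fn pending_vote_action(pending_nonce: Option<u64>, account_nonce: u64) -> Action {
    match pending_nonce {
        None => Action::QueueDkg,
        Some(n) if n < account_nonce => Action::Resubmit,
        Some(_) => Action::WaitForConfirm,
    }
}

fn main() {
    assert!(matches!(pending_vote_action(Some(3), 5), Action::Resubmit));
    assert!(matches!(pending_vote_action(None, 5), Action::QueueDkg));
}
```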
Resubmit vote with updated transaction - warn!("{self}: DKG vote submitted with invalid nonce ({origin_nonce} < {account_nonce}). Resubmitting vote."); - self.process_dkg(stacks_client, &dkg_public_key); - } else { - debug!("{self}: Already have a pending DKG vote in StackerDB. Waiting for it to be confirmed."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round, - "reward_cycle" => params.reward_cycle, - "nonce" => origin_nonce - ); - } - return Ok(false); - } - } - } - Ok(true) - } -} - -fn load_encrypted_signer_state( - storage: S, - id: S::IdType, - private_key: &Scalar, -) -> Result, PersistenceError> { - if let Some(encrypted_state) = storage.get_encrypted_signer_state(id)? { - let serialized_state = decrypt(private_key, &encrypted_state)?; - let state = serde_json::from_slice(&serialized_state) - .expect("Failed to deserialize decryoted state"); - Ok(Some(v2::Signer::load(&state))) - } else { - Ok(None) - } -} - -trait SignerStateStorage { - type IdType; - - fn get_encrypted_signer_state( - self, - signer_config: Self::IdType, - ) -> Result>, PersistenceError>; -} - -impl SignerStateStorage for &mut StackerDBManager { - type IdType = SignerSlotID; - - fn get_encrypted_signer_state( - self, - id: Self::IdType, - ) -> Result>, PersistenceError> { - Ok(self.get_encrypted_signer_state(id)?) - } -} - -impl SignerStateStorage for &SignerDb { - type IdType = u64; - fn get_encrypted_signer_state( - self, - id: Self::IdType, - ) -> Result>, PersistenceError> { - Ok(self.get_encrypted_signer_state(id)?) - } -} - -fn encrypt( - private_key: &Scalar, - msg: &[u8], - rng: &mut impl rand_core::CryptoRngCore, -) -> Result, EncryptionError> { - wsts::util::encrypt(derive_encryption_key(private_key).as_bytes(), msg, rng) - .map_err(|_| EncryptionError::Encrypt) -} - -fn decrypt(private_key: &Scalar, encrypted_msg: &[u8]) -> Result, EncryptionError> { - wsts::util::decrypt(derive_encryption_key(private_key).as_bytes(), encrypted_msg) - .map_err(|_| EncryptionError::Decrypt) -} - -fn derive_encryption_key(private_key: &Scalar) -> Sha512Trunc256Sum { - let mut prefixed_key = "SIGNER_STATE_ENCRYPTION_KEY/".as_bytes().to_vec(); - prefixed_key.extend_from_slice(&private_key.to_bytes()); - - Sha512Trunc256Sum::from_data(&prefixed_key) -} - -/// Error stemming from a persistence operation -#[derive(Debug, thiserror::Error)] -pub enum PersistenceError { - /// Encryption error - #[error("{0}")] - Encryption(#[from] EncryptionError), - /// Database error - #[error("Database operation failed: {0}")] - DBError(#[from] DBError), - /// Serialization error - #[error("JSON serialization failed: {0}")] - JsonSerializationError(#[from] serde_json::Error), - /// StackerDB client error - #[error("StackerDB client error: {0}")] - StackerDBClientError(#[from] ClientError), -} - -/// Error stemming from a persistence operation -#[derive(Debug, thiserror::Error)] -pub enum EncryptionError { - /// Encryption failed - #[error("Encryption operation failed")] - Encrypt, - /// Decryption failed - #[error("Encryption operation failed")] - Decrypt, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn encrypted_messages_should_be_possible_to_decrypt() { - let msg = "Nobody's gonna know".as_bytes(); - let key = Scalar::random(&mut OsRng); - - let encrypted = encrypt(&key, msg, &mut OsRng).unwrap(); - - assert_ne!(encrypted, msg); - - let decrypted = decrypt(&key, &encrypted).unwrap(); - - assert_eq!(decrypted, msg); - } -} diff --git 
a/stacks-signer/src/v1/stackerdb_manager.rs b/stacks-signer/src/v1/stackerdb_manager.rs deleted file mode 100644 index cf5e4840225..00000000000 --- a/stacks-signer/src/v1/stackerdb_manager.rs +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -// -use blockstack_lib::chainstate::stacks::StacksTransaction; -use clarity::types::chainstate::StacksPrivateKey; -use libsigner::v1::messages::{MessageSlotID, SignerMessage}; -use libsigner::{SignerSession, StackerDBSession}; -use libstackerdb::StackerDBChunkAckData; -use slog::{slog_debug, slog_error, slog_warn}; -use stacks_common::codec::read_next; -use stacks_common::{debug, error, warn}; -use wsts::net::Packet; - -use crate::client::stackerdb::StackerDB; -use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; -use crate::config::SignerConfig; - -/// The session manager for communicating with the .signers contracts for the current and next reward cycle -#[derive(Debug)] -pub struct StackerDBManager { - /// The stacker-db transaction msg session for the NEXT reward cycle - next_transaction_session: StackerDBSession, - /// The stacker-db sessions for each signer set and message type. - stackerdb: StackerDB, -} - -impl From<&SignerConfig> for StackerDBManager { - fn from(config: &SignerConfig) -> Self { - let stackerdb = StackerDB::from(config); - let next_transaction_session = StackerDBSession::new( - &config.node_host, - MessageSlotID::Transactions - .stacker_db_contract(config.mainnet, config.reward_cycle.wrapping_add(1)), - ); - Self { - next_transaction_session, - stackerdb, - } - } -} -impl StackerDBManager { - /// Create a new StackerDB Manager - pub fn new( - host: &str, - stacks_private_key: StacksPrivateKey, - is_mainnet: bool, - reward_cycle: u64, - signer_slot_id: SignerSlotID, - ) -> Self { - let stackerdb = StackerDB::new( - host, - stacks_private_key, - is_mainnet, - reward_cycle, - signer_slot_id, - ); - let next_transaction_session = StackerDBSession::new( - host, - MessageSlotID::Transactions - .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), - ); - Self { - next_transaction_session, - stackerdb, - } - } - - /// Send a message to the stackerdb with retry - pub fn send_message_with_retry( - &mut self, - message: SignerMessage, - ) -> Result { - self.stackerdb.send_message_with_retry(message) - } - - /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an - /// exponential backoff retry - pub fn send_message_bytes_with_retry( - &mut self, - msg_id: &MessageSlotID, - message_bytes: Vec, - ) -> Result { - self.stackerdb - .send_message_bytes_with_retry(msg_id, message_bytes) - } - - /// Get the ordered DKG packets from stackerdb for the signer slot IDs. 
- pub fn get_dkg_packets( - &mut self, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - let packet_slots = &[ - MessageSlotID::DkgBegin, - MessageSlotID::DkgPublicShares, - MessageSlotID::DkgPrivateBegin, - MessageSlotID::DkgPrivateShares, - MessageSlotID::DkgEndBegin, - MessageSlotID::DkgEnd, - ]; - let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let mut packets = vec![]; - for packet_slot in packet_slots { - let session = self - .stackerdb - .get_session_mut(packet_slot) - .ok_or(ClientError::NotConnected)?; - let messages = StackerDB::get_messages(session, &slot_ids)?; - for message in messages { - let SignerMessage::Packet(packet) = message else { - warn!("Found an unexpected type in a packet slot {packet_slot}"); - continue; - }; - packets.push(packet); - } - } - Ok(packets) - } - - /// Get the transactions from stackerdb for the signers - fn get_transactions( - transactions_session: &mut StackerDBSession, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let messages = StackerDB::get_messages(transactions_session, &slot_ids)?; - let mut transactions = vec![]; - for message in messages { - let SignerMessage::Transactions(chunk_transactions) = message else { - warn!("Signer wrote an unexpected type to the transactions slot"); - continue; - }; - transactions.extend(chunk_transactions); - } - Ok(transactions) - } - - /// Get this signer's latest transactions from stackerdb - pub fn get_current_transactions(&mut self) -> Result, ClientError> { - let signer_slot_id = self.get_signer_slot_id(); - let Some(transactions_session) = - self.stackerdb.get_session_mut(&MessageSlotID::Transactions) - else { - return Err(ClientError::NotConnected); - }; - Self::get_transactions(transactions_session, &[signer_slot_id]) - } - - /// Get the latest signer transactions from signer ids for the next reward cycle - pub fn get_next_transactions( - &mut self, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - debug!("Getting latest chunks from stackerdb for the following signers: {signer_ids:?}",); - Self::get_transactions(&mut self.next_transaction_session, signer_ids) - } - - /// Get the encrypted state for the given signer - pub fn get_encrypted_signer_state( - &mut self, - signer_id: SignerSlotID, - ) -> Result>, ClientError> { - debug!("Getting the persisted encrypted state for signer {signer_id}"); - let Some(state_session) = self - .stackerdb - .get_session_mut(&MessageSlotID::EncryptedSignerState) - else { - return Err(ClientError::NotConnected); - }; - - let send_request = || { - state_session - .get_latest_chunks(&[signer_id.0]) - .map_err(backoff::Error::transient) - }; - - let Some(chunk) = retry_with_exponential_backoff(send_request)?.pop().ok_or( - ClientError::UnexpectedResponseFormat(format!( - "Missing response for state session request for signer {}", - signer_id - )), - )? - else { - debug!("No persisted state for signer {signer_id}"); - return Ok(None); - }; - - if chunk.is_empty() { - debug!("Empty persisted state for signer {signer_id}"); - return Ok(None); - } - - let SignerMessage::EncryptedSignerState(state) = - read_next::(&mut chunk.as_slice())? 
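`get_dkg_packets` above walks the message slots in fixed protocol order, DkgBegin through DkgEnd, so a signer replaying missed rounds feeds its state machine packets in the same order a live round would have produced them. The ordering concern in isolation (slot names as plain strings, `fetch` as a stand-in for the StackerDB read):

```rust
/// DKG message slots in replay order; iterating slots outer and signers
/// inner yields every packet of one phase before any of the next.
const DKG_SLOT_ORDER: [&str; 6] = [
    "DkgBegin",
    "DkgPublicShares",
    "DkgPrivateBegin",
    "DkgPrivateShares",
    "DkgEndBegin",
    "DkgEnd",
];

fn replay<'a>(fetch: impl Fn(&str) -> Vec<&'a str>) -> Vec<&'a str> {
    DKG_SLOT_ORDER
        .into_iter()
        .flat_map(|slot| fetch(slot))
        .collect()
}

fn main() {
    let packets = replay(|slot| if slot == "DkgBegin" { vec!["b0"] } else { vec![] });
    assert_eq!(packets, vec!["b0"]);
}
```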
- else { - error!("Wrong message type stored in signer state slot for signer {signer_id}"); - return Ok(None); - }; - - Ok(Some(state)) - } - - /// Retrieve the signer set this stackerdb client is attached to - pub fn get_signer_set(&self) -> u32 { - self.stackerdb.get_signer_set() - } - - /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&self) -> SignerSlotID { - self.stackerdb.get_signer_slot_id() - } -} - -#[cfg(test)] -mod tests { - use std::thread::spawn; - use std::time::Duration; - - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; - use clarity::codec::StacksMessageCodec; - use clarity::types::chainstate::StacksPrivateKey; - use libstackerdb::StackerDBChunkAckData; - - use super::*; - use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; - use crate::config::GlobalConfig; - - #[test] - fn get_signer_transactions_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut manager = StackerDBManager::from(&signer_config); - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx.clone()]); - let message = signer_message.serialize_to_vec(); - - let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = spawn(move || manager.get_next_transactions(&signer_slot_ids)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let signer_message = SignerMessage::Transactions(vec![]); - let message = signer_message.serialize_to_vec(); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let transactions = h.join().unwrap().unwrap(); - assert_eq!(transactions, vec![tx]); - } - - #[test] - fn send_signer_message_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut stackerdb = StackerDBManager::from(&signer_config); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx]); - let ack = StackerDBChunkAckData { - accepted: true, - 
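The tests above (and the one continuing below) follow the same choreography: spawn the client call on its own thread, where it blocks on an HTTP round-trip; play the server from the test body by accepting the connection and writing a canned response; then join the thread and assert on the result. A dependency-free miniature of that pattern (std only, everything hypothetical):

```rust
use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};
use std::thread;

fn main() {
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap();

    // the "client under test" runs on its own thread, like spawn(...) above
    let handle = thread::spawn(move || {
        let mut stream = TcpStream::connect(addr).unwrap();
        stream.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
        let mut response = String::new();
        stream.read_to_string(&mut response).unwrap();
        response
    });

    // the test body plays the server: accept, read the request, answer
    let (mut server_side, _) = listener.accept().unwrap();
    let mut request = [0u8; 512];
    let _ = server_side.read(&mut request).unwrap();
    server_side.write_all(b"HTTP/1.1 200 OK\r\n\r\n").unwrap();
    drop(server_side); // close so the client's read_to_string returns

    assert!(handle.join().unwrap().starts_with("HTTP/1.1 200 OK"));
}
```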
reason: None, - metadata: None, - code: None, - }; - let mock_server = mock_server_from_config(&config); - let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); - response_bytes.extend(payload.as_bytes()); - std::thread::sleep(Duration::from_millis(500)); - write_response(mock_server, response_bytes.as_slice()); - assert_eq!(ack, h.join().unwrap().unwrap()); - } -} diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 19165db0a82..0b9b59a0e72 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -24,7 +24,6 @@ stacks-common = { path = "../../stacks-common" } chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } -wsts = { workspace = true } url = "2.1.0" rand = { workspace = true } rand_core = { workspace = true } @@ -48,7 +47,6 @@ stacks = { package = "stackslib", path = "../../stackslib", features = ["default stacks-signer = { path = "../../stacks-signer", features = ["testing"] } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19d..a2f949a8ccc 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -50,7 +50,6 @@ use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; use super::sign_coordinator::SignCoordinator; @@ -290,7 +289,6 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; - let mut attempts = 0; // now, actually run this tenure loop { #[cfg(test)] @@ -371,11 +369,9 @@ impl BlockMinerThread { if let Some(mut new_block) = new_block { Self::fault_injection_block_broadcast_stall(&new_block); - let (reward_set, signer_signature) = match self.gather_signatures( - &mut new_block, - &mut stackerdbs, - &mut attempts, - ) { + let (reward_set, signer_signature) = match self + .gather_signatures(&mut new_block, &mut stackerdbs) + { Ok(x) => x, Err(e) => match e { NakamotoNodeError::StacksTipChanged => { @@ -523,7 +519,6 @@ impl BlockMinerThread { &mut self, new_block: &mut NakamotoBlock, stackerdbs: &mut StackerDBs, - attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( @@ -557,7 +552,6 @@ impl BlockMinerThread { }) })?; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let reward_set = self.load_signer_set()?; if self.config.get_node_config(false).mock_mining { @@ -566,7 +560,7 @@ impl BlockMinerThread { let mut coordinator = SignCoordinator::new( &reward_set, - miner_privkey_as_scalar, + miner_privkey, &self.config, self.globals.should_keep_running.clone(), ) @@ -583,10 +577,8 @@ impl BlockMinerThread { )) })?; - *attempts += 1; let signature = coordinator.run_sign_v0( new_block, - *attempts, &tip, &self.burnchain, &sort_db, diff --git 
a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 35d578c0f1c..ee012984228 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -21,7 +21,6 @@ use std::time::Duration; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -31,8 +30,6 @@ use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NA use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::Error as ChainstateError; -#[cfg(any(test, feature = "testing"))] -use stacks::chainstate::stacks::ThresholdSignature; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; @@ -42,15 +39,6 @@ use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; -use wsts::common::PolyCommitment; -#[cfg(any(test, feature = "testing"))] -use wsts::curve::ecdsa; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{Config as CoordinatorConfig, Coordinator}; -use wsts::state_machine::PublicKeys; -use wsts::v2::Aggregator; use super::Error as NakamotoNodeError; use crate::event_dispatcher::STACKER_DB_CHANNEL; @@ -72,11 +60,8 @@ static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); /// is used by Nakamoto miners to act as the coordinator for the blocks they /// produce. 
pub struct SignCoordinator { - coordinator: FireCoordinator, receiver: Option>, - message_key: Scalar, - #[cfg(any(test, feature = "testing"))] - wsts_public_keys: PublicKeys, + message_key: StacksPrivateKey, is_mainnet: bool, miners_session: StackerDBSession, signer_entries: HashMap, @@ -86,20 +71,6 @@ pub struct SignCoordinator { pub next_signer_bitvec: BitVec<4000>, } -pub struct NakamotoSigningParams { - /// total number of signers - pub num_signers: u32, - /// total number of keys - pub num_keys: u32, - /// threshold of keys needed to form a valid signature - pub threshold: u32, - /// map of signer_id to controlled key_ids - pub signer_key_ids: HashMap>, - /// ECDSA public keys as Point objects indexed by signer_id - pub signer_public_keys: HashMap, - pub wsts_public_keys: PublicKeys, -} - impl Drop for SignCoordinator { fn drop(&mut self) { STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect( @@ -108,112 +79,13 @@ impl Drop for SignCoordinator { } } -impl NakamotoSigningParams { - pub fn parse( - is_mainnet: bool, - reward_set: &[NakamotoSignerEntry], - ) -> Result { - let parsed = SignerEntries::parse(is_mainnet, reward_set).map_err(|e| { - ChainstateError::InvalidStacksBlock(format!( - "Invalid Reward Set: Could not parse into WSTS structs: {e:?}" - )) - })?; - - let num_keys = parsed - .count_keys() - .expect("FATAL: more than u32::max() signers in the reward set"); - let num_signers = parsed - .count_signers() - .expect("FATAL: more than u32::max() signers in the reward set"); - let threshold = parsed - .get_signing_threshold() - .expect("FATAL: more than u32::max() signers in the reward set"); - - Ok(NakamotoSigningParams { - num_signers, - threshold, - num_keys, - signer_key_ids: parsed.coordinator_key_ids, - signer_public_keys: parsed.signer_public_keys, - wsts_public_keys: parsed.public_keys, - }) - } -} - -#[allow(dead_code)] -fn get_signer_commitments( - is_mainnet: bool, - reward_set: &[NakamotoSignerEntry], - stackerdbs: &StackerDBs, - reward_cycle: u64, - expected_aggregate_key: &Point, -) -> Result, ChainstateError> { - let commitment_contract = - MessageSlotID::DkgResults.stacker_db_contract(is_mainnet, reward_cycle); - let signer_set_len = u32::try_from(reward_set.len()) - .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set length exceeds u32".into()))?; - for signer_id in 0..signer_set_len { - let Some(signer_data) = stackerdbs.get_latest_chunk(&commitment_contract, signer_id)? 
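`get_signer_commitments`, which begins just above and whose checks continue below, validates reported DKG results two ways: the reported aggregate key must equal the expected one, and so must the key recomputed from the party polynomials, which is the sum of every party's constant-term commitment. Sketched with integer addition standing in for point addition:

```rust
/// The group key equals the sum of every party polynomial's constant term
/// (`poly[0]`); wrapping integer addition stands in for point addition.
fn computed_group_key(party_polynomials: &[Vec<u64>]) -> u64 {
    party_polynomials
        .iter()
        .fold(0u64, |acc, poly| acc.wrapping_add(poly[0]))
}

fn main() {
    // three parties, arbitrary polynomials: only the constant terms count
    let polys = vec![vec![10, 1], vec![20, 2], vec![12, 3]];
    assert_eq!(computed_group_key(&polys), 42);
}
```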
- else { - warn!( - "Failed to fetch DKG result, will look for results from other signers."; - "signer_id" => signer_id - ); - continue; - }; - let Ok(SignerMessageV1::DkgResults { - aggregate_key, - party_polynomials, - }) = SignerMessageV1::consensus_deserialize(&mut signer_data.as_slice()) - else { - warn!( - "Failed to parse DKG result, will look for results from other signers."; - "signer_id" => signer_id, - ); - continue; - }; - - if &aggregate_key != expected_aggregate_key { - warn!( - "Aggregate key in DKG results does not match expected, will look for results from other signers."; - "expected" => %expected_aggregate_key, - "reported" => %aggregate_key, - ); - continue; - } - let computed_key = party_polynomials - .iter() - .fold(Point::default(), |s, (_, comm)| s + comm.poly[0]); - - if expected_aggregate_key != &computed_key { - warn!( - "Aggregate key computed from DKG results does not match expected, will look for results from other signers."; - "expected" => %expected_aggregate_key, - "computed" => %computed_key, - ); - continue; - } - - return Ok(party_polynomials); - } - error!( - "No valid DKG results found for the active signing set, cannot coordinate a group signature"; - "reward_cycle" => reward_cycle, - ); - Err(ChainstateError::InvalidStacksBlock( - "Failed to fetch DKG results for the active signer set".into(), - )) -} - impl SignCoordinator { /// * `reward_set` - the active reward set data, used to construct the signer /// set parameters. - /// * `message_key` - the signing key that the coordinator will use to sign messages - /// broadcasted to the signer set. this should be the miner's registered key. /// * `aggregate_public_key` - the active aggregate key for this cycle pub fn new( reward_set: &RewardSet, - message_key: Scalar, + message_key: StacksPrivateKey, config: &Config, keep_running: Arc, ) -> Result { @@ -224,6 +96,11 @@ impl SignCoordinator { return Err(ChainstateError::NoRegisteredSigners(0)); }; + let signer_entries = SignerEntries::parse(is_mainnet, reward_set_signers).map_err(|e| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to parse NakamotoSignerEntries: {e:?}" + )) + })?; let rpc_socket = config .node .get_rpc_loopback() @@ -240,33 +117,11 @@ impl SignCoordinator { ) .expect("FATAL: unable to construct initial bitvec for signer set"); - let NakamotoSigningParams { - num_signers, - num_keys, - threshold, - signer_key_ids, - signer_public_keys, - wsts_public_keys, - } = NakamotoSigningParams::parse(is_mainnet, reward_set_signers.as_slice())?; debug!( "Initializing miner/coordinator"; - "num_signers" => num_signers, - "num_keys" => num_keys, - "threshold" => threshold, - "signer_key_ids" => ?signer_key_ids, - "signer_public_keys" => ?signer_public_keys, - "wsts_public_keys" => ?wsts_public_keys, + "num_signers" => signer_entries.signer_pks.len(), + "signer_public_keys" => ?signer_entries.signer_pks, ); - let coord_config = CoordinatorConfig { - num_signers, - num_keys, - threshold, - signer_key_ids, - signer_public_keys, - dkg_threshold: threshold, - message_private_key: message_key.clone(), - ..Default::default() - }; let total_weight = reward_set.total_signing_weight().map_err(|e| { warn!("Failed to calculate total weight for the reward set: {e:?}"); @@ -288,8 +143,6 @@ impl SignCoordinator { Ok((slot_id, signer)) }) .collect::, ChainstateError>>()?; - - let coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING @@ -303,10 +156,8 
@@ impl SignCoordinator { warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); } let sign_coordinator = Self { - coordinator, message_key, receiver: Some(receiver), - wsts_public_keys, is_mainnet, miners_session, next_signer_bitvec, @@ -325,11 +176,8 @@ impl SignCoordinator { } Ok(Self { - coordinator, - message_key, receiver: Some(receiver), - #[cfg(any(test, feature = "testing"))] - wsts_public_keys, + message_key, is_mainnet, miners_session, next_signer_bitvec, @@ -340,40 +188,6 @@ impl SignCoordinator { }) } - fn get_sign_id(burn_block_height: u64, burnchain: &Burnchain) -> u64 { - burnchain - .pox_constants - .reward_cycle_index(burnchain.first_block_height, burn_block_height) - .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") - } - - /// Send a message over the miners contract using a `Scalar` private key - fn send_miners_message_scalar( - message_key: &Scalar, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: M, - miner_slot_id: MinerSlotID, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - election_sortition: &ConsensusHash, - ) -> Result<(), String> { - let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); - miner_sk.set_compress_public(true); - Self::send_miners_message( - &miner_sk, - sortdb, - tip, - stackerdbs, - message, - miner_slot_id, - is_mainnet, - miners_session, - election_sortition, - ) - } - /// Send a message over the miners contract using a `StacksPrivateKey` pub fn send_miners_message( miner_sk: &StacksPrivateKey, @@ -425,221 +239,6 @@ impl SignCoordinator { } } - #[cfg_attr(test, mutants::skip)] - #[cfg(any(test, feature = "testing"))] - pub fn begin_sign_v1( - &mut self, - block: &NakamotoBlock, - burn_block_height: u64, - block_attempt: u64, - burn_tip: &BlockSnapshot, - burnchain: &Burnchain, - sortdb: &SortitionDB, - stackerdbs: &StackerDBs, - counters: &Counters, - election_sortiton: &ConsensusHash, - ) -> Result { - let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); - let sign_iter_id = block_attempt; - let reward_cycle_id = burnchain - .block_height_to_reward_cycle(burn_tip.block_height) - .expect("FATAL: tried to initialize coordinator before first burn block height"); - self.coordinator.current_sign_id = sign_id; - self.coordinator.current_sign_iter_id = sign_iter_id; - - let proposal_msg = BlockProposal { - block: block.clone(), - burn_height: burn_block_height, - reward_cycle: reward_cycle_id, - }; - - let block_bytes = proposal_msg.serialize_to_vec(); - let nonce_req_msg = self - .coordinator - .start_signing_round(&block_bytes, false, None) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to start signing round in FIRE coordinator: {e:?}" - )) - })?; - Self::send_miners_message_scalar::( - &self.message_key, - sortdb, - burn_tip, - &stackerdbs, - nonce_req_msg.into(), - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortiton, - ) - .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; - counters.bump_naka_proposed_blocks(); - #[cfg(test)] - { - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. 
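The deleted `send_miners_message_scalar` above existed purely to bridge key types: a WSTS `Scalar` is 32 bytes, so it round-trips into the node's native `StacksPrivateKey`, with the public key forced to compressed encoding, before delegating to `send_miners_message`. With the v1 path gone, callers hold a `StacksPrivateKey` from the start. The conversion it performed, isolated (same calls as the deleted code; the `expect` message is ours):

```rust
use stacks_common::types::chainstate::StacksPrivateKey;
use wsts::curve::scalar::Scalar;

/// Bridge a WSTS scalar into the node's native private key type.
fn scalar_to_stacks_key(message_key: &Scalar) -> StacksPrivateKey {
    let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes())
        .expect("scalar bytes should form a valid secp256k1 private key");
    miner_sk.set_compress_public(true);
    miner_sk
}
```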
- if let Some(_signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(ThresholdSignature::empty()); - } - } - - let Some(ref mut receiver) = self.receiver else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the StackerDB event receiver".into(), - )); - }; - - loop { - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), - )) - } - }; - - let is_signer_event = - event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); - if !is_signer_event { - debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); - continue; - } - let modified_slots = &event.modified_slots; - - // Update `next_signers_bitvec` with the slots that were modified in the event - modified_slots.iter().for_each(|chunk| { - if let Ok(slot_id) = chunk.slot_id.try_into() { - match &self.next_signer_bitvec.set(slot_id, true) { - Err(e) => { - warn!("Failed to set bitvec for next signer: {e:?}"); - } - _ => (), - }; - } else { - error!("FATAL: slot_id greater than u16, which should never happen."); - } - }); - - let Ok(signer_event) = SignerEvent::try_from(event).map_err(|e| { - warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); - }) else { - continue; - }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("Received signer event other than a signer message. Ignoring."); - continue; - }; - if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. Ignoring."); - continue; - }; - debug!("Miner/Coordinator: Received messages from signers"; "count" => messages.len()); - let coordinator_pk = ecdsa::PublicKey::new(&self.message_key).map_err(|_e| { - NakamotoNodeError::MinerSignatureError("Bad signing key for the FIRE coordinator") - })?; - let packets: Vec<_> = messages - .into_iter() - .filter_map(|msg| match msg { - SignerMessageV1::DkgResults { .. } - | SignerMessageV1::BlockResponse(_) - | SignerMessageV1::EncryptedSignerState(_) - | SignerMessageV1::Transactions(_) => None, - SignerMessageV1::Packet(packet) => { - debug!("Received signers packet: {packet:?}"); - if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { - warn!("Failed to verify StackerDB packet: {packet:?}"); - None - } else { - Some(packet) - } - } - }) - .collect(); - let (outbound_msgs, op_results) = self - .coordinator - .process_inbound_messages(&packets) - .unwrap_or_else(|e| { - error!( - "Miner/Coordinator: Failed to process inbound message packets"; - "err" => ?e - ); - (vec![], vec![]) - }); - for operation_result in op_results.into_iter() { - match operation_result { - wsts::state_machine::OperationResult::Dkg { .. } - | wsts::state_machine::OperationResult::SignTaproot(_) - | wsts::state_machine::OperationResult::DkgError(_) => { - debug!("Ignoring unrelated operation result"); - } - wsts::state_machine::OperationResult::Sign(signature) => { - // check if the signature actually corresponds to our block? 
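Before any expensive parsing, the receive loop above applies two cheap filters: the event must come from a boot `.signers` contract, and its signer set must belong to the current cycle, since signer StackerDB contracts alternate between two sets by reward-cycle parity. The parity check in isolation:

```rust
/// Signer StackerDB contracts alternate between two signer sets, so an
/// event is only relevant when its set index matches cycle parity.
fn is_current_set(signer_set: u32, reward_cycle_id: u64) -> bool {
    u64::from(signer_set) == reward_cycle_id % 2
}

fn main() {
    assert!(is_current_set(1, 7)); // odd cycle -> set 1
    assert!(!is_current_set(1, 8)); // even cycle -> set 0
}
```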
- let block_sighash = block.header.signer_signature_hash(); - let verified = signature.verify( - self.coordinator.aggregate_public_key.as_ref().unwrap(), - &block_sighash.0, - ); - let signature = ThresholdSignature(signature); - if !verified { - warn!( - "Processed signature but didn't validate over the expected block. Returning error."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash - ); - return Err(NakamotoNodeError::SignerSignatureError( - "Signature failed to validate over the expected block".into(), - )); - } else { - info!( - "SignCoordinator: Generated a valid signature for the block"; - "next_signer_bitvec" => self.next_signer_bitvec.binary_str(), - ); - return Ok(signature); - } - } - wsts::state_machine::OperationResult::SignError(e) => { - return Err(NakamotoNodeError::SignerSignatureError(format!( - "Signing failed: {e:?}" - ))) - } - } - } - for msg in outbound_msgs { - match Self::send_miners_message_scalar::( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - msg.into(), - // TODO: note, in v1, we'll want to add a new slot, but for now, it just shares - // with the block proposal - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortiton, - ) { - Ok(()) => { - debug!("Miner/Coordinator: sent outbound message."); - } - Err(e) => { - warn!( - "Miner/Coordinator: Failed to send message to StackerDB instance: {e:?}." - ); - } - }; - } - } - } - /// Do we ignore signer signatures? #[cfg(test)] fn fault_injection_ignore_signatures() -> bool { @@ -682,7 +281,6 @@ impl SignCoordinator { pub fn run_sign_v0( &mut self, block: &NakamotoBlock, - block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, @@ -691,13 +289,9 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { - let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); - let sign_iter_id = block_attempt; let reward_cycle_id = burnchain .block_height_to_reward_cycle(burn_tip.block_height) .expect("FATAL: tried to initialize coordinator before first burn block height"); - self.coordinator.current_sign_id = sign_id; - self.coordinator.current_sign_iter_id = sign_iter_id; let block_proposal = BlockProposal { block: block.clone(), @@ -709,7 +303,7 @@ impl SignCoordinator { debug!("Sending block proposal message to signers"; "signer_signature_hash" => %block.header.signer_signature_hash(), ); - Self::send_miners_message_scalar::( + Self::send_miners_message::( &self.message_key, sortdb, burn_tip, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17b829557fc..c975bfebf96 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -28,8 +28,7 @@ use clarity::vm::{ClarityName, ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; -use libsigner::v1::messages::SignerMessage as SignerMessageV1; -use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use libsigner::{SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -91,7 +90,6 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use 
stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; -use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; @@ -441,27 +439,6 @@ pub fn get_latest_block_proposal( Ok((proposed_block, pubkey)) } -#[allow(dead_code)] -fn get_block_proposal_msg_v1( - miners_stackerdb: &mut StackerDBSession, - slot_id: u32, -) -> NakamotoBlock { - let message: SignerMessageV1 = miners_stackerdb - .get_latest(slot_id) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found"); - let SignerMessageV1::Packet(packet) = message else { - panic!("Expected a signer message packet. Got {message:?}"); - }; - let Message::NonceRequest(nonce_request) = packet.msg else { - panic!("Expected a nonce request. Got {:?}", packet.msg); - }; - let block_proposal = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) - .expect("Failed to deserialize block proposal"); - block_proposal.block -} - pub fn read_and_sign_block_proposal( configs: &[&Config], signers: &TestSigners, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465e..91f9bc3282d 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -13,7 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . mod v0; -mod v1; use std::collections::HashSet; // Copyright (C) 2020-2024 Stacks Open Internet Foundation @@ -42,7 +41,7 @@ use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; -use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; +use stacks::chainstate::stacks::StacksPrivateKey; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, @@ -54,12 +53,11 @@ use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; +use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; -use wsts::state_machine::PublicKeys; use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; @@ -76,7 +74,7 @@ use crate::tests::neon_integrations::{ wait_for_runloop, }; use crate::tests::to_addr; -use crate::{BitcoinRegtestController, BurnchainController}; +use crate::BitcoinRegtestController; // Helper struct for holding the btc and stx neon nodes #[allow(dead_code)] @@ -110,8 +108,6 @@ pub struct SignerTest { pub signer_stacks_private_keys: Vec, // link to the stacks node pub stacks_client: StacksClient, - // Unique number used to isolate files created during the test - pub run_stamp: u16, /// The number of cycles to stack for pub num_stacking_cycles: 
u64, } @@ -224,7 +220,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest u64 { - let prepare_phase_len = self - .running_nodes - .conf - .get_burnchain() - .pox_constants - .prepare_length as u64; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let curr_reward_cycle = self.get_current_reward_cycle(); - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let next_reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle); - let next_reward_cycle_reward_set_calculation = next_reward_cycle_height - .saturating_sub(prepare_phase_len) - .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase/ - - next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height) - } - - fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 { - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(reward_cycle); - reward_cycle_height.saturating_sub(current_block_height) - } - fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { let commits_submitted = self.running_nodes.commits_submitted.clone(); let mined_block_time = Instant::now(); @@ -421,20 +376,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ThresholdSignature { - let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); - let signer_signature_hex = block_obj.get("signer_signature").unwrap().as_str().unwrap(); - let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); - let signer_signature = - ThresholdSignature::consensus_deserialize(&mut signer_signature_bytes.as_slice()) - .unwrap(); - signer_signature - } - /// Wait for a confirmed block and return a list of individual /// signer signatures fn wait_for_confirmed_block_v0( @@ -558,22 +499,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest SignerSlotID { - let valid_signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); - - self.stacks_client - .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) - .expect("FATAL: failed to get signer slots from stackerdb") - .iter() - .position(|(address, _)| address == self.stacks_client.get_signer_address()) - .map(|pos| { - SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) - }) - .expect("FATAL: signer not registered") - } - fn get_signer_slots( &self, reward_cycle: u64, @@ -597,11 +522,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>() } - /// Get the wsts public keys for the given reward cycle - fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { + /// Get the signer public keys for the given reward cycle + fn get_signer_public_keys(&self, reward_cycle: u64) -> Vec { let entries = self.get_reward_set_signers(reward_cycle); let entries = SignerEntries::parse(false, 
&entries).unwrap(); - entries.public_keys + entries.signer_pks } /// Get the signers for the given reward cycle @@ -630,42 +555,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest StacksPrivateKey { - let spawned_signer = self.spawned_signers.remove(signer_idx); - let signer_key = self.signer_stacks_private_keys.remove(signer_idx); - - spawned_signer.stop(); - signer_key - } - - /// (Re)starts a new signer runloop with the given private key - pub fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { - let signer_config = build_signer_config_tomls( - &[signer_private_key], - &self.running_nodes.conf.node.rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - "12345", // It worked sir, we have the combination! -Great, what's the combination? - self.run_stamp, - 3000 + signer_idx, - Some(100_000), - None, - Some(9000 + signer_idx), - ) - .pop() - .unwrap(); - - info!("Restarting signer"); - let config = SignerConfig::load_from_str(&signer_config).unwrap(); - let signer = SpawnedSigner::new(config); - self.spawned_signers.insert(signer_idx, signer); - } - pub fn shutdown(self) { self.running_nodes .coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9f9f8d1a41e..e6808a4b774 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2516,8 +2516,7 @@ fn mock_sign_epoch_25() { .iter() .map(|id| id.0) .collect(); - let signer_keys = signer_test.get_signer_public_keys(reward_cycle); - let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); @@ -2731,8 +2730,7 @@ fn multiple_miners_mock_sign_epoch_25() { .iter() .map(|id| id.0) .collect(); - let signer_keys = signer_test.get_signer_public_keys(reward_cycle); - let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs deleted file mode 100644 index 816db4c5dc6..00000000000 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ /dev/null @@ -1,1155 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
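The `stop_signer`/`restart_signer` helpers removed from `SignerTest` above encode a stop-and-respawn cycle that the v1 reboot test further down relied on. The pattern itself is ordinary thread-and-channel plumbing; the following is a minimal std-only sketch of that same cycle, with a hypothetical `Worker` type standing in for a spawned signer (this is not the crate's actual `SpawnedSigner` API):

```rust
use std::sync::mpsc::{channel, Sender, TryRecvError};
use std::thread::{self, JoinHandle};
use std::time::Duration;

/// Hypothetical stand-in for a spawned signer: a worker thread that can be
/// stopped and later respawned with the same configuration.
struct Worker {
    stop_tx: Sender<()>,
    handle: JoinHandle<()>,
}

fn spawn_worker(id: usize) -> Worker {
    let (stop_tx, stop_rx) = channel();
    let handle = thread::spawn(move || loop {
        match stop_rx.try_recv() {
            // No stop signal yet: run one pass of the (stubbed) event loop.
            Err(TryRecvError::Empty) => thread::sleep(Duration::from_millis(10)),
            // Stop requested (or the handle was dropped): shut down cleanly.
            _ => break,
        }
    });
    println!("spawned worker {id}");
    Worker { stop_tx, handle }
}

fn main() {
    // Mirror the removed stop_signer/restart_signer sequence: stop worker 0,
    // keep its configuration (here just the id), then respawn it.
    let worker = spawn_worker(0);
    worker.stop_tx.send(()).unwrap();
    worker.handle.join().unwrap();
    let respawned = spawn_worker(0);
    respawned.stop_tx.send(()).unwrap();
    respawned.handle.join().unwrap();
}
```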
-use std::collections::HashSet; -use std::sync::atomic::Ordering; -use std::time::{Duration, Instant}; -use std::{env, thread}; - -use clarity::boot_util::boot_code_id; -use clarity::vm::Value; -use libsigner::v1::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; -use libsigner::BlockProposal; -use rand::thread_rng; -use rand_core::RngCore; -use stacks::burnchains::Txid; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME}; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::miner::TransactionEvent; -use stacks::chainstate::stacks::{ - StacksPrivateKey, StacksTransaction, TransactionAnchorMode, TransactionAuth, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, -}; -use stacks::util_lib::strings::StacksString; -use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::CHAIN_ID_TESTNET; -use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, TrieHash, -}; -use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::MessageSignature; -use stacks_signer::client::{SignerSlotID, StacksClient}; -use stacks_signer::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use stacks_signer::v1::coordinator::CoordinatorSelector; -use stacks_signer::v1::stackerdb_manager::StackerDBManager; -use stacks_signer::v1::SpawnedSigner; -use tracing_subscriber::prelude::*; -use tracing_subscriber::{fmt, EnvFilter}; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::net::Message; -use wsts::state_machine::OperationResult; - -use super::SignerTest; -use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::tests::nakamoto_integrations::{ - boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, next_block_and, -}; -use crate::tests::neon_integrations::{next_block_and_wait, test_observer}; -use crate::tests::to_addr; -use crate::BurnchainController; - -impl SignerTest { - fn boot_to_epoch_3(&mut self, timeout: Duration) -> Point { - boot_to_epoch_3_reward_set( - &self.running_nodes.conf, - &self.running_nodes.blocks_processed, - &self.signer_stacks_private_keys, - &self.signer_stacks_private_keys, - &mut self.running_nodes.btc_regtest_controller, - Some(self.num_stacking_cycles), - ); - let dkg_vote = self.wait_for_dkg(timeout); - - // Advance and mine the DKG key block - self.run_until_epoch_3_boundary(); - - let reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - assert_eq!(set_dkg, dkg_vote); - - let (vrfs_submitted, commits_submitted) = ( - self.running_nodes.vrfs_submitted.clone(), - self.running_nodes.commits_submitted.clone(), - ); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - info!("Successfully triggered first block to wake up the miner runloop."); - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); - info!("Ready to mine Nakamoto blocks!"); - set_dkg - } - - // Only call after already past the epoch 3.0 boundary - fn run_to_dkg(&mut self, timeout: Duration) -> Option { - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - let nmb_blocks_to_mine_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - let end_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1 - .saturating_add(nmb_blocks_to_mine_to_dkg); - info!("Mining {nmb_blocks_to_mine_to_dkg} bitcoin block(s) to reach DKG calculation at bitcoin height {end_block_height}"); - for i in 1..=nmb_blocks_to_mine_to_dkg { - info!("Mining bitcoin block #{i} and nakamoto tenure of {nmb_blocks_to_mine_to_dkg}"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - if nmb_blocks_to_mine_to_dkg == 0 { - None - } else { - Some(self.wait_for_dkg(timeout)) - } - } - - // Only call after already past the epoch 3.0 boundary - fn run_until_burnchain_height_nakamoto( - &mut self, - timeout: Duration, - burnchain_height: u64, - ) -> Vec { - let mut points = vec![]; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height(); - let mut total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); - debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); - let mut nmb_blocks_to_reward_cycle = 0; - let mut blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - while total_nmb_blocks_to_mine > 0 && blocks_to_dkg > 0 { - if blocks_to_dkg > 0 && total_nmb_blocks_to_mine >= blocks_to_dkg { - let dkg = self.run_to_dkg(timeout); - total_nmb_blocks_to_mine -= blocks_to_dkg; - if dkg.is_some() { - points.push(dkg.unwrap()); - } - blocks_to_dkg = 0; - nmb_blocks_to_reward_cycle = self.nmb_blocks_to_reward_cycle_boundary( - self.get_current_reward_cycle().saturating_add(1), - ) - } - if total_nmb_blocks_to_mine >= nmb_blocks_to_reward_cycle { - let end_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1 - .saturating_add(nmb_blocks_to_reward_cycle); - debug!("Mining {nmb_blocks_to_reward_cycle} Nakamoto block(s) to reach the next reward cycle boundary at {end_block_height}."); - for i in 1..=nmb_blocks_to_reward_cycle { - debug!("Mining Nakamoto block #{i} of {nmb_blocks_to_reward_cycle}"); - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - 
self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - total_nmb_blocks_to_mine -= nmb_blocks_to_reward_cycle; - nmb_blocks_to_reward_cycle = 0; - blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - } - } - for i in 1..=total_nmb_blocks_to_mine { - info!("Mining Nakamoto block #{i} of {total_nmb_blocks_to_mine} to reach {burnchain_height}"); - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - points - } - - fn mine_and_verify_confirmed_naka_block( - &mut self, - agg_key: &Point, - timeout: Duration, - ) -> MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); - let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); - assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); - new_block - } - - fn wait_for_dkg(&mut self, timeout: Duration) -> Point { - debug!("Waiting for DKG..."); - let mut key = Point::default(); - let dkg_now = Instant::now(); - for signer in self.spawned_signers.iter() { - let mut aggregate_public_key = None; - loop { - let results = signer - .res_recv - .recv_timeout(timeout) - .expect("failed to recv dkg results"); - for result in results { - match result { - SignerResult::OperationResult(OperationResult::Dkg(point)) => { - info!("Received aggregate_group_key {point}"); - aggregate_public_key = Some(point); - } - SignerResult::OperationResult(other) => { - panic!("{}", operation_panic_message(&other)) - } - SignerResult::StatusCheck(state) => { - panic!("Received status check result: {:?}", state); - } - } - } - if aggregate_public_key.is_some() || dkg_now.elapsed() > timeout { - break; - } - } - key = aggregate_public_key.expect(&format!( - "Failed to get aggregate public key within {timeout:?}" - )); - } - debug!("Finished waiting for DKG!"); - key - } - - fn generate_invalid_transactions(&self) -> Vec { - let host = self.running_nodes.conf.node.rpc_bind.clone(); - // Get the signer indices - let reward_cycle = self.get_current_reward_cycle(); - - let signer_private_key = self.signer_stacks_private_keys[0]; - - let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, false); - let contract_addr = vote_contract_id.issuer.into(); - let contract_name = vote_contract_id.name.clone(); - - let signer_index = thread_rng().next_u64(); - let signer_index_arg = Value::UInt(signer_index as u128); - - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); - - let round = thread_rng().next_u64(); - let round_arg = Value::UInt(round as u128); - - let reward_cycle_arg = Value::UInt(reward_cycle as u128); - let valid_function_args = vec![ - signer_index_arg.clone(), - point_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ]; - - // Create a invalid transaction that is not a contract call - let invalid_not_contract_call = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&signer_private_key).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - 
TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let invalid_contract_address = StacksClient::build_unsigned_contract_call_transaction( - &StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&signer_private_key)), - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_contract_name = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - "bad-signers-contract-name".into(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_signers_vote_function = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - "some-other-function".into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_signer_index = - StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - point_arg.clone(), - point_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_key = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - signer_index_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_round = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_reward_cycle = - StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - point_arg.clone(), - round_arg.clone(), - point_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_nonce = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, // Old nonce - ) - .unwrap(); - - let invalid_stacks_client = StacksClient::new( - StacksPrivateKey::new(), - host, - "12345".to_string(), // That's amazing. I've got the same combination on my luggage! 
- false, - ); - let invalid_signer_tx = invalid_stacks_client - .build_unsigned_vote_for_aggregate_public_key(0, round, point, reward_cycle, 0) - .expect("FATAL: failed to build vote for aggregate public key"); - - let unsigned_txs = vec![ - invalid_nonce, - invalid_not_contract_call, - invalid_contract_name, - invalid_contract_address, - invalid_signers_vote_function, - invalid_function_arg_key, - invalid_function_arg_reward_cycle, - invalid_function_arg_round, - invalid_function_arg_signer_index, - invalid_signer_tx, - ]; - unsigned_txs - .into_iter() - .map(|unsigned| { - invalid_stacks_client - .sign_transaction(unsigned) - .expect("Failed to sign transaction") - }) - .collect() - } -} - -fn operation_panic_message(result: &OperationResult) -> String { - match result { - OperationResult::Sign(sig) => { - format!("Received Signature ({},{})", sig.R, sig.z) - } - OperationResult::SignTaproot(proof) => { - format!("Received SchnorrProof ({},{})", proof.r, proof.s) - } - OperationResult::DkgError(dkg_error) => { - format!("Received DkgError {:?}", dkg_error) - } - OperationResult::SignError(sign_error) => { - format!("Received SignError {}", sign_error) - } - OperationResult::Dkg(point) => { - format!("Received aggregate_group_key {point}") - } - } -} - -#[test] -#[ignore] -/// Test the signer can respond to external commands to perform DKG -fn dkg() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10, vec![]); - info!("Boot to epoch 3.0 reward calculation..."); - boot_to_epoch_3_reward_set( - &signer_test.running_nodes.conf, - &signer_test.running_nodes.blocks_processed, - &signer_test.signer_stacks_private_keys, - &signer_test.signer_stacks_private_keys, - &mut signer_test.running_nodes.btc_regtest_controller, - Some(signer_test.num_stacking_cycles), - ); - - info!("Pox 4 activated and at epoch 3.0 reward set calculation (2nd block of its prepare phase)! 
Ready for signers to perform DKG and Sign!"); - // First wait for the automatically triggered DKG to complete - let key = signer_test.wait_for_dkg(timeout); - - info!("------------------------- Test DKG -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - - // Determine the coordinator of the current node height - info!("signer_runloop: spawn send commands to do dkg"); - let dkg_now = Instant::now(); - for signer in signer_test.spawned_signers.iter() { - signer - .cmd_send - .send(RunLoopCommand { - reward_cycle, - command: SignerCommand::Dkg, - }) - .expect("failed to send DKG command"); - } - let new_key = signer_test.wait_for_dkg(timeout); - let dkg_elapsed = dkg_now.elapsed(); - assert_ne!(new_key, key); - - info!("DKG Time Elapsed: {:.2?}", dkg_elapsed); -} - -#[test] -#[ignore] -/// Test the signer rejects requests to sign that do not come from a miner -fn sign_request_rejected() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - - info!("Creating invalid blocks to sign..."); - let header1 = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - timestamp: 8, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), - }; - let mut block1 = NakamotoBlock { - header: header1, - txs: vec![], - }; - let tx_merkle_root1 = { - let txid_vecs = block1 - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - MerkleTree::::new(&txid_vecs).root() - }; - block1.header.tx_merkle_root = tx_merkle_root1; - - let header2 = NakamotoBlockHeader { - version: 1, - chain_length: 3, - burn_spent: 4, - consensus_hash: ConsensusHash([0x05; 20]), - parent_block_id: StacksBlockId([0x06; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), - state_index_root: TrieHash([0x08; 32]), - timestamp: 9, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), - }; - let mut block2 = NakamotoBlock { - header: header2, - txs: vec![], - }; - let tx_merkle_root2 = { - let txid_vecs = block2 - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - MerkleTree::::new(&txid_vecs).root() - }; - block2.header.tx_merkle_root = tx_merkle_root2; - - let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10, vec![]); - let _key = signer_test.boot_to_epoch_3(timeout); - - info!("------------------------- Test Sign -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle(); - let block_proposal_1 = BlockProposal { - block: block1.clone(), - burn_height: 0, - reward_cycle, - }; - let block_proposal_2 = BlockProposal { - block: block2.clone(), - burn_height: 0, - reward_cycle, - }; - // Determine the coordinator of the current node height - info!("signer_runloop: spawn send commands to do sign"); - let sign_now = Instant::now(); - let sign_command = RunLoopCommand { - reward_cycle, - command: SignerCommand::Sign { - block_proposal: block_proposal_1, - is_taproot: false, - merkle_root: None, - }, - }; - let sign_taproot_command = RunLoopCommand { - 
reward_cycle, - command: SignerCommand::Sign { - block_proposal: block_proposal_2, - is_taproot: true, - merkle_root: None, - }, - }; - for signer in signer_test.spawned_signers.iter() { - signer - .cmd_send - .send(sign_command.clone()) - .expect("failed to send sign command"); - signer - .cmd_send - .send(sign_taproot_command.clone()) - .expect("failed to send sign taproot command"); - } - - // Don't wait for signatures. Because the block miner is acting as - // the coordinator, signers won't directly sign commands issued by someone - // other than the miner. Rather, they'll just broadcast their rejections. - - let sign_elapsed = sign_now.elapsed(); - - info!("------------------------- Test Block Rejected -------------------------"); - - // Verify the signers rejected the proposed block - let t_start = Instant::now(); - let signer_message = loop { - assert!( - t_start.elapsed() < Duration::from_secs(30), - "Timed out while waiting for signers block response stacker db event" - ); - - let nakamoto_blocks = test_observer::get_stackerdb_chunks(); - if let Some(message) = find_block_response(nakamoto_blocks) { - break message; - } - thread::sleep(Duration::from_secs(1)); - }; - if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) = signer_message { - assert!(matches!( - rejection.reason_code, - RejectCode::ValidationFailed(_) - )); - } else { - panic!("Received unexpected message: {:?}", &signer_message); - } - info!("Sign Time Elapsed: {:.2?}", sign_elapsed); -} - -#[test] -#[ignore] -/// Test that a signer can be offline when a DKG round has commenced and -/// can rejoin the DKG round after it has restarted -fn delayed_dkg() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let timeout = Duration::from_secs(200); - let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers, vec![]); - boot_to_epoch_3_reward_set_calculation_boundary( - &signer_test.running_nodes.conf, - &signer_test.running_nodes.blocks_processed, - &signer_test.signer_stacks_private_keys, - &signer_test.signer_stacks_private_keys, - &mut signer_test.running_nodes.btc_regtest_controller, - Some(signer_test.num_stacking_cycles), - ); - let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - let public_keys = signer_test.get_signer_public_keys(reward_cycle); - let coordinator_selector = CoordinatorSelector::from(public_keys); - let (_, coordinator_public_key) = coordinator_selector.get_coordinator(); - let coordinator_public_key = - StacksPublicKey::from_slice(coordinator_public_key.to_bytes().as_slice()).unwrap(); - let signer_slot_ids: Vec<_> = (0..num_signers) - .into_iter() - .map(|i| SignerSlotID(i as u32)) - .collect(); - let mut stackerdbs: Vec<_> = signer_slot_ids - .iter() - .map(|i| { - StackerDBManager::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // Doesn't matter what key we use. We are just reading, not writing - false, - reward_cycle, - *i, - ) - }) - .collect(); - info!("------------------------- Stop Signers -------------------------"); - let mut to_stop = None; - for (idx, key) in signer_test.signer_stacks_private_keys.iter().enumerate() { - let public_key = StacksPublicKey::from_private(key); - if public_key == coordinator_public_key { - // Do not stop the coordinator. 
We want coordinator to start a DKG round - continue; - } - // Only stop one signer - to_stop = Some(idx); - break; - } - let signer_idx = to_stop.expect("Failed to find a signer to stop"); - let signer_key = signer_test.stop_signer(signer_idx); - debug!( - "Removed signer {signer_idx} with key: {:?}, {}", - signer_key, - signer_key.to_hex() - ); - info!("------------------------- Start DKG -------------------------"); - info!("Waiting for DKG to start..."); - // Advance one more to trigger DKG - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - timeout.as_secs(), - || Ok(true), - ) - .expect("Failed to mine bitcoin block"); - // Do not proceed until we guarantee that DKG was triggered - let start_time = Instant::now(); - loop { - let stackerdb = stackerdbs.first_mut().unwrap(); - let dkg_packets: Vec<_> = stackerdb - .get_dkg_packets(&signer_slot_ids) - .expect("Failed to get dkg packets"); - let begin_packets: Vec<_> = dkg_packets - .iter() - .filter_map(|packet| { - if matches!(packet.msg, Message::DkgBegin(_)) { - Some(packet) - } else { - None - } - }) - .collect(); - if !begin_packets.is_empty() { - break; - } - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Timed out waiting for DKG to be triggered" - ); - } - - info!("------------------------- Restart Stopped Signer -------------------------"); - - signer_test.restart_signer(signer_idx, signer_key); - - info!("------------------------- Wait for DKG -------------------------"); - let key = signer_test.wait_for_dkg(timeout); - let mut transactions = HashSet::with_capacity(num_signers); - let start_time = Instant::now(); - while transactions.len() < num_signers { - for stackerdb in stackerdbs.iter_mut() { - let current_transactions = stackerdb - .get_current_transactions() - .expect("Failed getting current transactions for signer slot id"); - for tx in current_transactions { - transactions.insert(tx.txid()); - } - } - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Failed to retrieve pending vote transactions within timeout" - ); - } - - // Make sure transactions get mined - let start_time = Instant::now(); - while !transactions.is_empty() { - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Failed to mine transactions within timeout" - ); - next_block_and_wait( - &mut signer_test.running_nodes.btc_regtest_controller, - &signer_test.running_nodes.blocks_processed, - ); - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let txs = block.get("transactions").unwrap().as_array().unwrap(); - for tx in txs.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - transactions.remove(&parsed.txid()); - } - } - } - - // Make sure DKG did get set - assert_eq!( - key, - signer_test - .stacks_client - .get_approved_aggregate_key(reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found") - ); -} - -pub fn find_block_response(chunk_events: Vec) -> Option { - for event in chunk_events.into_iter() { - if event.contract_id.name.as_str() - == &format!("signers-1-{}", MessageSlotID::BlockResponse.to_u8()) - || event.contract_id.name.as_str() - == &format!("signers-0-{}", MessageSlotID::BlockResponse.to_u8()) - { - let Some(data) = event.modified_slots.first() else { - continue; - }; - let msg = 
SignerMessage::consensus_deserialize(&mut data.data.as_slice()).unwrap(); - return Some(msg); - } - } - None -} - -#[test] -#[ignore] -/// Test that a signer can respond to a miner's request for a signature on a block proposal -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, forcibly triggering DKG to set the key correctly. -/// The stacks node is next advanced to epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node attempts to mine a Nakamoto block, sending a block to the observing signers via the -/// .miners stacker db instance. The signers submit the block to the stacks node for verification. -/// Upon receiving a Block Validation response approving the block, the signers perform a signing -/// round across its signature hash and return it back to the miner. -/// -/// Test Assertion: -/// Signers return an operation result containing a valid signature across the miner's Nakamoto block's signature hash. -/// Signers broadcast a signature across the miner's proposed block back to the respective .signers-XXX-YYY contract. -/// Miner appends the signature to the block and finishes mining it. -fn block_proposal() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let timeout = Duration::from_secs(30); - let short_timeout = Duration::from_secs(30); - - let key = signer_test.boot_to_epoch_3(timeout); - signer_test.mine_nakamoto_block(timeout); - - info!("------------------------- Test Block Proposal -------------------------"); - // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - - info!("------------------------- Test Block Signed -------------------------"); - // Verify that the signers signed the proposed block - let signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, timeout); - assert!(signature - .0 - .verify(&key, proposed_signer_signature_hash.as_bytes())); - - // Test prometheus metrics response - #[cfg(feature = "monitoring_prom")] - { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. - let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - } - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers can handle a transition between Nakamoto reward cycles -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 2 full Nakamoto reward cycles, sending blocks to observing signers to sign and return.
-/// -/// Test Assertion: -/// Signers can perform DKG and sign blocks across Nakamoto reward cycles. -fn mine_2_nakamoto_reward_cycles() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); - let timeout = Duration::from_secs(200); - let first_dkg = signer_test.boot_to_epoch_3(timeout); - let curr_reward_cycle = signer_test.get_current_reward_cycle(); - // Mine 2 full Nakamoto reward cycles (epoch 3 starts in the middle of one, hence the + 1) - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let final_reward_cycle = next_reward_cycle.saturating_add(nmb_reward_cycles); - let final_reward_cycle_height_boundary = signer_test - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(final_reward_cycle) - .saturating_sub(1); - - info!("------------------------- Test Mine 2 Nakamoto Reward Cycles -------------------------"); - let dkgs = signer_test - .run_until_burnchain_height_nakamoto(timeout, final_reward_cycle_height_boundary); - assert_eq!(dkgs.len() as u64, nmb_reward_cycles.saturating_add(1)); // We will have mined the DKG vote for the following reward cycle - let last_dkg = dkgs - .last() - .expect(&format!( - "Failed to reach DKG for reward cycle {final_reward_cycle_height_boundary}" - )) - .clone(); - assert_ne!(first_dkg, last_dkg); - - let set_dkg = signer_test - .stacks_client - .get_approved_aggregate_key(final_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - assert_eq!(set_dkg, last_dkg); - - let current_burnchain_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - assert_eq!(current_burnchain_height, final_reward_cycle_height_boundary); - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers will accept a miner's block proposal and sign it if it contains all expected transactions, -/// filtering invalid transactions from the block requirements -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. It then advances to the prepare phase of the next reward cycle -/// to enable Nakamoto signers to look at the next signer transactions to compare against a proposed block. -/// -/// Test Execution: -/// The node attempts to mine a Nakamoto tenure, sending a block to the observing signers via the -/// .miners stacker db instance. The signers submit the block to the stacks node for verification. -/// Upon receiving a Block Validation response approving the block, the signers verify that it contains -/// all of the NEXT signers' expected transactions, being sure to filter out any invalid transactions -/// from StackerDB as well. -/// -/// Test Assertion: -/// Miner proposes a block to the signers containing all expected transactions. -/// Signers broadcast block approval with a signature back to the waiting miner. -/// Miner includes the signers' signature in the block and finishes mining it.
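The filtering this doc comment describes (the test body follows below) reduces to set arithmetic over txids: drop known-invalid submissions from the expected set, then require the mined block to cover everything that remains while including none of the invalid ones. A self-contained sketch of that check, with plain `u64` ids standing in for real `Txid` values:

```rust
use std::collections::HashSet;

/// Returns Ok(()) if the block contains every still-expected transaction and
/// none of the known-invalid ones. Plain u64 ids stand in for real Txids.
fn check_block_txs(
    block_txids: &HashSet<u64>,
    expected: &HashSet<u64>,
    invalid: &HashSet<u64>,
) -> Result<(), String> {
    // Signers first filter invalid submissions out of the expected set...
    let required: HashSet<u64> = expected.difference(invalid).copied().collect();
    // ...then require the proposed block to include everything that remains.
    if let Some(missing) = required.difference(block_txids).next() {
        return Err(format!("block is missing expected tx {missing}"));
    }
    // The miner must also not have picked up any of the invalid ones.
    if let Some(bad) = block_txids.intersection(invalid).next() {
        return Err(format!("block contains invalid tx {bad}"));
    }
    Ok(())
}

fn main() {
    let block: HashSet<u64> = [1, 2].into();
    let expected: HashSet<u64> = [1, 2, 3].into();
    let invalid: HashSet<u64> = [3].into();
    // Tx 3 was invalid, so its absence from the block is fine.
    assert!(check_block_txs(&block, &expected, &invalid).is_ok());
}
```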
-fn filter_bad_transactions() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); - let timeout = Duration::from_secs(200); - let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); - let next_signers_dkg = signer_test - .run_to_dkg(timeout) - .expect("Failed to run to DKG"); - assert_ne!(current_signers_dkg, next_signers_dkg); - - info!("------------------------- Submit Invalid Transactions -------------------------"); - - let signer_private_key = signer_test - .signer_stacks_private_keys - .iter() - .find(|pk| { - let addr = to_addr(pk); - addr == *signer_test.stacks_client.get_signer_address() - }) - .cloned() - .expect("Cannot find signer private key for signer id 1"); - let next_reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - // Must submit to the NEXT reward cycle slots as they are the ones looked at by the CURRENT miners - let signer_index = signer_test.get_signer_index(next_reward_cycle); - let mut stackerdb = StackerDBManager::new( - &signer_test.running_nodes.conf.node.rpc_bind, - signer_private_key, - false, - next_reward_cycle, - signer_index, - ); - - debug!( - "Signer address is {}", - &signer_test.stacks_client.get_signer_address() - ); - - let invalid_txs = signer_test.generate_invalid_transactions(); - let invalid_txids: HashSet = invalid_txs.iter().map(|tx| tx.txid()).collect(); - - // Submit transactions to stackerdb for the signers and miners to pick up during block verification - stackerdb - .send_message_with_retry(SignerMessage::Transactions(invalid_txs)) - .expect("Failed to write expected transactions to stackerdb"); - - info!("------------------------- Verify Nakamoto Block Mined -------------------------"); - let mined_block_event = - signer_test.mine_and_verify_confirmed_naka_block(&current_signers_dkg, timeout); - for tx_event in &mined_block_event.tx_events { - let TransactionEvent::Success(tx_success) = tx_event else { - panic!("Received unexpected transaction event"); - }; - // Since we never broadcast the "invalid" transaction to the mempool and the transaction did not come from a signer or had an invalid nonce - // the miner should never construct a block that contains them and signers should still approve it - assert!( - !invalid_txids.contains(&tx_success.txid), - "Miner included an invalid transaction in the block" - ); - } - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers will be able to continue their operations even if one signer is restarted. -/// -/// Test Setup: -/// The test spins up three stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The signers sign one block as usual. -/// Then, one of the signers is restarted. -/// Finally, the signers sign another block with the restarted signer. -/// -/// Test Assertion: -/// The signers are able to produce a valid signature after one of them is restarted.
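The reboot test that follows, like the others in this file, leans on a hand-rolled poll-until-deadline idiom: an `Instant::now()` start time plus repeated `assert!(start_time.elapsed() < ...)` checks (the same shape as the `wait_for` helper imported from `nakamoto_integrations` elsewhere in these tests). The idiom factors out into one small generic helper; this is a std-only sketch, with `poll_until` as a hypothetical name rather than the crate's actual API:

```rust
use std::thread;
use std::time::{Duration, Instant};

/// Polls `check` until it yields Some(value) or `timeout` elapses.
/// Generic stand-in for the wait_for_* helpers used by these tests.
fn poll_until<T>(timeout: Duration, mut check: impl FnMut() -> Option<T>) -> Result<T, String> {
    let start = Instant::now();
    loop {
        if let Some(value) = check() {
            return Ok(value);
        }
        if start.elapsed() > timeout {
            return Err(format!("condition not met within {timeout:?}"));
        }
        // Back off briefly between polls, as the tests above do.
        thread::sleep(Duration::from_millis(100));
    }
}

fn main() {
    // Trivial usage: the "confirmation" becomes observable after ~300ms.
    let deadline = Instant::now() + Duration::from_millis(300);
    let confirmed = poll_until(Duration::from_secs(5), || {
        (Instant::now() >= deadline).then_some("block confirmed")
    })
    .unwrap();
    println!("{confirmed}");
}
```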
-fn sign_after_signer_reboot() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); - - let key = signer_test.boot_to_epoch_3(timeout); - - info!("------------------------- Test Mine Block -------------------------"); - - signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - let signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); - - assert!( - signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), - "Signature verification failed" - ); - - info!("------------------------- Restart one Signer -------------------------"); - let signer_key = signer_test.stop_signer(2); - debug!( - "Removed signer 2 with key: {:?}, {}", - signer_key, - signer_key.to_hex() - ); - signer_test.restart_signer(2, signer_key); - - info!("------------------------- Test Mine Block after restart -------------------------"); - - let last_block = signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - let frost_signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); - - // Check that the latest block's bitvec is all 1's - assert_eq!( - last_block.signer_bitvec, - serde_json::to_value(BitVec::<4000>::ones(num_signers as u16).unwrap()) - .expect("Failed to serialize BitVec") - .as_str() - .expect("Failed to serialize BitVec") - ); - - assert!( - frost_signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), - "Signature verification failed" - ); - - signer_test.shutdown(); -} From 4f14f4eda8e318e05a68461e776efed0f08813ee Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 10:23:54 -0700 Subject: [PATCH 691/910] Remove wsts from stacks-signer Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/src/libsigner.rs | 2 - libsigner/src/runloop.rs | 43 +- libsigner/src/tests/http.rs | 12 +- libsigner/src/tests/mod.rs | 41 +- libsigner/src/v1/messages.rs | 1869 ----------------- libsigner/src/v1/mod.rs | 17 - stacks-signer/Cargo.toml | 1 - stacks-signer/src/client/mod.rs | 36 - stacks-signer/src/client/stacks_client.rs | 243 +-- stacks-signer/src/lib.rs | 18 +- stacks-signer/src/runloop.rs | 57 +- stacks-signer/src/signerdb.rs | 51 +- stacks-signer/src/v0/signer.rs | 13 +- stackslib/src/chainstate/nakamoto/miner.rs | 4 +- .../stacks-node/src/nakamoto_node/miner.rs | 139 +- testnet/stacks-node/src/tests/signer/mod.rs | 11 +- 17 files changed, 74 insertions(+), 2484 deletions(-) delete mode 100644 libsigner/src/v1/messages.rs delete mode 100644 libsigner/src/v1/mod.rs diff --git a/Cargo.lock b/Cargo.lock index e1d78fec15d..d16284fa5ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3473,7 +3473,6 @@ dependencies = [ "tracing", "tracing-subscriber", "url", - "wsts", ] [[package]] diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 0da4e68a8f1..878d428bfc3 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -44,8 +44,6 @@ mod session; mod 
signer_set; /// v0 signer related code pub mod v0; -/// v1 signer related code -pub mod v1; use std::cmp::Eq; use std::fmt::Debug; diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index d4710f82e6b..0a5ed49a6d6 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -41,7 +41,7 @@ const STDERR: i32 = 2; /// Trait describing the needful components of a top-level runloop. /// This is where the signer business logic would go. /// Implement this, and you get all the multithreaded setup for free. -pub trait SignerRunLoop { +pub trait SignerRunLoop { /// Hint to set how long to wait for new events fn set_event_timeout(&mut self, timeout: Duration); /// Getter for the event poll timeout @@ -49,12 +49,7 @@ pub trait SignerRunLoop { /// Run one pass of the event loop, given new Signer events discovered since the last pass. /// Returns Some(R) if this is the final pass -- the runloop evaluated to R /// Returns None to keep running. - fn run_one_pass( - &mut self, - event: Option>, - cmd: Option, - res: &Sender, - ) -> Option; + fn run_one_pass(&mut self, event: Option>, res: &Sender) -> Option; /// This is the main loop body for the signer. It continuously receives events from /// `event_recv`, polling for up to `self.get_event_timeout()` units of time. Once it has @@ -66,7 +61,6 @@ pub trait SignerRunLoop { fn main_loop( &mut self, event_recv: Receiver>, - command_recv: Receiver, result_send: Sender, mut event_stop_signaler: EVST, ) -> Option { @@ -81,11 +75,7 @@ pub trait SignerRunLoop { return None; } }; - // Do not block for commands - let next_command_opt = command_recv.try_recv().ok(); - if let Some(final_state) = - self.run_one_pass(next_event_opt, next_command_opt, &result_send) - { + if let Some(final_state) = self.run_one_pass(next_event_opt, &result_send) { info!("Runloop exit; signaling event-receiver to stop"); event_stop_signaler.send(); return Some(final_state); @@ -95,13 +85,11 @@ pub trait SignerRunLoop { } /// The top-level signer implementation -pub struct Signer { +pub struct Signer { /// the runloop itself signer_loop: Option, /// the event receiver to use event_receiver: Option, - /// the command receiver to use - command_receiver: Option>, /// the result sender to use result_sender: Option>, /// phantom data for the codec @@ -193,18 +181,12 @@ pub fn set_runloop_signal_handler(mut st }).expect("FATAL: failed to set signal handler"); } -impl Signer { +impl Signer { /// Create a new signer with the given runloop and event receiver. - pub fn new( - runloop: SL, - event_receiver: EV, - command_receiver: Receiver, - result_sender: Sender, - ) -> Signer { + pub fn new(runloop: SL, event_receiver: EV, result_sender: Sender) -> Signer { Signer { signer_loop: Some(runloop), event_receiver: Some(event_receiver), - command_receiver: Some(command_receiver), result_sender: Some(result_sender), phantom_data: PhantomData, } @@ -212,12 +194,11 @@ impl Signer { } impl< - CMD: Send + 'static, R: Send + 'static, T: SignerEventTrait + 'static, - SL: SignerRunLoop + Send + 'static, + SL: SignerRunLoop + Send + 'static, EV: EventReceiver + Send + 'static, - > Signer + > Signer { /// This is a helper function to spawn both the runloop and event receiver in their own /// threads. 
Advanced signers may not need this method, and instead opt to run the receiver @@ -234,10 +215,6 @@ impl< .event_receiver .take() .ok_or(EventError::AlreadyRunning)?; - let command_receiver = self - .command_receiver - .take() - .ok_or(EventError::AlreadyRunning)?; let result_sender = self .result_sender .take() @@ -266,9 +243,7 @@ impl< let runloop_thread = thread::Builder::new() .name(format!("signer_runloop:{bind_port}")) .stack_size(THREAD_STACK_SIZE) - .spawn(move || { - signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) - }) + .spawn(move || signer_loop.main_loop(event_recv, result_sender, stop_signaler)) .map_err(|e| { error!("SignerRunLoop failed to start: {:?}", &e); ret_stop_signaler.send(); diff --git a/libsigner/src/tests/http.rs b/libsigner/src/tests/http.rs index d0f3887b45d..b31fb042e88 100644 --- a/libsigner/src/tests/http.rs +++ b/libsigner/src/tests/http.rs @@ -25,7 +25,7 @@ use crate::http::{decode_http_body, decode_http_request, decode_http_response, r #[test] fn test_decode_http_request_ok() { - let tests = vec![ + let tests = [ ("GET /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n", ("GET", "/foo", vec![("host", "localhost:6270")])), ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", @@ -61,7 +61,7 @@ fn test_decode_http_request_ok() { #[test] fn test_decode_http_request_err() { - let tests = vec![ + let tests = [ ( "GET /foo HTTP/1.1\r\n", EventError::Deserialize("".to_string()), @@ -99,7 +99,7 @@ fn test_decode_http_request_err() { #[test] fn test_decode_http_response_ok() { - let tests = vec![ + let tests = [ ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nX-Request-ID: 0\r\n\r\n", vec![("content-type", "application/octet-stream"), ("content-length", "123"), ("x-request-id", "0")]), ("HTTP/1.1 200 Ok\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", @@ -123,7 +123,7 @@ fn test_decode_http_response_ok() { #[test] fn test_decode_http_response_err() { - let tests = vec![ + let tests = [ ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", RPCError::HttpError(400)), ("HTTP/1.1 200", @@ -223,7 +223,7 @@ impl Write for MockHTTPSocket { #[test] fn test_run_http_request_with_body() { - let tests = vec![ + let tests = [ ("GET", "/test-no-content-type-and-no-body", None, vec![]), ( "GET", @@ -288,7 +288,7 @@ fn test_run_http_request_with_body() { #[test] fn test_run_http_request_no_body() { - let tests = vec![ + let tests = [ ("GET", "/test-no-content-type-and-no-body", None, vec![]), ( "GET", diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index fbe1e590897..f0361592ba2 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -24,11 +24,16 @@ use std::time::Duration; use std::{mem, thread}; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; +use clarity::util::hash::Sha512Trunc256Sum; +use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::StackerDBChunkData; +use stacks_common::bitvec::BitVec; use 
stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, @@ -38,8 +43,8 @@ use stacks_common::util::sleep_ms; use wsts::net::{DkgBegin, Packet}; use crate::events::{SignerEvent, SignerEventTrait}; -use crate::v1::messages::SignerMessage; -use crate::{Signer, SignerEventReceiver, SignerRunLoop}; +use crate::v0::messages::{BlockRejection, SignerMessage}; +use crate::{BlockProposal, Signer, SignerEventReceiver, SignerRunLoop}; /// Simple runloop implementation. It receives `max_events` events and returns `events` from the /// last call to `run_one_pass` as its final state. @@ -63,7 +68,7 @@ enum Command { Empty, } -impl SignerRunLoop>, Command, T> for SimpleRunLoop { +impl SignerRunLoop>, T> for SimpleRunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.poll_timeout = timeout; } @@ -75,7 +80,6 @@ impl SignerRunLoop>, Command, T> for Sim fn run_one_pass( &mut self, event: Option>, - _cmd: Option, _res: &Sender>>, ) -> Option>> { debug!("Got event: {:?}", &event); @@ -99,16 +103,34 @@ impl SignerRunLoop>, Command, T> for Sim fn test_simple_signer() { let contract_id = NakamotoSigners::make_signers_db_contract_id(0, 0, false); let ev = SignerEventReceiver::new(false); - let (_cmd_send, cmd_recv) = channel(); let (res_send, _res_recv) = channel(); let max_events = 5; - let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, cmd_recv, res_send); + let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, res_send); let endpoint: SocketAddr = "127.0.0.1:30000".parse().unwrap(); let mut chunks = vec![]; + let block_proposal = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([0; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; for i in 0..max_events { let privk = Secp256k1PrivateKey::new(); - let msg = wsts::net::Message::DkgBegin(DkgBegin { dkg_id: 0 }); - let message = SignerMessage::Packet(Packet { msg, sig: vec![] }); + let message = SignerMessage::BlockProposal(block_proposal.clone()); let message_bytes = message.serialize_to_vec(); let mut chunk = StackerDBChunkData::new(i as u32, 1, message_bytes); chunk.sign(&privk).unwrap(); @@ -178,10 +200,9 @@ fn test_simple_signer() { #[test] fn test_status_endpoint() { let ev = SignerEventReceiver::new(false); - let (_cmd_send, cmd_recv) = channel(); let (res_send, _res_recv) = channel(); let max_events = 1; - let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, cmd_recv, res_send); + let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, res_send); let endpoint: SocketAddr = "127.0.0.1:31000".parse().unwrap(); // simulate a node that's trying to push data diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs deleted file mode 100644 index b412d9a66ff..00000000000 --- a/libsigner/src/v1/messages.rs +++ /dev/null @@ -1,1869 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software 
Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Messages in the signer-miner interaction have a multi-level hierarchy. -//! Signers send messages to each other through Packet messages. These messages, -//! as well as `BlockResponse`, `Transactions`, and `DkgResults` messages are stored -//! StackerDBs based on the `MessageSlotID` for the particular message type. This is a -//! shared identifier space between the four message kinds and their subtypes. -//! -//! These four message kinds are differentiated with a `SignerMessageTypePrefix` -//! and the `SignerMessage` enum. - -use std::fmt::{Debug, Display}; -use std::io::{Read, Write}; -use std::net::{SocketAddr, TcpListener, TcpStream}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::Sender; -use std::sync::Arc; - -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; -use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; -use blockstack_lib::net::api::postblock_proposal::{ - BlockValidateReject, BlockValidateResponse, ValidateRejectCode, -}; -use blockstack_lib::util_lib::boot::boot_code_id; -use clarity::util::retry::BoundReader; -use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::QualifiedContractIdentifier; -use hashbrown::{HashMap, HashSet}; -use serde::{Deserialize, Serialize}; -use stacks_common::codec::{ - read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, - StacksMessageCodec, -}; -use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::util::hash::Sha512Trunc256Sum; -use tiny_http::{ - Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, -}; -use wsts::common::{PolyCommitment, PublicNonce, Signature, SignatureShare, TupleProof}; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; -use wsts::net::{ - BadPrivateShare, DkgBegin, DkgEnd, DkgEndBegin, DkgFailure, DkgPrivateBegin, DkgPrivateShares, - DkgPublicShares, DkgStatus, Message, NonceRequest, NonceResponse, Packet, - SignatureShareRequest, SignatureShareResponse, -}; -use wsts::schnorr::ID; -use wsts::state_machine::{signer, SignError}; - -use crate::http::{decode_http_body, decode_http_request}; -use crate::{EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait}; - -define_u8_enum!( -/// Enum representing the stackerdb message identifier: this is -/// the contract index in the signers contracts (i.e., X in signers-0-X) -MessageSlotID { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin - DkgPrivateBegin = 1, - /// DkgEndBegin - DkgEndBegin = 2, - /// DkgEnd - DkgEnd = 3, - /// DkgPublicshares - DkgPublicShares = 4, - /// DkgPrivateShares - DkgPrivateShares = 5, - /// NonceRequest - NonceRequest = 6, - /// NonceResponse - NonceResponse = 7, - /// SignatureShareRequest - SignatureShareRequest = 8, - /// SignatureShareResponse - SignatureShareResponse = 9, - /// Block proposal responses for miners 
to observe - BlockResponse = 10, - /// Transactions list for miners and signers to observe - Transactions = 11, - /// DKG Results - DkgResults = 12, - /// Persisted encrypted signer state containing DKG shares - EncryptedSignerState = 13 -}); - -impl MessageSlotIDTrait for MessageSlotID { - fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - fn all() -> &'static [Self] { - MessageSlotID::ALL - } -} - -impl SignerMessageTrait for SignerMessage { - fn msg_id(&self) -> Option { - Some(self.msg_id()) - } -} - -define_u8_enum!( -/// Enum representing the signer message type prefix -SignerMessageTypePrefix { - /// A block response message - BlockResponse = 0, - /// A wsts packet message - Packet = 1, - /// A list of transactions that a signer cares about - Transactions = 2, - /// The results of a successful DKG - DkgResults = 3, - /// The encrypted state of the signer to be persisted - EncryptedSignerState = 4 -}); - -#[cfg_attr(test, mutants::skip)] -impl MessageSlotID { - /// Return the StackerDB contract corresponding to messages of this type - pub fn stacker_db_contract( - &self, - mainnet: bool, - reward_cycle: u64, - ) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - - /// Return the u32 identifier for the message slot (used to index the contract that stores it) - pub fn to_u32(&self) -> u32 { - self.to_u8().into() - } -} - -impl Display for MessageSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}({})", self, self.to_u8()) - } -} - -impl TryFrom for SignerMessageTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown signer message type prefix: {value}")) - }) - } -} - -impl From<&SignerMessage> for SignerMessageTypePrefix { - #[cfg_attr(test, mutants::skip)] - fn from(message: &SignerMessage) -> Self { - match message { - SignerMessage::Packet(_) => SignerMessageTypePrefix::Packet, - SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, - SignerMessage::Transactions(_) => SignerMessageTypePrefix::Transactions, - SignerMessage::DkgResults { .. 
} => SignerMessageTypePrefix::DkgResults, - SignerMessage::EncryptedSignerState(_) => SignerMessageTypePrefix::EncryptedSignerState, - } - } -} - -define_u8_enum!( -/// Enum representing the message type prefix -MessageTypePrefix { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin message - DkgPrivateBegin = 1, - /// DkgEndBegin message - DkgEndBegin = 2, - /// DkgEnd message - DkgEnd = 3, - /// DkgPublicShares message - DkgPublicShares = 4, - /// DkgPrivateShares message - DkgPrivateShares = 5, - /// NonceRequest message - NonceRequest = 6, - /// NonceResponse message - NonceResponse = 7, - /// SignatureShareRequest message - SignatureShareRequest = 8, - /// SignatureShareResponse message - SignatureShareResponse = 9 -}); - -impl From<&Message> for MessageTypePrefix { - fn from(msg: &Message) -> Self { - match msg { - Message::DkgBegin(_) => MessageTypePrefix::DkgBegin, - Message::DkgPrivateBegin(_) => MessageTypePrefix::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageTypePrefix::DkgEndBegin, - Message::DkgEnd(_) => MessageTypePrefix::DkgEnd, - Message::DkgPublicShares(_) => MessageTypePrefix::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageTypePrefix::DkgPrivateShares, - Message::NonceRequest(_) => MessageTypePrefix::NonceRequest, - Message::NonceResponse(_) => MessageTypePrefix::NonceResponse, - Message::SignatureShareRequest(_) => MessageTypePrefix::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageTypePrefix::SignatureShareResponse, - } - } -} - -impl TryFrom for MessageTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown packet type prefix: {value}")) - }) - } -} - -define_u8_enum!( -/// Enum representing the reject code type prefix -RejectCodeTypePrefix { - /// Validation failed - ValidationFailed = 0, - /// Signed rejection - SignedRejection = 1, - /// Insufficient signers - InsufficientSigners = 2, - /// Missing transactions - MissingTransactions = 3, - /// Connectivity issues - ConnectivityIssues = 4, - /// Nonce timeout - NonceTimeout = 5, - /// Aggregator error - AggregatorError = 6 -}); - -impl TryFrom for RejectCodeTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown reject code type prefix: {value}")) - }) - } -} - -impl From<&RejectCode> for RejectCodeTypePrefix { - fn from(reject_code: &RejectCode) -> Self { - match reject_code { - RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed, - RejectCode::SignedRejection(_) => RejectCodeTypePrefix::SignedRejection, - RejectCode::InsufficientSigners(_) => RejectCodeTypePrefix::InsufficientSigners, - RejectCode::MissingTransactions(_) => RejectCodeTypePrefix::MissingTransactions, - RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, - RejectCode::NonceTimeout(_) => RejectCodeTypePrefix::NonceTimeout, - RejectCode::AggregatorError(_) => RejectCodeTypePrefix::AggregatorError, - } - } -} - -/// The messages being sent through the stacker db contracts -#[derive(Clone, PartialEq, Serialize, Deserialize)] -pub enum SignerMessage { - /// The signed/validated Nakamoto block for miners to observe - BlockResponse(BlockResponse), - /// DKG and Signing round data for other signers to observe - Packet(Packet), - /// The list of transactions for miners and signers to observe that this signer cares about - Transactions(Vec), - /// The 
results of a successful DKG - DkgResults { - /// The aggregate key from the DKG round - aggregate_key: Point, - /// The polynomial commits used to construct the aggregate key - party_polynomials: Vec<(u32, PolyCommitment)>, - }, - /// The encrypted state of the signer to be persisted - EncryptedSignerState(Vec), -} - -impl Debug for SignerMessage { - #[cfg_attr(test, mutants::skip)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::BlockResponse(b) => Debug::fmt(b, f), - Self::Packet(p) => Debug::fmt(p, f), - Self::Transactions(t) => f.debug_tuple("Transactions").field(t).finish(), - Self::DkgResults { - aggregate_key, - party_polynomials, - } => { - let party_polynomials: Vec<_> = party_polynomials - .iter() - .map(|(ix, commit)| (ix, commit.to_string())) - .collect(); - f.debug_struct("DkgResults") - .field("aggregate_key", &aggregate_key.to_string()) - .field("party_polynomials", &party_polynomials) - .finish() - } - Self::EncryptedSignerState(s) => { - f.debug_tuple("EncryptedSignerState").field(s).finish() - } - } - } -} - -impl SignerMessage { - /// Helper function to determine the slot ID for the provided stacker-db writer id - #[cfg_attr(test, mutants::skip)] - pub fn msg_id(&self) -> MessageSlotID { - match self { - Self::Packet(packet) => match packet.msg { - Message::DkgBegin(_) => MessageSlotID::DkgBegin, - Message::DkgPrivateBegin(_) => MessageSlotID::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageSlotID::DkgEndBegin, - Message::DkgEnd(_) => MessageSlotID::DkgEnd, - Message::DkgPublicShares(_) => MessageSlotID::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageSlotID::DkgPrivateShares, - Message::NonceRequest(_) => MessageSlotID::NonceRequest, - Message::NonceResponse(_) => MessageSlotID::NonceResponse, - Message::SignatureShareRequest(_) => MessageSlotID::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageSlotID::SignatureShareResponse, - }, - Self::BlockResponse(_) => MessageSlotID::BlockResponse, - Self::Transactions(_) => MessageSlotID::Transactions, - Self::DkgResults { .. 
} => MessageSlotID::DkgResults, - Self::EncryptedSignerState(_) => MessageSlotID::EncryptedSignerState, - } - } -} - -impl SignerMessage { - /// Provide an interface for consensus serializing a DkgResults `SignerMessage` - /// without constructing the DkgResults struct (this eliminates a clone) - pub fn serialize_dkg_result<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - SignerMessageTypePrefix::DkgResults - .to_u8() - .consensus_serialize(fd)?; - Self::serialize_dkg_result_components(fd, aggregate_key, party_polynomials) - } - - /// Serialize the internal components of DkgResults (this eliminates a clone) - fn serialize_dkg_result_components<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - aggregate_key.inner_consensus_serialize(fd)?; - let polynomials_len: u32 = party_polynomials - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - polynomials_len.consensus_serialize(fd)?; - for (party_id, polynomial) in party_polynomials { - party_id.consensus_serialize(fd)?; - polynomial.inner_consensus_serialize(fd)?; - } - Ok(()) - } -} - -impl StacksMessageCodec for SignerMessage { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(SignerMessageTypePrefix::from(self) as u8))?; - match self { - SignerMessage::Packet(packet) => { - packet.inner_consensus_serialize(fd)?; - } - SignerMessage::BlockResponse(block_response) => { - write_next(fd, block_response)?; - } - SignerMessage::Transactions(transactions) => { - write_next(fd, transactions)?; - } - SignerMessage::DkgResults { - aggregate_key, - party_polynomials, - } => { - Self::serialize_dkg_result_components( - fd, - aggregate_key, - party_polynomials.iter().map(|(a, b)| (a, b)), - )?; - } - SignerMessage::EncryptedSignerState(encrypted_state) => { - write_next(fd, encrypted_state)?; - } - }; - Ok(()) - } - - #[cfg_attr(test, mutants::skip)] - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; - let message = match type_prefix { - SignerMessageTypePrefix::Packet => { - let packet = Packet::inner_consensus_deserialize(fd)?; - SignerMessage::Packet(packet) - } - SignerMessageTypePrefix::BlockResponse => { - let block_response = read_next::(fd)?; - SignerMessage::BlockResponse(block_response) - } - SignerMessageTypePrefix::Transactions => { - let transactions = read_next::, _>(fd)?; - SignerMessage::Transactions(transactions) - } - SignerMessageTypePrefix::DkgResults => { - let aggregate_key = Point::inner_consensus_deserialize(fd)?; - let party_polynomial_len = u32::consensus_deserialize(fd)?; - let mut party_polynomials = Vec::with_capacity( - party_polynomial_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..party_polynomial_len { - let party_id = u32::consensus_deserialize(fd)?; - let polynomial = PolyCommitment::inner_consensus_deserialize(fd)?; - party_polynomials.push((party_id, polynomial)); - } - Self::DkgResults { - aggregate_key, - party_polynomials, - } - } - SignerMessageTypePrefix::EncryptedSignerState => { - // Typically the size of the signer state is much smaller, but in the fully degenerate case the size of the persisted state is - // 2800 * 32 * 4 + C for some small constant C. 
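- // (Concretely: 2800 * 32 * 4 = 358,400 bytes, and 358,400 * 4 = 1,433,600 bytes once the margin below is applied.)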
- // To have some margin, we're expanding the left term with an additional factor 4 - let max_encrypted_state_size = 2800 * 32 * 4 * 4; - let mut bound_reader = BoundReader::from_reader(fd, max_encrypted_state_size); - let encrypted_state = read_next::<_, _>(&mut bound_reader)?; - SignerMessage::EncryptedSignerState(encrypted_state) - } - }; - Ok(message) - } -} - -/// Work around for the fact that a lot of the structs being desierialized are not defined in messages.rs -pub trait StacksMessageCodecExtensions: Sized { - /// Serialize the struct to the provided writer - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError>; - /// Deserialize the struct from the provided reader - fn inner_consensus_deserialize(fd: &mut R) -> Result; -} - -impl StacksMessageCodecExtensions for Scalar { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.to_bytes()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let scalar_bytes: [u8; 32] = read_next(fd)?; - Ok(Scalar::from(scalar_bytes)) - } -} - -impl StacksMessageCodecExtensions for Point { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.compress().as_bytes().to_vec()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let compressed_bytes: Vec = read_next(fd)?; - let compressed = Compressed::try_from(compressed_bytes.as_slice()) - .map_err(|e| CodecError::DeserializeError(e.to_string()))?; - Point::try_from(&compressed).map_err(|e| CodecError::DeserializeError(e.to_string())) - } -} - -impl StacksMessageCodecExtensions for PolyCommitment { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - let commit_len: u32 = self - .poly - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - commit_len.consensus_serialize(fd)?; - for poly in self.poly.iter() { - poly.inner_consensus_serialize(fd)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = ID::inner_consensus_deserialize(fd)?; - let commit_len = u32::consensus_deserialize(fd)?; - let mut poly = Vec::with_capacity( - commit_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..commit_len { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - Ok(Self { id, poly }) - } -} - -impl StacksMessageCodecExtensions for ID { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - self.kG.inner_consensus_serialize(fd)?; - self.kca.inner_consensus_serialize(fd) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = Scalar::inner_consensus_deserialize(fd)?; - let k_g = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { id, kG: k_g, kca }) - } -} - -#[allow(non_snake_case)] -impl StacksMessageCodecExtensions for TupleProof { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.R.inner_consensus_serialize(fd)?; - self.rB.inner_consensus_serialize(fd)?; - self.z.inner_consensus_serialize(fd) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let R = Point::inner_consensus_deserialize(fd)?; - let rB = Point::inner_consensus_deserialize(fd)?; - let z = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { R, rB, z }) - } -} - -impl StacksMessageCodecExtensions for BadPrivateShare { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), 
CodecError> { - self.shared_key.inner_consensus_serialize(fd)?; - self.tuple_proof.inner_consensus_serialize(fd) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let shared_key = Point::inner_consensus_deserialize(fd)?; - let tuple_proof = TupleProof::inner_consensus_deserialize(fd)?; - Ok(Self { - shared_key, - tuple_proof, - }) - } -} - -impl StacksMessageCodecExtensions for HashSet { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(self.len() as u32))?; - for i in self { - write_next(fd, i)?; - } - Ok(()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let mut set = Self::new(); - let len = read_next::(fd)?; - for _ in 0..len { - let i = read_next::(fd)?; - set.insert(i); - } - Ok(set) - } -} - -define_u8_enum!( -/// Enum representing the DKG failure type prefix -DkgFailureTypePrefix { - /// Bad state - BadState = 0, - /// Missing public shares - MissingPublicShares = 1, - /// Bad public shares - BadPublicShares = 2, - /// Missing private shares - MissingPrivateShares = 3, - /// Bad private shares - BadPrivateShares = 4 -}); - -impl TryFrom for DkgFailureTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown DKG failure type prefix: {value}")) - }) - } -} - -impl From<&DkgFailure> for DkgFailureTypePrefix { - fn from(failure: &DkgFailure) -> Self { - match failure { - DkgFailure::BadState => DkgFailureTypePrefix::BadState, - DkgFailure::MissingPublicShares(_) => DkgFailureTypePrefix::MissingPublicShares, - DkgFailure::BadPublicShares(_) => DkgFailureTypePrefix::BadPublicShares, - DkgFailure::MissingPrivateShares(_) => DkgFailureTypePrefix::MissingPrivateShares, - DkgFailure::BadPrivateShares(_) => DkgFailureTypePrefix::BadPrivateShares, - } - } -} - -impl StacksMessageCodecExtensions for DkgFailure { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(DkgFailureTypePrefix::from(self) as u8))?; - match self { - DkgFailure::BadState => { - // No additional data to serialize - } - DkgFailure::MissingPublicShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::BadPublicShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::MissingPrivateShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::BadPrivateShares(shares) => { - write_next(fd, &(shares.len() as u32))?; - for (id, share) in shares { - write_next(fd, id)?; - share.inner_consensus_serialize(fd)?; - } - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let failure_type_prefix_byte = read_next::(fd)?; - let failure_type_prefix = DkgFailureTypePrefix::try_from(failure_type_prefix_byte)?; - let failure_type = match failure_type_prefix { - DkgFailureTypePrefix::BadState => DkgFailure::BadState, - DkgFailureTypePrefix::MissingPublicShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::MissingPublicShares(set) - } - DkgFailureTypePrefix::BadPublicShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::BadPublicShares(set) - } - DkgFailureTypePrefix::MissingPrivateShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::MissingPrivateShares(set) - } - DkgFailureTypePrefix::BadPrivateShares => { - let mut map = HashMap::new(); - let len = read_next::(fd)?; - for _ in 0..len { - let i = read_next::(fd)?; - let bad_share = 
BadPrivateShare::inner_consensus_deserialize(fd)?; - map.insert(i, bad_share); - } - DkgFailure::BadPrivateShares(map) - } - }; - Ok(failure_type) - } -} - -impl StacksMessageCodecExtensions for DkgBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - Ok(DkgBegin { dkg_id }) - } -} - -impl StacksMessageCodecExtensions for DkgPrivateBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_ids)?; - write_next(fd, &self.key_ids) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_ids = read_next::, _>(fd)?; - let key_ids = read_next::, _>(fd)?; - Ok(DkgPrivateBegin { - dkg_id, - signer_ids, - key_ids, - }) - } -} - -impl StacksMessageCodecExtensions for DkgEndBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_ids)?; - write_next(fd, &self.key_ids) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_ids = read_next::, _>(fd)?; - let key_ids = read_next::, _>(fd)?; - Ok(DkgEndBegin { - dkg_id, - signer_ids, - key_ids, - }) - } -} - -define_u8_enum!( -/// Enum representing the DKG status type prefix -DkgStatusTypePrefix { - /// Success - Success = 0, - /// Failure - Failure = 1 -}); - -impl TryFrom for DkgStatusTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown DKG status type prefix: {value}")) - }) - } -} - -impl From<&DkgStatus> for DkgStatusTypePrefix { - fn from(status: &DkgStatus) -> Self { - match status { - DkgStatus::Success => DkgStatusTypePrefix::Success, - DkgStatus::Failure(_) => DkgStatusTypePrefix::Failure, - } - } -} - -impl StacksMessageCodecExtensions for DkgEnd { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(DkgStatusTypePrefix::from(&self.status) as u8))?; - match &self.status { - DkgStatus::Success => { - // No additional data to serialize - } - DkgStatus::Failure(failure) => { - failure.inner_consensus_serialize(fd)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let status_type_prefix_byte = read_next::(fd)?; - let status_type_prefix = DkgStatusTypePrefix::try_from(status_type_prefix_byte)?; - let status = match status_type_prefix { - DkgStatusTypePrefix::Success => DkgStatus::Success, - DkgStatusTypePrefix::Failure => { - let failure = DkgFailure::inner_consensus_deserialize(fd)?; - DkgStatus::Failure(failure) - } - }; - Ok(DkgEnd { - dkg_id, - signer_id, - status, - }) - } -} - -impl StacksMessageCodecExtensions for DkgPublicShares { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.comms.len() as u32))?; - for (id, comm) in &self.comms { - write_next(fd, id)?; - comm.id.id.inner_consensus_serialize(fd)?; - comm.id.kG.inner_consensus_serialize(fd)?; - comm.id.kca.inner_consensus_serialize(fd)?; - write_next(fd, &(comm.poly.len() as u32))?; - for poly in comm.poly.iter() 
{ - poly.inner_consensus_serialize(fd)? - } - } - Ok(()) - } - - #[allow(non_snake_case)] - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut comms = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let scalar_id = Scalar::inner_consensus_deserialize(fd)?; - let kG = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - let num_poly_coeffs = read_next::(fd)?; - let mut poly = Vec::new(); - for _ in 0..num_poly_coeffs { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - comms.push(( - id, - PolyCommitment { - id: ID { - id: scalar_id, - kG, - kca, - }, - poly, - }, - )); - } - Ok(DkgPublicShares { - dkg_id, - signer_id, - comms, - }) - } -} - -impl StacksMessageCodecExtensions for DkgPrivateShares { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.shares.len() as u32))?; - for (id, share_map) in &self.shares { - write_next(fd, id)?; - write_next(fd, &(share_map.len() as u32))?; - for (id, share) in share_map { - write_next(fd, id)?; - write_next(fd, share)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut shares = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let num_share_map = read_next::(fd)?; - let mut share_map = HashMap::new(); - for _ in 0..num_share_map { - let id = read_next::(fd)?; - let share: Vec = read_next(fd)?; - share_map.insert(id, share); - } - shares.push((id, share_map)); - } - Ok(DkgPrivateShares { - dkg_id, - signer_id, - shares, - }) - } -} - -impl StacksMessageCodecExtensions for NonceRequest { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.message)?; - write_next(fd, &(self.is_taproot as u8))?; - write_next(fd, &(self.merkle_root.is_some() as u8))?; - if let Some(merkle_root) = self.merkle_root { - write_next(fd, &merkle_root)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let message = read_next::, _>(fd)?; - let is_taproot = read_next::(fd)? != 0; - let has_merkle_root = read_next::(fd)? != 0; - let merkle_root = if has_merkle_root { - Some(read_next::<[u8; 32], _>(fd)?) 
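- // The 32-byte merkle root payload is present on the wire only when the preceding flag byte was nonzero.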
- } else { - None - }; - - Ok(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message, - is_taproot, - merkle_root, - }) - } -} - -impl StacksMessageCodecExtensions for NonceResponse { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &self.key_ids)?; - write_next(fd, &(self.nonces.len() as u32))?; - for nonce in &self.nonces { - nonce.D.inner_consensus_serialize(fd)?; - nonce.E.inner_consensus_serialize(fd)?; - } - write_next(fd, &self.message)?; - Ok(()) - } - - #[allow(non_snake_case)] - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let key_ids = read_next::, _>(fd)?; - let num_nonces = read_next::(fd)?; - let mut nonces = Vec::new(); - for _ in 0..num_nonces { - let D = Point::inner_consensus_deserialize(fd)?; - let E = Point::inner_consensus_deserialize(fd)?; - nonces.push(PublicNonce { D, E }); - } - let message = read_next::, _>(fd)?; - - Ok(NonceResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - key_ids, - nonces, - message, - }) - } -} - -impl StacksMessageCodecExtensions for SignatureShareRequest { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &(self.nonce_responses.len() as u32))?; - for nonce_response in &self.nonce_responses { - nonce_response.inner_consensus_serialize(fd)?; - } - write_next(fd, &self.message)?; - write_next(fd, &(self.is_taproot as u8))?; - write_next(fd, &(self.merkle_root.is_some() as u8))?; - if let Some(merkle_root) = self.merkle_root { - write_next(fd, &merkle_root)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let num_nonce_responses = read_next::(fd)?; - let mut nonce_responses = Vec::new(); - for _ in 0..num_nonce_responses { - nonce_responses.push(NonceResponse::inner_consensus_deserialize(fd)?); - } - let message = read_next::, _>(fd)?; - let is_taproot = read_next::(fd)? != 0; - let has_merkle_root = read_next::(fd)? != 0; - let merkle_root = if has_merkle_root { - Some(read_next::<[u8; 32], _>(fd)?) 
- } else { - None - }; - - Ok(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses, - message, - is_taproot, - merkle_root, - }) - } -} - -impl StacksMessageCodecExtensions for SignatureShareResponse { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.signature_shares.len() as u32))?; - for share in &self.signature_shares { - write_next(fd, &share.id)?; - share.z_i.inner_consensus_serialize(fd)?; - write_next(fd, &share.key_ids)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut signature_shares = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let z_i = Scalar::inner_consensus_deserialize(fd)?; - let key_ids = read_next::, _>(fd)?; - signature_shares.push(SignatureShare { id, z_i, key_ids }); - } - Ok(SignatureShareResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - signature_shares, - }) - } -} - -impl StacksMessageCodecExtensions for Message { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(MessageTypePrefix::from(self) as u8))?; - match self { - Message::DkgBegin(dkg_begin) => { - dkg_begin.inner_consensus_serialize(fd)?; - } - Message::DkgPrivateBegin(dkg_private_begin) => { - dkg_private_begin.inner_consensus_serialize(fd)?; - } - Message::DkgEndBegin(dkg_end_begin) => { - dkg_end_begin.inner_consensus_serialize(fd)?; - } - Message::DkgEnd(dkg_end) => { - dkg_end.inner_consensus_serialize(fd)?; - } - Message::DkgPublicShares(dkg_public_shares) => { - dkg_public_shares.inner_consensus_serialize(fd)?; - } - Message::DkgPrivateShares(dkg_private_shares) => { - dkg_private_shares.inner_consensus_serialize(fd)?; - } - Message::NonceRequest(nonce_request) => { - nonce_request.inner_consensus_serialize(fd)?; - } - Message::NonceResponse(nonce_response) => { - nonce_response.inner_consensus_serialize(fd)?; - } - Message::SignatureShareRequest(signature_share_request) => { - signature_share_request.inner_consensus_serialize(fd)?; - } - Message::SignatureShareResponse(signature_share_response) => { - signature_share_response.inner_consensus_serialize(fd)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = MessageTypePrefix::try_from(type_prefix_byte)?; - let message = match type_prefix { - MessageTypePrefix::DkgBegin => { - Message::DkgBegin(DkgBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgPrivateBegin => { - Message::DkgPrivateBegin(DkgPrivateBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgEndBegin => { - Message::DkgEndBegin(DkgEndBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgEnd => Message::DkgEnd(DkgEnd::inner_consensus_deserialize(fd)?), - MessageTypePrefix::DkgPublicShares => { - Message::DkgPublicShares(DkgPublicShares::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgPrivateShares => { - Message::DkgPrivateShares(DkgPrivateShares::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::NonceRequest => { - Message::NonceRequest(NonceRequest::inner_consensus_deserialize(fd)?) 
- } - MessageTypePrefix::NonceResponse => { - Message::NonceResponse(NonceResponse::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::SignatureShareRequest => Message::SignatureShareRequest( - SignatureShareRequest::inner_consensus_deserialize(fd)?, - ), - MessageTypePrefix::SignatureShareResponse => Message::SignatureShareResponse( - SignatureShareResponse::inner_consensus_deserialize(fd)?, - ), - }; - Ok(message) - } -} - -impl StacksMessageCodecExtensions for Packet { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.msg.inner_consensus_serialize(fd)?; - write_next(fd, &self.sig)?; - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let msg = Message::inner_consensus_deserialize(fd)?; - let sig: Vec = read_next(fd)?; - Ok(Packet { msg, sig }) - } -} - -define_u8_enum!( -/// Enum representing the block response type prefix -BlockResponseTypePrefix { - /// Accepted - Accepted = 0, - /// Rejected - Rejected = 1 -}); - -impl TryFrom for BlockResponseTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown block response type prefix: {value}")) - }) - } -} - -impl From<&BlockResponse> for BlockResponseTypePrefix { - fn from(block_response: &BlockResponse) -> Self { - match block_response { - BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted, - BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected, - } - } -} - -/// The response that a signer sends back to observing miners -/// either accepting or rejecting a Nakamoto block with the corresponding reason -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum BlockResponse { - /// The Nakamoto block was accepted and therefore signed - Accepted((Sha512Trunc256Sum, ThresholdSignature)), - /// The Nakamoto block was rejected and therefore not signed - Rejected(BlockRejection), -} - -impl std::fmt::Display for BlockResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockResponse::Accepted(a) => { - write!( - f, - "BlockAccepted: signer_sighash = {}, signature = {}", - a.0, a.1 - ) - } - BlockResponse::Rejected(r) => { - write!( - f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}", - r.reason_code, r.reason, r.signer_signature_hash - ) - } - } - } -} - -impl BlockResponse { - /// Create a new accepted BlockResponse for the provided block signer signature hash and signature - pub fn accepted(hash: Sha512Trunc256Sum, sig: Signature) -> Self { - Self::Accepted((hash, ThresholdSignature(sig))) - } - - /// Create a new rejected BlockResponse for the provided block signer signature hash and signature - pub fn rejected(hash: Sha512Trunc256Sum, sig: Signature) -> Self { - Self::Rejected(BlockRejection::new( - hash, - RejectCode::SignedRejection(ThresholdSignature(sig)), - )) - } -} - -impl StacksMessageCodec for BlockResponse { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; - match self { - BlockResponse::Accepted((hash, sig)) => { - write_next(fd, hash)?; - write_next(fd, sig)?; - } - BlockResponse::Rejected(rejection) => { - write_next(fd, rejection)?; - } - }; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; - let response = match type_prefix { - 
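- // Dispatch on the one-byte type prefix written by consensus_serialize above; the payload that follows depends on the variant.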
BlockResponseTypePrefix::Accepted => { - let hash = read_next::(fd)?; - let sig = read_next::(fd)?; - BlockResponse::Accepted((hash, sig)) - } - BlockResponseTypePrefix::Rejected => { - let rejection = read_next::(fd)?; - BlockResponse::Rejected(rejection) - } - }; - Ok(response) - } -} - -/// A rejection response from a signer for a proposed block -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BlockRejection { - /// The reason for the rejection - pub reason: String, - /// The reason code for the rejection - pub reason_code: RejectCode, - /// The signer signature hash of the block that was rejected - pub signer_signature_hash: Sha512Trunc256Sum, -} - -impl BlockRejection { - /// Create a new BlockRejection for the provided block and reason code - pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self { - Self { - reason: reason_code.to_string(), - reason_code, - signer_signature_hash, - } - } -} - -impl StacksMessageCodec for BlockRejection { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.reason.as_bytes().to_vec())?; - write_next(fd, &self.reason_code)?; - write_next(fd, &self.signer_signature_hash)?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let reason_bytes = read_next::, _>(fd)?; - let reason = String::from_utf8(reason_bytes).map_err(|e| { - CodecError::DeserializeError(format!("Failed to decode reason string: {:?}", &e)) - })?; - let reason_code = read_next::(fd)?; - let signer_signature_hash = read_next::(fd)?; - Ok(Self { - reason, - reason_code, - signer_signature_hash, - }) - } -} - -impl From for BlockRejection { - fn from(reject: BlockValidateReject) -> Self { - Self { - reason: reject.reason, - reason_code: RejectCode::ValidationFailed(reject.reason_code), - signer_signature_hash: reject.signer_signature_hash, - } - } -} - -/// This enum is used to supply a `reason_code` for block rejections -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum RejectCode { - /// RPC endpoint Validation failed - ValidationFailed(ValidateRejectCode), - /// Signers signed a block rejection - SignedRejection(ThresholdSignature), - /// Nonce timeout was reached - NonceTimeout(Vec), - /// Insufficient signers agreed to sign the block - InsufficientSigners(Vec), - /// An internal error occurred in the signer when aggregating the signaure - AggregatorError(String), - /// Missing the following expected transactions - MissingTransactions(Vec), - /// The block was rejected due to connectivity issues with the signer - ConnectivityIssues, -} - -impl From<&SignError> for RejectCode { - fn from(err: &SignError) -> Self { - match err { - SignError::NonceTimeout(_valid_signers, malicious_signers) => { - Self::NonceTimeout(malicious_signers.clone()) - } - SignError::InsufficientSigners(malicious_signers) => { - Self::InsufficientSigners(malicious_signers.clone()) - } - SignError::Aggregator(e) => Self::AggregatorError(e.to_string()), - } - } -} - -impl StacksMessageCodec for RejectCode { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?; - match self { - RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, - RejectCode::SignedRejection(sig) => write_next(fd, sig)?, - RejectCode::InsufficientSigners(malicious_signers) - | RejectCode::NonceTimeout(malicious_signers) => write_next(fd, malicious_signers)?, - RejectCode::MissingTransactions(missing_transactions) => { - 
write_next(fd, missing_transactions)? - } - RejectCode::AggregatorError(reason) => write_next(fd, &reason.as_bytes().to_vec())?, - RejectCode::ConnectivityIssues => write_next(fd, &4u8)?, - }; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = RejectCodeTypePrefix::try_from(type_prefix_byte)?; - let code = match type_prefix { - RejectCodeTypePrefix::ValidationFailed => RejectCode::ValidationFailed( - ValidateRejectCode::try_from(read_next::(fd)?).map_err(|e| { - CodecError::DeserializeError(format!( - "Failed to decode validation reject code: {:?}", - &e - )) - })?, - ), - RejectCodeTypePrefix::SignedRejection => { - RejectCode::SignedRejection(read_next::(fd)?) - } - RejectCodeTypePrefix::InsufficientSigners => { - RejectCode::InsufficientSigners(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::MissingTransactions => { - RejectCode::MissingTransactions(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::NonceTimeout => { - RejectCode::NonceTimeout(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, - RejectCodeTypePrefix::AggregatorError => { - let reason_bytes = read_next::, _>(fd)?; - let reason = String::from_utf8(reason_bytes).map_err(|e| { - CodecError::DeserializeError(format!( - "Failed to decode reason string: {:?}", - &e - )) - })?; - RejectCode::AggregatorError(reason) - } - }; - Ok(code) - } -} - -impl std::fmt::Display for RejectCode { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - RejectCode::ValidationFailed(code) => write!(f, "Validation failed: {:?}", code), - RejectCode::SignedRejection(sig) => { - write!(f, "A threshold number of signers rejected the block with the following signature: {:?}.", sig) - } - RejectCode::InsufficientSigners(malicious_signers) => write!( - f, - "Insufficient signers agreed to sign the block. The following signers are malicious: {:?}", - malicious_signers - ), - RejectCode::NonceTimeout(malicious_signers) => write!( - f, - "Nonce timeout occurred signers. The following signers are malicious: {:?}", - malicious_signers - ), - RejectCode::MissingTransactions(missing_transactions) => write!( - f, - "Missing the following expected transactions: {:?}", - missing_transactions.iter().map(|tx| tx.txid()).collect::>() - ), - RejectCode::ConnectivityIssues => write!( - f, - "The block was rejected due to connectivity issues with the signer." 
- ), - RejectCode::AggregatorError(reason) => write!( - f, - "An internal error occurred in the signer when aggregating the signaure: {:?}", - reason - ), - } - } -} - -impl From for SignerMessage { - fn from(packet: Packet) -> Self { - Self::Packet(packet) - } -} - -impl From for SignerMessage { - fn from(block_response: BlockResponse) -> Self { - Self::BlockResponse(block_response) - } -} - -impl From for SignerMessage { - fn from(block_rejection: BlockRejection) -> Self { - Self::BlockResponse(BlockResponse::Rejected(block_rejection)) - } -} - -impl From for SignerMessage { - fn from(rejection: BlockValidateReject) -> Self { - Self::BlockResponse(BlockResponse::Rejected(rejection.into())) - } -} - -#[cfg(test)] -mod test { - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; - use rand::Rng; - use rand_core::OsRng; - use stacks_common::consts::CHAIN_ID_TESTNET; - use stacks_common::types::chainstate::StacksPrivateKey; - use wsts::common::Signature; - - use super::{StacksMessageCodecExtensions, *}; - - #[test] - #[should_panic] - // V1 signer slots do not have enough slots in Epoch 2.5. Something will need to be updated! - fn signer_slots_count_is_sane() { - let slot_identifiers_len = MessageSlotID::ALL.len(); - assert!( - SIGNER_SLOTS_PER_USER as usize >= slot_identifiers_len, - "stacks_common::SIGNER_SLOTS_PER_USER ({}) must be >= slot identifiers ({})", - SIGNER_SLOTS_PER_USER, - slot_identifiers_len, - ); - } - - #[test] - fn serde_reject_code() { - let code = RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::SignedRejection(ThresholdSignature::empty()); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::InsufficientSigners(vec![0, 1, 2]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::NonceTimeout(vec![0, 1, 2]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::AggregatorError("Test Error".into()); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let code = 
RejectCode::MissingTransactions(vec![tx]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::ConnectivityIssues; - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - } - - #[test] - fn serde_block_rejection() { - let rejection = BlockRejection::new( - Sha512Trunc256Sum([0u8; 32]), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([1u8; 32]), - RejectCode::SignedRejection(ThresholdSignature::empty()), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::InsufficientSigners(vec![0, 1, 2]), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::NonceTimeout(vec![0, 1, 2]), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::AggregatorError("Test Error".into()), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - } - - #[test] - fn serde_block_response() { - let response = - BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), ThresholdSignature::empty())); - let serialized_response = response.serialize_to_vec(); - let deserialized_response = read_next::(&mut &serialized_response[..]) - .expect("Failed to deserialize BlockResponse"); - assert_eq!(response, deserialized_response); - - let response = BlockResponse::Rejected(BlockRejection::new( - Sha512Trunc256Sum([1u8; 32]), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - )); - let serialized_response = response.serialize_to_vec(); - let deserialized_response = read_next::(&mut &serialized_response[..]) - .expect("Failed to deserialize BlockResponse"); - assert_eq!(response, deserialized_response); - } - - #[test] - fn serde_point_scalar() { - let mut rng = OsRng; - let scalar = Scalar::random(&mut rng); - let mut serialized_scalar = vec![]; - scalar - .inner_consensus_serialize(&mut serialized_scalar) - .expect("serialization to buffer failed."); - let deserialized_scalar = Scalar::inner_consensus_deserialize(&mut &serialized_scalar[..]) - .expect("Failed to deserialize Scalar"); - assert_eq!(scalar, 
deserialized_scalar); - - let point = Point::from(scalar); - let mut serialized_point = vec![]; - point - .inner_consensus_serialize(&mut serialized_point) - .expect("serialization to buffer failed."); - let deserialized_point = Point::inner_consensus_deserialize(&mut &serialized_point[..]) - .expect("Failed to deserialize Point"); - assert_eq!(point, deserialized_point); - } - - fn test_fixture_packet(msg: Message) { - let packet = Packet { - msg, - sig: vec![1u8; 20], - }; - let mut serialized_packet = vec![]; - packet - .inner_consensus_serialize(&mut serialized_packet) - .expect("serialization to buffer failed."); - let deserialized_packet = Packet::inner_consensus_deserialize(&mut &serialized_packet[..]) - .expect("Failed to deserialize Packet"); - assert_eq!(packet, deserialized_packet); - } - - #[test] - fn serde_packet() { - // Test DKG begin Packet - test_fixture_packet(Message::DkgBegin(DkgBegin { dkg_id: 0 })); - - let dkg_id = rand::thread_rng().gen(); - let signer_id = rand::thread_rng().gen(); - let sign_id = rand::thread_rng().gen(); - let sign_iter_id = rand::thread_rng().gen(); - let mut signer_ids = [0u32; 100]; - rand::thread_rng().fill(&mut signer_ids[..]); - - let mut key_ids = [0u32; 100]; - rand::thread_rng().fill(&mut key_ids[..]); - let nmb_items = rand::thread_rng().gen_range(1..100); - - // Test DKG private begin Packet - test_fixture_packet(Message::DkgPrivateBegin(DkgPrivateBegin { - dkg_id, - signer_ids: signer_ids.to_vec(), - key_ids: key_ids.to_vec(), - })); - - // Test DKG end begin Packet - test_fixture_packet(Message::DkgEndBegin(DkgEndBegin { - dkg_id, - signer_ids: signer_ids.to_vec(), - key_ids: key_ids.to_vec(), - })); - - // Test DKG end Packet Success - test_fixture_packet(Message::DkgEnd(DkgEnd { - dkg_id, - signer_id, - status: DkgStatus::Success, - })); - - // Test DKG end Packet Failure - test_fixture_packet(Message::DkgEnd(DkgEnd { - dkg_id, - signer_id, - status: DkgStatus::Failure(DkgFailure::BadState), - })); - - // Test DKG public shares Packet - let rng = &mut OsRng; - let comms = (0..nmb_items) - .map(|i| { - ( - i, - PolyCommitment { - id: ID { - id: Scalar::random(rng), - kG: Point::from(Scalar::random(rng)), - kca: Scalar::random(rng), - }, - poly: vec![ - Point::from(Scalar::random(rng)), - Point::from(Scalar::random(rng)), - ], - }, - ) - }) - .collect(); - test_fixture_packet(Message::DkgPublicShares(DkgPublicShares { - dkg_id, - signer_id, - comms, - })); - - // Test DKG private shares Packet - let mut shares = vec![]; - for i in 0..nmb_items { - let mut shares_map = HashMap::new(); - for i in 0..nmb_items { - let mut bytes = [0u8; 20]; - rng.fill(&mut bytes[..]); - shares_map.insert(i, bytes.to_vec()); - } - shares.push((i, shares_map)); - } - test_fixture_packet(Message::DkgPrivateShares(DkgPrivateShares { - dkg_id, - signer_id, - shares, - })); - - // Test Nonce request Packet with merkle root - let mut message = [0u8; 40]; - rng.fill(&mut message[..]); - let mut merkle_root_bytes = [0u8; 32]; - rng.fill(&mut merkle_root_bytes[..]); - let merkle_root = Some(merkle_root_bytes); - - test_fixture_packet(Message::NonceRequest(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message: message.to_vec(), - is_taproot: true, - merkle_root, - })); - - // Test Nonce request Packet with no merkle root - test_fixture_packet(Message::NonceRequest(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message: message.to_vec(), - is_taproot: false, - merkle_root: None, - })); - - // Test Nonce response Packet - let mut nonces = vec![]; - 
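- // Build nmb_items random (D, E) nonce pairs for the NonceResponse fixture below.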
for _ in 0..nmb_items { - nonces.push(PublicNonce { - D: Point::from(Scalar::random(rng)), - E: Point::from(Scalar::random(rng)), - }); - } - let nonce_response = NonceResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - key_ids: key_ids.to_vec(), - nonces, - message: message.to_vec(), - }; - test_fixture_packet(Message::NonceResponse(nonce_response.clone())); - - // Test Signature share request Packet with merkle root and nonce response - test_fixture_packet(Message::SignatureShareRequest(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses: vec![nonce_response], - message: message.to_vec(), - is_taproot: true, - merkle_root, - })); - - // Test Signature share request Packet with no merkle root and nonce response - test_fixture_packet(Message::SignatureShareRequest(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses: vec![], - message: message.to_vec(), - is_taproot: false, - merkle_root: None, - })); - - // Test Signature share response Packet - let mut signature_shares = vec![]; - for i in 0..nmb_items { - let mut key_ids = vec![]; - for i in 0..nmb_items { - key_ids.push(i); - } - signature_shares.push(SignatureShare { - id: i, - z_i: Scalar::random(rng), - key_ids, - }); - } - test_fixture_packet(Message::SignatureShareResponse(SignatureShareResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - signature_shares, - })); - } - - #[test] - fn serde_signer_message() { - let rng = &mut OsRng; - let signer_message = SignerMessage::Packet(Packet { - msg: Message::DkgBegin(DkgBegin { dkg_id: 0 }), - sig: vec![1u8; 20], - }); - - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - - let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(( - Sha512Trunc256Sum([2u8; 32]), - ThresholdSignature(Signature { - R: Point::from(Scalar::random(rng)), - z: Scalar::random(rng), - }), - ))); - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let signer_message = SignerMessage::Transactions(vec![tx]); - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - } -} diff --git a/libsigner/src/v1/mod.rs b/libsigner/src/v1/mod.rs deleted file mode 100644 index e5a691efb2b..00000000000 --- a/libsigner/src/v1/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the 
terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -/// Messages for the v1 signer -pub mod messages; diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 64e3cd5ca9e..da94cc10deb 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -43,7 +43,6 @@ tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -wsts = { workspace = true } rand = { workspace = true } url = "2.1.0" rusqlite = { workspace = true } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index c36f73a3f9c..081b5c07ab9 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -148,7 +148,6 @@ pub(crate) mod tests { }; use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{Hash160, Sha256Sum}; - use wsts::curve::point::Point; use super::*; use crate::config::{GlobalConfig, SignerConfig}; @@ -325,41 +324,6 @@ pub(crate) mod tests { (format!("HTTP/1.1 200 Ok\n\n{pox_info_json}"), pox_info) } - /// Build a response for the get_approved_aggregate_key request - pub fn build_get_approved_aggregate_key_response(point: Option) -> String { - let clarity_value = if let Some(point) = point { - ClarityValue::some( - ClarityValue::buff_from(point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"), - ) - .expect("BUG: Failed to create clarity value from point") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - /// Build a response for the get_approved_aggregate_key request - pub fn build_get_vote_for_aggregate_key_response(point: Option) -> String { - let clarity_value = if let Some(point) = point { - ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ( - "aggregate-public-key".into(), - ClarityValue::buff_from(point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"), - ), - ("signer-weight".into(), ClarityValue::UInt(1)), // fixed for testing purposes - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - /// Build a response for the get_peer_info_with_retry request with a specific stacks tip height and consensus hash pub fn build_get_peer_info_response( burn_block_height: Option, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7b490144fce..dbe4f9094d8 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -18,7 +18,7 @@ use std::collections::{HashMap, VecDeque}; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, 
SIGNERS_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ @@ -54,7 +54,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::StacksEpochId; use stacks_common::{debug, warn}; -use wsts::curve::point::{Compressed, Point}; use super::SignerSlotID; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -273,40 +272,6 @@ impl StacksClient { .collect()) } - /// Get the vote for a given round, reward cycle, and signer address - pub fn get_vote_for_aggregate_public_key( - &self, - round: u64, - reward_cycle: u64, - signer: StacksAddress, - ) -> Result, ClientError> { - debug!("Getting vote for aggregate public key..."); - let function_name = ClarityName::from("get-vote"); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - ClarityValue::UInt(round as u128), - ClarityValue::Principal(signer.into()), - ]; - let value = self.read_only_contract_call( - &boot_code_addr(self.mainnet), - &ContractName::from(SIGNERS_VOTING_NAME), - &function_name, - function_args, - )?; - // Return value is of type: - // ```clarity - // (option { aggregate-public-key: (buff 33), signer-weight: uint }) - // ``` - let inner_data = value.expect_optional()?; - if let Some(inner_data) = inner_data { - let tuple = inner_data.expect_tuple()?; - let key_value = tuple.get_owned("aggregate-public-key")?; - self.parse_aggregate_public_key(key_value) - } else { - Ok(None) - } - } - /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction pub fn get_medium_estimated_fee_ustx( &self, @@ -406,27 +371,6 @@ impl StacksClient { Ok(()) } - /// Retrieve the approved DKG aggregate public key for the given reward cycle - pub fn get_approved_aggregate_key( - &self, - reward_cycle: u64, - ) -> Result, ClientError> { - let function_name = ClarityName::from("get-approved-aggregate-key"); - let voting_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &voting_contract_id.issuer.into(), - &voting_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - inner_data.map_or_else( - || Ok(None), - |key_value| self.parse_aggregate_public_key(key_value), - ) - } - /// Retrieve the current consumed weight for the given reward cycle and DKG round pub fn get_round_vote_weight( &self, @@ -736,61 +680,6 @@ impl StacksClient { Ok(account_entry) } - /// Helper function that attempts to deserialize a clarity hex string as the aggregate public key - fn parse_aggregate_public_key( - &self, - value: ClarityValue, - ) -> Result, ClientError> { - debug!("Parsing aggregate public key..."); - let data = value.expect_buff(33)?; - // It is possible that the point was invalid though when voted upon and this cannot be prevented by pox 4 definitions... - // Pass up this error if the conversions fail. 
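The deletions above drop the wsts round-trip (Clarity buff -> Compressed -> Point) for aggregate keys. Where a 33-byte key still needs a sanity check, the same validation can be done with stacks-common alone, as the vote-for-aggregate-key burn op hunk later in this patch does. A minimal sketch assuming only stacks-common; the helper name is illustrative, not part of the PR:

use stacks_common::util::secp256k1::Secp256k1PublicKey;

// Hypothetical helper: accept a 33-byte buffer only if it parses as a
// compressed secp256k1 public key. No wsts types involved.
fn check_aggregate_key_bytes(bytes: &[u8]) -> Result<Secp256k1PublicKey, String> {
    if bytes.len() != 33 {
        return Err(format!("expected 33 bytes, got {}", bytes.len()));
    }
    Secp256k1PublicKey::from_slice(bytes)
        .map_err(|e| format!("invalid compressed public key: {e}"))
}
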
- let compressed_data = Compressed::try_from(data.as_slice()).map_err(|e| { - ClientError::MalformedClarityValue(format!( - "Failed to convert aggregate public key to compressed data: {e}" - )) - })?; - let dkg_public_key = Point::try_from(&compressed_data).map_err(|e| { - ClientError::MalformedClarityValue(format!( - "Failed to convert aggregate public key to a point: {e}" - )) - })?; - Ok(Some(dkg_public_key)) - } - - /// Helper function to create a stacks transaction for a modifying contract call - pub fn build_unsigned_vote_for_aggregate_public_key( - &self, - signer_index: u32, - round: u64, - dkg_public_key: Point, - reward_cycle: u64, - nonce: u64, - ) -> Result { - debug!("Building {SIGNERS_VOTING_FUNCTION_NAME} transaction..."); - let contract_address = boot_code_addr(self.mainnet); - let contract_name = ContractName::from(SIGNERS_VOTING_NAME); - let function_name = ClarityName::from(SIGNERS_VOTING_FUNCTION_NAME); - let function_args = vec![ - ClarityValue::UInt(signer_index as u128), - ClarityValue::buff_from(dkg_public_key.compress().data.to_vec())?, - ClarityValue::UInt(round as u128), - ClarityValue::UInt(reward_cycle as u128), - ]; - - let unsigned_tx = Self::build_unsigned_contract_call_transaction( - &contract_address, - contract_name, - function_name, - &function_args, - &self.stacks_private_key, - self.tx_version, - self.chain_id, - nonce, - )?; - Ok(unsigned_tx) - } - /// Try to post a completed nakamoto block to our connected stacks-node /// Returns `true` if the block was accepted or `false` if the block /// was rejected. @@ -1036,15 +925,13 @@ mod tests { use rand_core::RngCore; use stacks_common::bitvec::BitVec; use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; - use wsts::curve::scalar::Scalar; use super::*; use crate::client::tests::{ - build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_last_set_cycle_response, - build_get_medium_estimated_fee_ustx_response, build_get_peer_info_response, - build_get_pox_data_response, build_get_round_info_response, build_get_tenure_tip_response, - build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, + build_account_nonce_response, build_get_last_round_response, + build_get_last_set_cycle_response, build_get_medium_estimated_fee_ustx_response, + build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, + build_get_tenure_tip_response, build_get_weight_threshold_response, build_read_only_response, write_response, MockServerClient, }; @@ -1174,45 +1061,6 @@ mod tests { assert!(matches!(res, Err(ClientError::ReqwestError(_)))); } - #[test] - fn get_aggregate_public_key_should_succeed() { - let orig_point = Point::from(Scalar::random(&mut rand::thread_rng())); - let response = build_get_approved_aggregate_key_response(Some(orig_point)); - let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_approved_aggregate_key(0)); - write_response(mock.server, response.as_bytes()); - let res = h.join().unwrap().unwrap(); - assert_eq!(res, Some(orig_point)); - - let response = build_get_approved_aggregate_key_response(None); - let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_approved_aggregate_key(0)); - write_response(mock.server, response.as_bytes()); - let res = h.join().unwrap().unwrap(); - assert!(res.is_none()); - } - - #[test] - fn parse_valid_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let orig_point = 
Point::from(Scalar::random(&mut rand::thread_rng())); - let clarity_value = ClarityValue::buff_from(orig_point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"); - let result = mock - .client - .parse_aggregate_public_key(clarity_value) - .unwrap(); - assert_eq!(result, Some(orig_point)); - } - - #[test] - fn parse_invalid_aggregate_public_key_should_fail() { - let mock = MockServerClient::new(); - let value = ClarityValue::UInt(10_u128); - let result = mock.client.parse_aggregate_public_key(value); - assert!(result.is_err()) - } - #[test] fn transaction_contract_call_should_send_bytes_to_node() { let mock = MockServerClient::new(); @@ -1265,58 +1113,6 @@ mod tests { ); } - #[test] - fn build_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let nonce = thread_rng().next_u64(); - let signer_index = thread_rng().next_u32(); - let round = thread_rng().next_u64(); - let reward_cycle = thread_rng().next_u64(); - - let h = spawn(move || { - mock.client.build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - }); - assert!(h.join().unwrap().is_ok()); - } - - #[test] - fn broadcast_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let nonce = thread_rng().next_u64(); - let signer_index = thread_rng().next_u32(); - let round = thread_rng().next_u64(); - let reward_cycle = thread_rng().next_u64(); - let unsigned_tx = mock - .client - .build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - .unwrap(); - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - - write_response( - mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), - ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); - } - #[test] fn core_info_call_for_burn_block_height_should_succeed() { let mock = MockServerClient::new(); @@ -1579,9 +1375,10 @@ mod tests { #[test] fn get_reward_set_should_succeed() { let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())).compress(); + let private_key = StacksPrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); let mut bytes = [0u8; 33]; - bytes.copy_from_slice(point.as_bytes()); + bytes.copy_from_slice(&public_key.to_bytes_compressed()); let stacker_set = RewardSet { rewarded_addresses: vec![PoxAddress::standard_burn_address(false)], start_cycle_state: PoxStartCycleInfo { @@ -1606,30 +1403,6 @@ mod tests { assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } - #[test] - fn get_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let stacks_address = mock.client.stacks_address; - let key_response = build_get_vote_for_aggregate_key_response(Some(point)); - let h = spawn(move || { - mock.client - .get_vote_for_aggregate_public_key(0, 0, stacks_address) - }); - write_response(mock.server, key_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(point)); - - let mock = MockServerClient::new(); - let stacks_address = mock.client.stacks_address; - let key_response = 
build_get_vote_for_aggregate_key_response(None); - let h = spawn(move || { - mock.client - .get_vote_for_aggregate_public_key(0, 0, stacks_address) - }); - write_response(mock.server, key_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - #[test] fn get_round_vote_weight_should_succeed() { let mock = MockServerClient::new(); diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 8bac540e7a0..20c2bc2ca87 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -57,7 +57,7 @@ use stacks_common::{info, warn}; use crate::client::StacksClient; use crate::config::SignerConfig; -use crate::runloop::{RunLoop, RunLoopCommand}; +use crate::runloop::RunLoop; /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { @@ -74,13 +74,6 @@ pub trait Signer: Debug + Display { res: &Sender>, current_reward_cycle: u64, ); - /// Process a command - fn process_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - command: Option, - ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; } @@ -90,14 +83,12 @@ pub type RunningSigner = libsigner::RunningSigner, Vec /// The wrapper for the runloop signer type type RunLoopSigner = - libsigner::Signer, RunLoop, SignerEventReceiver, T>; + libsigner::Signer, RunLoop, SignerEventReceiver, T>; /// The spawned signer pub struct SpawnedSigner + Send, T: SignerEventTrait> { /// The underlying running signer thread handle running_signer: RunningSigner, - /// The command sender for interacting with the running signer - pub cmd_send: Sender, /// The result receiver for interacting with the running signer pub res_recv: Receiver>, /// The spawned signer's config @@ -131,7 +122,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner For more information, check the documentation at \ https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like." ); - let (cmd_send, cmd_recv) = channel(); let (res_send, res_recv) = channel(); let ev = SignerEventReceiver::new(config.network.is_mainnet()); #[cfg(feature = "monitoring_prom")] @@ -139,12 +129,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); } let runloop = RunLoop::new(config.clone()); - let mut signer: RunLoopSigner = - libsigner::Signer::new(runloop, ev, cmd_recv, res_send); + let mut signer: RunLoopSigner = libsigner::Signer::new(runloop, ev, res_send); let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); SpawnedSigner { running_signer, - cmd_send, res_recv, _phantom: std::marker::PhantomData, config, diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index d8d159a0868..855957a70a9 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -1,5 +1,3 @@ -use std::collections::VecDeque; -use std::fmt::Debug; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -15,16 +13,15 @@ use std::fmt::Debug; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
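With process_command removed from the Signer trait, the spawned signer carries only a result channel: the command sender disappears from SpawnedSigner above, and in the runloop.rs hunks that follow, the VecDeque command buffer disappears from RunLoop. A reduced sketch of the new wiring with placeholder generics, standing in for libsigner::Signer::new(runloop, ev, res_send) rather than reproducing the actual types:

use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

// Placeholder for the running signer; `R` stands in for Vec<SignerResult>.
struct SpawnedSigner<R> {
    res_recv: Receiver<R>,
}

// Spawn a run loop that only reports results back; there is no command
// channel left to feed it.
fn spawn_signer<R: Send + 'static>(
    run_loop: impl FnOnce(Sender<R>) + Send + 'static,
) -> SpawnedSigner<R> {
    let (res_send, res_recv) = channel();
    thread::spawn(move || run_loop(res_send));
    SpawnedSigner { res_recv }
}
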
+use std::fmt::Debug; use std::sync::mpsc::Sender; use std::time::Duration; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; -use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop}; +use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; -use wsts::common::MerkleRoot; -use wsts::state_machine::OperationResult; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; @@ -55,14 +52,6 @@ pub struct StateInfo { pub enum SignerResult { /// The signer has received a status check StatusCheck(StateInfo), - /// The signer has completed an operation - OperationResult(OperationResult), -} - -impl From for SignerResult { - fn from(result: OperationResult) -> Self { - SignerResult::OperationResult(result) - } } impl From for SignerResult { @@ -71,31 +60,6 @@ impl From for SignerResult { } } -/// Which signer operation to perform -#[derive(PartialEq, Clone, Debug)] -pub enum SignerCommand { - /// Generate a DKG aggregate public key - Dkg, - /// Sign a message - Sign { - /// The block to sign over - block_proposal: BlockProposal, - /// Whether to make a taproot signature - is_taproot: bool, - /// Taproot merkle root - merkle_root: Option, - }, -} - -/// Which operation to perform -#[derive(PartialEq, Clone, Debug)] -pub struct RunLoopCommand { - /// Which signer operation to perform - pub command: SignerCommand, - /// The reward cycle we are performing the operation for - pub reward_cycle: u64, -} - /// The runloop state #[derive(PartialEq, Eq, Debug, Clone, Copy)] pub enum State { @@ -213,8 +177,6 @@ where pub stacks_signers: HashMap>, /// The state of the runloop pub state: State, - /// The commands received thus far - pub commands: VecDeque, /// The current reward cycle info. Only None if the runloop is uninitialized pub current_reward_cycle_info: Option, /// Cache sortitin data from `stacks-node` @@ -230,7 +192,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo stacks_client, stacks_signers: HashMap::with_capacity(2), state: State::Uninitialized, - commands: VecDeque::new(), current_reward_cycle_info: None, sortition_state: None, } @@ -492,7 +453,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo } impl, T: StacksMessageCodec + Clone + Send + Debug> - SignerRunLoop, RunLoopCommand, T> for RunLoop + SignerRunLoop, T> for RunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; @@ -505,11 +466,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> fn run_one_pass( &mut self, event: Option>, - cmd: Option, res: &Sender>, ) -> Option> { debug!( - "Running one pass for the signer. state={:?}, cmd={cmd:?}, event={event:?}", + "Running one pass for the signer. 
state={:?}, event={event:?}", self.state ); // This is the only event that we respond to from the outer signer runloop @@ -525,9 +485,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> } } - if let Some(cmd) = cmd { - self.commands.push_back(cmd); - } if self.state == State::Uninitialized { if let Err(e) = self.initialize_runloop() { error!("Failed to initialize signer runloop: {e}."); @@ -560,12 +517,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> res, current_reward_cycle, ); - // After processing event, run the next command for each signer - signer.process_command( - &self.stacks_client, - current_reward_cycle, - self.commands.pop_front(), - ); } if self.state == State::NoRegisteredSigners && event.is_some() { let next_reward_cycle = current_reward_cycle.saturating_add(1); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1b8a57abbb9..06b9d703c37 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -36,7 +36,6 @@ use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, define_u8_enum, error}; -use wsts::net::NonceRequest; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] /// A vote across the signer set for a block @@ -67,21 +66,6 @@ impl StacksMessageCodec for NakamotoBlockVote { } } -#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] -/// Information specific to Signer V1 -pub struct BlockInfoV1 { - /// The associated packet nonce request if we have one - pub nonce_request: Option, -} - -impl From for BlockInfoV1 { - fn from(value: NonceRequest) -> Self { - Self { - nonce_request: Some(value), - } - } -} - #[derive(Serialize, Deserialize, Debug, PartialEq, Default)] /// Store extra version-specific info in `BlockInfo` pub enum ExtraBlockInfo { @@ -90,28 +74,6 @@ pub enum ExtraBlockInfo { None, /// Extra data for Signer V0 V0, - /// Extra data for Signer V1 - V1(BlockInfoV1), -} - -impl ExtraBlockInfo { - /// Take `nonce_request` if it exists - pub fn take_nonce_request(&mut self) -> Option { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, - ExtraBlockInfo::V1(v1) => v1.nonce_request.take(), - } - } - /// Set `nonce_request` if it exists - pub fn set_nonce_request(&mut self, value: NonceRequest) -> Result<(), &str> { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), - ExtraBlockInfo::V1(v1) => { - v1.nonce_request = Some(value); - Ok(()) - } - } - } } define_u8_enum!( @@ -217,14 +179,6 @@ impl From for BlockInfo { } } impl BlockInfo { - /// Create a new BlockInfo with an associated nonce request packet - pub fn new_v1_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { - let mut block_info = BlockInfo::from(block_proposal); - block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::from(nonce_request)); - block_info.signed_over = true; - block_info - } - /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. 
pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { @@ -283,7 +237,10 @@ impl BlockInfo { ) } BlockState::LocallyRejected => { - matches!(prev_state, BlockState::Unprocessed) + matches!( + prev_state, + BlockState::Unprocessed | BlockState::LocallyRejected + ) } BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 7c94ec908c2..d6eaa37af8b 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -39,7 +39,7 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; -use crate::runloop::{RunLoopCommand, SignerResult}; +use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; @@ -259,17 +259,6 @@ impl SignerTrait for Signer { } } - fn process_command( - &mut self, - _stacks_client: &StacksClient, - _current_reward_cycle: u64, - command: Option, - ) { - if let Some(command) = command { - warn!("{self}: Received a command: {command:?}. V0 Signers do not support commands. Ignoring...") - } - } - fn has_unprocessed_blocks(&self) -> bool { self.signer_db .has_unprocessed_blocks(self.reward_cycle) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 1d267b047f9..4d99b538210 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -487,7 +487,6 @@ impl NakamotoBlockBuilder { tenure_info: NakamotoTenureInfo, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - signer_transactions: Vec, signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { let (tip_consensus_hash, tip_block_hash, tip_height) = ( @@ -522,14 +521,13 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - let mut initial_txs: Vec<_> = [ + let initial_txs: Vec<_> = [ tenure_info.tenure_change_tx.clone(), tenure_info.coinbase_tx.clone(), ] .into_iter() .filter_map(|x| x) .collect(); - initial_txs.extend(signer_transactions); // TODO: update this mempool check to prioritize signer vote transactions over other transactions let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a2f949a8ccc..4d9d4f968e6 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -13,17 +13,13 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
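The signerdb.rs hunk above widens one arm of the block-state transition check: a block already marked LocallyRejected may now be re-marked LocallyRejected, where previously only Unprocessed could move into that state. A self-contained sketch of the predicate as it reads after the change; the enum variants are copied from the diff, but the free-function form is illustrative (the repo implements this as a method):

#[derive(Clone, Copy, PartialEq, Debug)]
enum BlockState {
    Unprocessed,
    LocallyAccepted,
    LocallyRejected,
    GloballyAccepted,
    GloballyRejected,
}

// Returns true if `prev_state -> next` is a permitted transition.
fn valid_transition(prev_state: BlockState, next: BlockState) -> bool {
    match next {
        // Re-recording a local rejection is now a legal no-op.
        BlockState::LocallyRejected => matches!(
            prev_state,
            BlockState::Unprocessed | BlockState::LocallyRejected
        ),
        BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected),
        BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted),
        // Remaining arms elided; see the surrounding hunk for LocallyAccepted.
        _ => true,
    }
}
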
-use std::collections::HashMap; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; -use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use hashbrown::HashSet; -use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; +use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; use rand::{thread_rng, Rng}; use stacks::burnchains::Burnchain; @@ -32,7 +28,6 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::OnChainRewardSetProvider; use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; @@ -46,7 +41,6 @@ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::secp256k1::MessageSignature; -use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; @@ -331,7 +325,7 @@ impl BlockMinerThread { } } - match self.mine_block(&stackerdbs) { + match self.mine_block() { Ok(x) => { if !self.validate_timestamp(&x)? { info!("Block mined too quickly. 
Will try again."; @@ -591,125 +585,6 @@ impl BlockMinerThread { return Ok((reward_set, signature)); } - fn get_stackerdb_contract_and_slots( - &self, - stackerdbs: &StackerDBs, - msg_id: &MessageSlotID, - reward_cycle: u64, - ) -> Result<(QualifiedContractIdentifier, HashMap), NakamotoNodeError> { - let stackerdb_contracts = stackerdbs - .get_stackerdb_contract_ids() - .expect("FATAL: could not get the stacker DB contract ids"); - - let signers_contract_id = - msg_id.stacker_db_contract(self.config.is_mainnet(), reward_cycle); - if !stackerdb_contracts.contains(&signers_contract_id) { - return Err(NakamotoNodeError::SignerSignatureError( - "No signers contract found, cannot wait for signers".into(), - )); - }; - // Get the slots for every signer - let signers = stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB"); - let mut slot_ids_addresses = HashMap::with_capacity(signers.len()); - for (slot_id, address) in stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB") - .into_iter() - .enumerate() - { - slot_ids_addresses.insert( - u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range"), - address, - ); - } - Ok((signers_contract_id, slot_ids_addresses)) - } - - fn get_signer_transactions( - &self, - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - stackerdbs: &StackerDBs, - ) -> Result, NakamotoNodeError> { - let next_reward_cycle = self - .burnchain - .block_height_to_reward_cycle(self.burn_block.block_height) - .expect("FATAL: no reward cycle for burn block") - .wrapping_add(1); - let (signers_contract_id, slot_ids_addresses) = self.get_stackerdb_contract_and_slots( - stackerdbs, - &MessageSlotID::Transactions, - next_reward_cycle, - )?; - let slot_ids = slot_ids_addresses.keys().cloned().collect::>(); - let addresses = slot_ids_addresses.values().cloned().collect::>(); - // Get the transactions from the signers for the next block - let signer_chunks = stackerdbs - .get_latest_chunks(&signers_contract_id, &slot_ids) - .expect("FATAL: could not get latest chunks from stacker DB"); - let signer_messages: Vec<(u32, SignerMessageV1)> = slot_ids - .iter() - .zip(signer_chunks.into_iter()) - .filter_map(|(slot_id, chunk)| { - chunk.and_then(|chunk| { - read_next::(&mut &chunk[..]) - .ok() - .map(|msg| (*slot_id, msg)) - }) - }) - .collect(); - - if signer_messages.is_empty() { - return Ok(vec![]); - } - - let (consensus_hash, block_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); - let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - - // Get all nonces for the signers from clarity DB to use to validate transactions - let account_nonces = chainstate - .with_read_only_clarity_tx( - &sortdb - .index_handle_at_block(chainstate, &stacks_block_id) - .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, - &stacks_block_id, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - addresses - .iter() - .map(|address| { - ( - address.clone(), - clarity_db - .get_account_nonce(&address.clone().into()) - .unwrap_or(0), - ) - }) - .collect::>() - }) - }, - ) - .unwrap_or_default(); - let mut filtered_transactions: HashMap = HashMap::new(); - for (_slot, signer_message) in signer_messages { - match signer_message { - SignerMessageV1::Transactions(transactions) => { - NakamotoSigners::update_filtered_transactions( - &mut filtered_transactions, - &account_nonces, - self.config.is_mainnet(), - 
transactions, - ) - } - _ => {} // Any other message is ignored - } - } - Ok(filtered_transactions.into_values().collect()) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { @@ -834,7 +709,7 @@ impl BlockMinerThread { &sort_db, &self.burn_block, &stackerdbs, - SignerMessageV0::BlockPushed(block), + SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, &mut miners_session, @@ -1117,7 +992,7 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. - fn mine_block(&mut self, stackerdbs: &StackerDBs) -> Result { + fn mine_block(&mut self) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); let burn_db_path = self.config.get_burn_db_file_path(); @@ -1165,9 +1040,6 @@ impl BlockMinerThread { parent_block_info.stacks_parent_header.microblock_tail = None; - let signer_transactions = - self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); // build the block itself @@ -1186,7 +1058,6 @@ impl BlockMinerThread { // we'll invoke the event dispatcher ourselves so that it calculates the // correct signer_sighash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), - signer_transactions, signer_bitvec_len.unwrap_or(0), ) .map_err(|e| { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 91f9bc3282d..4248e72145a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -302,19 +302,12 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - panic!("Recieved an operation result."); - } - SignerResult::StatusCheck(state_info) => { - output.push(Some(state_info)); - } - } + output.push(Some(state_info)); } output } From 6f60813c91468dce9971a4a859c98f86dc31a1e0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 25 Sep 2024 12:31:14 -0500 Subject: [PATCH 692/910] use better sqlite column affinity --- stackslib/src/core/mempool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 61306a9764a..bf2b5aff57c 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -806,7 +806,7 @@ const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ r#" -- ALLOW NULL - ALTER TABLE mempool ADD COLUMN time_estimate_ms NUMBER; + ALTER TABLE mempool ADD COLUMN time_estimate_ms INTEGER; "#, r#" INSERT INTO schema_version (version) VALUES (7) From 77fc9d8aaebf86b566eb4aa2724e95674191c9e2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:32:05 -0700 Subject: [PATCH 693/910] Remove wsts from stackslib Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/src/events.rs | 2 +- libsigner/src/v0/messages.rs | 4 +- stackslib/Cargo.toml | 1 - .../burn/operations/vote_for_aggregate_key.rs | 11 +- .../chainstate/nakamoto/coordinator/tests.rs | 1 - stackslib/src/chainstate/nakamoto/mod.rs | 7 +- .../src/chainstate/nakamoto/signer_set.rs | 13 +- stackslib/src/chainstate/nakamoto/tenure.rs | 5 +- .../src/chainstate/nakamoto/test_signers.rs | 177 +++--------------- 
.../src/chainstate/nakamoto/tests/mod.rs | 80 ++++---- .../src/chainstate/nakamoto/tests/node.rs | 2 - stackslib/src/chainstate/stacks/boot/mod.rs | 25 +-- .../src/chainstate/stacks/boot/pox_4_tests.rs | 42 +++-- stackslib/src/chainstate/stacks/mod.rs | 43 ----- .../src/chainstate/stacks/transaction.rs | 43 ----- .../nakamoto/download_state_machine.rs | 1 - stackslib/src/net/download/nakamoto/mod.rs | 1 - stackslib/src/net/download/nakamoto/tenure.rs | 1 - stackslib/src/net/mod.rs | 10 +- stackslib/src/net/tests/download/nakamoto.rs | 5 +- stackslib/src/net/tests/mod.rs | 1 - .../src/tests/nakamoto_integrations.rs | 15 +- 23 files changed, 115 insertions(+), 376 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d16284fa5ca..7e0f1ab61dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3523,7 +3523,6 @@ dependencies = [ "time 0.2.27", "url", "winapi 0.3.9", - "wsts", ] [[package]] diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 4fb6d7a5070..70e7853d652 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -25,7 +25,7 @@ use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; -use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; +use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7209398c1ce..47d317992d6 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -887,8 +887,8 @@ impl From for SignerMessage { mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::{ - ThresholdSignature, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionVersion, }; use blockstack_lib::util_lib::strings::StacksString; use clarity::consts::CHAIN_ID_MAINNET; diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index d04fc3b1af3..edd58c61610 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -56,7 +56,6 @@ stacks-common = { path = "../stacks-common" } pox-locking = { path = "../pox-locking" } libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" -wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs index 3933eacaa64..648859abc6d 100644 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs @@ -21,7 +21,6 @@ use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::secp256k1::Secp256k1PublicKey; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::bitcoin::bits::parse_script; use crate::burnchains::bitcoin::{BitcoinTxInput, BitcoinTxInputStructured}; @@ -175,26 +174,18 @@ impl VoteForAggregateKeyOp { /// Check 
the payload of a vote-for-aggregate-key burn op. /// Both `signer_key` and `aggregate_key` are checked for validity against - /// `Secp256k1PublicKey` from `stacks_common` as well as `Point` from wsts. + /// `Secp256k1PublicKey` from `stacks_common` pub fn check(&self) -> Result<(), op_error> { // Check to see if the aggregate key is valid let aggregate_key_bytes = self.aggregate_key.as_bytes(); Secp256k1PublicKey::from_slice(aggregate_key_bytes) .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - let compressed = Compressed::try_from(aggregate_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - // Check to see if the signer key is valid let signer_key_bytes = self.signer_key.as_bytes(); Secp256k1PublicKey::from_slice(signer_key_bytes) .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - let compressed = Compressed::try_from(signer_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Ok(()) } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index cf016adb7d0..0a59c1a67bb 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -34,7 +34,6 @@ use stacks_common::types::{Address, StacksEpoch, StacksEpochId, StacksPublicKeyB use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e97fefafffb..6a850e6d351 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -53,7 +53,6 @@ use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; -use wsts::curve::point::Point; use self::signer_set::SignerCalculation; use super::burn::db::sortdb::{ @@ -74,7 +73,7 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, ThresholdSignature, TransactionPayload, + TenureChangeError, TenureChangePayload, TransactionPayload, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -4499,8 +4498,8 @@ impl NakamotoChainState { /// Boot code instantiation for the aggregate public key. 
/// TODO: This should be removed once it's possible for stackers to vote on the aggregate /// public key - pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: &Point) { - let agg_pub_key = to_hex(&apk.compress().data); + pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: Vec) { + let agg_pub_key = to_hex(&apk); let contract_content = format!( "(define-read-only ({}) 0x{})", BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index d7eaad51b56..38e76f7e512 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -47,7 +47,6 @@ use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{ @@ -73,8 +72,8 @@ use crate::chainstate::stacks::db::{ use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeCause, TenureChangeError, TenureChangePayload, ThresholdSignature, - TransactionPayload, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, + TenureChangeCause, TenureChangeError, TenureChangePayload, TransactionPayload, + MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use crate::clarity_vm::clarity::{ @@ -101,7 +100,7 @@ pub struct SignerCalculation { pub struct AggregateKeyVoteParams { pub signer_index: u64, - pub aggregate_key: Point, + pub aggregate_key: Vec, pub voting_round: u64, pub reward_cycle: u64, } @@ -547,10 +546,8 @@ impl NakamotoSigners { } let signer_index_value = payload.function_args.first()?; let signer_index = u64::try_from(signer_index_value.clone().expect_u128().ok()?).ok()?; - let point_value = payload.function_args.get(1)?; - let point_bytes = point_value.clone().expect_buff(33).ok()?; - let compressed_data = Compressed::try_from(point_bytes.as_slice()).ok()?; - let aggregate_key = Point::try_from(&compressed_data).ok()?; + let aggregate_key_value = payload.function_args.get(1)?; + let aggregate_key = aggregate_key_value.clone().expect_buff(33).ok()?; let round_value = payload.function_args.get(2)?; let voting_round = u64::try_from(round_value.clone().expect_u128().ok()?).ok()?; let reward_cycle = diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 059da96b7a4..4b7734653c1 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -87,7 +87,6 @@ use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; -use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{ @@ -108,8 +107,8 @@ use crate::chainstate::stacks::db::{ use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, 
StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeCause, TenureChangeError, TenureChangePayload, ThresholdSignature, - TransactionPayload, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, + TenureChangeCause, TenureChangeError, TenureChangePayload, TransactionPayload, + MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 4ab76137510..6fd559da697 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -25,8 +25,9 @@ use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; use hashbrown::HashMap; +use rand::distributions::Standard; use rand::seq::SliceRandom; -use rand::{CryptoRng, RngCore, SeedableRng}; +use rand::{CryptoRng, Rng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; @@ -36,8 +37,6 @@ use stacks_common::types::chainstate::{ use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use wsts::curve::point::Point; -use wsts::traits::Aggregator; use self::boot::RewardSet; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; @@ -68,75 +67,32 @@ use crate::util_lib::db::Error as db_error; #[derive(Debug, Clone, PartialEq)] pub struct TestSigners { - /// The parties that will sign the blocks - pub signer_parties: Vec, - /// The commitments to the polynomials for the aggregate public key - pub poly_commitments: HashMap, - /// The aggregate public key - pub aggregate_public_key: Point, - /// The total number of key ids distributed among signer_parties - pub num_keys: u32, - /// The number of vote shares required to sign a block + /// The number of signatures required to validate a block pub threshold: u32, - /// The key ids distributed among signer_parties - pub party_key_ids: Vec>, - /// The cycle for which the signers are valid - pub cycle: u64, /// The signer's private keys pub signer_keys: Vec, + /// The aggregate public key + pub aggregate_public_key: Vec, + /// The cycle for which the aggregate public key was generated + pub cycle: u64, } impl Default for TestSigners { fn default() -> Self { - let mut rng = rand_core::OsRng::default(); - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signer_parties: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + let num_signers = 5; + let threshold = 5 * 7 / 10; let mut signer_keys = Vec::::new(); - for _ in 0..num_keys { + for _ in 0..num_signers { signer_keys.push(Secp256k1PrivateKey::default()); } - - // Generate an aggregate public key - let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret 
errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); - sig_aggregator - .init(&poly_commitments) - .expect("aggregator init failed"); - let aggregate_public_key = sig_aggregator.poly[0]; Self { - signer_parties, - aggregate_public_key, - poly_commitments, - num_keys, threshold, - party_key_ids, - cycle: 0, signer_keys, + aggregate_public_key, + cycle: 0, } } } @@ -149,50 +105,15 @@ impl TestSigners { /// Internal function to generate aggregate key information fn default_with_signers(signer_keys: Vec) -> Self { - let mut rng = rand_core::OsRng::default(); - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signer_parties: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); - - // Generate an aggregate public key - let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); - sig_aggregator - .init(&poly_commitments) - .expect("aggregator init failed"); - let aggregate_public_key = sig_aggregator.poly[0]; + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + let num_signers = signer_keys.len(); + let threshold = u32::try_from(num_signers * 7 / 10).unwrap(); Self { - signer_parties, - aggregate_public_key, - poly_commitments, - num_keys, threshold, - party_key_ids, - cycle: 0, signer_keys, + aggregate_public_key, + cycle: 0, } } @@ -278,25 +199,6 @@ impl TestSigners { keys.iter().map(|key| key.sign(&msg).unwrap()).collect() } - /// Sign a Nakamoto block using the aggregate key. - /// NB: this function is current unused. - #[allow(dead_code)] - fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { - let mut rng = rand_core::OsRng::default(); - let msg = block.header.signer_signature_hash().0; - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); - - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - let signature = sig_aggregator - .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); - ThresholdSignature(signature) - } - /// Generate an list of signatures for a block. Only /// signers in the reward set will be included. 
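Throughout this commit, the WSTS DKG ceremony in TestSigners is replaced by 33 bytes drawn from rand's Standard distribution and treated as an opaque aggregate-key buffer. The recurring one-liner, pulled out as a sketch for clarity (the function name is illustrative):

use rand::distributions::Standard;
use rand::Rng;

// Draw 33 arbitrary bytes sized like a compressed key. Note these bytes are
// not guaranteed to be a valid curve point; the tests only need a
// correctly sized placeholder now that nothing verifies a wsts signature.
fn random_aggregate_key_bytes() -> Vec<u8> {
    rand::thread_rng().sample_iter(Standard).take(33).collect()
}
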
pub fn generate_ordered_signatures( @@ -353,45 +255,16 @@ impl TestSigners { } // Generate and assign a new aggregate public key - pub fn generate_aggregate_key(&mut self, cycle: u64) -> Point { + pub fn generate_aggregate_key(&mut self, cycle: u64) -> Vec { // If the key is already generated for this cycle, return it if cycle == self.cycle { debug!("Returning cached aggregate key for cycle {}", cycle); return self.aggregate_public_key.clone(); } - debug!("Generating aggregate key for cycle {}", cycle); - let mut rng = ChaCha20Rng::seed_from_u64(cycle); - let num_parties = self.party_key_ids.len().try_into().unwrap(); - // Create the parties - self.signer_parties = self - .party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - self.num_keys, - self.threshold, - &mut rng, - ) - }) - .collect(); - self.poly_commitments = - match wsts::v2::test_helpers::dkg(&mut self.signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - self.aggregate_public_key = sig_aggregator.poly[0]; - self.cycle = cycle; - self.aggregate_public_key.clone() + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + self.aggregate_public_key = aggregate_public_key.clone(); + aggregate_public_key } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 722cfa541af..ea163730ecf 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -25,7 +25,8 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; use clarity::vm::Value; use libstackerdb::StackerDBChunkData; -use rand::{thread_rng, RngCore}; +use rand::distributions::Standard; +use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::ToSql; use rusqlite::{params, Connection}; use stacks_common::address::AddressHashMode; @@ -45,8 +46,6 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use stdext::prelude::Integer; use stx_genesis::GenesisData; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; use crate::burnchains::{BurnchainSigner, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::tests::make_fork_run; @@ -83,9 +82,9 @@ use crate::chainstate::stacks::db::{ }; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksTransaction, - StacksTransactionSigner, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, + TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use crate::core; use crate::core::{StacksEpochExtension, STACKS_EPOCH_3_0_MARKER}; @@ -2170,9 +2169,8 @@ fn parse_vote_for_aggregate_public_key_valid() { let signer_index = 
thread_rng().next_u64(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key.clone()).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2181,7 +2179,7 @@ fn parse_vote_for_aggregate_public_key_valid() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2201,7 +2199,7 @@ fn parse_vote_for_aggregate_public_key_valid() { }; let params = NakamotoSigners::parse_vote_for_aggregate_public_key(&valid_tx).unwrap(); assert_eq!(params.signer_index, signer_index); - assert_eq!(params.aggregate_key, point); + assert_eq!(params.aggregate_key, aggregate_key); assert_eq!(params.voting_round, round); assert_eq!(params.reward_cycle, reward_cycle); } @@ -2217,10 +2215,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2229,7 +2225,7 @@ fn parse_vote_for_aggregate_public_key_invalid() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2297,8 +2293,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { contract_name: contract_name.clone(), function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ], @@ -2340,8 +2336,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), reward_cycle_arg.clone(), ], }), @@ -2361,9 +2357,9 @@ fn parse_vote_for_aggregate_public_key_invalid() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), ], }), }; @@ -2403,9 +2399,8 @@ fn valid_vote_transaction() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2414,7 +2409,7 @@ fn valid_vote_transaction() { let 
valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2454,9 +2449,8 @@ fn valid_vote_transaction_malformed_transactions() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2465,7 +2459,7 @@ fn valid_vote_transaction_malformed_transactions() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2566,8 +2560,8 @@ fn valid_vote_transaction_malformed_transactions() { contract_name: contract_name.clone(), function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ], @@ -2609,8 +2603,8 @@ fn valid_vote_transaction_malformed_transactions() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), reward_cycle_arg.clone(), ], }), @@ -2630,9 +2624,9 @@ fn valid_vote_transaction_malformed_transactions() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), ], }), }; @@ -2689,9 +2683,8 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2700,7 +2693,7 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2818,9 +2811,8 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2829,7 +2821,7 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + 
aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d23d608ec7d..e7d6fef03f9 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -35,8 +35,6 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use wsts::curve::point::Point; -use wsts::traits::Aggregator; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::tests::*; diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 88ecc8887e2..8562449dd30 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -44,8 +44,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, StacksAddress, StacksBlockId, StacksPublicKey, }; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; use crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::{Address, Burnchain, PoxConstants}; @@ -1350,7 +1348,7 @@ impl StacksChainState { sortdb: &SortitionDB, block_id: &StacksBlockId, reward_cycle: u64, - ) -> Result<Option<Point>, Error> { + ) -> Result<Option<Vec<u8>>, Error> { let aggregate_public_key_opt = self .eval_boot_code_read_only( sortdb, @@ -1367,11 +1365,7 @@ impl StacksChainState { let aggregate_public_key = match aggregate_public_key_opt { Some(value) => { // A point should have 33 bytes exactly. - let data = value.expect_buff(33)?; - let msg = - "Pox-4 signers-voting get-approved-aggregate-key returned a corrupted value."; - let compressed_data = Compressed::try_from(data.as_slice()).expect(msg); - Some(Point::try_from(&compressed_data).expect(msg)) + Some(value.expect_buff(33)?)
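[Editor's note, not part of the patch: the hunk above drops the wsts `Point` decompression and returns the raw 33-byte buffer from `expect_buff(33)` directly. A minimal sketch of how a caller could still sanity-check such a buffer as a compressed secp256k1 point, assuming the `secp256k1` crate is available; the helper name is hypothetical and not part of this change:]

```rust
use secp256k1::PublicKey;

/// Hypothetical helper: verify that a 33-byte buffer read back from the
/// signers-voting contract encodes a valid compressed secp256k1 point.
fn validate_aggregate_key(buf: &[u8]) -> Result<(), String> {
    if buf.len() != 33 {
        return Err(format!("expected 33 bytes, got {}", buf.len()));
    }
    // `from_slice` rejects byte strings that do not encode a curve point.
    PublicKey::from_slice(buf).map_err(|e| e.to_string())?;
    Ok(())
}
```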
} None => None, }; @@ -2038,13 +2032,12 @@ pub mod test { key: &StacksPrivateKey, nonce: u64, signer_index: u128, - aggregate_public_key: &Point, + aggregate_public_key: Vec<u8>, round: u128, cycle: u128, ) -> StacksTransaction { - let aggregate_public_key_val = - Value::buff_from(aggregate_public_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key_val = Value::buff_from(aggregate_public_key) + .expect("Failed to serialize aggregate public key"); make_signers_vote_for_aggregate_public_key_value( key, nonce, @@ -2085,7 +2078,7 @@ pub mod test { peer: &mut TestPeer<'_>, latest_block_id: StacksBlockId, reward_cycle: u128, - ) -> Option<Point> { + ) -> Option<Vec<u8>> { let key_opt = readonly_call( peer, &latest_block_id, ) .expect_optional() .unwrap(); - key_opt.map(|key_value| { - let data = key_value.expect_buff(33).unwrap(); - let compressed_data = Compressed::try_from(data.as_slice()).unwrap(); - Point::try_from(&compressed_data).unwrap() - }) + key_opt.map(|key_value| key_value.expect_buff(33).unwrap()) } pub fn make_pox_2_increase( diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 8fee5bd5b39..d1cceae7cf8 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -44,7 +44,6 @@ use stacks_common::types::{Address, PrivateKey}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stdext::num::integer::Integer; -use wsts::curve::point::{Compressed, Point}; use super::test::*; use super::RawRewardSetEntry; @@ -7154,7 +7153,7 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7164,7 +7163,7 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7185,7 +7184,7 @@ fn test_scenario_one(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7542,7 +7541,7 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7552,7 +7551,7 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7562,7 +7561,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 3, next_reward_cycle, ); @@ -7572,7 +7571,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -8289,7 +8288,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), +
peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8299,7 +8298,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8309,7 +8308,7 @@ fn test_scenario_four(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8334,7 +8333,7 @@ fn test_scenario_four(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8388,7 +8387,10 @@ fn test_scenario_four(use_nakamoto: bool) { let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.aggregate_public_key.clone().unwrap() + ); // Alice stack-extend err tx let alice_extend_err = make_pox_4_extend( @@ -8422,7 +8424,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, 7, ); @@ -9714,7 +9716,7 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9722,7 +9724,7 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9730,7 +9732,7 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9922,7 +9924,7 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9930,7 +9932,7 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9938,7 +9940,7 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 35c82f9b94e..127751abbbd 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -734,49 +734,6 @@ pub enum TenureChangeError { NotNakamoto, } -/// Schnorr threshold signature using types from `wsts` -#[derive(Debug, Clone, PartialEq)] -pub struct ThresholdSignature(pub wsts::common::Signature); -impl FromSql for ThresholdSignature { - fn column_result(value: ValueRef) -> FromSqlResult<Self> { - let hex_str = value.as_str()?; - let bytes = hex_bytes(&hex_str).map_err(|_| FromSqlError::InvalidType)?; - let ts = ThresholdSignature::consensus_deserialize(&mut &bytes[..]) .map_err(|_|
FromSqlError::InvalidType)?; - Ok(ts) - } -} - -impl fmt::Display for ThresholdSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - to_hex(&self.serialize_to_vec()).fmt(f) - } -} - -impl ToSql for ThresholdSignature { - fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> { - let bytes = self.serialize_to_vec(); - let hex_str = to_hex(&bytes); - Ok(hex_str.into()) - } -} - -impl serde::Serialize for ThresholdSignature { - fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { - let bytes = self.serialize_to_vec(); - s.serialize_str(&to_hex(&bytes)) - } -} - -impl<'de> serde::Deserialize<'de> for ThresholdSignature { - fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> { - let hex_str = String::deserialize(d)?; - let bytes = hex_bytes(&hex_str).map_err(serde::de::Error::custom)?; - ThresholdSignature::consensus_deserialize(&mut bytes.as_slice()) - .map_err(serde::de::Error::custom) - } -} - /// A transaction from Stackers to signal new mining tenure #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TenureChangePayload { diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 2204f57a255..c45b212b689 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -28,9 +28,6 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; -use wsts::common::Signature as Secp256k1Signature; -use wsts::curve::point::{Compressed as Secp256k1Compressed, Point as Secp256k1Point}; -use wsts::curve::scalar::Scalar as Secp256k1Scalar; use crate::burnchains::Txid; use crate::chainstate::stacks::{TransactionPayloadID, *}; @@ -154,46 +151,6 @@ impl StacksMessageCodec for TenureChangeCause { } } -impl StacksMessageCodec for ThresholdSignature { - fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> { - let compressed = self.0.R.compress(); - let bytes = compressed.as_bytes(); - fd.write_all(bytes).map_err(CodecError::WriteError)?; - write_next(fd, &self.0.z.to_bytes())?; - Ok(()) - } - - fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, codec_error> { - // Read curve point - let mut buf = [0u8; 33]; - fd.read_exact(&mut buf).map_err(CodecError::ReadError)?; - let R = Secp256k1Point::try_from(&Secp256k1Compressed::from(buf)) - .map_err(|_| CodecError::DeserializeError("Failed to read curve point".into()))?; - - // Read scalar - let mut buf = [0u8; 32]; - fd.read_exact(&mut buf).map_err(CodecError::ReadError)?; - let z = Secp256k1Scalar::from(buf); - - Ok(Self(Secp256k1Signature { R, z })) - } -} - -impl ThresholdSignature { - pub fn verify(&self, public_key: &Secp256k1Point, msg: &[u8]) -> bool { - self.0.verify(public_key, msg) - } - - /// Create an empty/null signature. This is not valid data, but it is used - /// as a placeholder in the header during mining. 
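[Editor's note, not part of the patch: the removed impls above all follow the same pattern, consensus-serialize the value to bytes, then hex-encode for SQLite and JSON storage. A standalone sketch of that round trip, with a hypothetical byte-wrapper type standing in for the removed `ThresholdSignature`:]

```rust
use stacks_common::util::hash::{hex_bytes, to_hex};

/// Hypothetical stand-in for the removed type: a bag of consensus bytes
/// persisted as a hex string.
struct SigBytes(Vec<u8>);

impl SigBytes {
    /// Encode for storage, mirroring the removed ToSql/Serialize impls.
    fn to_hex_string(&self) -> String {
        to_hex(&self.0)
    }

    /// Decode from storage, mirroring the removed FromSql/Deserialize impls.
    fn from_hex_string(s: &str) -> Result<Self, String> {
        hex_bytes(s).map(SigBytes).map_err(|e| format!("{e:?}"))
    }
}
```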
- pub fn empty() -> Self { - Self(Secp256k1Signature { - R: Secp256k1Point::G(), - z: Secp256k1Scalar::new(), - }) - } -} - impl StacksMessageCodec for TenureChangePayload { fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.tenure_consensus_hash)?; diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index a2f4fe5dc52..132a03f34d0 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 5f03c3811aa..eb43d8aecd9 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -127,7 +127,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 80065dc0c6b..98f102969a1 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 54661a2f092..628243d53ed 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1749,7 +1749,6 @@ pub mod test { use stacks_common::util::secp256k1::*; use stacks_common::util::uint::*; use stacks_common::util::vrf::*; - use wsts::curve::point::Point; use {mio, rand}; use self::nakamoto::test_signers::TestSigners; @@ -2099,7 +2098,7 @@ pub mod test { pub services: u16, /// aggregate public key to use /// (NOTE: will be used post-Nakamoto) - pub aggregate_public_key: Option<Point>, + pub aggregate_public_key: Option<Vec<u8>>, pub test_stackers: Option<Vec<TestStacker>>, pub test_signers: Option<TestSigners>, } @@ -2457,11 +2456,8 @@ pub mod test { let mut receipts = vec![]; if let Some(agg_pub_key) = agg_pub_key_opt { - debug!( - "Setting aggregate public key to {}", - &to_hex(&agg_pub_key.compress().data) - ); - NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key); + debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); } else { debug!("Not setting aggregate public key"); } diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a6307b324b0..cc90d900110 --- 
a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -36,9 +36,8 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionVersion, + CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::net::api::gettenureinfo::RPCGetTenureInfo; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index a74cb0fd2cc..07227c930e4 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -35,7 +35,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c975bfebf96..3c238153ac0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -871,9 +871,8 @@ pub fn boot_to_epoch_3( if let Some(signers) = self_signing { // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); // Vote on the aggregate public key @@ -1026,9 +1025,8 @@ pub fn boot_to_pre_epoch_3_boundary( if let Some(signers) = self_signing { // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); // Vote on the aggregate public key @@ -1183,9 +1181,8 @@ fn signer_vote_if_needed( // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); for (i, signer_sk) in signer_sks.iter().enumerate() { let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; From 
f909a4559918fe568bf3299d484fa1114a331971 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:36:03 -0700 Subject: [PATCH 694/910] Remove wsts from libsigner Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/Cargo.toml | 1 - libsigner/src/events.rs | 6 ------ libsigner/src/tests/mod.rs | 1 - 4 files changed, 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e0f1ab61dd..fdac038acd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1926,7 +1926,6 @@ dependencies = [ "stackslib", "thiserror", "tiny_http", - "wsts", ] [[package]] diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 7da9801674a..63241d32565 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -32,7 +32,6 @@ stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} thiserror = "1.0" tiny_http = "0.12" -wsts = { workspace = true } [dev-dependencies] mutants = "0.0.3" diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 70e7853d652..1de0e34f090 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -48,12 +48,6 @@ use stacks_common::util::HexError; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; -use wsts::common::Signature; -use wsts::net::{ - DkgBegin, DkgEnd, DkgEndBegin, DkgPrivateBegin, DkgPrivateShares, DkgPublicShares, DkgStatus, - Message, NonceRequest, NonceResponse, Packet, SignatureShareRequest, SignatureShareResponse, -}; -use wsts::state_machine::signer; use crate::http::{decode_http_body, decode_http_request}; use crate::EventError; diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index f0361592ba2..8ef6d38eeee 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -40,7 +40,6 @@ use stacks_common::codec::{ }; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; -use wsts::net::{DkgBegin, Packet}; use crate::events::{SignerEvent, SignerEventTrait}; use crate::v0::messages::{BlockRejection, SignerMessage}; From 27034e3d2ff6bf805a5ea042a7035cc0dd1865eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:38:24 -0700 Subject: [PATCH 695/910] Remove wsts from stacks-common and stacks-core Signed-off-by: Jacinta Ferrant --- Cargo.lock | 470 +--------------------------- Cargo.toml | 1 - stacks-common/Cargo.toml | 1 - stacks-common/src/util/secp256k1.rs | 82 ----- 4 files changed, 14 insertions(+), 540 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fdac038acd0..850de299020 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -32,16 +32,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array 0.14.7", -] - [[package]] name = "aes" version = "0.6.0" @@ -50,18 +40,7 @@ checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" dependencies = [ "aes-soft", "aesni", - "cipher 0.2.5", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.4.4", - "cpufeatures", + "cipher", ] [[package]] @@ -70,25 +49,11 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" dependencies = [ - "aead 0.3.2", - "aes 0.6.0", - "cipher 0.2.5", - "ctr 0.6.0", - "ghash 0.3.1", - "subtle", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead 0.5.2", - "aes 0.8.4", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] @@ -98,7 +63,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" dependencies = [ - "cipher 0.2.5", + "cipher", "opaque-debug", ] @@ -108,7 +73,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" dependencies = [ - "cipher 0.2.5", + "cipher", "opaque-debug", ] @@ -208,12 +173,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - [[package]] name = "ascii" version = "1.1.0" @@ -504,18 +463,6 @@ version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "block-buffer" version = "0.9.0" @@ -550,33 +497,12 @@ dependencies = [ "tracing", ] -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bs58" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" -dependencies = [ - "tinyvec", -] - [[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - [[package]] name = "byteorder" version = "1.5.0" @@ -645,16 +571,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - [[package]] name = "clap" version = "2.34.0" @@ -763,7 +679,7 @@ version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ - "aes-gcm 0.8.0", + "aes-gcm", "base64 0.13.1", "hkdf", "hmac", @@ -875,12 +791,6 @@ version = "0.8.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "crypto-common" version = "0.1.6" @@ -888,7 +798,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", "typenum", ] @@ -929,16 +838,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" dependencies = [ - "cipher 0.2.5", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1213,18 +1113,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1256,12 +1144,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - [[package]] name = "futures" version = "0.3.30" @@ -1433,17 +1315,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" dependencies = [ "opaque-debug", - "polyval 0.4.5", -] - -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -1497,7 +1369,6 @@ checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", - "serde", ] [[package]] @@ -1554,12 +1425,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - [[package]] name = "hkdf" version = "0.10.0" @@ -1580,15 +1445,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "http" version = "0.2.11" @@ -1716,26 +1572,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] 
-name = "impl-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "indexmap" version = "2.2.3" @@ -1752,15 +1588,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "instant" version = "0.1.12" @@ -2189,54 +2016,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "p256k1" -version = "7.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40a031a559eb38c35a14096f21c366254501a06d41c4b327d2a7515d713a5b7" -dependencies = [ - "bitvec", - "bs58 0.4.0", - "cc", - "hex", - "itertools", - "num-traits", - "primitive-types", - "proc-macro2", - "quote", - "rand_core 0.6.4", - "rustfmt-wrapper", - "serde", - "sha2 0.10.8", - "syn 2.0.48", -] - -[[package]] -name = "parity-scale-codec" -version = "3.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "parking" version = "2.2.0" @@ -2396,7 +2175,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27abb6e4638dcecc65a92b50d7f1d87dd6dea987ba71db987b6bf881f4877e9d" dependencies = [ "num-traits", - "serde", ] [[package]] @@ -2407,19 +2185,7 @@ checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" dependencies = [ "cpuid-bool", "opaque-debug", - "universal-hash 0.4.0", -] - -[[package]] -name = "polyval" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -2444,26 +2210,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "primitive-types" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = 
"proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2541,12 +2287,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.7.3" @@ -2886,12 +2626,6 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2919,19 +2653,6 @@ dependencies = [ "semver 1.0.21", ] -[[package]] -name = "rustfmt-wrapper" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1adc9dfed5cc999077978cc7163b9282c5751c8d39827c4ea8c8c220ca5a440" -dependencies = [ - "serde", - "tempfile", - "thiserror", - "toml 0.8.10", - "toolchain_find", -] - [[package]] name = "rustix" version = "0.37.27" @@ -3137,15 +2858,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "serde_spanned" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" -dependencies = [ - "serde", -] - [[package]] name = "serde_stacker" version = "0.1.11" @@ -3394,7 +3106,6 @@ dependencies = [ "slog-term", "time 0.2.27", "winapi 0.3.9", - "wsts", ] [[package]] @@ -3431,7 +3142,7 @@ dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml 0.5.11", + "toml", "tracing", "tracing-subscriber", "url", @@ -3468,7 +3179,7 @@ dependencies = [ "stackslib", "thiserror", "tiny_http", - "toml 0.5.11", + "toml", "tracing", "tracing-subscriber", "url", @@ -3533,12 +3244,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "stdext" version = "0.3.2" @@ -3663,24 +3368,6 @@ dependencies = [ "libc", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" -dependencies = [ - "cfg-if 1.0.0", - "fastrand 2.0.1", - "rustix 0.38.31", - "windows-sys 0.52.0", -] - [[package]] name = "term" version = "0.7.0" @@ -3931,64 +3618,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.5", -] - -[[package]] -name = "toml_datetime" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e68c159e8f5ba8a28c4eb7b0c0c190d77bb479047ca713270048145a9ad28a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.1", -] - -[[package]] -name = "toolchain_find" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc8c9a7f0a2966e1acdaf0461023d0b01471eeead645370cf4c3f5cff153f2a" -dependencies = [ - "home", - "once_cell", - "regex", - "semver 1.0.21", - "walkdir", -] - [[package]] name = "tower-service" version = "0.3.2" @@ -4094,18 +3723,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unicase" version = "2.7.0" @@ -4152,16 +3769,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "untrusted" version = "0.7.1" @@ -4556,24 +4163,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.50.0" @@ -4594,37 +4183,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "wsts" -version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c80d57a61294350ed91e91eb20a6c34da084ec8f15d039bab79ce3efabbd1a4" -dependencies = [ - "aes-gcm 0.10.3", - "bs58 0.5.0", - "hashbrown", - "hex", - "num-traits", - "p256k1", - "polynomial", - "primitive-types", - "rand_core 0.6.4", - "serde", - "sha2 0.10.8", - "thiserror", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - [[package]] name = "zerocopy" version = "0.7.32" diff --git a/Cargo.toml b/Cargo.toml index 8ac168f1f7e..2114e23dfca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,6 @@ rand_core = "0.6" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" 
-wsts = { version = "9.0.0", default-features = false } rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } # Use a bit more than default optimization for diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 75692d83c6f..81b4326d4c1 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -31,7 +31,6 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" -wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true, optional = true } diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 034a5a4941a..c3b80acac5f 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -26,9 +26,6 @@ use secp256k1::{ use serde::de::{Deserialize, Error as de_Error}; use serde::ser::Error as ser_Error; use serde::Serialize; -use wsts::common::Signature as WSTSSignature; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; use super::hash::Sha256Sum; use crate::impl_byte_array_message_codec; @@ -713,83 +710,4 @@ mod tests { runtime_verify - runtime_recover ); } - - /* - #[test] - fn test_schnorr_signature_serde() { - use wsts::traits::Aggregator; - - // Test that an empty conversion fails. - let empty_signature = SchnorrSignature::default(); - assert!(empty_signature.to_wsts_signature().is_none()); - - // Generate a random Signature and ensure it successfully converts - let mut rng = rand_core::OsRng::default(); - let msg = - "You Idiots! These Are Not Them! You\'ve Captured Their Stunt Doubles!".as_bytes(); - - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![0, 1, 2], vec![3, 4], vec![5, 6, 7], vec![8, 9]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signers: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); - - // Generate an aggregate public key - let comms = match wsts::v2::test_helpers::dkg(&mut signers, &mut rng) { - Ok(comms) => comms, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let aggregate_public_key = comms - .iter() - .fold(Point::default(), |s, comm| s + comm.poly[0]); - - // signers [0,1,3] have "threshold" keys - { - let mut signers = [signers[0].clone(), signers[1].clone(), signers[3].clone()].to_vec(); - let mut sig_agg = wsts::v2::Aggregator::new(num_keys, threshold); - - sig_agg.init(comms.clone()).expect("aggregator init failed"); - - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg, &mut signers, &mut rng); - let original_signature = sig_agg - .sign(msg, &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); - // Serialize the signature and verify the results - let schnorr_signature = SchnorrSignature::from(&original_signature); - assert_eq!( - schnorr_signature[..33], - original_signature.R.compress().data[..] 
- ); - assert_eq!(schnorr_signature[33..], original_signature.z.to_bytes()); - - // Deserialize the signature and verify the results - let reverted_signature = schnorr_signature - .to_wsts_signature() - .expect("Failed to convert schnorr signature to wsts signature"); - assert_eq!(reverted_signature.R, original_signature.R); - assert_eq!(reverted_signature.z, original_signature.z); - assert!(original_signature.verify(&aggregate_public_key, msg)); - assert!(reverted_signature.verify(&aggregate_public_key, msg)); - } - } - */ } From 40043eb11366a52086f39e5ca9c5bf462c44574b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:39:30 -0700 Subject: [PATCH 696/910] Fix comment on SignCoordinator in stacks-node Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index ee012984228..f570009be54 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -54,11 +54,8 @@ pub static TEST_IGNORE_SIGNERS: std::sync::Mutex<Option<bool>> = std::sync::Mute /// waking up to check timeouts? static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -/// The `SignCoordinator` struct represents a WSTS FIRE coordinator whose -/// sole function is to serve as the coordinator for Nakamoto block signing. -/// This coordinator does not operate as a DKG coordinator. Rather, this struct -/// is used by Nakamoto miners to act as the coordinator for the blocks they -/// produce. +/// The `SignCoordinator` struct's sole function is to serve as the coordinator for Nakamoto block signing. +/// This struct is used by Nakamoto miners to act as the coordinator for the blocks they produce. 
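[Editor's note, not part of the patch: the test-fix commits that follow lean heavily on the repo's `wait_for` polling helper. An illustrative sketch of the shape of such a helper, under the assumption that it retries a closure until it reports success, errors out, or times out; this is not the repo's exact implementation:]

```rust
use std::time::{Duration, Instant};

/// Illustrative polling helper: retry `check` until it returns Ok(true),
/// an error, or `timeout_secs` elapses.
fn wait_for(timeout_secs: u64, mut check: impl FnMut() -> Result<bool, String>) -> Result<(), String> {
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        // Sleep briefly between polls to avoid spinning.
        std::thread::sleep(Duration::from_millis(100));
    }
    Err("timed out".into())
}
```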
pub struct SignCoordinator { receiver: Option>, message_key: StacksPrivateKey, From 1f53fac8051d3ec77ec065dedb930d2cef427c11 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:57:44 -0700 Subject: [PATCH 697/910] Do not blanket set pox_sync_sample_secs to a positive integer Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 3 ++- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a5675072352..d144fbf17db 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -585,7 +585,7 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); conf.burnchain.poll_time_secs = 1; - conf.node.pox_sync_sample_secs = 5; + conf.node.pox_sync_sample_secs = 0; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -2159,6 +2159,7 @@ fn multiple_miners() { let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 1; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b70d9c39dcf..e7985bc2570 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3347,6 +3347,7 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 1; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); From 81a09c2b5be565e7e369200a461534e2e5104640 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 12:22:11 -0700 Subject: [PATCH 698/910] Add serde to hashbrown Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 + Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 850de299020..dc27c931ccb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1369,6 +1369,7 @@ checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2114e23dfca..10dc427e2e5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } -hashbrown = "0.14.3" +hashbrown = { version = "0.14.3", features = ["serde"] } rand_core = "0.6" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" From 677accded78509d0c72675ac13d203cf38ed54f1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 26 Sep 2024 08:15:55 -0500 Subject: [PATCH 699/910] more test fixes --- .../src/tests/nakamoto_integrations.rs | 56 +++++++++++++++++-- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- 2 files changed, 51
insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 32614902477..6b072a1f4da 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1188,6 +1188,10 @@ fn signer_vote_if_needed( btc_regtest_controller.get_burnchain().first_block_height, reward_cycle, ); + let epochs = btc_regtest_controller.get_stacks_epochs(); + let is_naka_epoch = epochs[StacksEpoch::find_epoch(&epochs, block_height).unwrap()] + .epoch_id + .uses_nakamoto_blocks(); if block_height >= prepare_phase_start { // If the key is already set, do nothing. @@ -1210,6 +1214,7 @@ fn signer_vote_if_needed( clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); + let mut expected_nonces = vec![]; for (i, signer_sk) in signer_sks.iter().enumerate() { let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; @@ -1228,8 +1233,19 @@ fn signer_vote_if_needed( clarity::vm::Value::UInt(reward_cycle as u128 + 1), ], ); + expected_nonces.push((to_addr(signer_sk), signer_nonce + 1)); submit_tx(&http_origin, &voting_tx); } + + if is_naka_epoch { + wait_for(30, || { + let all_bumped = expected_nonces.iter().all(|(addr, expected_nonce)| { + get_account(&http_origin, addr).nonce >= *expected_nonce + }); + Ok(all_bumped) + }) + .expect("Timed out waiting for an interim nakamoto block to process our transactions"); + } } } @@ -1465,7 +1481,7 @@ fn simple_neon_integration() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); @@ -1601,6 +1617,19 @@ fn simple_neon_integration() { ) .unwrap(); + wait_for(30, || { + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }); + Ok(transfer_tx_included) + }) + .expect("Timed out waiting for submitted transaction to be included in a block"); + // Mine 15 more nakamoto tenures for _i in 0..15 { next_block_and_mine_commit( @@ -2416,7 +2445,7 @@ fn correct_burn_outs() { epochs[epoch_30_ix].start_height = 225; } - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.initial_balances.clear(); let accounts: Vec<_> = (0..8) .map(|ix| { @@ -5183,6 +5212,15 @@ fn check_block_heights() { next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + // in the first tenure, make sure that the contracts are published + if tenure_ix == 0 { + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + } + let heights1_value = call_read_only( &naka_conf, &sender_addr, @@ -5247,9 +5285,15 @@ fn check_block_heights() { .clone() .expect_u128() .unwrap(); + let expected_height = if tenure_ix == 0 { + // tenure 0 
will include an interim block at this point because of the contract publish + // txs + last_stacks_block_height + 2 + } else { + last_stacks_block_height + 1 + }; assert_eq!( - sbh, - last_stacks_block_height + 1, + sbh, expected_height, "Stacks block heights should have incremented" ); last_stacks_block_height = sbh; @@ -5373,8 +5417,8 @@ fn check_block_heights() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert_eq!( tip.stacks_block_height, - block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), - "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + block_height_pre_3_0 + 1 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined 1 + (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); coord_channel diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465e..9bf79f9ce7b 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -266,7 +266,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()); Ok(finished_signers.len() == self.spawned_signers.len()) - }).unwrap(); + }).expect("Timed out while waiting for the signers to be registered"); } pub fn wait_for_cycle(&mut self, timeout_secs: u64, reward_cycle: u64) { From 419ce4311f8e6d3705aaec86b35814c008ea064b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 26 Sep 2024 11:04:58 -0500 Subject: [PATCH 700/910] more test fixes --- .../src/tests/nakamoto_integrations.rs | 75 ++++++++++++------- .../src/tests/neon_integrations.rs | 2 + 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6b072a1f4da..2bc0082fed2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5909,6 +5909,15 @@ fn clarity_burn_state() { }) .unwrap(); + // in the first tenure, make sure that the contracts are published + if tenure_ix == 0 { + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + } + let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; info!("Expecting burn block height to be {}", burn_block_height); @@ -7100,14 +7109,24 @@ fn check_block_times() { contract3_name, contract_clarity3, ); - sender_nonce += 1; submit_tx(&http_origin, &contract_tx3); + sender_nonce += 1; + + // sleep to ensure seconds have changed + thread::sleep(Duration::from_secs(3)); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + // make sure that the contracts are published + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {:?}", info.stacks_tip_height); let last_stacks_block_height = info.stacks_tip_height as u128; let last_tenure_height = last_stacks_block_height as u128; @@ -7116,7 +7135,7 @@ fn check_block_times() { &sender_addr, contract0_name, "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + 
vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let time0 = time0_value .expect_optional() .unwrap() .unwrap() .expect_u128() .unwrap(); @@ -7130,7 +7149,7 @@ fn check_block_times() { &sender_addr, contract1_name, "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let time1 = time1_value .expect_optional() .unwrap() .unwrap() .expect_u128() .unwrap(); @@ -7148,7 +7167,7 @@ fn check_block_times() { &sender_addr, contract3_name, "get-tenure-time", - vec![&clarity::vm::Value::UInt(last_tenure_height - 1)], + vec![&clarity::vm::Value::UInt(last_tenure_height - 2)], ); let time3_tenure = time3_tenure_value .expect_optional() .unwrap() .unwrap() .expect_u128() .unwrap(); @@ -7166,7 +7185,7 @@ fn check_block_times() { &sender_addr, contract3_name, "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let time3_block = time3_block_value .expect_optional() .unwrap() .unwrap() .expect_u128() .unwrap(); // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); + thread::sleep(Duration::from_secs(2)); // Mine a Nakamoto block info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + // make sure that the transfer tx has been processed + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for transfer to complete"); let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {:?}", info.stacks_tip_height); let last_stacks_block_height = info.stacks_tip_height as u128; let time0a_value = call_read_only( &naka_conf, &sender_addr, contract0_name, "get-time", vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], ); let time0a = time0a_value .expect_optional() .unwrap() .unwrap() .expect_u128() .unwrap(); assert!( time0a - time0 >= 1, "get-block-info? time should have changed. time_0 = {time0}. 
time_0_a = {time0a}" ); let time1a_value = call_read_only( @@ -7598,9 +7609,19 @@ fn check_block_info() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx3); + // sleep to ensure seconds have changed + thread::sleep(Duration::from_secs(3)); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + // make sure that the contracts are published + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; @@ -7610,7 +7631,7 @@ fn check_block_info() { &sender_addr, contract0_name, "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; assert_block_info(&tuple0, &miner, &miner_spend); @@ -7620,7 +7641,7 @@ fn check_block_info() { &sender_addr, contract1_name, "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple1 = result1.expect_tuple().unwrap().data_map; assert_eq!(tuple0, tuple1); @@ -7630,7 +7651,7 @@ fn check_block_info() { &sender_addr, contract3_name, "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple3_tenure0 = result3_tenure.expect_tuple().unwrap().data_map; assert_eq!( @@ -7661,7 +7682,7 @@ fn check_block_info() { &sender_addr, contract3_name, "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple3_block1 = result3_block.expect_tuple().unwrap().data_map; assert_eq!( diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3dd299c8616..4ec3b311d41 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -929,6 +929,8 @@ pub fn call_read_only( let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); + info!("Call read only: {contract}.{function}({args:?})"); + let path = format!( "{http_origin}/v2/contracts/call-read/{}/{}/{}", principal, contract, function From 6a3746c9f6ca4edec8e9ce66efdbf074e87f5615 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 26 Sep 2024 15:13:22 -0500 Subject: [PATCH 701/910] fix test assertion --- testnet/stacks-node/src/tests/signer/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 9bf79f9ce7b..551fc3faa02 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -727,7 +727,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { wait_for(timeout_secs, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events + let block_rejections: HashSet<_> = stackerdb_events .into_iter() .flat_map(|chunk| chunk.modified_slots) .filter_map(|chunk| { @@ -739,7 +739,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> 
SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest None, } }) - .collect::<HashSet<_>>(); - Ok(block_rejections.len() == expected_signers.len()) + .collect(); + info!("Checking block rejections"; "rejected_len" => block_rejections.len(), "expected_len" => expected_signers.len()); + Ok(block_rejections.len() >= expected_signers.len()) }) } } From cd49977a7383b85c45a317cb174eb1c2231ecbb4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 26 Sep 2024 14:39:50 -0700 Subject: [PATCH 702/910] Do not advance unless the bootstrapped or follower node also hits epoch 3, and shut down the runloop Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 89 ++++++++++++++++++---- 1 file changed, 73 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e7985bc2570..2cbbf67a515 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1393,8 +1393,8 @@ fn multiple_miners() { config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - config.node.pox_sync_sample_secs = 5; config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 1; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1446,17 +1446,30 @@ fn multiple_miners() { ); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { naka_submitted_commits: rl2_commits, .. 
} = run_loop_2.counters(); - let _run_loop_2_thread = thread::Builder::new() + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for bootstrapped node to catch up to the miner"); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -1553,6 +1566,12 @@ fn multiple_miners() { u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } @@ -1728,6 +1747,18 @@ fn miner_forking() { .unwrap(); signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for bootstrapped node to catch up to the miner"); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; naka_skip_commit_op.0.lock().unwrap().replace(false); @@ -3403,18 +3434,31 @@ fn multiple_miners_with_nakamoto_blocks() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { naka_submitted_commits: rl2_commits, naka_mined_blocks: blocks_mined2, .. 
} = run_loop_2.counters(); - let _run_loop_2_thread = thread::Builder::new() + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -3536,7 +3580,12 @@ fn multiple_miners_with_nakamoto_blocks() { btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); - + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } @@ -3575,6 +3624,7 @@ fn partial_tenure_fork() { let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let localhost = "127.0.0.1"; // All signers are listening to node 1 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -3586,11 +3636,12 @@ fn partial_tenure_fork() { signer_config.node_host = node_1_rpc_bind.clone(); }, |config| { - let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 1; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3620,11 +3671,10 @@ fn partial_tenure_fork() { let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); - let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -3646,6 +3696,8 @@ fn partial_tenure_fork() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let Counters { naka_mined_blocks: blocks_mined2, 
naka_proposed_blocks: blocks_proposed2, @@ -3653,7 +3705,7 @@ fn partial_tenure_fork() { } = run_loop_2.counters(); signer_test.boot_to_epoch_3(); - let _run_loop_2_thread = thread::Builder::new() + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); @@ -3918,7 +3970,12 @@ fn partial_tenure_fork() { .unwrap() .unwrap(); assert_eq!(tip.stacks_block_height, ignore_block - 1); - + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } From 04c6c8aeacdddaa8bdeafdac3008188a88cff7fd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 27 Sep 2024 07:46:19 -0700 Subject: [PATCH 703/910] Remove stale functions and config options left over from v1 signer and update the README file Signed-off-by: Jacinta Ferrant --- stacks-signer/README.md | 136 +++++---- stacks-signer/src/client/mod.rs | 94 ------- stacks-signer/src/client/stacks_client.rs | 303 +-------------------- stacks-signer/src/config.rs | 177 +----------- stacks-signer/src/main.rs | 2 +- stacks-signer/src/monitoring/mod.rs | 46 ---- stacks-signer/src/monitoring/prometheus.rs | 32 --- stacks-signer/src/runloop.rs | 7 - 8 files changed, 85 insertions(+), 712 deletions(-) diff --git a/stacks-signer/README.md b/stacks-signer/README.md index b3c287d9e31..6e9e0be760a 100644 --- a/stacks-signer/README.md +++ b/stacks-signer/README.md @@ -1,6 +1,6 @@ # stacks-signer: Stacks Signer CLI -stacks-signer is a command-line interface (CLI) for executing DKG (Distributed Key Generation) rounds, signing transactions and blocks, and more within the Stacks blockchain ecosystem. This tool provides various subcommands to interact with the StackerDB contract, perform cryptographic operations, and run a Stacks compliant signer. +stacks-signer is a command-line interface (CLI) for operating a Stacks compliant signer. This tool provides various subcommands to interact with the StackerDB contract, generate SIP voting and stacking signatures, and monitor the Signer network for expected behaviour. ## Installation @@ -25,18 +25,92 @@ To use stacks-signer, you need to build and install the Rust program. You can do ./target/release/stacks-signer --help ``` +4. **Build with Prometheus Metrics Enabled**: You can optionally build and run the stacks-signer with monitoring metrics enabled. + + ```bash + cd stacks-signer + cargo build --release --features "monitoring_prom" + cargo run --features "monitoring_prom" -p stacks-signer run --config <config_file> + ``` + +You must specify the "metrics_endpoint" option in the config file to serve these metrics. +See [metrics documentation](TODO) for a complete breakdown of the available metrics. + ## Usage The stacks-signer CLI provides the following subcommands: +### `run` + +Start the signer and handle requests to sign Stacks block proposals. + +```bash +./stacks-signer run --config <config_file> + +``` + +### `monitor-signers` + +Periodically query the current reward cycle's signers' StackerDB slots to verify their operation. + +```bash +./stacks-signer monitor-signers --host <host> --interval <interval> --max-age <max_age> + +``` +- `--host`: The Stacks node to connect to. +- `--interval`: The polling interval in seconds for querying stackerDB. +- `--max-age`: The max age in seconds before a signer message is considered stale. + +### `generate-stacking-signature` + +Generate a signature for stacking. 
+ +```bash +./stacks-signer generate-stacking-signature --config <config_file> --pox-address <pox_address> --reward-cycle <reward_cycle> --period <period> --max-amount <max_amount> --auth-id <auth_id> + +``` +- `--config`: The path to the signer configuration file. +- `--pox-address`: The BTC address used to receive rewards +- `--reward-cycle`: The reward cycle during which this signature is used +- `--method`: Stacking method that can be used +- `--period`: Number of cycles used as a lock period. Use `1` for stack-aggregation-commit method +- `--max-amount`: The max amount of uSTX that can be used in this unique transaction +- `--auth-id`: A unique identifier to prevent re-using this authorization +- `--json`: Output information in JSON format + +### `generate-vote` + +Generate a vote signature for a specific SIP + +```bash +./stacks-signer generate-vote --config <config_file> --vote <vote> --sip <sip> + +``` +- `--config`: The path to the signer configuration file. +- `--vote`: The vote (YES or NO) +- `--sip`: The number of the SIP being voted on + +### `verify-vote` + +Verify the validity of a vote signature for a specific SIP. + +```bash +./stacks-signer verify-vote --public-key <public_key> --signature <signature> --vote <vote> --sip <sip> + +``` +- `--public-key`: The stacks public key to verify against in hexadecimal format +- `--signature`: The message signature in hexadecimal format +- `--vote`: The vote (YES or NO) +- `--sip`: The number of the SIP being voted on + ### `get-chunk` Retrieve a chunk from the StackerDB instance. ```bash ./stacks-signer get-chunk --host <host> --contract <contract> --slot_id <slot_id> --slot_version <slot_version> -``` +``` - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--slot-id`: The slot ID to get. - `--slot-version`: The slot version to get. @@ -49,7 +123,6 @@ Retrieve the latest chunk from the StackerDB instance. ```bash ./stacks-signer get-latest-chunk --host <host> --contract <contract> --slot-id <slot_id> ``` - - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--slot-id`: The slot ID to get. @@ -71,7 +144,6 @@ Upload a chunk to the StackerDB instance. ```bash ./stacks-signer put-chunk --host <host> --contract <contract> --private_key <private_key> --slot-id <slot_id> --slot-version <slot_version> [--data <data>] ``` - - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--private_key`: The Stacks private key to use in hexadecimal format. - `--slot-id`: The slot ID to get. - `--slot-version`: The slot version to get. - `--data`: The data to upload. If you wish to pipe data using STDIN, use with '-'. -### `dkg` - -Run a distributed key generation round through stacker-db. - -```bash -./stacks-signer dkg --config <config_file> -``` - -- `--config`: The path to the signer configuration file. - -### `dkg-sign` - -Run a distributed key generation round and sign a given message through stacker-db. - -```bash -./stacks-signer dkg-sign --config <config_file> [--data <data>] -``` -- `--config`: The path to the signer configuration file. -- `--data`: The data to sign. If you wish to pipe data using STDIN, use with '-'. - - -### `dkg-sign` - -Sign a given message through stacker-db. - -```bash -./stacks-signer sign --config <config_file> [--data <data>] -``` -- `--config`: The path to the signer configuration file. -- `--data`: The data to sign. If you wish to pipe data using STDIN, use with '-'. - -### `run` - -Start the signer and handle requests to sign messages and participate in DKG rounds via stacker-db. -```bash -./stacks-signer run --config <config_file> -``` -- `--config`: The path to the signer configuration file. - -### `generate-files` - -Generate the necessary files to run a collection of signers to communicate via stacker-db. - -```bash -./stacks-signer generate-files --host <host> --contract <contract> --num-signers <num_signers> --num-keys <num_keys> --network <network> --dir <dir> -``` -- `--host`: The stacks node host to connect to. -- `--contract`: The contract ID of the StackerDB signer contract. -- `--num-signers`: The number of signers to generate configuration files for. -- `--num-keys`: The total number of key ids to distribute among the signers. -- `--private-keys:` A path to a file containing a list of hexadecimal representations of Stacks private keys. Required if `--num-keys` is not set. -- `--network`: The network to use. One of "mainnet" or "testnet". -- `--dir`: The directory to write files to. Defaults to the current directory. -- `--timeout`: Optional timeout in milliseconds to use when polling for updates in the StackerDB runloop. - ## Contributing To contribute to the stacks-signer project, please read the [Contributing Guidelines](../CONTRIBUTING.md). + ## License This program is open-source software released under the terms of the GNU General Public License (GPL). You should have received a copy of the GNU General Public License along with this program. \ No newline at end of file diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 081b5c07ab9..5e957a2166c 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -129,15 +129,12 @@ pub(crate) mod tests { use blockstack_lib::chainstate::stacks::boot::POX_4_NAME; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; - use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::{ RPCPoxCurrentCycleInfo, RPCPoxEpoch, RPCPoxInfoData, RPCPoxNextCycleInfo, }; - use blockstack_lib::net::api::postfeerate::{RPCFeeEstimate, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::costs::ExecutionCost; - use clarity::vm::types::TupleData; use clarity::vm::Value as ClarityValue; use libsigner::SignerEntries; use rand::distributions::Standard; @@ -221,28 +218,6 @@ pub(crate) mod tests { ConsensusHash(hash) } - /// Build a response for the get_last_round request - pub fn build_get_last_round_response(round: u64) -> String { - let value = ClarityValue::some(ClarityValue::UInt(round as u128)) - .expect("Failed to create response"); - build_read_only_response(&value) - } - - /// Build a response for the get_account_nonce request - pub fn build_account_nonce_response(nonce: u64) -> String { - let account_nonce_entry = AccountEntryResponse { - nonce, - balance: "0x00000000000000000000000000000000".to_string(), - locked: "0x00000000000000000000000000000000".to_string(), - unlock_height: thread_rng().next_u64(), - balance_proof: None, - nonce_proof: None, - }; - let account_nonce_entry_json = serde_json::to_string(&account_nonce_entry) - .expect("Failed to serialize account nonce entry"); - format!("HTTP/1.1 200 OK\n\n{account_nonce_entry_json}") - } - /// Build a response to get_pox_data_with_retry where it returns a specific reward cycle id and block height pub fn build_get_pox_data_response( reward_cycle: Option<u64>, @@ -377,44 +352,6 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{{\"okay\":true,\"result\":\"{hex}\"}}") } - /// Build a response for the get_medium_estimated_fee_ustx_response request with a specific medium estimate - pub fn build_get_medium_estimated_fee_ustx_response( - medium_estimate: u64, - ) -> (String, RPCFeeEstimateResponse) { - // Generate some random info - let fee_response = 
RPCFeeEstimateResponse { - estimated_cost: ExecutionCost { - write_length: thread_rng().next_u64(), - write_count: thread_rng().next_u64(), - read_length: thread_rng().next_u64(), - read_count: thread_rng().next_u64(), - runtime: thread_rng().next_u64(), - }, - estimated_cost_scalar: thread_rng().next_u64(), - cost_scalar_change_by_byte: thread_rng().next_u32() as f64, - estimations: vec![ - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: medium_estimate, - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - ], - }; - let fee_response_json = serde_json::to_string(&fee_response) - .expect("Failed to serialize fee estimate response"); - ( - format!("HTTP/1.1 200 OK\n\n{fee_response_json}"), - fee_response, - ) - } - /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config pub fn generate_signer_config(config: &GlobalConfig, num_signers: u32) -> SignerConfig { @@ -473,43 +410,12 @@ pub(crate) mod tests { stacks_private_key: config.stacks_private_key, node_host: config.node_host.to_string(), mainnet: config.network.is_mainnet(), - dkg_end_timeout: config.dkg_end_timeout, - dkg_private_timeout: config.dkg_private_timeout, - dkg_public_timeout: config.dkg_public_timeout, - nonce_timeout: config.nonce_timeout, - sign_timeout: config.sign_timeout, - tx_fee_ustx: config.tx_fee_ustx, - max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, } } - pub fn build_get_round_info_response(info: Option<(u64, u64)>) -> String { - let clarity_value = if let Some((vote_count, vote_weight)) = info { - ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ("votes-count".into(), ClarityValue::UInt(vote_count as u128)), - ( - "votes-weight".into(), - ClarityValue::UInt(vote_weight as u128), - ), - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - pub fn build_get_weight_threshold_response(threshold: u64) -> String { - let clarity_value = ClarityValue::UInt(threshold as u128); - build_read_only_response(&clarity_value) - } - pub fn build_get_tenure_tip_response(header_types: &StacksBlockHeaderTypes) -> String { let response_json = serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index dbe4f9094d8..f415896e86a 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -15,11 +15,8 @@ // along with this program. If not, see . 
use std::collections::{HashMap, VecDeque}; -use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_NAME, -}; +use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, @@ -37,9 +34,7 @@ use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersRespon use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postblock_v3; -use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; -use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; -use clarity::util::hash::to_hex; +use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use libsigner::v0::messages::PeerInfo; @@ -272,44 +267,6 @@ impl StacksClient { .collect()) } - /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction - pub fn get_medium_estimated_fee_ustx( - &self, - tx: &StacksTransaction, - ) -> Result { - debug!("stacks_node_client: Getting estimated fee..."); - let request = FeeRateEstimateRequestBody { - estimated_len: Some(tx.tx_len()), - transaction_payload: to_hex(&tx.payload.serialize_to_vec()), - }; - let timer = - crate::monitoring::new_rpc_call_timer(&self.fees_transaction_path(), &self.http_origin); - let send_request = || { - self.stacks_node_client - .post(self.fees_transaction_path()) - .header("Content-Type", "application/json") - .json(&request) - .send() - .map_err(backoff::Error::transient) - }; - let response = retry_with_exponential_backoff(send_request)?; - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - timer.stop_and_record(); - let fee_estimate_response = response.json::()?; - let fee = fee_estimate_response - .estimations - .get(1) - .map(|estimate| estimate.fee) - .ok_or_else(|| { - ClientError::UnexpectedResponseFormat( - "RPCFeeEstimateResponse missing medium fee estimate".into(), - ) - })?; - Ok(fee) - } - /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { let pox_info = self.get_pox_data()?; @@ -371,52 +328,6 @@ impl StacksClient { Ok(()) } - /// Retrieve the current consumed weight for the given reward cycle and DKG round - pub fn get_round_vote_weight( - &self, - reward_cycle: u64, - round_id: u64, - ) -> Result, ClientError> { - let function_name = ClarityName::from("get-round-info"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - ClarityValue::UInt(round_id as u128), - ]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - &pox_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - let Some(inner_data) = inner_data else { - return Ok(None); - }; - let round_info = inner_data.expect_tuple()?; - let votes_weight = round_info.get("votes-weight")?.to_owned().expect_u128()?; - Ok(Some(votes_weight)) - } - - /// Retrieve the weight 
threshold required to approve a DKG vote - pub fn get_vote_threshold_weight(&self, reward_cycle: u64) -> Result { - let function_name = ClarityName::from("get-threshold-weight"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - &pox_contract_id.name, - &function_name, - function_args, - )?; - Ok(value.expect_u128()?) - } - - /// Retrieve the current account nonce for the provided address - pub fn get_account_nonce(&self, address: &StacksAddress) -> Result { - self.get_account_entry(address).map(|entry| entry.nonce) - } - /// Get information about the tenures between `chosen_parent` and `last_sortition` pub fn get_tenure_forking_info( &self, @@ -545,33 +456,6 @@ impl StacksClient { Ok(peer_info_data) } - /// Retrieve the last DKG vote round number for the current reward cycle - pub fn get_last_round(&self, reward_cycle: u64) -> Result, ClientError> { - debug!("Getting the last DKG vote round of reward cycle {reward_cycle}..."); - let contract_addr = boot_code_addr(self.mainnet); - let contract_name = ContractName::from(SIGNERS_VOTING_NAME); - let function_name = ClarityName::from("get-last-round"); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let opt_value = self - .read_only_contract_call( - &contract_addr, - &contract_name, - &function_name, - function_args, - )? - .expect_optional()?; - let round = if let Some(value) = opt_value { - Some(u64::try_from(value.expect_u128()?).map_err(|e| { - ClientError::MalformedContractData(format!( - "Failed to convert vote round to u64: {e}" - )) - })?) - } else { - None - }; - Ok(round) - } - /// Get the reward set signers from the stacks node for the given reward cycle pub fn get_reward_set_signers( &self, @@ -711,34 +595,6 @@ impl StacksClient { Ok(post_block_resp.accepted) } - /// Helper function to submit a transaction to the Stacks mempool - pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { - let txid = tx.txid(); - let tx = tx.serialize_to_vec(); - debug!("stacks_node_client: Submitting transaction to the stacks node..."; - "txid" => %txid, - ); - let timer = - crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); - let send_request = || { - self.stacks_node_client - .post(self.transaction_path()) - .header("Content-Type", "application/octet-stream") - .body(tx.clone()) - .send() - .map_err(|e| { - debug!("Failed to submit transaction to the Stacks node: {e:?}"); - backoff::Error::transient(e) - }) - }; - let response = retry_with_exponential_backoff(send_request)?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - Ok(txid) - } - /// Makes a read only contract call to a stacks contract pub fn read_only_contract_call( &self, @@ -794,10 +650,6 @@ impl StacksClient { format!("{}/v2/pox", self.http_origin) } - fn transaction_path(&self) -> String { - format!("{}/v2/transactions", self.http_origin) - } - fn read_only_path( &self, contract_addr: &StacksAddress, @@ -839,10 +691,6 @@ impl StacksClient { format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } - fn fees_transaction_path(&self) -> String { - format!("{}/v2/fees/transaction", self.http_origin) - } - fn tenure_tip_path(&self, consensus_hash: &ConsensusHash) -> String { format!("{}/v3/tenures/tip/{}", self.http_origin, consensus_hash) } @@ -905,7 +753,6 @@ impl 
StacksClient { #[cfg(test)] mod tests { use std::collections::BTreeMap; - use std::io::{BufWriter, Write}; use std::thread::spawn; use blockstack_lib::burnchains::Address; @@ -924,15 +771,13 @@ mod tests { use rand::thread_rng; use rand_core::RngCore; use stacks_common::bitvec::BitVec; - use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; + use stacks_common::consts::SIGNER_SLOTS_PER_USER; use super::*; use crate::client::tests::{ - build_account_nonce_response, build_get_last_round_response, - build_get_last_set_cycle_response, build_get_medium_estimated_fee_ustx_response, - build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, - build_get_tenure_tip_response, build_get_weight_threshold_response, - build_read_only_response, write_response, MockServerClient, + build_get_last_set_cycle_response, build_get_peer_info_response, + build_get_pox_data_response, build_get_tenure_tip_response, build_read_only_response, + write_response, MockServerClient, }; #[test] @@ -1061,58 +906,6 @@ mod tests { assert!(matches!(res, Err(ClientError::ReqwestError(_)))); } - #[test] - fn transaction_contract_call_should_send_bytes_to_node() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - .unwrap(); - - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - - let mut tx_bytes = [0u8; 1024]; - { - let mut tx_bytes_writer = BufWriter::new(&mut tx_bytes[..]); - tx.consensus_serialize(&mut tx_bytes_writer).unwrap(); - tx_bytes_writer.flush().unwrap(); - } - - let bytes_len = tx_bytes - .iter() - .enumerate() - .rev() - .find(|(_, &x)| x != 0) - .unwrap() - .0 - + 1; - - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - - let request_bytes = write_response( - mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), - ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); - assert!( - request_bytes - .windows(bytes_len) - .any(|window| window == &tx_bytes[..bytes_len]), - "Request bytes did not contain the transaction bytes" - ); - } - #[test] fn core_info_call_for_burn_block_height_should_succeed() { let mock = MockServerClient::new(); @@ -1134,29 +927,6 @@ mod tests { assert!(h.join().unwrap().is_err()); } - #[test] - fn get_account_nonce_should_succeed() { - let mock = MockServerClient::new(); - let address = mock.client.stacks_address; - let h = spawn(move || mock.client.get_account_nonce(&address)); - let nonce = thread_rng().next_u64(); - write_response(mock.server, build_account_nonce_response(nonce).as_bytes()); - let returned_nonce = h.join().unwrap().expect("Failed to deserialize response"); - assert_eq!(returned_nonce, nonce); - } - - #[test] - fn get_account_nonce_should_fail() { - let mock = MockServerClient::new(); - let address = mock.client.stacks_address; - let h = spawn(move || mock.client.get_account_nonce(&address)); - write_response( - mock.server, - b"HTTP/1.1 200 OK\n\n{\"nonce\":\"invalid nonce\",\"balance\":\"0x00000000000000000000000000000000\",\"locked\":\"0x00000000000000000000000000000000\",\"unlock_height\":0}" - ); - assert!(h.join().unwrap().is_err()); - } - #[test] fn parse_valid_signer_slots_should_succeed() { let mock = 
MockServerClient::new(); @@ -1361,17 +1131,6 @@ mod tests { assert_eq!(reduced_peer_info.server_version, peer_info.server_version); } - #[test] - fn get_last_round_should_succeed() { - let mock = MockServerClient::new(); - let round = rand::thread_rng().next_u64(); - let response = build_get_last_round_response(round); - let h = spawn(move || mock.client.get_last_round(0)); - - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap().unwrap(), round); - } - #[test] fn get_reward_set_should_succeed() { let mock = MockServerClient::new(); @@ -1403,56 +1162,6 @@ mod tests { assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } - #[test] - fn get_round_vote_weight_should_succeed() { - let mock = MockServerClient::new(); - let vote_count = rand::thread_rng().next_u64(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_round_info_response(Some((vote_count, weight))); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(weight as u128)); - - let mock = MockServerClient::new(); - let round_response = build_get_round_info_response(None); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - - #[test] - fn get_vote_threshold_weight_should_succeed() { - let mock = MockServerClient::new(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_weight_threshold_response(weight); - let h = spawn(move || mock.client.get_vote_threshold_weight(0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), weight as u128); - } - - #[test] - fn get_medium_estimated_fee_ustx_should_succeed() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - .unwrap(); - - let estimate = thread_rng().next_u64(); - let response = build_get_medium_estimated_fee_ustx_response(estimate).0; - let h = spawn(move || mock.client.get_medium_estimated_fee_ustx(&unsigned_tx)); - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), estimate); - } - #[test] fn get_tenure_tip_should_succeed() { let mock = MockServerClient::new(); diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 802c362b86c..843645945ba 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,8 +35,6 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; -// Default transaction fee to use in microstacks (if unspecificed in the config file) -const TX_FEE_USTX: u64 = 10_000; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -116,7 +114,7 @@ impl Network { pub struct SignerConfig { /// The reward cycle of the configuration pub reward_cycle: u64, - /// The signer ID assigned to this signer to be used in DKG and Sign rounds + /// The signer ID assigned to this signer (may be different from signer_slot_id) pub signer_id: u32, /// The signer stackerdb slot id (may be different from signer_id) pub signer_slot_id: 
SignerSlotID, @@ -130,20 +128,6 @@ pub struct SignerConfig { pub node_host: String, /// Whether this signer is running on mainnet or not pub mainnet: bool, - /// timeout to gather DkgPublicShares messages - pub dkg_public_timeout: Option, - /// timeout to gather DkgPrivateShares messages - pub dkg_private_timeout: Option, - /// timeout to gather DkgEnd messages - pub dkg_end_timeout: Option, - /// timeout to gather nonces - pub nonce_timeout: Option, - /// timeout to gather signature shares - pub sign_timeout: Option, - /// the STX tx fee to use in uSTX. - pub tx_fee_ustx: u64, - /// If set, will use the estimated fee up to this amount. - pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, /// How much time must pass between the first block proposal in a tenure and the next bitcoin block @@ -168,20 +152,6 @@ pub struct GlobalConfig { pub network: Network, /// The time to wait for a response from the stacker-db instance pub event_timeout: Duration, - /// timeout to gather DkgPublicShares messages - pub dkg_public_timeout: Option, - /// timeout to gather DkgPrivateShares messages - pub dkg_private_timeout: Option, - /// timeout to gather DkgEnd messages - pub dkg_end_timeout: Option, - /// timeout to gather nonces - pub nonce_timeout: Option, - /// timeout to gather signature shares - pub sign_timeout: Option, - /// the STX tx fee to use in uSTX. - pub tx_fee_ustx: u64, - /// the max STX tx fee to use in uSTX when estimating fees - pub max_tx_fee_ustx: Option, /// the authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file @@ -209,21 +179,6 @@ struct RawConfigFile { pub network: Network, /// The time to wait (in millisecs) for a response from the stacker-db instance pub event_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgPublicShares messages - pub dkg_public_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgPrivateShares messages - pub dkg_private_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgEnd messages - pub dkg_end_timeout_ms: Option, - /// timeout in (millisecs) to gather nonces - pub nonce_timeout_ms: Option, - /// timeout in (millisecs) to gather signature shares - pub sign_timeout_ms: Option, - /// the STX tx fee to use in uSTX. If not set, will default to TX_FEE_USTX - pub tx_fee_ustx: Option, - /// the max STX tx fee to use in uSTX when estimating fees. - /// If not set, will use tx_fee_ustx. 
- pub max_tx_fee_ustx: Option, /// The authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file or :memory: for an in-memory database @@ -293,11 +248,6 @@ impl TryFrom for GlobalConfig { StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); - let dkg_end_timeout = raw_data.dkg_end_timeout_ms.map(Duration::from_millis); - let dkg_public_timeout = raw_data.dkg_public_timeout_ms.map(Duration::from_millis); - let dkg_private_timeout = raw_data.dkg_private_timeout_ms.map(Duration::from_millis); - let nonce_timeout = raw_data.nonce_timeout_ms.map(Duration::from_millis); - let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); let first_proposal_burn_block_timing = Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); let db_path = raw_data.db_path.into(); @@ -328,13 +278,6 @@ impl TryFrom for GlobalConfig { stacks_address, network: raw_data.network, event_timeout, - dkg_end_timeout, - dkg_public_timeout, - dkg_private_timeout, - nonce_timeout, - sign_timeout, - tx_fee_ustx: raw_data.tx_fee_ustx.unwrap_or(TX_FEE_USTX), - max_tx_fee_ustx: raw_data.max_tx_fee_ustx, auth_password: raw_data.auth_password, db_path, metrics_endpoint, @@ -366,10 +309,6 @@ impl GlobalConfig { /// Return a string with non-sensitive configuration /// information for logging purposes pub fn config_to_log_string(&self) -> String { - let tx_fee = match self.tx_fee_ustx { - 0 => "default".to_string(), - _ => (self.tx_fee_ustx as f64 / 1_000_000.0).to_string(), - }; let metrics_endpoint = match &self.metrics_endpoint { Some(endpoint) => endpoint.to_string(), None => "None".to_string(), @@ -382,7 +321,6 @@ Stacks address: {stacks_address} Public key: {public_key} Network: {network} Database path: {db_path} -DKG transaction fee: {tx_fee} uSTX Metrics endpoint: {metrics_endpoint} "#, node_host = self.node_host, @@ -393,7 +331,6 @@ Metrics endpoint: {metrics_endpoint} ), network = self.network, db_path = self.db_path.to_str().unwrap_or_default(), - tx_fee = tx_fee, metrics_endpoint = metrics_endpoint, ) } @@ -527,119 +464,9 @@ mod tests { RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); assert_eq!(config.auth_password, "melon"); - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); } - #[test] - fn fee_options_should_deserialize_correctly() { - let pk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let node_host = "localhost"; - let network = Network::Testnet; - let password = "melon"; - - // Test both max_tx_fee_ustx and tx_fee_ustx are unspecified - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test both max_tx_fee_ustx and tx_fee_ustx are specified - let max_tx_fee_ustx = Some(1000); - let tx_fee_ustx = 
Some(2000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - // Test only max_tx_fee_ustx is specified - let max_tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test only tx_fee_ustx is specified - let tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(Some(config.tx_fee_ustx), tx_fee_ustx); - } - #[test] fn test_config_to_string() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); @@ -652,7 +479,6 @@ Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: -DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 "#; @@ -663,7 +489,6 @@ Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: -DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 "#; diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 5b118db646f..520d4552584 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -1,4 +1,4 @@ -//! # stacks-signer: Stacks signer binary for executing DKG rounds, signing transactions and blocks, and more. +//! # stacks-signer: Stacks signer binary for signing block proposals, interacting with stackerdb, and more. //! //! Usage documentation can be found in the [README]("https://github.com/blockstack/stacks-blockchain/stacks-signer/README.md). //! 
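The monitoring hunks that follow strip out the v1 DKG counters; every helper that remains follows one feature-gated shape. A minimal sketch of that shape, assuming the crate's existing `monitoring_prom` feature and its `prometheus` and `lazy_static` dependencies (the counter and function names here are hypothetical, for illustration only):

```rust
// Sketch of the cfg-gated counter pattern used in stacks-signer/src/monitoring.
// `EXAMPLE_COUNTER` is a hypothetical metric, not one defined by the crate.
#[cfg(feature = "monitoring_prom")]
use lazy_static::lazy_static;
#[cfg(feature = "monitoring_prom")]
use prometheus::{opts, register_int_counter, IntCounter};

#[cfg(feature = "monitoring_prom")]
lazy_static! {
    static ref EXAMPLE_COUNTER: IntCounter = register_int_counter!(opts!(
        "stacks_signer_example_counter",
        "Hypothetical counter, registered only when metrics are compiled in"
    ))
    .unwrap();
}

/// Callers invoke this unconditionally; it compiles to a no-op unless the
/// binary was built with `--features monitoring_prom`.
pub fn increment_example_counter() {
    #[cfg(feature = "monitoring_prom")]
    EXAMPLE_COUNTER.inc();
}
```

Keeping both the registration and the increment behind the feature gate is why the README above builds with `--features "monitoring_prom"` and a configured `metrics_endpoint` before any metrics are served.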
diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 0ecc99b5f85..e03b03d47af 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -71,52 +71,6 @@ pub fn increment_block_responses_sent(accepted: bool) { } } -/// Increment the signer inbound messages counter -#[allow(unused_variables)] -pub fn increment_signer_inbound_messages(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::SIGNER_INBOUND_MESSAGES.inc_by(amount); -} - -/// Increment the coordinator inbound messages counter -#[allow(unused_variables)] -pub fn increment_coordinator_inbound_messages(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::COORDINATOR_INBOUND_MESSAGES.inc_by(amount); -} - -/// Increment the number of inbound packets received -#[allow(unused_variables)] -pub fn increment_inbound_packets(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::INBOUND_PACKETS_RECEIVED.inc_by(amount); -} - -/// Increment the number of commands processed -#[allow(unused_variables)] -pub fn increment_commands_processed(command_type: &str) { - #[cfg(feature = "monitoring_prom")] - prometheus::COMMANDS_PROCESSED - .with_label_values(&[command_type]) - .inc(); -} - -/// Increment the number of DKG votes submitted -#[allow(unused_variables)] -pub fn increment_dkg_votes_submitted() { - #[cfg(feature = "monitoring_prom")] - prometheus::DGK_VOTES_SUBMITTED.inc(); -} - -/// Increment the number of commands processed -#[allow(unused_variables)] -pub fn increment_operation_results(operation_type: &str) { - #[cfg(feature = "monitoring_prom")] - prometheus::OPERATION_RESULTS - .with_label_values(&[operation_type]) - .inc(); -} - /// Increment the number of block proposals received #[allow(unused_variables)] pub fn increment_block_proposals_received() { diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs index c78db1299d7..247a9f00f50 100644 --- a/stacks-signer/src/monitoring/prometheus.rs +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -39,38 +39,6 @@ lazy_static! 
{ &["response_type"] ) .unwrap(); - pub static ref SIGNER_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( - "stacks_signer_inbound_messages", - "The number of inbound messages received by the signer" - )) - .unwrap(); - pub static ref COORDINATOR_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( - "stacks_signer_coordinator_inbound_messages", - "The number of inbound messages received as a coordinator" - )) - .unwrap(); - pub static ref INBOUND_PACKETS_RECEIVED: IntCounter = register_int_counter!(opts!( - "stacks_signer_inbound_packets_received", - "The number of inbound packets received by the signer" - )) - .unwrap(); - pub static ref COMMANDS_PROCESSED: IntCounterVec = register_int_counter_vec!( - "stacks_signer_commands_processed", - "The number of commands processed by the signer", - &["command_type"] - ) - .unwrap(); - pub static ref DGK_VOTES_SUBMITTED: IntCounter = register_int_counter!(opts!( - "stacks_signer_dgk_votes_submitted", - "The number of DGK votes submitted by the signer" - )) - .unwrap(); - pub static ref OPERATION_RESULTS: IntCounterVec = register_int_counter_vec!( - "stacks_signer_operation_results_dkg", - "The number of DKG operation results", - &["operation_type"] - ) - .unwrap(); pub static ref BLOCK_PROPOSALS_RECEIVED: IntCounter = register_int_counter!(opts!( "stacks_signer_block_proposals_received", "The number of block proposals received by the signer" diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 855957a70a9..a0e2b739e92 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -281,13 +281,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host.to_string(), mainnet: self.config.network.is_mainnet(), - dkg_end_timeout: self.config.dkg_end_timeout, - dkg_private_timeout: self.config.dkg_private_timeout, - dkg_public_timeout: self.config.dkg_public_timeout, - nonce_timeout: self.config.nonce_timeout, - sign_timeout: self.config.sign_timeout, - tx_fee_ustx: self.config.tx_fee_ustx, - max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, })) From ff580187afde2ade7c4d15b523ddb68fc1bf0e7c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 27 Sep 2024 09:42:40 -0700 Subject: [PATCH 704/910] Do not leak a private key that has a small typo Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 66cf5a5f7d5..0ae5be2a222 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -288,21 +288,11 @@ impl TryFrom for GlobalConfig { ConfigError::BadField("endpoint".to_string(), raw_data.endpoint.clone()) })?; - let stacks_private_key = - StacksPrivateKey::from_hex(&raw_data.stacks_private_key).map_err(|_| { - ConfigError::BadField( - "stacks_private_key".to_string(), - raw_data.stacks_private_key.clone(), - ) - })?; + let stacks_private_key = StacksPrivateKey::from_hex(&raw_data.stacks_private_key) + .map_err(|e| ConfigError::BadField("stacks_private_key".to_string(), e.into()))?; - let ecdsa_private_key = - Scalar::try_from(&stacks_private_key.to_bytes()[..32]).map_err(|_| { - ConfigError::BadField( - "stacks_private_key".to_string(), - raw_data.stacks_private_key.clone(), - ) - })?; + let ecdsa_private_key = 
Scalar::try_from(&stacks_private_key.to_bytes()[..32]) + .map_err(|e| ConfigError::BadField("stacks_private_key".to_string(), e.to_string()))?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); let stacks_address = From c60e4a63c83c95646b1a7aa6a8d4191a62b8e821 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 27 Sep 2024 10:22:05 -0700 Subject: [PATCH 705/910] Test: try increasing a timeout to see what CI does Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d77ea1ef111..4d55c4d0ce2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3693,7 +3693,7 @@ fn partial_tenure_fork() { let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - wait_for(120, || { + wait_for(200, || { let Some(node_1_info) = get_chain_info_opt(&conf) else { return Ok(false); }; From 0feddea3c7085f4479eac77c4afb77b99adcd3a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 27 Sep 2024 13:29:27 -0500 Subject: [PATCH 706/910] fix: /v3/sortitions over-optimistic in finding last sortition --- stackslib/src/net/api/getsortition.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 7b594530c26..ccfa4efec94 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -182,7 +182,11 @@ impl GetSortitionHandler { // try to figure out what the last snapshot in this fork was with a successful // sortition. // optimization heuristic: short-circuit the load if it's just `stacks_parent_sn` - let last_sortition_ch = if stacks_parent_sn.sortition { + // if the sortition count incremented by exactly 1 between us and our **stacks** parent, + // then the stacks parent's sortition *must* be the last one with a winner. + let sortitions_incremented_by_1 = + sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1; + let last_sortition_ch = if sortitions_incremented_by_1 { stacks_parent_sn.consensus_hash.clone() } else { // we actually need to perform the marf lookup From 8809f919f2de4cc5cc9285b380fd61320bca03d2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 27 Sep 2024 13:32:29 -0500 Subject: [PATCH 707/910] fix: tenure extend logic. 
only include tx in first block after extension --- testnet/stacks-node/src/nakamoto_node/miner.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19d..5e3f72ee20e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1260,6 +1260,12 @@ impl BlockMinerThread { tenure_change_tx: None, }); }; + if self.last_block_mined.is_some() { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { @@ -1289,10 +1295,10 @@ impl BlockMinerThread { &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; - "burn_view_consensus_hash" => %burn_view_consensus_hash, - "parent_block_id" => %parent_block_id, - "num_blocks_so_far" => num_blocks_so_far, + info!("Miner: Extending tenure"; + "burn_view_consensus_hash" => %burn_view_consensus_hash, + "parent_block_id" => %parent_block_id, + "num_blocks_so_far" => num_blocks_so_far, ); payload = payload.extend( *burn_view_consensus_hash, From a5b5f29fa9147aa23226b4169973690129a8c2b6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:23:02 -0400 Subject: [PATCH 708/910] feat: helper method to get the tenure ID for a block --- stackslib/src/chainstate/nakamoto/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e97fefafffb..b7e95ff14ba 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2534,6 +2534,18 @@ impl NakamotoChainState { Ok(result) } + /// Load a consensus hash for a Nakamoto header + pub fn get_block_header_nakamoto_tenure_id( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result<Option<ConsensusHash>, ChainstateError> { + let sql = "SELECT consensus_hash FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + "FATAL: multiple rows for the same block hash".to_string() + })?; + Ok(result) + } + /// Load an epoch2 header pub fn get_block_header_epoch2( chainstate_conn: &Connection, From 475ed44047f94cc7bcef898598bd0907ba4af7c8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:23:31 -0400 Subject: [PATCH 709/910] feat: tool for generating invs for nakamoto testnet --- stackslib/src/main.rs | 58 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 029ee16c6bb..52d481affb4 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -63,9 +63,10 @@ use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::cost_estimates::UnitEstimator; use blockstack_lib::net::db::LocalPeer; +use blockstack_lib::net::inv::nakamoto::InvGenerator; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::StacksMessage; +use blockstack_lib::net::{NakamotoInvData, StacksMessage}; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, cli}; @@ -974,6 +975,61 @@ simulating a miner.
process::exit(1); } + if argv[1] == "get-tenure-inv" { + let chainstate_root_path = &argv[2]; + let tip_block_ids = &argv[3..]; + let chainstate_path = format!("{}/chainstate", &chainstate_root_path); + let sortition_path = format!("{}/burnchain/sortition", &chainstate_root_path); + + let (chainstate, _) = + StacksChainState::open(false, 0x80000000, &chainstate_path, None).unwrap(); + let pox_consts = + PoxConstants::new(900, 100, 80, 0, 0, u64::MAX, u64::MAX, 240, 241, 242, 242); + let sortition_db = SortitionDB::open(&sortition_path, true, pox_consts).unwrap(); + + let mut invgen = InvGenerator::new(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortition_db.conn()).unwrap(); + + for tip_block_id in tip_block_ids.iter() { + let tip_block_id = StacksBlockId::from_hex(tip_block_id).unwrap(); + let header = + NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &tip_block_id) + .unwrap() + .unwrap(); + let sn = SortitionDB::get_block_snapshot_consensus( + sortition_db.conn(), + &header.consensus_hash, + ) + .unwrap() + .unwrap(); + + let reward_cycle = sortition_db + .pox_constants + .block_height_to_reward_cycle(230, sn.block_height) + .unwrap(); + + let bitvec_bools = invgen + .make_tenure_bitvector( + &tip, + &sortition_db, + &chainstate, + &header.consensus_hash, + &header.anchored_header.block_hash(), + reward_cycle, + ) + .unwrap(); + let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools) + .map_err(|e| { + warn!("Failed to create a NakamotoInv response: {:?}", &e); + e + }) + .unwrap(); + + println!("{}: {:?}", tip_block_id, &nakamoto_inv); + } + process::exit(0); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); From d70c223eff2632cc50e3a52e1935935d030717f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:24:17 -0400 Subject: [PATCH 710/910] fix: when advancing the tip height, invalidate all cached inventory state between (and including) the new and old tips, since cached data from the old tip will have stored a negative cache result that ought now to be positive in the context of the new tip. --- stackslib/src/net/inv/nakamoto.rs | 66 +++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3b7a5050bab..08cd795ad21 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -17,7 +17,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use stacks_common::bitvec::BitVec; -use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::StacksEpochId; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -155,16 +155,34 @@ impl InvGenerator { &self, chainstate: &StacksChainState, tip_block_id: &StacksBlockId, - ) -> Result<Option<StacksBlockId>, NetError> { + ) -> Result<Option<(StacksBlockId, Vec<ConsensusHash>)>, NetError> { let mut cursor = tip_block_id.clone(); + let mut chs = vec![]; + let Some(ch) = + NakamotoChainState::get_block_header_nakamoto_tenure_id(chainstate.db(), &cursor)?
+ else { + return Ok(None); + }; + chs.push(ch); for _ in 0..self.tip_ancestor_search_depth { let parent_id_opt = NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor)?; + let Some(parent_id) = parent_id_opt else { return Ok(None); }; + + let Some(parent_ch) = NakamotoChainState::get_block_header_nakamoto_tenure_id( + chainstate.db(), + &parent_id, + )? + else { + return Ok(None); + }; + chs.push(parent_ch); + if self.processed_tenures.contains_key(&parent_id) { - return Ok(Some(parent_id)); + return Ok(Some((parent_id, chs))); } cursor = parent_id; } @@ -188,32 +206,40 @@ impl InvGenerator { pub(crate) fn get_processed_tenure( &mut self, chainstate: &StacksChainState, - tip_block_id: &StacksBlockId, + tip_block_ch: &ConsensusHash, + tip_block_bh: &BlockHeaderHash, tenure_id_consensus_hash: &ConsensusHash, ) -> Result<Option<InvTenureInfo>, NetError> { - if self.processed_tenures.get(tip_block_id).is_none() { + let tip_block_id = StacksBlockId::new(tip_block_ch, tip_block_bh); + if self.processed_tenures.get(&tip_block_id).is_none() { // this tip has no known table. // does it have an ancestor with a table? If so, then move its ancestor's table to this // tip. Otherwise, make a new table. - if let Some(ancestor_tip_id) = - self.find_ancestor_processed_tenures(chainstate, tip_block_id)? + if let Some((ancestor_tip_id, intermediate_tenures)) = + self.find_ancestor_processed_tenures(chainstate, &tip_block_id)? { - let ancestor_tenures = self + let mut ancestor_tenures = self .processed_tenures .remove(&ancestor_tip_id) .unwrap_or_else(|| { panic!("FATAL: did not have ancestor tip reported by search"); }); + for ch in intermediate_tenures.into_iter() { + ancestor_tenures.remove(&ch); + } + ancestor_tenures.remove(tip_block_ch); + self.processed_tenures .insert(tip_block_id.clone(), ancestor_tenures); } else { self.processed_tenures .insert(tip_block_id.clone(), HashMap::new()); } + } else { } - let Some(tenure_infos) = self.processed_tenures.get_mut(tip_block_id) else { + let Some(tenure_infos) = self.processed_tenures.get_mut(&tip_block_id) else { unreachable!("FATAL: inserted table for chain tip, but didn't get it back"); }; @@ -224,9 +250,9 @@ impl InvGenerator { } else { // we have not loaded the tenure info for this tip, so go get it let loaded_info_opt = - InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; - tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); + InvTenureInfo::load(chainstate, &tip_block_id, &tenure_id_consensus_hash)?; + tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); self.cache_misses = self.cache_misses.saturating_add(1); return Ok(loaded_info_opt); } @@ -269,9 +295,11 @@ impl InvGenerator { tip: &BlockSnapshot, sortdb: &SortitionDB, chainstate: &StacksChainState, - nakamoto_tip: &StacksBlockId, + nakamoto_tip_ch: &ConsensusHash, + nakamoto_tip_bh: &BlockHeaderHash, reward_cycle: u64, ) -> Result<Vec<bool>, NetError> { + let nakamoto_tip = StacksBlockId::new(nakamoto_tip_ch, nakamoto_tip_bh); let ih = sortdb.index_handle(&tip.sortition_id); // N.B.
reward_cycle_to_block_height starts at reward index 1 @@ -290,8 +318,12 @@ impl InvGenerator { let mut cur_height = reward_cycle_end_tip.block_height; let mut cur_consensus_hash = reward_cycle_end_tip.consensus_hash; - let mut cur_tenure_opt = - self.get_processed_tenure(chainstate, &nakamoto_tip, &cur_consensus_hash)?; + let mut cur_tenure_opt = self.get_processed_tenure( + chainstate, + nakamoto_tip_ch, + nakamoto_tip_bh, + &cur_consensus_hash, + )?; // loop variables and invariants: // @@ -342,7 +374,8 @@ impl InvGenerator { tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, - &nakamoto_tip, + nakamoto_tip_ch, + nakamoto_tip_bh, &cur_tenure_info.parent_tenure_id_consensus_hash, )?; } else { @@ -363,7 +396,8 @@ impl InvGenerator { tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, - &nakamoto_tip, + nakamoto_tip_ch, + nakamoto_tip_bh, &parent_sortition_consensus_hash, )?; } From 61713ca932aa44bba35c81d72ad7255bf2763372 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:26:59 -0400 Subject: [PATCH 711/910] chore: API sync --- stackslib/src/net/chat.rs | 3 +- stackslib/src/net/tests/inv/nakamoto.rs | 260 +++++++++++++++++++++--- 2 files changed, 230 insertions(+), 33 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index ba0b70b1a5a..1cb9c76dbe4 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1744,7 +1744,8 @@ impl ConversationP2P { &tip, sortdb, chainstate, - &network.stacks_tip.block_id(), + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, reward_cycle, )?; let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 2f600272070..951ddefe72d 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -172,7 +172,8 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let stacks_tip = peer.network.stacks_tip.block_id(); + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); @@ -183,7 +184,14 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { // check the reward cycles for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -234,7 +242,8 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let stacks_tip = peer.network.stacks_tip.block_id(); + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); @@ -244,7 +253,14 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as 
u64, + ) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -287,7 +303,8 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let stacks_tip = peer.network.stacks_tip.block_id(); + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); @@ -297,7 +314,14 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) .unwrap(); debug!("At reward cycle {}: {:?}", rc, &bitvec); @@ -1119,9 +1143,13 @@ fn test_nakamoto_make_tenure_inv_in_forks() { .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) .unwrap(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let naka_tip = peer.network.stacks_tip.block_id(); let first_naka_tip = naka_tip.clone(); let first_sort_tip = sort_tip.clone(); + let first_naka_tip_ch = naka_tip_ch.clone(); + let first_naka_tip_bh = naka_tip_bh.clone(); // find the first block in this tenure let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) @@ -1143,7 +1171,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 0); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1152,7 +1187,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 3); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1161,7 +1203,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 3); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1173,7 +1222,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 13); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1202,6 +1258,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.refresh_burnchain_view(); let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = 
peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let tip_rc = sortdb .pox_constants @@ -1209,7 +1267,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { .unwrap(); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1228,6 +1293,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.refresh_burnchain_view(); let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); // // ---------------------- the inv generator can track multiple forks at once ---------------------- @@ -1255,7 +1322,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load inv off of the canonical tip. // It should show a missed sortition. let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!( "test: Bits in fork on {} at rc {}: {:?}", @@ -1276,7 +1350,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &sort_tip, &sortdb, &chainstate, - &fork_naka_block.block_id(), + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), tip_rc, ) .unwrap(); @@ -1306,6 +1381,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.refresh_burnchain_view(); let new_naka_tip = peer.network.stacks_tip.block_id(); + let new_naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let new_naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let tip_rc = sortdb .pox_constants @@ -1319,7 +1396,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // It should show two missed sortitions, for each fork. // only one additional cache miss let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!( "test: Bits in fork on {} at rc {}: {:?}", @@ -1341,7 +1425,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &sort_tip, &sortdb, &chainstate, - &fork_naka_block.block_id(), + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), tip_rc, ) .unwrap(); @@ -1363,7 +1448,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load inv off of the canonical tip again. // It should show two missed sortitions. 
let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!( "test: Bits in fork on {} at rc {}: {:?}", @@ -1397,6 +1489,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.mine_nakamoto_on(vec![naka_block.clone()]); } let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); // new inv generator with a search depth of 3 @@ -1408,7 +1502,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &first_sort_tip, &sortdb, &chainstate, - &first_naka_tip, + &first_naka_tip_ch, + &first_naka_tip_bh, tip_rc, ) .unwrap(); @@ -1417,7 +1512,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load a descendant that is 6 blocks higher let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); assert_eq!( bits, @@ -1436,7 +1538,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &first_sort_tip, &sortdb, &chainstate, - &first_naka_tip, + &first_naka_tip_ch, + &first_naka_tip_bh, tip_rc, ) .unwrap(); @@ -1445,7 +1548,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load a descendant that is 6 blocks higher let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); assert_eq!( bits, @@ -1508,6 +1618,8 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { .unwrap(); let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let first_naka_tip = naka_tip.clone(); let first_sort_tip = sort_tip.clone(); @@ -1531,7 +1643,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 0); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1540,7 +1659,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 3); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1552,7 +1678,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 13); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1564,7 +1697,14 @@ fn 
test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 17); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1576,7 +1716,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 23); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1588,7 +1735,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 27); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1601,7 +1755,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { // load them all again. cache misses should remain the same. let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1610,7 +1771,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1622,7 +1790,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1634,7 +1809,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1646,7 +1828,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + 
&naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1658,7 +1847,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); From 95985651f33c3996c3be337570d1b20749ae5772 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 30 Sep 2024 15:30:30 +0300 Subject: [PATCH 712/910] add Nakamoto block heights to info logs for easier integration with log analysis tools --- stacks-signer/src/chainstate.rs | 5 +++++ stacks-signer/src/v0/signer.rs | 2 ++ 2 files changed, 7 insertions(+) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4bbb9741a54..2f9249ae89a 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -194,6 +194,7 @@ impl SortitionsView { { info!( "Current miner timed out, marking as invalid."; + "block_height" => block.header.chain_length, "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -202,6 +203,7 @@ impl SortitionsView { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { info!( "Last miner timed out, marking as invalid."; + "block_height" => block.header.chain_length, "last_sortition_consensus_hash" => ?last_sortition.consensus_hash, ); last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -347,6 +349,7 @@ impl SortitionsView { "sortition_state.consensus_hash" => %sortition_state.consensus_hash, "sortition_state.prior_sortition" => %sortition_state.prior_sortition, "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, + "block_height" => block.header.chain_length, ); let tenures_reorged = client.get_tenure_forking_info( @@ -406,6 +409,7 @@ impl SortitionsView { "Miner is not building off of most recent tenure. 
A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_height" => block.header.chain_length, "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -578,6 +582,7 @@ impl SortitionsView { "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_height" => block.header.chain_length, ); return Ok(true); }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fa34cc4b429..510ae26e39f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -197,6 +197,7 @@ impl SignerTrait for Signer { info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), + "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); loop { @@ -408,6 +409,7 @@ impl Signer { "{self}: received a block proposal for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); From 5327a59431359294fdbe142a330f9324c2fca893 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 30 Sep 2024 09:38:59 -0500 Subject: [PATCH 713/910] chore: fix merge artifacts --- stackslib/src/net/relay.rs | 2 +- stackslib/src/net/stackerdb/sync.rs | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 9f8bbdb0119..575e96138ea 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -931,7 +931,7 @@ impl Relayer { &block.header.consensus_hash, &block.header.block_hash(), &obtained_method; - "block_id" => &block.header.block_id(), + "block_id" => %block.header.block_id(), ); if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 6a46c196569..08e6e978eab 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -568,7 +568,6 @@ impl StackerDBSync { old_slot_id, old_version, new_inv.slot_versions[old_slot_id], - &self.smart_contract_id, ); resync = true; break; @@ -913,7 +912,6 @@ impl StackerDBSync { network.get_local_peer(), &self.smart_contract_id, &naddr, - &self.smart_contract_id, data.error_code ); if data.error_code == NackErrorCodes::StaleView @@ -1075,7 +1073,6 @@ impl StackerDBSync { network.get_local_peer(), &self.smart_contract_id, &naddr, - &self.smart_contract_id, data.error_code ); if data.error_code == NackErrorCodes::StaleView From cc326c4b4f5df74a23c2883bf7f759f4d9a1084a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 30 Sep 2024 11:10:30 -0400 Subject: [PATCH 714/910] chore: expand test coverage to verify that caching behavior is consistent with no-caching behavior --- stackslib/src/net/inv/nakamoto.rs | 25 +- stackslib/src/net/tests/inv/nakamoto.rs | 539 +++++++++++++++++++++++- 2 files changed, 549 insertions(+), 15 deletions(-) diff --git 
a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 08cd795ad21..5a1fdc410da 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -113,6 +113,8 @@ pub struct InvGenerator { tip_ancestor_search_depth: u64, /// count cache misses for `processed_tenures` cache_misses: u128, + /// Disable caching (test only) + no_cache: bool, } impl InvGenerator { @@ -122,6 +124,18 @@ impl InvGenerator { sortitions: HashMap::new(), tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, cache_misses: 0, + no_cache: false, + } + } + + #[cfg(test)] + pub fn new_no_cache() -> Self { + Self { + processed_tenures: HashMap::new(), + sortitions: HashMap::new(), + tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, + cache_misses: 0, + no_cache: true, } } @@ -236,7 +250,6 @@ impl InvGenerator { self.processed_tenures .insert(tip_block_id.clone(), HashMap::new()); } - } else { } let Some(tenure_infos) = self.processed_tenures.get_mut(&tip_block_id) else { @@ -244,9 +257,9 @@ impl InvGenerator { }; // this tip has a known table - if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { + let ret = if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { // we've loaded this tenure info before for this tip - return Ok(loaded_tenure_info.clone()); + Ok(loaded_tenure_info.clone()) } else { // we have not loaded the tenure info for this tip, so go get it let loaded_info_opt = @@ -254,8 +267,12 @@ impl InvGenerator { tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); self.cache_misses = self.cache_misses.saturating_add(1); - return Ok(loaded_info_opt); + Ok(loaded_info_opt) + }; + if self.no_cache { + self.processed_tenures.clear(); } + ret } /// Get sortition info, loading it from our cache if needed diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 951ddefe72d..fac9623d3f5 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -176,6 +176,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); + let mut inv_generator_no_cache = InvGenerator::new_no_cache(); // processed 10 tenures let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -193,6 +194,20 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { rc as u64, ) .unwrap(); + + let bitvec_no_cache = inv_generator_no_cache + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) + .unwrap(); + + assert_eq!(bitvec, bitvec_no_cache); + debug!( "At reward cycle {}: {:?}, mesasge = {:?}", rc, &bitvec, &inv @@ -246,6 +261,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); + let mut inv_generator_no_cache = InvGenerator::new_no_cache(); // processed 3 sortitions let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -262,6 +278,19 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { rc as u64, ) .unwrap(); + + let bitvec_no_cache = inv_generator_no_cache + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) + .unwrap(); + assert_eq!(bitvec, bitvec_no_cache); + debug!( "At reward cycle {}: {:?}, mesasge = {:?}", rc, &bitvec, &inv @@ -307,6 +336,7 @@ fn 
test_nakamoto_inv_10_extended_tenures_10_sortitions() { let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); + let mut inv_generator_no_cache = InvGenerator::new_no_cache(); // processed 10 tenures let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -323,6 +353,18 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { rc as u64, ) .unwrap(); + let bitvec_no_cache = inv_generator_no_cache + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) + .unwrap(); + assert_eq!(bitvec, bitvec_no_cache); + debug!("At reward cycle {}: {:?}", rc, &bitvec); if rc <= 6 { @@ -1127,6 +1169,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); // // ---------------------- basic operations ---------------------- @@ -1180,6 +1223,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1196,6 +1251,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1231,6 +1298,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc - 1, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1279,9 +1358,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); - // only one additional cache miss expected_bits.push(true); - expected_cache_misses += 1; + expected_cache_misses += 2; assert_eq!(bits, expected_bits); assert_eq!(invgen.cache_misses(), expected_cache_misses); @@ -1331,6 +1409,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &naka_tip, tip_rc, &bits @@ -1341,7 +1431,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { ); assert_eq!(bits, [true, true, true, true, true, false]); - assert_eq!(invgen.cache_misses(), 17); + assert_eq!(invgen.cache_misses(), 20); // load inv off of the non-canonical tip. 
// it should show the last 3 canonical tenures as missing, and this forked block as present @@ -1355,6 +1445,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &fork_naka_block.block_id(), @@ -1367,7 +1469,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { ); assert_eq!(bits, [true, true, false, false, false, true]); - assert_eq!(invgen.cache_misses(), 21); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 24); // add more to the fork peer.mine_nakamoto_on(vec![fork_naka_block.clone()]); @@ -1405,6 +1508,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &naka_tip, tip_rc, &bits @@ -1415,11 +1530,13 @@ fn test_nakamoto_make_tenure_inv_in_forks() { ); assert_eq!(bits, [true, true, true, true, true, false, false]); - assert_eq!(invgen.cache_misses(), 22); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 25); // load inv off of the non-canonical tip again. // it should show the last 3 last canonical tenures as missing, and this forked block as - // present. Only one additional cache miss should manifest. + // present. Two additional cache misses should manifest, since we invalidate the common + // parent's tenure data. let bits = invgen .make_tenure_bitvector( &sort_tip, @@ -1430,6 +1547,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &fork_naka_block.block_id(), @@ -1443,7 +1572,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // only one more cache miss assert_eq!(bits, [true, true, false, false, false, true, true]); - assert_eq!(invgen.cache_misses(), 23); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 27); // load inv off of the canonical tip again. // It should show two missed sortitions. 
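Every hunk in this patch repeats a single differential-testing idiom: compute each tenure bitvector twice, once through the caching `InvGenerator` and once through a `new_no_cache()` generator that clears its `processed_tenures` table after every lookup, then assert the two results are equal. Any stale-cache bug therefore surfaces as a divergence from the cache-free ground truth rather than as a hard-to-diagnose wrong bitvector. A minimal, self-contained sketch of the same idiom, with a toy memoized oracle standing in for `InvGenerator` (the `Oracle` type and its hash function are invented for illustration and are not part of stacks-core):

```rust
use std::collections::HashMap;

/// Toy stand-in for a memoizing generator like `InvGenerator`.
struct Oracle {
    cache: HashMap<u64, u64>,
    no_cache: bool, // test-only: forget every answer immediately, as in `new_no_cache()`
}

impl Oracle {
    fn new(no_cache: bool) -> Self {
        Self { cache: HashMap::new(), no_cache }
    }

    /// The "expensive" lookup being memoized (stands in for a chainstate load).
    fn query(&mut self, input: u64) -> u64 {
        let result = *self
            .cache
            .entry(input)
            .or_insert_with(|| input.wrapping_mul(2654435761));
        if self.no_cache {
            self.cache.clear(); // never reuse a prior answer
        }
        result
    }
}

fn main() {
    let mut cached = Oracle::new(false);
    let mut uncached = Oracle::new(true);
    // Differential check: the cached path must agree with the cache-free path
    // on every query, including repeats that actually exercise the cache.
    for input in [1u64, 7, 7, 42, 7, 1] {
        assert_eq!(cached.query(input), uncached.query(input));
    }
    println!("cached and uncached oracles agree");
}
```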
@@ -1457,6 +1587,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &naka_tip, tip_rc, &bits @@ -1468,7 +1610,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // no new cache misses assert_eq!(bits, [true, true, true, true, true, false, false]); - assert_eq!(invgen.cache_misses(), 23); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 27); // // ---------------------- the inv generator will search only a maximum depth before giving up ---------------------- @@ -1507,7 +1650,20 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip_ch, + &first_naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 3); // load a descendant that is 6 blocks higher @@ -1521,12 +1677,25 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!( bits, [true, true, true, true, true, false, false, true, true, true] ); // all 10 tenures were loaded, because we had to search more than 5 blocks back + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 12); // new inv generator with a search depth of 10 @@ -1543,7 +1712,20 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip_ch, + &first_naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 3); // load a descendant that is 6 blocks higher @@ -1557,13 +1739,26 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!( bits, [true, true, true, true, true, false, false, true, true, true] ); - // reused old canonical tip information - assert_eq!(invgen.cache_misses(), 9); + // reused old canonical tip information, but still had an additional cache miss from the parent + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 10); } #[test] @@ -1576,7 +1771,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { let bitvecs = vec![ // full rc vec![true, true, true, true, true, true, true, true, true, true], - // sparce rc + // sparse rc vec![ true, false, false, false, false, false, false, true, true, true, ], @@ -1605,6 +1800,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { peer.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); let 
sortdb = peer.sortdb_ref().reopen().unwrap(); let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); @@ -1652,10 +1848,22 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 3); let bits = invgen @@ -1668,13 +1876,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 1, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 13); let bits = invgen @@ -1687,13 +1907,26 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 2, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, false, false, false, false, false, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 17); let bits = invgen @@ -1706,9 +1939,21 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 3, ) .unwrap(); + + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, true, false, true, false, true, false, true] @@ -1725,13 +1970,27 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 4, ) .unwrap(); + + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, false, false, false, false, false, false, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 27); let bits = invgen @@ -1744,13 +2003,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 5, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [false, false, true, 
true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); // load them all again. cache misses should remain the same. @@ -1764,10 +2035,22 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1780,13 +2063,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 1, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1799,13 +2094,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 2, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, false, false, false, false, false, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1818,13 +2125,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 3, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, true, false, true, false, true, false, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1837,13 +2156,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 4, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, false, false, false, false, false, false, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1856,12 +2187,198 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 5, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) + .unwrap(); 
debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [false, false, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); } + +#[test] +fn test_nakamoto_make_tenure_inv_from_old_tips() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + // item 0 is sortition 42 + vec![true, true, true, true, true, true, true, true, true, true], + // sparse rc + // item 0 is sortition 52 + vec![ + true, false, false, false, false, false, false, true, true, true, + ], + // alternating rc + // item 0 is sortition 62 + vec![ + false, true, false, true, false, true, false, true, true, true, + ], + // sparse rc + // item 0 is sortition 72 + vec![ + false, false, false, false, false, false, true, true, true, true, + ], + // full rc + // item 0 is sortition 82 + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + // compute the rc-aligned bitvecs. + // bitvecs[i][0] starts at reward cycle index 2. + // aligned_bitvecs[i][0] starts at reward cycle index 0. + let mut aligned_bitvecs = vec![vec![false, false]]; + let mut i = 2; + loop { + let bitvec_idx = (i - 2) / 10; + let bitvec_bit = (i - 2) % 10; + if bitvec_idx >= bitvecs.len() { + if let Some(ref mut last_bitvec) = aligned_bitvecs.last_mut() { + // last aligned bitvec has all `false`s + while last_bitvec.len() < 10 { + last_bitvec.push(false); + } + } + break; + } + + let aligned_bitvec_idx = i / 10; + let aligned_bitvec_bit = i % 10; + if aligned_bitvec_bit == 0 { + aligned_bitvecs.push(vec![]); + } + + let bit = bitvecs[bitvec_idx][bitvec_bit]; + aligned_bitvecs[aligned_bitvec_idx].push(bit); + + i += 1; + } + + assert_eq!( + aligned_bitvecs[0], + vec![false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!( + aligned_bitvecs[1], + vec![true, true, true, false, false, false, false, false, false, true] + ); + assert_eq!( + aligned_bitvecs[2], + vec![true, true, false, true, false, true, false, true, false, true] + ); + assert_eq!( + aligned_bitvecs[3], + vec![true, true, false, false, false, false, false, false, true, true] + ); + assert_eq!( + aligned_bitvecs[4], + vec![true, true, true, true, true, true, true, true, true, true] + ); + assert_eq!( + aligned_bitvecs[5], + vec![true, true, false, false, false, false, false, false, false, false] + ); + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + peer.refresh_burnchain_view(); + peer.mine_malleablized_blocks = false; + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); + + // + // ---------------------- querying each tip will report the successive inv bits ---------------------- + // + let naka_tip = peer.network.stacks_tip.block_id(); + let mut ancestor_tips = vec![]; + let mut cursor = naka_tip.clone(); + loop { + 
ancestor_tips.push(cursor.clone()); + let Some(parent) = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor).unwrap() + else { + break; + }; + cursor = parent; + } + // last item is an epoch2 block, which we don't care about + ancestor_tips.pop(); + ancestor_tips.reverse(); + + for tip in ancestor_tips.into_iter() { + debug!("load tip {}", &tip); + let hdr = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &tip) + .unwrap() + .unwrap(); + let tip_ch = hdr.consensus_hash; + let tip_bh = hdr.anchored_header.block_hash(); + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip_ch) + .unwrap() + .unwrap(); + let rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, sn.block_height) + .unwrap(); + let rc_start_height = sortdb + .pox_constants + .reward_cycle_to_block_height(sortdb.first_block_height, rc) + - 1; + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &tip_ch, &tip_bh, rc) + .unwrap(); + + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &tip_ch, &tip_bh, rc) + .unwrap(); + + debug!("tip {}: consensus_hash={}, burn_height={}, reward_cycle={}, bits={:?}, bits_no_cache={:?}", &tip, &tip_ch, sn.block_height, rc, &bits, &bits_no_cache); + assert_eq!(bits, bits_no_cache); + + // nakamoto starts at burn height 42, and has a reward cycle length of 10, so compute the range of bitvecs we need + assert_eq!(sortdb.pox_constants.reward_cycle_length, 10); + assert!(rc >= 4); + + let mut expected_bits = aligned_bitvecs[(rc - 4) as usize].clone(); + let from_bits = expected_bits.clone(); + + for i in (sn.block_height + 1 - rc_start_height)..10 { + expected_bits[i as usize] = false; + } + + let bit_len = bits.len(); + debug!( + "tip {}: from_bits={:?}, expected_bits={:?}, inv_bits={:?}, rc={}, block_height={}", + &tip, &from_bits, &expected_bits, &bits, rc, sn.block_height + ); + + assert_eq!(bits, expected_bits[0..bit_len]); + } +} From d805368126160a92a4f4fd0ca8046c963fd093d7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 30 Sep 2024 10:27:40 -0500 Subject: [PATCH 715/910] ci: "at least" means >=, not > in tests --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17b829557fc..3312b45b3ac 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8486,8 +8486,10 @@ fn mock_mining() { let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; assert!( - blocks_mock_mined > tenure_count, - "Should have mock mined at least `tenure_count` nakamoto blocks" + blocks_mock_mined >= tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {}. 
Expected = {}", + blocks_mock_mined, + tenure_count, ); // wait for follower to reach the chain tip From 8b36b8852010e450b0e173bb8891a36f5c6ab6c1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 30 Sep 2024 13:00:41 -0500 Subject: [PATCH 716/910] fix: need interim blocks in the nakamoto integration tests --- .github/workflows/bitcoin-tests.yml | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 0eb676781ec..cd867340eca 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -81,7 +81,7 @@ jobs: - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - - tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3 + - tests::nakamoto_integrations::flash_blocks_on_epoch_3 - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2bc0082fed2..a7e7e8cbfed 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1718,7 +1718,7 @@ fn simple_neon_integration() { /// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 /// * The final chain tip is a nakamoto block -fn simple_neon_integration_with_flash_blocks_on_epoch_3() { +fn flash_blocks_on_epoch_3() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1726,7 +1726,7 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); From 9c89e46d39b3bf657888e713b81c5a91da7e3e5f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 11:49:33 -0700 Subject: [PATCH 717/910] Do not wait for an exact number of block rejections Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index b7f39feba67..603525a323d 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -744,7 +744,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); - Ok(block_rejections.len() == expected_signers.len()) + Ok(block_rejections.len() >= expected_signers.len()) }) } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4d55c4d0ce2..50e942a81ad 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4080,6 +4080,7 @@ fn 
locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test From 8a8c5b09408056b533649a558c9b52fb8d44dfce Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:59:30 -0700 Subject: [PATCH 718/910] Addressing some PR comments/other improvements to language --- CONTRIBUTING.md | 4 +- docs/branching.md | 4 +- docs/ci-workflow.md | 66 ++----------------- docs/mining.md | 2 - docs/profiling.md | 4 +- docs/release-process.md | 7 +- .../conf/mainnet-follower-conf.toml | 1 - .../stacks-node/conf/mainnet-miner-conf.toml | 3 - .../conf/mainnet-mockminer-conf.toml | 1 - 9 files changed, 16 insertions(+), 76 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 53fcf8a1683..8d6c3aabba8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -50,8 +50,8 @@ For an example of this process, see PRs - Any major changes should be added to the [CHANGELOG](CHANGELOG.md). - Mention any required documentation changes in the description of your pull request. -- If adding an RPC endpoint, add an entry for the new endpoint to the - OpenAPI spec `./docs/rpc/openapi.yaml`. +- If adding or updating an RPC endpoint, ensure the change is documented in the + OpenAPI spec: [`./docs/rpc/openapi.yaml`](./docs/rpc/openapi.yaml). - If your code adds or modifies any major features (struct, trait, test, module, function, etc.), each should be documented according to our [coding guidelines](#Coding-Guidelines). diff --git a/docs/branching.md b/docs/branching.md index 5b9a96b12ab..04c1e6fd3d0 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -1,12 +1,12 @@ # Git Branching -The following is a modified version of the gitflow branching strategy described in <https://nvie.com/posts/a-successful-git-branching-model/> +The following is a slightly modified version of the gitflow branching strategy described in <https://nvie.com/posts/a-successful-git-branching-model/> ## Main Branches - **master** - `master` is the main branch where the source code of HEAD always reflects a production-ready state. - **develop** - `develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. -- **next** - `next` may contain consensus-breaking changes. +- **next** - `next` may contain consensus-breaking changes for a future release. - **release/X.Y.Z.A.n** is the release branch. When the source code in the develop branch reaches a stable point and is ready to be released, a release branch is created as `release/X.Y.Z.A.n` (see [release-process.md](./release-process.md)). diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md index 16d020985da..0b1ed2b170d 100644 --- a/docs/ci-workflow.md +++ b/docs/ci-workflow.md @@ -4,12 +4,13 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor - Verifying code is formatted correctly - Integration tests +- Unit tests - [Mutation tests](https://en.wikipedia.org/wiki/Mutation_testing) - Creating releases - Building binary archives and calculating checksums - Publishing Docker images -1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`). +1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`, or `release/signer-X.Y.Z.A.n.x`). 2. 
[Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. Tests can be retried quickly since the cache will persist until the cleanup job is run or the cache is evicted. 3. [Nextest](https://nexte.st/) is used to run the tests from a cached build archive file (using commit sha as the cache key). @@ -20,7 +21,7 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor ## TL;DR - Pushing a new branch will not trigger a workflow -- An open/re-opened/synchronized PR will produce a docker image built from source on Debian with glibc with the following tags: +- A PR that is opened/re-opened/synchronized will produce an amd64 docker image built from source on Debian with glibc with the following tags: - `stacks-core:<branch-name>` - `stacks-core:pr-<pr-number>` - An untagged build of any branch will produce a single image built from source on Debian with glibc: @@ -29,7 +30,7 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor - Github Release of the branch with: - Binary archives for several architectures - Checksum file containing hashes for each archive - - Tag of the `release/X.Y.Z.A.n` version, in the format of: `X.Y.Z.A.n` + - Git tag of the `release/X.Y.Z.A.n` version, in the format of: `X.Y.Z.A.n` - Docker Debian images for several architectures tagged with: - `stacks-core:latest` - `stacks-core:X.Y.Z.A.n` @@ -93,6 +94,7 @@ ex: - `Atlas Tests`: Tests related to Atlas - `Bitcoin Tests`: Tests relating to burnchain operations - `Epoch Tests`: Tests related to epoch changes +- `P2P Tests`: Tests P2P operations - `Slow Tests`: These tests have been identified as taking longer than others. The threshold used is if a test takes longer than `10 minutes` to complete successfully (or even times out intermittently), it should be added here. - `Stacks Core Tests`: - `full-genesis`: Tests related to full genesis @@ -100,7 +102,7 @@ ex: ### Checking the result of multiple tests at once -You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in a workflow job. +The [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action may be used in order to check that multiple tests are successful in a workflow job. If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. 
@@ -127,62 +129,6 @@ check-tests: summary_print: "true" ``` -## Triggering a workflow - -### Opening/Updating a PR - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags - - Creates the following images (where branch is named `feat/fix-something` and the PR is numbered `5446`): - - `stacks-core:feat-fix-something` - - `stacks-core:pr-5446` - ---- - -### Merging a branch to develop - -Once a PR is added to the merge queue, the target branch is merged into the source branch. -Then, the same workflows are triggered as in the [previous step](#openingupdating-a-pr). - ---- - -### Manually triggering CI workflow (any branch not named `release/X.Y.Z.A.n`) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag - - Creates the following images: - - `stacks-core:` - ---- - -### Manually triggering CI workflow with tag on a release branch - -ex: running the [`CI`](../.github/workflows/ci.yml) on a branch named `release/X.Y.Z.A.n` - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) -- [Github release](../.github/workflows/github-release.yml) (with artifacts/checksum) is created using the manually input tag -- [Binaries built for specified architectures](../.github/workflows/create-source-binary.yml) - - Archive and checksum files will be uploaded to the versioned github release. -- [Docker Image (Binary)](../.github/workflows/image-build-binary.yml) - - Built from binaries on debian/alpine distributions and pushed with a verrsion and `latest` tags. - - Creates the following images: - - `stacks-core:X.Y.Z.A.n` - - `stacks-core:X.Y.Z.A.n-alpine` - - `stacks-core:latest` - - `stacks-core:latest-alpine` - ## Mutation Testing When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR. diff --git a/docs/mining.md b/docs/mining.md index 8b40eb8cc87..a2a914c998f 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -9,8 +9,6 @@ you should make sure to add the following config fields to your [config file](.. miner = True # Bitcoin private key to spend seed = "YOUR PRIVATE KEY" -# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) -mine_microblocks = false # Run as a mock-miner, to test mining without spending BTC. Needs miner=True. 
#mock_mining = True diff --git a/docs/profiling.md b/docs/profiling.md index 26d1c119aeb..4b8343aae9e 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -17,7 +17,7 @@ Validating the config file using `stacks-node check-config`: ``` $ cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml -INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) +INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (:, release build, linux [x86_64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` @@ -28,7 +28,7 @@ Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: $ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml -DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoin.hiro.so"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } +DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("localhost"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("btcuser"), password: Some("btcpass"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: 
None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` diff --git a/docs/release-process.md b/docs/release-process.md index 46b4bae621e..b96d3d2beb5 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -33,7 +33,7 @@ This repository uses a 5 part version number: ``` X.Y.Z.A.n -X major version - does not change in practice unless there’s another Stacks 2.0 type event +X major version - in practice, this does not change unless there’s another significant network update (e.g. a Stacks 3.0 type of event) Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) @@ -45,14 +45,14 @@ Optionally, an extra pre-release field may be appended to the version to specify ## Non-Consensus Breaking Release Process The release must be timed so that it does not interfere with a _prepare phase_. -The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, releases should happen at least 24 hours before the start of a new cycle. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, all releases should happen at least 24 hours before the start of a new cycle. 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". - In other words, the database schema has changed, but an automatic migration was not implemented. - Determine whether this is a feature release, as opposed to a hotfix or a patch. - - A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`. + - A new branch in the format `release/X.Y.Z.A.n(-rc[0-9])` is created from the base branch `develop`. 2. Enumerate PRs and/or issues that would _block_ the release. @@ -68,6 +68,7 @@ The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/to - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. + - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). 
**Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index c3094633895..291f3335230 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -2,7 +2,6 @@ # working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" prometheus_bind = "0.0.0.0:9153" [burnchain] diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 4d258b33f0f..1ecfbc35084 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -2,7 +2,6 @@ # working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "127.0.0.1:20443" p2p_bind = "127.0.0.1:20444" -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" prometheus_bind = "127.0.0.1:9153" seed = "" local_peer_seed = "" @@ -14,8 +13,6 @@ mode = "mainnet" peer_host = "127.0.0.1" username = "" password = "" -rpc_port = -peer_port = # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 # Amount (in sats) per byte - Used to calculate the transaction fees diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 8e966a8a0fc..9d583d218bf 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -4,7 +4,6 @@ rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true mock_mining = true -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" prometheus_bind = "0.0.0.0:9153" [burnchain] From b461394c91b69c002d21924aa3b6ca320c87fd43 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 30 Sep 2024 16:18:39 -0400 Subject: [PATCH 719/910] chore: document cache maintenance --- stackslib/src/net/inv/nakamoto.rs | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 5a1fdc410da..d771848fec8 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -163,7 
+163,10 @@ impl InvGenerator { /// the maximum expected number of blocks to be processed in-between handling `GetNakamotoInv` /// messages. /// - /// If found, then return the ancestor block ID represented in `self.processed_tenures`. + /// If found, then return the ancestor block ID represented in `self.processed_tenures`, as + /// well as the list of any intermediate tenures between that of `tip_block_id` and that of + /// the highest-found ancestor, inclusive of both. + /// /// If not, then return None. pub(crate) fn find_ancestor_processed_tenures( &self, @@ -232,6 +235,18 @@ if let Some((ancestor_tip_id, intermediate_tenures)) = self.find_ancestor_processed_tenures(chainstate, &tip_block_id)? { + // The table removals here are for cache maintenance. + // + // Between successive calls to this function, the Stacks tip (identified by + // `tip_block_ch` and `tip_block_bh`) can advance as more blocks are discovered. + // This means that tenures that had previously been treated as absent could now be + // present. By evicting cached data for all tenures between (and including) the + // highest ancestor of the current Stacks tip, and the current Stacks tip, we force + // this code to re-evaluate the presence or absence of each potentially-affected + // tenure. + // + // First, remove the highest ancestor's table, so we can re-assign it to the new + // tip. let mut ancestor_tenures = self .processed_tenures .remove(&ancestor_tip_id) @@ -239,11 +254,14 @@ panic!("FATAL: did not have ancestor tip reported by search"); }); + // Clear out any intermediate cached results for tenure presence/absence, including + // both that of the highest ancestor and the current tip. for ch in intermediate_tenures.into_iter() { ancestor_tenures.remove(&ch); } ancestor_tenures.remove(tip_block_ch); + // Update the table so it is pointed to by the new tip. self.processed_tenures .insert(tip_block_id.clone(), ancestor_tenures); } else { @@ -256,12 +274,12 @@ unreachable!("FATAL: inserted table for chain tip, but didn't get it back"); }; - // this tip has a known table let ret = if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { // we've loaded this tenure info before for this tip Ok(loaded_tenure_info.clone()) } else { - // we have not loaded the tenure info for this tip, so go get it + // we have not loaded the tenure info for this tip, or it was cleared via cache + // maintenance. Either way, go get it from disk. 
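// The eviction described above can be pictured with simplified stand-in types
// (String for block IDs and consensus hashes, bool for the cached tenure info);
// `rebind_and_evict` is a hypothetical helper sketching the same maintenance,
// not part of the patch itself.
use std::collections::HashMap;

fn rebind_and_evict(
    processed_tenures: &mut HashMap<String, HashMap<String, bool>>,
    ancestor_tip_id: &str,
    intermediate_tenures: Vec<String>,
    tip_block_id: &str,
    tip_block_ch: &str,
) {
    // Detach the highest ancestor's tenure table so it can be re-keyed.
    let mut ancestor_tenures = processed_tenures
        .remove(ancestor_tip_id)
        .expect("ancestor tip reported by search must have a table");
    // Evict every tenure whose presence/absence may have changed while the
    // Stacks tip advanced, forcing a re-load on next access.
    for ch in intermediate_tenures {
        ancestor_tenures.remove(&ch);
    }
    ancestor_tenures.remove(tip_block_ch);
    // Re-key the surviving table under the new tip.
    processed_tenures.insert(tip_block_id.to_string(), ancestor_tenures);
}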
let loaded_info_opt = InvTenureInfo::load(chainstate, &tip_block_id, &tenure_id_consensus_hash)?; From fdcfcdf16d52fbc8dbd75282352b4b755719f9fe Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 13:20:08 -0700 Subject: [PATCH 720/910] Add some logging to bitcoind test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 23 ++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 57222be981e..28ddad97cf7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1227,8 +1227,9 @@ fn bitcoind_forking_test() { let pre_epoch_3_nonce = get_account(&http_origin, &miner_address).nonce; let pre_fork_tenures = 10; - for _i in 0..pre_fork_tenures { - let _mined_block = signer_test.mine_nakamoto_block(Duration::from_secs(30)); + for i in 0..pre_fork_tenures { + debug!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1255,7 +1256,11 @@ fn bitcoind_forking_test() { thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + debug!( + "Mining block {} of 3 to be considered a frequent miner", + i + 1 + ); let commits_count = signer_test .running_nodes .commits_submitted @@ -1278,7 +1283,8 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); - for _i in 0..5 { + for i in 0..5 { + debug!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } @@ -1310,7 +1316,11 @@ fn bitcoind_forking_test() { thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + debug!( + "Mining block {} of 3 to be considered a frequent miner", + i + 1 + ); let commits_count = signer_test .running_nodes .commits_submitted @@ -1333,7 +1343,8 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); - for _i in 0..5 { + for i in 0..5 { + debug!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } From 51d600031ec4980286762b3873e87fc5f12270e7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 14:04:52 -0700 Subject: [PATCH 721/910] Fix microblocks disabled test to allow at least one rather than strictly one microblocks Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 42369b800a5..2b2a9a640f7 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -211,11 +211,12 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - info!( - "Microblocks assembled: {}", - test_observer::get_microblocks().len() + let microblocks_assembled = test_observer::get_microblocks().len(); + info!("Microblocks assembled: {microblocks_assembled}",); + assert!( + microblocks_assembled > 0, + "There should be at least 1 microblock assembled" ); - assert_eq!(test_observer::get_microblocks().len(), 1); let miner_nonce_before_microblock_assembly = get_account(&http_origin, 
&miner_account).nonce; @@ -244,8 +245,8 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - // but we should have assembled and announced at least 1 to the observer - assert!(test_observer::get_microblocks().len() >= 2); + // but we should have assembled and announced at least 1 more block to the observer + assert!(test_observer::get_microblocks().len() > microblocks_assembled); info!( "Microblocks assembled: {}", test_observer::get_microblocks().len() From 4c311bb2c9e1b5dde1e6f60534d2180a82ff14ff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 14:09:18 -0700 Subject: [PATCH 722/910] Convert logs to info in test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 28ddad97cf7..f6edcc572a4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1228,7 +1228,7 @@ fn bitcoind_forking_test() { let pre_fork_tenures = 10; for i in 0..pre_fork_tenures { - debug!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); + info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } @@ -1257,7 +1257,7 @@ fn bitcoind_forking_test() { // we need to mine some blocks to get back to being considered a frequent miner for i in 0..3 { - debug!( + info!( "Mining block {} of 3 to be considered a frequent miner", i + 1 ); @@ -1284,7 +1284,7 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); for i in 0..5 { - debug!("Mining post-fork tenure {} of 5", i + 1); + info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } @@ -1317,7 +1317,7 @@ fn bitcoind_forking_test() { // we need to mine some blocks to get back to being considered a frequent miner for i in 0..3 { - debug!( + info!( "Mining block {} of 3 to be considered a frequent miner", i + 1 ); @@ -1344,7 +1344,7 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); for i in 0..5 { - debug!("Mining post-fork tenure {} of 5", i + 1); + info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } From b1f813b31cd041dd02a21aaf9568a09090098371 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 30 Sep 2024 20:54:54 -0700 Subject: [PATCH 723/910] Add language around stacks 3.0 type event --- stacks-signer/release-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 9d3f2cb5e13..71d47a3e26b 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -33,7 +33,7 @@ When there are changes in-between `stacks-core` releases, the `stacks-signer` bi ``` X.Y.Z.A.n.x -X major version - does not change in practice unless there’s another Stacks 2.0 type event +X major version - in practice, this does not change unless there’s another significant network update (e.g. 
a Stacks 3.0 type of event) Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) From c5880c651cb793151a5ddd05f21e4d3ee13757d6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 09:10:10 -0500 Subject: [PATCH 724/910] chore: remove infinite loop in signer during tests --- stacks-signer/src/v0/signer.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d6eaa37af8b..3fbeb9b809e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -185,20 +186,13 @@ impl SignerTrait for Signer { ); } SignerMessage::BlockPushed(b) => { - let block_push_result = stacks_client.post_block(b); - if let Err(ref e) = &block_push_result { - warn!( - "{self}: Failed to post block {} (id {}): {e:?}", - &b.header.signer_signature_hash(), - &b.block_id() - ); - }; // This will infinitely loop until the block is acknowledged by the node info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), "signer_sighash" => %b.header.signer_signature_hash(), ); + let start_time = Instant::now(); loop { match stacks_client.post_block(b) { Ok(block_push_result) => { @@ -206,6 +200,11 @@ impl SignerTrait for Signer { break; } Err(e) => { + if cfg!(test) + && start_time.elapsed() > Duration::from_secs(30) + { + panic!("{self}: Timed out in test while pushing block to stacks node: {e}"); + } warn!("{self}: Failed to push block to stacks node: {e}. 
Retrying..."); } }; From c60f91c20f358c3593290b8719b07f22b1e62e1c Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 10:51:16 -0500 Subject: [PATCH 725/910] ci: wait for block commits pointed at the correct burn block --- .../burn/operations/leader_block_commit.rs | 6 +- stackslib/src/chainstate/stacks/mod.rs | 9 ++ .../burnchains/bitcoin_regtest_controller.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 13 ++- testnet/stacks-node/src/tests/signer/v0.rs | 109 +++++++++++++++--- 5 files changed, 115 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cea03d44353..910315f0820 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -48,14 +48,14 @@ use crate::net::Error as net_error; // return type from parse_data below #[derive(Debug)] -struct ParsedData { +pub struct ParsedData { block_header_hash: BlockHeaderHash, new_seed: VRFSeed, parent_block_ptr: u32, parent_vtxindex: u16, key_block_ptr: u32, key_vtxindex: u16, - burn_parent_modulus: u8, + pub burn_parent_modulus: u8, memo: u8, } @@ -201,7 +201,7 @@ impl LeaderBlockCommitOp { StacksBlockId(self.block_header_hash.0.clone()) } - fn parse_data(data: &Vec) -> Option { + pub fn parse_data(data: &[u8]) -> Option { /* Wire format: 0 2 3 35 67 71 73 77 79 80 diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 127751abbbd..2ce250d991a 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -36,6 +36,7 @@ use stacks_common::address::AddressHashMode; use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; +use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksWorkScore, TrieHash, TRIEHASH_ENCODED_SIZE, @@ -385,6 +386,14 @@ impl Txid { pub fn from_sighash_bytes(txdata: &[u8]) -> Txid { Txid::from_stacks_tx(txdata) } + + /// Create a Txid from the tx hash bytes used in bitcoin. + /// This just reverses the inner bytes of the input. 
+ pub fn from_bitcoin_tx_hash(tx_hash: &Sha256dHash) -> Txid { + let mut txid_bytes = tx_hash.0.clone(); + txid_bytes.reverse(); + Self(txid_bytes) + } } /// How a transaction may be appended to the Stacks blockchain diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 92390095a21..82282926d3e 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -106,7 +106,7 @@ pub struct BitcoinRegtestController { #[derive(Clone)] pub struct OngoingBlockCommit { - payload: LeaderBlockCommitOp, + pub payload: LeaderBlockCommitOp, utxos: UTXOSet, fees: LeaderBlockCommitFees, txids: Vec<Txid>, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 717e33c9c9d..1f5d4491ccb 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -592,11 +592,22 @@ pub fn next_block_and<F>( ) -> Result<(), String> where F: FnMut() -> Result<bool, String>, +{ + next_block_and_controller(btc_controller, timeout_secs, |_| check()) +} + +pub fn next_block_and_controller<F>( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + mut check: F, +) -> Result<(), String> +where + F: FnMut(&mut BitcoinRegtestController) -> Result<bool, String>, { eprintln!("Issuing bitcoin block"); btc_controller.build_next_block(1); let start = Instant::now(); - while !check()? { + while !check(btc_controller)? { if start.elapsed() > Duration::from_secs(timeout_secs) { error!("Timed out waiting for block to process, trying to continue test"); return Err("Timed out".into()); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3f0813b024f..0bcabcc6585 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -27,7 +27,9 @@ use libsigner::v0::messages::{ }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::address::AddressHashMode; +use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::operations::LeaderBlockCommitOp; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; @@ -68,15 +70,16 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, - wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, + setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; use crate::tests::{self, make_stacks_transfer}; -use crate::{nakamoto_node, BurnchainController, Config, Keychain}; +use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest<SpawnedSigner> { /// Run the test until the first epoch 2.5 reward cycle. 
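// A small sketch of the commit-freshness check that the next hunk performs,
// factored into a hypothetical helper (`commit_is_fresh` is illustrative, not
// part of the patch). It assumes the 5-block commit modulus
// (BURN_BLOCK_MINED_AT_MODULUS) that block-commit transactions encode: a
// submitted commit is treated as pointed at the correct burn block when its
// burn_parent_modulus matches the upcoming burn height, mod 5.
fn commit_is_fresh(burn_parent_modulus: u8, current_burn_height: u64) -> bool {
    // The commit should be mined in the next bitcoin block, at height
    // current_burn_height + 1, so compare against that height reduced mod 5.
    let current_modulus = u8::try_from((current_burn_height + 1) % 5)
        .expect("a value reduced mod 5 always fits in a u8");
    burn_parent_modulus == current_modulus
}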
@@ -1221,6 +1224,33 @@ fn bitcoind_forking_test() { let miner_address = Keychain::default(conf.node.seed.clone()) .origin_address(conf.is_mainnet()) .unwrap(); + let miner_pk = signer_test + .running_nodes + .btc_regtest_controller + .get_mining_pubkey() + .as_deref() + .map(Secp256k1PublicKey::from_hex) + .unwrap() + .unwrap(); + + let get_unconfirmed_commit_data = |btc_controller: &mut BitcoinRegtestController| { + let unconfirmed_utxo = btc_controller + .get_all_utxos(&miner_pk) + .into_iter() + .find(|utxo| utxo.confirmations == 0)?; + let unconfirmed_txid = Txid::from_bitcoin_tx_hash(&unconfirmed_utxo.txid); + let unconfirmed_tx = btc_controller.get_raw_transaction(&unconfirmed_txid); + let unconfirmed_tx_opreturn_bytes = unconfirmed_tx.output[0].script_pubkey.as_bytes(); + info!( + "Unconfirmed tx bytes: {}", + stacks::util::hash::to_hex(unconfirmed_tx_opreturn_bytes) + ); + let data = LeaderBlockCommitOp::parse_data( + &unconfirmed_tx_opreturn_bytes[unconfirmed_tx_opreturn_bytes.len() - 77..], + ) + .unwrap(); + Some(data) + }; signer_test.boot_to_epoch_3(); info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -1252,23 +1282,43 @@ fn bitcoind_forking_test() { .build_next_block(1); info!("Wait for block off of shallow fork"); - thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + let current_burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + info!( + "Mining block #{i} to be considered a frequent miner"; + "current_burn_height" => current_burn_height, + ); let commits_count = signer_test .running_nodes .commits_submitted .load(Ordering::SeqCst); - next_block_and( + next_block_and_controller( &mut signer_test.running_nodes.btc_regtest_controller, 60, - || { - Ok(signer_test + |btc_controller| { + let commits_submitted = signer_test .running_nodes .commits_submitted - .load(Ordering::SeqCst) - > commits_count) + .load(Ordering::SeqCst); + if commits_submitted <= commits_count { + // wait until a commit was submitted + return Ok(false) + } + let Some(payload) = get_unconfirmed_commit_data(btc_controller) else { + warn!("Commit submitted, but bitcoin doesn't see it in the unconfirmed UTXO set, will try to wait."); + return Ok(false) + }; + let burn_parent_modulus = payload.burn_parent_modulus; + let current_modulus = u8::try_from((current_burn_height + 1) % 5).unwrap(); + info!( + "Ongoing Commit Operation check"; + "burn_parent_modulus" => burn_parent_modulus, + "current_modulus" => current_modulus, + "payload" => ?payload, + ); + Ok(burn_parent_modulus == current_modulus) }, ) .unwrap(); @@ -1306,24 +1356,44 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(4); - info!("Wait for block off of shallow fork"); - thread::sleep(Duration::from_secs(15)); + info!("Wait for block off of deep fork"); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + let current_burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + info!( + "Mining block #{i} to be considered a frequent miner"; + "current_burn_height" => current_burn_height, + ); let commits_count = signer_test .running_nodes .commits_submitted .load(Ordering::SeqCst); - next_block_and( + next_block_and_controller( &mut signer_test.running_nodes.btc_regtest_controller, 60, - || { - Ok(signer_test + |btc_controller| { + let commits_submitted = signer_test 
.running_nodes .commits_submitted - .load(Ordering::SeqCst) - > commits_count) + .load(Ordering::SeqCst); + if commits_submitted <= commits_count { + // wait until a commit was submitted + return Ok(false) + } + let Some(payload) = get_unconfirmed_commit_data(btc_controller) else { + warn!("Commit submitted, but bitcoin doesn't see it in the unconfirmed UTXO set, will try to wait."); + return Ok(false) + }; + let burn_parent_modulus = payload.burn_parent_modulus; + let current_modulus = u8::try_from((current_burn_height + 1) % 5).unwrap(); + info!( + "Ongoing Commit Operation check"; + "burn_parent_modulus" => burn_parent_modulus, + "current_modulus" => current_modulus, + "payload" => ?payload, + ); + Ok(burn_parent_modulus == current_modulus) }, ) .unwrap(); @@ -1333,7 +1403,8 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); - for _i in 0..5 { + for i in 0..5 { + info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } From 836a97a3c176846a6a248363e6d614724ae8d0e4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 1 Oct 2024 09:28:55 -0700 Subject: [PATCH 726/910] CRC: remove dead code Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 2 -- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 0b43bef7492..89a3eda2d5b 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -78,8 +78,6 @@ pub struct StacksClient { #[derive(Deserialize)] struct GetStackersErrorResp { - #[allow(dead_code)] - err_type: String, err_msg: String, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f6edcc572a4..6484882dc2f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1312,7 +1312,7 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(4); - info!("Wait for block off of shallow fork"); + info!("Wait for block off of deeper fork"); thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner From 61eab903dfd8718998e54c4dc6a129004a80e808 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 1 Oct 2024 11:52:56 -0700 Subject: [PATCH 727/910] Change vec to hashset in wait_for_block_rejections Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 97e79415809..5961298f2ef 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -617,7 +617,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest None, } }) - .collect::>(); - Ok(block_rejections.len() >= expected_signers.len()) + .collect::>(); + Ok(block_rejections.len() == expected_signers.len()) }) } } From 4752a906b9d2fe0af36a448fb8cdecd6dcb78b5d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 1 Oct 2024 12:08:53 -0700 Subject: [PATCH 728/910] Do not attempt to process a block validation response for an already globally processed block Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 11 ++++++++++- 
testnet/stacks-node/src/tests/signer/v0.rs | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d6eaa37af8b..3b6a2a91808 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -530,7 +530,16 @@ impl Signer { .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { - Ok(Some(block_info)) => block_info, + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } else { + block_info + } + } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6484882dc2f..658d480ac6d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4001,7 +4001,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 2; + let nmb_txs = 3; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let short_timeout_secs = 20; let mut signer_test: SignerTest = SignerTest::new( @@ -4057,7 +4057,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected - let rejecting_signers: Vec<_> = all_signers.iter().cloned().take(num_signers / 2).collect(); + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers / 2 + num_signers % 2) + .collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -4066,6 +4070,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -4090,6 +4095,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(Vec::new()); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} to mine block N+1'"); + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test From 4a8e81983861cf1ffd84370a96f3d19d120f9a09 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 14:21:30 -0500 Subject: [PATCH 729/910] ci: test my actions feature branch --- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 0eb676781ec..98dd19d3aa3 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -154,7 +154,7 @@ jobs: - name: Run Tests id: run_tests timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@main + uses: 
stacks-network/actions/stacks-core/run-tests@feat/no-capture-2 with: test-name: ${{ matrix.test-name }} threads: 1 From 9eb4e05c79192d9f6f11495674fd0421a8754be5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 1 Oct 2024 17:05:49 -0400 Subject: [PATCH 730/910] test: remove `signer_vote_if_needed` This is not used in signer v0. Fixes `tests::nakamoto_integrations::continue_tenure_extend` --- .../src/tests/nakamoto_integrations.rs | 154 ------------------ 1 file changed, 154 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b884edc66dc..fcfdc012cb6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1155,69 +1155,6 @@ pub fn is_key_set_for_cycle( Ok(key.is_some()) } -fn signer_vote_if_needed( - btc_regtest_controller: &BitcoinRegtestController, - naka_conf: &Config, - signer_sks: &[StacksPrivateKey], // TODO: Is there some way to get this from the TestSigners? - signers: &TestSigners, -) { - // When we reach the next prepare phase, submit new voting transactions - let block_height = btc_regtest_controller.get_headers_height(); - let reward_cycle = btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap(); - let prepare_phase_start = btc_regtest_controller - .get_burnchain() - .pox_constants - .prepare_phase_start( - btc_regtest_controller.get_burnchain().first_block_height, - reward_cycle, - ); - - if block_height >= prepare_phase_start { - // If the key is already set, do nothing. - if is_key_set_for_cycle( - reward_cycle + 1, - naka_conf.is_mainnet(), - &naka_conf.node.rpc_bind, - ) - .unwrap_or(false) - { - return; - } - - // If we are self-signing, then we need to vote on the aggregate public key - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - - // Get the aggregate key - let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) - .expect("Failed to serialize aggregate public key"); - - for (i, signer_sk) in signer_sks.iter().enumerate() { - let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; - - // Vote on the aggregate public key - let voting_tx = tests::make_contract_call( - &signer_sk, - signer_nonce, - 300, - &StacksAddress::burn_address(false), - SIGNERS_VOTING_NAME, - "vote-for-aggregate-public-key", - &[ - clarity::vm::Value::UInt(i as u128), - aggregate_public_key.clone(), - clarity::vm::Value::UInt(0), - clarity::vm::Value::UInt(reward_cycle as u128 + 1), - ], - ); - submit_tx(&http_origin, &voting_tx); - } - } -} - pub fn setup_epoch_3_reward_set( naka_conf: &Config, blocks_processed: &Arc, @@ -1553,13 +1490,6 @@ fn simple_neon_integration() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // Submit a TX @@ -1595,13 +1525,6 @@ fn simple_neon_integration() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -1805,13 +1728,6 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // 
Submit a TX @@ -1847,13 +1763,6 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2580,13 +2489,6 @@ fn correct_burn_outs() { &naka_conf, ); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - run_until_burnchain_height( &mut btc_regtest_controller, &blocks_processed, @@ -2646,13 +2548,6 @@ fn correct_burn_outs() { tip_sn.block_height > prior_tip, "The new burnchain tip must have been processed" ); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } coord_channel @@ -4752,13 +4647,6 @@ fn forked_tenure_is_ignored() { }) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - info!("Commit op is submitted; unpause Tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to @@ -6199,13 +6087,6 @@ fn signer_chainstate() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - let timer = Instant::now(); while proposals_submitted.load(Ordering::SeqCst) <= before { thread::sleep(Duration::from_millis(5)); @@ -6682,13 +6563,6 @@ fn continue_tenure_extend() { ) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6708,13 +6582,6 @@ fn continue_tenure_extend() { next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6756,13 +6623,6 @@ fn continue_tenure_extend() { next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6779,13 +6639,6 @@ fn continue_tenure_extend() { next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6811,13 +6664,6 @@ fn continue_tenure_extend() { }) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() From 70b082a1a9c7b0b75feaee97c4d6e7c9ca146eac Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 16:12:05 -0500 Subject: [PATCH 731/910] Revert "ci: test my actions feature branch" This reverts commit 4a8e81983861cf1ffd84370a96f3d19d120f9a09. 
--- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 98dd19d3aa3..0eb676781ec 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -154,7 +154,7 @@ jobs: - name: Run Tests id: run_tests timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@feat/no-capture-2 + uses: stacks-network/actions/stacks-core/run-tests@main with: test-name: ${{ matrix.test-name }} threads: 1 From 41048b160fe5c7e922ace69c718af29febfaf5d1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 19:57:23 -0500 Subject: [PATCH 732/910] loop forever in both post_block uses --- stacks-signer/src/client/stacks_client.rs | 25 +++++++++++++++++++ stacks-signer/src/v0/signer.rs | 29 ++--------------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f415896e86a..8cddee08dc7 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use std::collections::{HashMap, VecDeque}; +use std::fmt::Display; +use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; @@ -564,6 +566,29 @@ impl StacksClient { Ok(account_entry) } + /// Post a block to the stacks-node, retry forever on errors. + /// + /// In tests, this panics if the retry takes longer than 30 seconds. + pub fn post_block_until_ok<F: Display>(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { + let start_time = Instant::now(); + loop { + match self.post_block(block) { + Ok(block_push_result) => { + debug!("{log_fmt}: Block pushed to stacks node: {block_push_result:?}"); + return block_push_result; + } + Err(e) => { + if cfg!(test) && start_time.elapsed() > Duration::from_secs(30) { + panic!( + "{log_fmt}: Timed out in test while pushing block to stacks node: {e}" + ); + } + warn!("{log_fmt}: Failed to push block to stacks node: {e}. Retrying..."); + } + }; + } + } + /// Try to post a completed nakamoto block to our connected stacks-node /// Returns `true` if the block was accepted or `false` if the block /// was rejected. diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 3fbeb9b809e..29c39797cf8 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,7 +15,6 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; -use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -192,23 +191,7 @@ impl SignerTrait<SignerMessage> for Signer { "block_id" => %b.block_id(), "signer_sighash" => %b.header.signer_signature_hash(), ); - let start_time = Instant::now(); - loop { - match stacks_client.post_block(b) { - Ok(block_push_result) => { - debug!("{self}: Block pushed to stacks node: {block_push_result:?}"); - break; - } - Err(e) => { - if cfg!(test) - && start_time.elapsed() > Duration::from_secs(30) - { - panic!("{self}: Timed out in test while pushing block to stacks node: {e}"); - } - warn!("{self}: Failed to push block to stacks node: {e}. 
Retrying..."); - } - }; - } + stacks_client.post_block_until_ok(self, &b); } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { @@ -907,15 +890,7 @@ impl Signer { "{self}: Broadcasting Stacks block {} to node", &block.block_id() ); - if let Err(e) = stacks_client.post_block(&block) { - warn!( - "{self}: Failed to post block {block_hash}: {e:?}"; - "stacks_block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, - "burnchain_consensus_hash" => %block.header.consensus_hash - ); - return; - } + stacks_client.post_block_until_ok(self, &block); if let Err(e) = self.signer_db.set_block_broadcasted( self.reward_cycle, From d611aa16ee11be78ef1bf41acd8b2222c5828683 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:52:05 -0400 Subject: [PATCH 733/910] feat: load needful data for downloading a staging block, even if it isn't processed --- .../src/chainstate/nakamoto/staging_blocks.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 91aad5a3253..382c7088503 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -271,7 +271,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { .optional()?) } - /// Get the rowid of a Nakamoto block + /// Get the rowid of a staging Nakamoto block pub fn get_nakamoto_block_rowid( &self, index_block_hash: &StacksBlockId, @@ -282,6 +282,26 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { Ok(res) } + /// Get the tenure and parent block ID of a staging block. + /// Used for downloads + pub fn get_tenure_and_parent_block_id( + &self, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let sql = "SELECT consensus_hash,parent_block_id FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; + let args = params![index_block_hash]; + + let mut stmt = self.deref().prepare(sql)?; + Ok(stmt + .query_row(args, |row| { + let ch: ConsensusHash = row.get(0)?; + let parent_id: StacksBlockId = row.get(1)?; + + Ok((ch, parent_id)) + }) + .optional()?) + } + /// Get a Nakamoto block by index block hash, as well as its size. /// Verifies its integrity. /// Returns Ok(Some(block, size)) if the block was present From bfb8667f1e4f186bb3506138a2a50d7a52dce2c3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:52:28 -0400 Subject: [PATCH 734/910] fix: /v3/blocks/:block_id should load staging unprocessed blocks --- stackslib/src/net/api/getblock_v3.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/api/getblock_v3.rs b/stackslib/src/net/api/getblock_v3.rs index 0279d9dc0cb..56e8063dda5 100644 --- a/stackslib/src/net/api/getblock_v3.rs +++ b/stackslib/src/net/api/getblock_v3.rs @@ -181,20 +181,13 @@ impl RPCRequestHandler for RPCNakamotoBlockRequestHandler { let stream_res = node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { - let Some(header) = - NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &block_id)? + let Some((tenure_id, parent_block_id)) = chainstate + .nakamoto_blocks_db() + .get_tenure_and_parent_block_id(&block_id)? 
else { return Err(ChainError::NoSuchBlockError); }; - let Some(nakamoto_header) = header.anchored_header.as_stacks_nakamoto() else { - return Err(ChainError::NoSuchBlockError); - }; - NakamotoBlockStream::new( - chainstate, - block_id.clone(), - nakamoto_header.consensus_hash.clone(), - nakamoto_header.parent_block_id.clone(), - ) + NakamotoBlockStream::new(chainstate, block_id.clone(), tenure_id, parent_block_id) }); // start loading up the block From 5b12c2a23b3d8f8f3beabec2bd0fac646b9d71a8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:53:07 -0400 Subject: [PATCH 735/910] fix: if the remote peer returns blocks from an unexpected tenure, then terminate the downloader (so we can try again later) --- .../src/net/download/nakamoto/tenure_downloader.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 92e032fa383..66504e01e66 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -376,8 +376,20 @@ impl NakamotoTenureDownloader { let mut expected_block_id = block_cursor; let mut count = 0; for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { + // must be from this tenure + // This may not always be the case, since a remote peer could have processed a + // different Stacks micro-fork. The consequence of erroring here (or below) is that we + // disconnect from the peer that served this to us. + if block.header.consensus_hash != self.tenure_id_consensus_hash { warn!("Unexpected Nakamoto block -- not part of tenure"; + "block.header.consensus_hash" => %block.header.consensus_hash, + "self.tenure_id_consensus_hash" => %self.tenure_id_consensus_hash, + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- does not match cursor"; "expected_block_id" => %expected_block_id, "block_id" => %block.header.block_id(), "state" => %self.state); From 90b98f05f56caa5693dfb388eb8a092758e3b54c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:53:27 -0400 Subject: [PATCH 736/910] chore: fix failing (broken) unit test --- stackslib/src/net/tests/download/nakamoto.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a6307b324b0..60f7aeb7fc7 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -230,7 +230,7 @@ fn test_nakamoto_tenure_downloader() { }; let mut td = NakamotoTenureDownloader::new( - ch, + tenure_start_block.header.consensus_hash.clone(), tenure_start_block.header.block_id(), next_tenure_start_block.header.block_id(), naddr.clone(), @@ -293,6 +293,7 @@ fn test_nakamoto_tenure_downloader() { .try_accept_tenure_blocks(vec![next_tenure_start_block.clone()]) .is_err()); + debug!("Try accept {:?}", &block); let res = td.try_accept_tenure_blocks(vec![block.clone()]); assert!(res.is_ok()); assert!(res.unwrap().is_none()); From db105e039449a130e52be1c2a599aa6b8e039c2e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 07:54:56 -0700 Subject: [PATCH 737/910] Increase pox_sync_sample_secs to 5 to be on the safe side when waiting for anchor blocks to arrive Signed-off-by: Jacinta Ferrant --- 
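This patch, and the flakiness fixes in PATCH 740/910 and PATCH 742/910 below, move the tests toward the polling-style `wait_for` helper from nakamoto_integrations.rs instead of fixed sleeps. A minimal sketch of such a helper — inferred from its call sites in the later diffs (`wait_for(secs, || ... Ok(bool)).expect(...)`), not copied from the tree, so the in-tree version may differ in its poll interval and error text:

    use std::thread::sleep;
    use std::time::{Duration, Instant};

    /// Poll `check` until it returns Ok(true), or fail after `timeout_secs`.
    /// Closure errors and timeouts both surface as Err(String), which is why
    /// the call sites in the diffs below can simply `.expect(...)` the result.
    pub fn wait_for(timeout_secs: u64, mut check: impl FnMut() -> Result<bool, String>) -> Result<(), String> {
        let start = Instant::now();
        while !check()? {
            if start.elapsed() > Duration::from_secs(timeout_secs) {
                return Err("Timed out waiting for check to pass".into());
            }
            sleep(Duration::from_millis(100));
        }
        Ok(())
    }

Bounded polling keeps these tests deterministic on slow CI runners without the over-waiting that fixed sleeps impose.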
testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index fcfdc012cb6..0be76c53621 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2053,7 +2053,7 @@ fn multiple_miners() { let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 1; + naka_conf.node.pox_sync_sample_secs = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c0f4b2f9cf1..c76d9881051 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1468,7 +1468,7 @@ fn multiple_miners() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 1; + config.node.pox_sync_sample_secs = 5; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3430,7 +3430,7 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 1; + config.node.pox_sync_sample_secs = 5; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3693,7 +3693,7 @@ fn partial_tenure_fork() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 1; + config.node.pox_sync_sample_secs = 5; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); From 90c2596fa98a0aec2d2e761216fafa896434919e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 2 Oct 2024 10:34:32 -0500 Subject: [PATCH 738/910] test: add assertions for empty block heuristics --- .../src/tests/nakamoto_integrations.rs | 35 +++++++++++++++++++ .../src/tests/neon_integrations.rs | 27 ++++++++++++++ testnet/stacks-node/src/tests/signer/mod.rs | 4 ++- 3 files changed, 65 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5893d812314..e5a6c87af02 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -230,6 +230,32 @@ impl TestSigningChannel { } } +/// Assert that the block events captured by the test observer +/// all match the miner heuristic of *exclusively* including the +/// tenure change transaction in tenure changing blocks. 
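+/// Blocks whose event payload lacks a `miner_signature` field are treated as pre-Nakamoto and skipped.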
+pub fn check_nakamoto_empty_block_heuristics() { + let blocks = test_observer::get_blocks(); + for block in blocks.iter() { + // if its not a nakamoto block, don't check anything + if block.get("miner_signature").is_none() { + continue; + } + let txs = test_observer::parse_transactions(block); + let has_tenure_change = txs + .iter() + .any(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))); + if has_tenure_change { + let only_coinbase_and_tenure_change = txs.iter().all(|tx| { + matches!( + tx.payload, + TransactionPayload::TenureChange(_) | TransactionPayload::Coinbase(..) + ) + }); + assert!(only_coinbase_and_tenure_change, "Nakamoto blocks with a tenure change in them should only have coinbase or tenure changes"); + } + } +} + pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); let path = format!("{http_origin}/v3/stacker_set/{cycle}"); @@ -1683,6 +1709,8 @@ fn simple_neon_integration() { assert!(res.contains(&expected_result)); } + check_nakamoto_empty_block_heuristics(); + coord_channel .lock() .expect("Mutex poisoned") @@ -1960,6 +1988,7 @@ fn flash_blocks_on_epoch_3() { // Verify blocks before and after the gap test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); + check_nakamoto_empty_block_heuristics(); info!("Verified burn block ranges, including expected gap for flash blocks"); info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); @@ -2141,6 +2170,8 @@ fn mine_multiple_per_tenure_integration() { "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); + check_nakamoto_empty_block_heuristics(); + coord_channel .lock() .expect("Mutex poisoned") @@ -2394,6 +2425,8 @@ fn multiple_miners() { "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); + check_nakamoto_empty_block_heuristics(); + coord_channel .lock() .expect("Mutex poisoned") @@ -2761,6 +2794,8 @@ fn correct_burn_outs() { assert_eq!(signer_weight, 1, "The signer should have a weight of 1, indicating they stacked the minimum stacking amount"); } + check_nakamoto_empty_block_heuristics(); + run_loop_thread.join().unwrap(); } diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4ec3b311d41..2c2055bad90 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -197,7 +197,10 @@ pub mod test_observer { use stacks::chainstate::stacks::boot::RewardSet; use stacks::chainstate::stacks::events::StackerDBChunksEvent; + use stacks::chainstate::stacks::StacksTransaction; + use stacks::codec::StacksMessageCodec; use stacks::net::api::postblock_proposal::BlockValidateResponse; + use stacks::util::hash::hex_bytes; use stacks_common::types::chainstate::StacksBlockId; use warp::Filter; use {tokio, warp}; @@ -572,6 +575,30 @@ pub mod test_observer { PROPOSAL_RESPONSES.lock().unwrap().clear(); } + /// Parse the StacksTransactions from a block (does not include burn ops) + /// panics on any failures to parse + pub fn parse_transactions(block: &serde_json::Value) -> Vec<StacksTransaction> { + block + .get("transactions") + .unwrap() + .as_array() + .unwrap() + .iter() + .filter_map(|tx_json| { + if let Some(burnchain_op_val) = tx_json.get("burnchain_op") { + if !burnchain_op_val.is_null() { + return None; + } + } + let tx_hex 
= tx_json.get("raw_tx").unwrap().as_str().unwrap(); + let tx_bytes = hex_bytes(tx_hex).unwrap(); + let tx = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + Some(tx) + }) + .collect() + } + pub fn contains_burn_block_range(range: impl RangeBounds<u64>) -> Result<(), String> { // Get set of all burn block heights let burn_block_heights = get_blocks() diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 47cfa9ed8a2..671aae07789 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -59,7 +59,7 @@ use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerCon use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; -use super::nakamoto_integrations::wait_for; +use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::neon::{Counters, TestFlag}; @@ -549,6 +549,8 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> From: Jacinta Ferrant Date: Wed, 2 Oct 2024 09:50:19 -0700 Subject: [PATCH 739/910] Add pox_sync_sample_secs as a small positive integer to ensure we don't continue without an anchor block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c76d9881051..667d91730ad 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1763,6 +1763,7 @@ fn miner_forking() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + config.node.pox_sync_sample_secs = 5; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { From 9b2f5154d94114b84e66c6a5b9ac5e314b05d058 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 11:10:07 -0700 Subject: [PATCH 740/910] Fix flakiness in problematic microblocks tests Signed-off-by: Jacinta Ferrant --- .../src/tests/neon_integrations.rs | 184 +++++++++--------- 1 file changed, 87 insertions(+), 97 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3dd299c8616..ab892f25b75 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -89,7 +89,7 @@ use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; use crate::syncctl::PoxSyncWatchdogComms; -use crate::tests::nakamoto_integrations::get_key_for_cycle; +use crate::tests::nakamoto_integrations::{get_key_for_cycle, wait_for}; use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; use crate::{neon, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -9909,15 +9909,15 @@ fn test_problematic_blocks_are_not_mined() { cur_files = cur_files_new; } - let tip_info = get_chain_info(&conf); + // all blocks were processed + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height == 
old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for blocks to be processed"); - // blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); // no blocks considered problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // one block contained tx_exceeds let blocks = test_observer::get_blocks(); @@ -9968,14 +9968,12 @@ fn test_problematic_blocks_are_not_mined() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); @@ -10003,12 +10001,15 @@ fn test_problematic_blocks_are_not_mined() { cur_files = cur_files_new; } - let tip_info = get_chain_info(&conf); - // all blocks were processed - assert!(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for blocks to be processed"); + // none were problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) @@ -10047,18 +10048,15 @@ fn test_problematic_blocks_are_not_mined() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + // Do not unwrap in case we were just slow + let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height { - break; - } eprintln!( "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - sleep_ms(1000); - } + Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) + }); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -10068,14 +10066,15 @@ fn test_problematic_blocks_are_not_mined() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { + let download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), + "\nFollower has performed {download_passes} download passes; wait for {}\n", num_download_passes + 5 ); - } + Ok(download_passes >= num_download_passes + 5) + }) + .expect("Failed waiting for follower to perform enough download passes"); eprintln!( "\nFollower has performed {} download passes\n", @@ -10674,15 +10673,15 @@ fn test_problematic_microblocks_are_not_mined() { sleep_ms(5_000); } - let tip_info = get_chain_info(&conf); - // microblocks and blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); + wait_for(30, 
|| { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); + // no microblocks considered problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // one microblock contained tx_exceeds let microblocks = test_observer::get_microblocks(); @@ -10741,14 +10740,13 @@ fn test_problematic_microblocks_are_not_mined() { ); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Failed waiting for runloop to advance"); + let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); @@ -10779,13 +10777,14 @@ fn test_problematic_microblocks_are_not_mined() { } // sleep a little longer before checking tip info; this should help with test flakiness - sleep_ms(10_000); - let tip_info = get_chain_info(&conf); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); - // all microblocks were processed - assert!(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5); // none were problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) @@ -10824,18 +10823,15 @@ fn test_problematic_microblocks_are_not_mined() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + // Do not unwrap as we may just be slow + let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height { - break; - } eprintln!( "\nFollower is at burn block {} stacks block {}\n", follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, ); - sleep_ms(1000); - } + Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) + }); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -10845,14 +10841,15 @@ fn test_problematic_microblocks_are_not_mined() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { + let download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), + "\nFollower has performed {download_passes} download passes; wait for {}\n", num_download_passes + 5 ); - } + Ok(download_passes >= num_download_passes + 5) + }) + .expect("Failed waiting for follower to perform enough download passes"); eprintln!( "\nFollower has performed {} download passes\n", @@ -11056,15 +11053,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { sleep_ms(5_000); } - let tip_info = get_chain_info(&conf); + // microblocks and blocks were all processed + wait_for(30, || { + let tip_info 
= get_chain_info(&conf); + Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); - // microblocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); // no microblocks considered problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // one microblock contained tx_exceeds let microblocks = test_observer::get_microblocks(); @@ -11102,14 +11099,13 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Failed waiting for runloop to advance"); + let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); @@ -11185,11 +11181,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } // sleep a little longer before checking tip info; this should help with test flakiness - sleep_ms(10_000); - let tip_info = get_chain_info(&conf); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); - // all microblocks were processed - assert!(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5); // at least one was problematic. // the miner might make multiple microblocks (only some of which are confirmed), so also check // the event observer to see that we actually picked up tx_high @@ -11244,22 +11241,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + // Do not unwrap as we may just be slow + let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height - || follower_tip_info.stacks_tip_height == bad_block_height - { - break; - } eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {})\n", - follower_tip_info.burn_block_height, - follower_tip_info.stacks_tip_height, - bad_block_height + "\nFollower is at burn block {} stacks block {}\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, ); - sleep_ms(1000); - } + Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) + }); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -11269,15 +11259,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { + let download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), + "\nFollower has performed {download_passes} download passes; wait for {}\n", num_download_passes + 5 ); - } - + Ok(download_passes >= num_download_passes + 5) + }) + 
.expect("Failed waiting for follower to perform enough download passes"); eprintln!( "\nFollower has performed {} download passes\n", pox_sync_comms.get_download_passes() From d617fb3164b1550922fe33890543036d3d542abe Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 11:21:43 -0700 Subject: [PATCH 741/910] Exit post_block_until_ok when testing feature is set Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 8febf4b9485..c144d0401a7 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -574,7 +574,9 @@ impl StacksClient { return block_push_result; } Err(e) => { - if cfg!(test) && start_time.elapsed() > Duration::from_secs(30) { + if cfg!(any(test, feature = "testing")) + && start_time.elapsed() > Duration::from_secs(30) + { panic!( "{log_fmt}: Timed out in test while pushing block to stacks node: {e}" ); From 2efa5a8075f3a442a7f540ab7f2bfe544c99f6a8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 11:36:39 -0700 Subject: [PATCH 742/910] Remove potential flaky points in microblocks_disabled Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 62 +++++++++++++++-------- 1 file changed, 40 insertions(+), 22 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 2b2a9a640f7..4c418939a6b 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,6 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, @@ -169,17 +170,25 @@ fn microblocks_disabled() { submit_tx(&http_origin, &tx); // wait until just before epoch 2.5 - loop { + wait_for(30, || { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_5 - 2 { - break; + if tip_info.burn_block_height >= epoch_2_1 - 2 { + return Ok(true); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - } + Ok(false) + }) + .expect("Failed to wait until just before epoch 2.5"); + let old_tip_info = get_chain_info(&conf); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.burn_block_height >= old_tip_info.burn_block_height + 3) + }) + .expect("Failed to process block"); info!("Test passed processing 2.5"); let account = get_account(&http_origin, &spender_1_addr); @@ -195,12 +204,15 @@ fn microblocks_disabled() { let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..5 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - } else { - panic!("FATAL: failed to mine"); - } + wait_for(30, || { + let tip_info = get_chain_info(&conf); + if 
tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + return Ok(true); + } + Ok(false) + }) + .expect("Failed to mine"); } // second transaction should not have been processed! @@ -226,12 +238,15 @@ let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..2 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - } else { - panic!("FATAL: failed to mine"); - } + wait_for(30, || { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + return Ok(true); + } + Ok(false) + }) + .expect("Failed to mine"); } let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; @@ -265,12 +280,15 @@ let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..2 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - } else { - panic!("FATAL: failed to mine"); - } + wait_for(30, || { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + return Ok(true); + } + Ok(false) + }) + .expect("Failed to mine"); } let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; From 0b785f54315b8f8baafa90cf6e9cf391e0ad0538 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 2 Oct 2024 11:45:17 -0700 Subject: [PATCH 743/910] feat: refresh sortition view when proposed block has mismatch --- stacks-signer/src/chainstate.rs | 37 +++++++ stacks-signer/src/tests/chainstate.rs | 98 ++++++++++++++++--- stacks-signer/src/v0/signer.rs | 1 + .../src/tests/nakamoto_integrations.rs | 17 +++- 4 files changed, 134 insertions(+), 19 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4bbb9741a54..1ec1d2f8ef8 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -187,6 +187,7 @@ impl SortitionsView { block: &NakamotoBlock, block_pk: &StacksPublicKey, reward_cycle: u64, + reset_view_if_wrong_consensus_hash: bool, ) -> Result<bool, SignerChainstateError> { if self .cur_sortition @@ -236,6 +237,23 @@ impl SortitionsView { }) }) else { + if reset_view_if_wrong_consensus_hash { + info!( + "Miner block proposal has consensus hash that is neither the current or last sortition. Resetting view."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + self.reset_view(client)?; + return self.check_proposal( + client, + signer_db, + block, + block_pk, + reward_cycle, + false, + ); + } warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. 
Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, @@ -624,4 +642,23 @@ impl SortitionsView { config, }) } + + /// Reset the view to the current sortition and last sortition + pub fn reset_view(&mut self, client: &StacksClient) -> Result<(), ClientError> { + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; + + let cur_sortition = SortitionState::try_from(current_sortition)?; + let last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten(); + + self.cur_sortition = cur_sortition; + self.last_sortition = last_sortition; + Ok(()) + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index a390c27edcb..432325daf25 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -26,6 +26,7 @@ use blockstack_lib::chainstate::stacks::{ TransactionSpendingCondition, TransactionVersion, }; use blockstack_lib::net::api::get_tenures_fork_info::TenureForkingInfo; +use blockstack_lib::net::api::getsortition::SortitionInfo; use clarity::types::chainstate::{BurnchainHeaderHash, SortitionId}; use clarity::util::vrf::VRFProof; use libsigner::BlockProposal; @@ -128,13 +129,13 @@ fn check_proposal_units() { setup_test_environment("check_proposal_units"); assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); view.last_sortition = None; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); } @@ -150,7 +151,8 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1 + 1, + false, ) .unwrap()); @@ -161,7 +163,8 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1 + 1, + false, ) .unwrap()); } @@ -257,7 +260,7 @@ fn reorg_timing_testing( config, } = MockServerClient::new(); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, false) }); header_clone.chain_length -= 1; let response = crate::client::tests::build_get_tenure_tip_response( @@ -294,16 +297,16 @@ fn check_proposal_invalid_status() { setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -314,7 +317,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which 
may be a subset) assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); } @@ -363,7 +366,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -373,7 +376,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); } @@ -400,7 +403,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); @@ -410,7 +414,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); @@ -422,7 +427,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); @@ -432,7 +438,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); } @@ -513,3 +520,64 @@ fn check_sortition_timeout() { .is_timed_out(Duration::from_secs(1), &signer_db) .unwrap()); } + +/// Test that the sortition info is refreshed once +/// when `check_proposal` is called with a sortition view +/// that doesn't match the block proposal +#[test] +fn check_proposal_refresh() { + let (stacks_client, mut signer_db, block_pk, mut view, mut block) = + setup_test_environment("check_proposal_refresh"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + assert!(view + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .unwrap()); + + let MockServerClient { + server, + client, + config: _, + } = MockServerClient::new(); + + let last_sortition = view.last_sortition.as_ref().unwrap(); + + let expected_result = vec![ + SortitionInfo { + burn_block_hash: last_sortition.burn_block_hash, + burn_block_height: 2, + sortition_id: SortitionId([2; 32]), + parent_sortition_id: SortitionId([1; 32]), + consensus_hash: block.header.consensus_hash, + was_sortition: true, + burn_header_timestamp: 2, + miner_pk_hash160: Some(view.cur_sortition.miner_pkh), + stacks_parent_ch: Some(view.cur_sortition.parent_tenure_id), + last_sortition_ch: Some(view.cur_sortition.parent_tenure_id), + committed_block_hash: None, + }, + SortitionInfo { + burn_block_hash: BurnchainHeaderHash([128; 32]), + burn_block_height: 1, + sortition_id: SortitionId([1; 32]), + parent_sortition_id: SortitionId([0; 32]), + consensus_hash: view.cur_sortition.parent_tenure_id, + was_sortition: true, + burn_header_timestamp: 1, + miner_pk_hash160: Some(view.cur_sortition.miner_pkh), + stacks_parent_ch: Some(view.cur_sortition.parent_tenure_id), + last_sortition_ch: Some(view.cur_sortition.parent_tenure_id), + committed_block_hash: None, + }, + ]; + + view.cur_sortition.consensus_hash = ConsensusHash([128; 20]); + let h = std::thread::spawn(move || { + view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, true) + }); + crate::client::tests::write_response( + server, + format!("HTTP/1.1 200 Ok\n\n{}", 
serde_json::json!(expected_result)).as_bytes(), + ); + let result = h.join().unwrap(); + assert!(result.unwrap()); +} diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 064ccc3cd20..e68c3ca1dd9 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -379,6 +379,7 @@ impl Signer { &block_proposal.block, miner_pubkey, self.reward_cycle, + true, ) { // Error validating block Err(e) => { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0be76c53621..722528e0ab4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6006,6 +6006,7 @@ fn signer_chainstate() { prior_tenure_first, miner_pk, reward_cycle, + true, ) .unwrap(); assert!( @@ -6020,6 +6021,7 @@ fn signer_chainstate() { block, miner_pk, reward_cycle, + true, ) .unwrap(); assert!( @@ -6056,6 +6058,7 @@ fn signer_chainstate() { &proposal.0, &proposal.1, reward_cycle, + true, ) .unwrap(); @@ -6105,6 +6108,7 @@ fn signer_chainstate() { &proposal_interim.0, &proposal_interim.1, reward_cycle, + true, ) .unwrap(); @@ -6134,6 +6138,7 @@ fn signer_chainstate() { &proposal_interim.0, &proposal_interim.1, reward_cycle, + true, ) .unwrap(); @@ -6209,7 +6214,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." @@ -6266,7 +6272,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." @@ -6329,7 +6336,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." @@ -6394,7 +6402,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." 
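The key property of the `reset_view_if_wrong_consensus_hash` flag added above is that the sortition view is refreshed at most once per proposal: the recursive re-check passes `false`, so a proposal that still matches neither sortition after the refresh is rejected rather than triggering another refresh. A stand-alone sketch of that control flow, using placeholder types instead of the signer's real `SortitionsView`, `StacksClient`, and `NakamotoBlock`:

    /// Placeholder view holding a stand-in for the current consensus hash.
    struct View {
        consensus_hash: u64,
    }

    impl View {
        /// Stand-in for re-querying the node's current sortition state.
        fn reset_view(&mut self, latest_from_node: u64) {
            self.consensus_hash = latest_from_node;
        }

        fn check_proposal(&mut self, proposal_ch: u64, latest_from_node: u64, reset_if_mismatch: bool) -> bool {
            if self.consensus_hash == proposal_ch {
                return true;
            }
            if reset_if_mismatch {
                self.reset_view(latest_from_node);
                // Re-check exactly once; passing `false` prevents a second refresh.
                return self.check_proposal(proposal_ch, latest_from_node, false);
            }
            false
        }
    }

This bounds the extra work to one view refresh per proposal, which matters because the check runs on every incoming block proposal.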
From e204f1109813a903d5d592939487eef192e54028 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:49:35 -0400 Subject: [PATCH 744/910] feat: add a p2p session struct and use it to query nakamoto inventory vectors --- stackslib/src/main.rs | 238 +++++++++++++++++++++++++++++--------- 1 file changed, 186 insertions(+), 52 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 52d481affb4..f45eba79f05 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -36,6 +36,8 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::File; use std::io::prelude::*; use std::io::BufReader; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream, ToSocketAddrs}; +use std::time::Duration; use std::{env, fs, io, process, thread}; use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; @@ -62,11 +64,12 @@ use blockstack_lib::clarity::vm::ClarityVersion; use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::cost_estimates::UnitEstimator; +use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::db::LocalPeer; -use blockstack_lib::net::inv::nakamoto::InvGenerator; +use blockstack_lib::net::httpcore::{send_http_request, StacksHttpRequest}; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::{NakamotoInvData, StacksMessage}; +use blockstack_lib::net::{GetNakamotoInvData, HandshakeData, StacksMessage, StacksMessageType}; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, cli}; @@ -77,7 +80,7 @@ use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; -use stacks_common::types::net::PeerAddress; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; @@ -86,6 +89,170 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, sleep_ms}; +struct P2PSession { + pub local_peer: LocalPeer, + peer_info: RPCPeerInfoData, + burn_block_hash: BurnchainHeaderHash, + stable_burn_block_hash: BurnchainHeaderHash, + tcp_socket: TcpStream, + seq: u32, +} + +impl P2PSession { + /// Make a StacksMessage. Sign it and set a sequence number. + fn make_peer_message(&mut self, payload: StacksMessageType) -> Result<StacksMessage, String> { + let mut msg = StacksMessage::new( + self.peer_info.peer_version, + self.peer_info.network_id, + self.peer_info.burn_block_height, + &self.burn_block_hash, + self.peer_info.stable_burn_block_height, + &self.stable_burn_block_hash, + payload, + ); + + msg.sign(self.seq, &self.local_peer.private_key) + .map_err(|e| format!("Failed to sign message {:?}: {:?}", &msg, &e))?; + self.seq = self.seq.wrapping_add(1); + + Ok(msg) + } + + /// Send a p2p message. + /// Returns error text on failure. + fn send_peer_message(&mut self, msg: StacksMessage) -> Result<(), String> { + msg.consensus_serialize(&mut self.tcp_socket) + .map_err(|e| format!("Failed to send message {:?}: {:?}", &msg, &e)) + } + + /// Receive a p2p message. + /// Returns error text on failure. 
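+ /// (This is a blocking read on the session's TCP socket.)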
+ fn recv_peer_message(&mut self) -> Result<StacksMessage, String> { + let msg: StacksMessage = read_next(&mut self.tcp_socket) + .map_err(|e| format!("Failed to receive message: {:?}", &e))?; + Ok(msg) + } + + /// Begin a p2p session. + /// Synthesizes a LocalPeer from the remote peer's responses to /v2/info and /v2/pox. + /// Performs the initial handshake for you. + /// + /// Returns the session handle on success. + /// Returns error text on failure. + pub fn begin(peer_addr: SocketAddr, data_port: u16) -> Result<P2PSession, String> { + let data_addr = match peer_addr { + SocketAddr::V4(v4addr) => { + SocketAddr::V4(SocketAddrV4::new(v4addr.ip().clone(), data_port)) + } + SocketAddr::V6(v6addr) => { + SocketAddr::V6(SocketAddrV6::new(v6addr.ip().clone(), data_port, 0, 0)) + } + }; + + // get /v2/info + let peer_info = send_http_request( + &format!("{}", data_addr.ip()), + data_addr.port(), + StacksHttpRequest::new_getinfo(PeerHost::from(data_addr.clone()), None) + .with_header("Connection".to_string(), "close".to_string()), + Duration::from_secs(60), + ) + .map_err(|e| format!("Failed to query /v2/info: {:?}", &e))? + .decode_peer_info() + .map_err(|e| format!("Failed to decode response from /v2/info: {:?}", &e))?; + + // convert `pox_consensus` and `stable_pox_consensus` into their respective burn block + // hashes + let sort_info = send_http_request( + &format!("{}", data_addr.ip()), + data_addr.port(), + StacksHttpRequest::new_get_sortition_consensus( + PeerHost::from(data_addr.clone()), + &peer_info.pox_consensus, + ) + .with_header("Connection".to_string(), "close".to_string()), + Duration::from_secs(60), + ) + .map_err(|e| format!("Failed to query /v3/sortitions: {:?}", &e))? + .decode_sortition_info() + .map_err(|e| format!("Failed to decode response from /v3/sortitions: {:?}", &e))? + .pop() + .ok_or_else(|| format!("No sortition returned for {}", &peer_info.pox_consensus))?; + + let stable_sort_info = send_http_request( + &format!("{}", data_addr.ip()), + data_addr.port(), + StacksHttpRequest::new_get_sortition_consensus( + PeerHost::from(data_addr.clone()), + &peer_info.stable_pox_consensus, + ) + .with_header("Connection".to_string(), "close".to_string()), + Duration::from_secs(60), + ) + .map_err(|e| format!("Failed to query stable /v3/sortitions: {:?}", &e))? + .decode_sortition_info() + .map_err(|e| { + format!( + "Failed to decode response from stable /v3/sortitions: {:?}", + &e + ) + })? + .pop() + .ok_or_else(|| { + format!( + "No sortition returned for {}", + &peer_info.stable_pox_consensus + ) + })?; + + let burn_block_hash = sort_info.burn_block_hash; + let stable_burn_block_hash = stable_sort_info.burn_block_hash; + + let local_peer = LocalPeer::new( + peer_info.network_id, + peer_info.parent_network_id, + PeerAddress::from_socketaddr(&peer_addr), + peer_addr.port(), + Some(StacksPrivateKey::new()), + u64::MAX, + UrlString::try_from(format!("http://127.0.0.1:{}", data_port).as_str()).unwrap(), + vec![], + ); + + let tcp_socket = TcpStream::connect(&peer_addr) + .map_err(|e| format!("Failed to open {:?}: {:?}", &peer_addr, &e))?; + + let mut session = Self { + local_peer, + peer_info, + burn_block_hash, + stable_burn_block_hash, + tcp_socket, + seq: 0, + }; + + // perform the handshake + let handshake_data = + StacksMessageType::Handshake(HandshakeData::from_local_peer(&session.local_peer)); + let handshake = session.make_peer_message(handshake_data)?; + session.send_peer_message(handshake)?; + + let resp = session.recv_peer_message()?; + match resp.payload { + StacksMessageType::HandshakeAccept(..) 
| StacksMessageType::StackerDBHandshakeAccept(..) => {} + x => { + return Err(format!( + "Peer returned unexpected message (expected HandshakeAccept variant): {:?}", + &x + )); + } + } + + Ok(session) + } +} + #[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec<String> = env::args().collect(); @@ -975,59 +1142,26 @@ simulating a miner. process::exit(1); } - if argv[1] == "get-tenure-inv" { - let chainstate_root_path = &argv[2]; - let tip_block_ids = &argv[3..]; - let chainstate_path = format!("{}/chainstate", &chainstate_root_path); - let sortition_path = format!("{}/burnchain/sortition", &chainstate_root_path); + if argv[1] == "getnakamotoinv" { + let peer_addr: SocketAddr = argv[2].to_socket_addrs().unwrap().next().unwrap(); + let data_port: u16 = argv[3].parse().unwrap(); + let ch = ConsensusHash::from_hex(&argv[4]).unwrap(); - let (chainstate, _) = - StacksChainState::open(false, 0x80000000, &chainstate_path, None).unwrap(); - let pox_consts = - PoxConstants::new(900, 100, 80, 0, 0, u64::MAX, u64::MAX, 240, 241, 242, 242); - let sortition_db = SortitionDB::open(&sortition_path, true, pox_consts).unwrap(); - - let mut invgen = InvGenerator::new(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortition_db.conn()).unwrap(); - - for tip_block_id in tip_block_ids.iter() { - let tip_block_id = StacksBlockId::from_hex(tip_block_id).unwrap(); - let header = - NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &tip_block_id) - .unwrap() - .unwrap(); - let sn = SortitionDB::get_block_snapshot_consensus( sortition_db.conn(), &header.consensus_hash, ) .unwrap() .unwrap(); + let mut session = P2PSession::begin(peer_addr, data_port).unwrap(); - let reward_cycle = sortition_db - .pox_constants - .block_height_to_reward_cycle(230, sn.block_height) - .unwrap(); + // send getnakamotoinv + let get_nakamoto_inv = + StacksMessageType::GetNakamotoInv(GetNakamotoInvData { consensus_hash: ch }); - let bitvec_bools = invgen - .make_tenure_bitvector( - &tip, - &sortition_db, - &chainstate, - &header.consensus_hash, - &header.anchored_header.block_hash(), - reward_cycle, - ) - .unwrap(); - let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools) - .map_err(|e| { - warn!("Failed to create a NakamotoInv response: {:?}", &e); - e - }) - .unwrap(); + let msg = session.make_peer_message(get_nakamoto_inv).unwrap(); + session.send_peer_message(msg).unwrap(); + let resp = session.recv_peer_message().unwrap(); - println!("{}: {:?}", tip_block_id, &nakamoto_inv); - } - process::exit(0); + let StacksMessageType::NakamotoInv(inv) = &resp.payload else { + panic!("Got spurious message: {:?}", &resp); + }; + + println!("{:?}", inv); } if argv[1] == "replay-chainstate" { From 3c31ca1dbfb40f44b32cdb016bd52044729635f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:49:55 -0400 Subject: [PATCH 745/910] chore: add helpers to make sortition requests and decode them --- stackslib/src/net/api/getsortition.rs | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 7b594530c26..7074e707926 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -377,3 +377,34 @@ impl HttpResponse for GetSortitionHandler { Ok(HttpResponsePayload::try_from_json(sortition_info)?) 
} } + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_get_sortition( + host: PeerHost, + sort_key: &str, + sort_value: &str, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/sortitions/{}/{}", sort_key, sort_value), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } + + pub fn new_get_sortition_consensus(host: PeerHost, ch: &ConsensusHash) -> StacksHttpRequest { + Self::new_get_sortition(host, "consensus", &format!("{}", ch)) + } +} + +impl StacksHttpResponse { + pub fn decode_sortition_info(self) -> Result<Vec<SortitionInfo>, NetError> { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let response: Vec<SortitionInfo> = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError(format!("Failed to decode JSON: {:?}", &_e)))?; + Ok(response) + } +} From d01baf5bb1bfd64a1f4be6f37a63b62d325d4de3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:50:10 -0400 Subject: [PATCH 746/910] feat: with_header() constructor function for a StacksHttpRequest --- stackslib/src/net/httpcore.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 3b4bf8c9b98..c58355a6a97 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -532,6 +532,12 @@ impl StacksHttpRequest { self.preamble.add_header(hdr, value); } + /// Constructor to add headers + pub fn with_header(mut self, hdr: String, value: String) -> Self { + self.add_header(hdr, value); + self + } + /// Get a ref to all request headers pub fn get_headers(&self) -> &BTreeMap<String, String> { &self.preamble.headers From 0bef8a7c0910bbfa00f1e77622473b9fe34c1a73 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:50:24 -0400 Subject: [PATCH 747/910] fix: make no_cache a test-only variable --- stackslib/src/net/inv/nakamoto.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index d771848fec8..acacc741537 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -114,6 +114,7 @@ pub struct InvGenerator { /// count cache misses for `processed_tenures` cache_misses: u128, /// Disable caching (test only) + #[cfg(test)] no_cache: bool, } @@ -124,6 +125,7 @@ impl InvGenerator { sortitions: HashMap::new(), tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, cache_misses: 0, + #[cfg(test)] no_cache: false, } } @@ -206,6 +208,17 @@ impl InvGenerator { Ok(None) } + #[cfg(not(test))] + fn test_clear_cache(&mut self) {} + + /// Clear the cache (test only) + #[cfg(test)] + fn test_clear_cache(&mut self) { + if self.no_cache { + self.processed_tenures.clear(); + } + } + /// Get a processed tenure. If it's not cached, then load it from disk. 
     ///
     /// Loading it is expensive, so once loaded, store it with the cached processed tenure map
@@ -287,9 +300,7 @@ impl InvGenerator {
                 self.cache_misses = self.cache_misses.saturating_add(1);
                 Ok(loaded_info_opt)
             };
-            if self.no_cache {
-                self.processed_tenures.clear();
-            }
+            self.test_clear_cache();
             ret
         }

From d76133053edaaa0fe5429e6b9618ef220cd12cbd Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Wed, 2 Oct 2024 23:50:35 -0400
Subject: [PATCH 748/910] chore: don't broadcast to unauthenticated peers

---
 stackslib/src/net/p2p.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs
index 43386716416..d7ea9684f38 100644
--- a/stackslib/src/net/p2p.rs
+++ b/stackslib/src/net/p2p.rs
@@ -1050,6 +1050,9 @@ impl PeerNetwork {
             if let Some(event_id) = self.events.get(&nk) {
                 let event_id = *event_id;
                 if let Some(convo) = self.peers.get_mut(&event_id) {
+                    if !convo.is_authenticated() {
+                        continue;
+                    }
                     // safety check -- don't send to someone who has already been a relayer
                     let mut do_relay = true;
                     if let Some(pubkey) = convo.ref_public_key() {

From 0c101e312a29da333c838e83c051b5dfb44ce303 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 3 Oct 2024 07:27:31 -0700
Subject: [PATCH 749/910] CRC: fix typo of 2_1 to 2_5

Signed-off-by: Jacinta Ferrant

---
 testnet/stacks-node/src/tests/epoch_25.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs
index 4c418939a6b..94a3edaadb3 100644
--- a/testnet/stacks-node/src/tests/epoch_25.rs
+++ b/testnet/stacks-node/src/tests/epoch_25.rs
@@ -172,7 +172,7 @@ fn microblocks_disabled() {
     // wait until just before epoch 2.5
     wait_for(30, || {
         let tip_info = get_chain_info(&conf);
-        if tip_info.burn_block_height >= epoch_2_1 - 2 {
+        if tip_info.burn_block_height >= epoch_2_5 - 2 {
             return Ok(true);
         }
         next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

From 064a223d2190a31c44cea86981d9f4068c624af0 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 3 Oct 2024 14:25:13 -0400
Subject: [PATCH 750/910] fix: fix 5267 by making it so that any peer can be
 re-assigned to an idle downloader

---
 .../nakamoto/download_state_machine.rs        | 10 ++--
 .../download/nakamoto/tenure_downloader.rs    | 59 ++++++++++---------
 .../nakamoto/tenure_downloader_set.rs         | 13 ++--
 3 files changed, 42 insertions(+), 40 deletions(-)

diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs
index 132a03f34d0..02ed8b94190 100644
--- a/stackslib/src/net/download/nakamoto/download_state_machine.rs
+++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs
@@ -384,7 +384,7 @@ impl NakamotoDownloadStateMachine {
             &new_wanted_tenures
         );
         self.wanted_tenures.append(&mut new_wanted_tenures);
-        debug!("extended wanted_tenures is now {:?}", &self.wanted_tenures);
+        test_debug!("extended wanted_tenures is now {:?}", &self.wanted_tenures);
 
         Ok(())
     }
@@ -983,9 +983,9 @@ impl NakamotoDownloadStateMachine {
             prev_schedule
         };
 
-        debug!("new schedule: {:?}", schedule);
-        debug!("new available: {:?}", &available);
-        debug!("new tenure_block_ids: {:?}", &tenure_block_ids);
+        test_debug!("new schedule: {:?}", schedule);
+        test_debug!("new available: {:?}", &available);
+        test_debug!("new tenure_block_ids: {:?}", &tenure_block_ids);
 
         self.tenure_download_schedule = schedule;
        self.tenure_block_ids = tenure_block_ids;
@@ -1023,7 +1023,7 @@ impl NakamotoDownloadStateMachine {
            .map(|wt| (wt.burn_height, &wt.tenure_id_consensus_hash))
            .collect();
 
-        debug!("Check availability {:?}", available);
+        test_debug!("Check availability {:?}", available);
         let mut highest_available = Vec::with_capacity(2);
         for (_, ch) in tenure_block_heights.iter().rev() {
             let available_count = available
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
index 92e032fa383..e309072f84c 100644
--- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
@@ -66,16 +66,18 @@ use crate::util_lib::db::{DBConn, Error as DBError};
 /// start and end block.  This includes all tenures except for the two most recent ones.
 #[derive(Debug, Clone, PartialEq)]
 pub enum NakamotoTenureDownloadState {
-    /// Getting the tenure-start block (the given StacksBlockId is it's block ID).
-    GetTenureStartBlock(StacksBlockId),
+    /// Getting the tenure-start block (the given StacksBlockId is its block ID), as well as the
+    /// millisecond epoch timestamp at which the request began
+    GetTenureStartBlock(StacksBlockId, u128),
     /// Getting the tenure-end block.
-    ///
-    /// The field here is the block ID of the tenure end block.
-    GetTenureEndBlock(StacksBlockId),
+    /// The fields here are the block ID of the tenure end block, as well as the millisecond epoch
+    /// timestamp at which the request began
+    GetTenureEndBlock(StacksBlockId, u128),
     /// Receiving tenure blocks.
-    /// The field here is the hash of the _last_ block in the tenure that must be downloaded.  This
-    /// is because a tenure is fetched in order from highest block to lowest block.
-    GetTenureBlocks(StacksBlockId),
+    /// The fields here are the hash of the _last_ block in the tenure that must be downloaded, as well
+    /// as the millisecond epoch timestamp at which the request began.  The first field is needed
+    /// because a tenure is fetched in order from highest block to lowest block.
+    GetTenureBlocks(StacksBlockId, u128),
     /// We have gotten all the blocks for this tenure
     Done,
 }
@@ -166,7 +168,7 @@ impl NakamotoTenureDownloader {
             start_signer_keys,
             end_signer_keys,
             idle: false,
-            state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()),
+            state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone(), get_epoch_time_ms()),
             tenure_start_block: None,
             tenure_end_block: None,
             tenure_blocks: None,
@@ -187,7 +189,7 @@ impl NakamotoTenureDownloader {
         &mut self,
         tenure_start_block: NakamotoBlock,
     ) -> Result<(), NetError> {
-        let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else {
+        let NakamotoTenureDownloadState::GetTenureStartBlock(..) = &self.state else {
             // not the right state for this
             warn!("Invalid state for this method";
                   "state" => %self.state);
@@ -235,7 +237,7 @@ impl NakamotoTenureDownloader {
         } else {
             // need to get tenure_end_block.
             self.state =
-                NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone());
+                NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone(), get_epoch_time_ms());
         }
         Ok(())
     }
@@ -252,7 +254,7 @@ impl NakamotoTenureDownloader {
     ) -> Result<(), NetError> {
         if !matches!(
             &self.state,
-            NakamotoTenureDownloadState::GetTenureEndBlock(_)
+            NakamotoTenureDownloadState::GetTenureEndBlock(..)
         ) {
             warn!("Invalid state for this method";
                   "state" => %self.state);
@@ -326,6 +328,7 @@ impl NakamotoTenureDownloader {
         self.tenure_end_block = Some(tenure_end_block.clone());
         self.state = NakamotoTenureDownloadState::GetTenureBlocks(
             tenure_end_block.header.parent_block_id.clone(),
+            get_epoch_time_ms()
         );
         Ok(())
     }
@@ -361,7 +364,7 @@ impl NakamotoTenureDownloader {
         &mut self,
         mut tenure_blocks: Vec<NakamotoBlock>,
     ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else {
+        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor, start_request_time) = &self.state else {
             warn!("Invalid state for this method";
                   "state" => %self.state);
             return Err(NetError::InvalidState);
@@ -461,7 +464,7 @@ impl NakamotoTenureDownloader {
                 &earliest_block.block_id(),
                 &next_block_id
             );
-            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id);
+            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id, *start_request_time);
             return Ok(None);
         }
 
@@ -486,16 +489,16 @@ impl NakamotoTenureDownloader {
         peerhost: PeerHost,
     ) -> Result<StacksHttpRequest, ()> {
         let request = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => {
-                debug!("Request tenure-start block {}", &start_block_id);
+            NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id, start_request_time) => {
+                debug!("Request tenure-start block {} at {}", &start_block_id, start_request_time);
                 StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
             }
-            NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => {
-                debug!("Request tenure-end block {}", &end_block_id);
+            NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id, start_request_time) => {
+                debug!("Request tenure-end block {} at {}", &end_block_id, start_request_time);
                 StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
             }
-            NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => {
-                debug!("Downloading tenure ending at {}", &end_block_id);
+            NakamotoTenureDownloadState::GetTenureBlocks(end_block_id, start_request_time) => {
                debug!("Downloading tenure ending at {} at {}", &end_block_id, start_request_time);
                 StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
             }
             NakamotoTenureDownloadState::Done => {
@@ -558,10 +561,10 @@ impl NakamotoTenureDownloader {
         response: StacksHttpResponse,
     ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
         let handle_result = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => {
+            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id, _start_request_time) => {
                 debug!(
-                    "Got download response for tenure-start block {}",
-                    &_block_id
+                    "Got download response for tenure-start block {} in {}ms",
+                    &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time)
                 );
                 let block = response.decode_nakamoto_block().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
@@ -570,8 +573,8 @@ impl NakamotoTenureDownloader {
                 self.try_accept_tenure_start_block(block)?;
                 Ok(None)
             }
-            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => {
-                debug!("Got download response to tenure-end block {}", &_block_id);
+            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id, _start_request_time) => {
+                debug!("Got download response to tenure-end block {} in {}ms", &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time));
                 let block = response.decode_nakamoto_block().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
                     e
@@ -579,10 +582,10 @@ impl NakamotoTenureDownloader {
                 self.try_accept_tenure_end_block(&block)?;
                 Ok(None)
             }
-            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => {
+            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id, _start_request_time) => {
                 debug!(
-                    "Got download response for tenure blocks ending at {}",
-                    &_end_block_id
+                    "Got download response for tenure blocks ending at {} in {}ms",
+                    &_end_block_id, get_epoch_time_ms().saturating_sub(_start_request_time)
                 );
                 let blocks = response.decode_nakamoto_tenure().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs
index 74ff83460d6..32d45667ccf 100644
--- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs
@@ -230,13 +230,11 @@ impl NakamotoTenureDownloaderSet {
             if !downloader.idle {
                 continue;
             }
-            if downloader.naddr != naddr {
-                continue;
-            }
             debug!(
                 "Assign peer {} to work on downloader for {} in state {}",
                 &naddr, &downloader.tenure_id_consensus_hash, &downloader.state
             );
+            downloader.naddr = naddr.clone();
             self.peers.insert(naddr, i);
             return true;
         }
@@ -308,8 +306,8 @@ impl NakamotoTenureDownloaderSet {
             };
             if &downloader.tenure_id_consensus_hash == tenure_id {
                 debug!(
-                    "Have downloader for tenure {} already (idle={}, state={})",
-                    tenure_id, downloader.idle, &downloader.state
+                    "Have downloader for tenure {} already (idle={}, state={}, naddr={})",
+                    tenure_id, downloader.idle, &downloader.state, &downloader.naddr
                 );
                 return true;
             }
@@ -328,7 +326,7 @@ impl NakamotoTenureDownloaderSet {
         count: usize,
         current_reward_cycles: &BTreeMap<u64, RewardCycleInfo>,
     ) {
-        debug!("make_tenure_downloaders";
+        test_debug!("make_tenure_downloaders";
             "schedule" => ?schedule,
             "available" => ?available,
             "tenure_block_ids" => ?tenure_block_ids,
@@ -463,7 +461,7 @@ impl NakamotoTenureDownloaderSet {
                 continue;
             };
             if downloader.is_done() {
-                debug!("Downloader for {} is done", &naddr);
+                debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash);
                 finished.push(naddr.clone());
                 finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
                 continue;
@@ -534,6 +532,7 @@ impl NakamotoTenureDownloaderSet {
             );
             new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks);
             if downloader.is_done() {
+                debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash);
                 finished.push(naddr.clone());
                 finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
                 continue;

From 1947fc62ce5c7e445ab25d4a96fb1ebb8390a23f Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 3 Oct 2024 15:01:28 -0400
Subject: [PATCH 751/910] chore: cargo fmt

---
 .../download/nakamoto/tenure_downloader.rs    | 52 ++++++++++++++-----
 .../nakamoto/tenure_downloader_set.rs         | 10 +++-
 2 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
index e309072f84c..c11e9d42dd5 100644
--- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
@@ -168,7 +168,10 @@ impl NakamotoTenureDownloader {
             start_signer_keys,
             end_signer_keys,
             idle: false,
-            state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone(), get_epoch_time_ms()),
+            state: NakamotoTenureDownloadState::GetTenureStartBlock(
+                tenure_start_block_id.clone(),
+                get_epoch_time_ms(),
+            ),
             tenure_start_block: None,
             tenure_end_block: None,
             tenure_blocks: None,
@@ -236,8 +239,10 @@ impl NakamotoTenureDownloader {
             self.try_accept_tenure_end_block(&tenure_end_block)?;
         } else {
             // need to get tenure_end_block.
-            self.state =
-                NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone(), get_epoch_time_ms());
+            self.state = NakamotoTenureDownloadState::GetTenureEndBlock(
+                self.tenure_end_block_id.clone(),
+                get_epoch_time_ms(),
+            );
         }
         Ok(())
     }
@@ -328,7 +333,7 @@ impl NakamotoTenureDownloader {
         self.tenure_end_block = Some(tenure_end_block.clone());
         self.state = NakamotoTenureDownloadState::GetTenureBlocks(
             tenure_end_block.header.parent_block_id.clone(),
-            get_epoch_time_ms()
+            get_epoch_time_ms(),
         );
         Ok(())
     }
@@ -364,7 +369,9 @@ impl NakamotoTenureDownloader {
         &mut self,
         mut tenure_blocks: Vec<NakamotoBlock>,
     ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor, start_request_time) = &self.state else {
+        let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor, start_request_time) =
+            &self.state
+        else {
             warn!("Invalid state for this method";
                   "state" => %self.state);
             return Err(NetError::InvalidState);
@@ -461,7 +471,8 @@ impl NakamotoTenureDownloader {
                 &earliest_block.block_id(),
                 &next_block_id
             );
-            self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id, *start_request_time);
+            self.state =
+                NakamotoTenureDownloadState::GetTenureBlocks(next_block_id, *start_request_time);
             return Ok(None);
         }
 
@@ -489,16 +497,28 @@ impl NakamotoTenureDownloader {
         peerhost: PeerHost,
     ) -> Result<StacksHttpRequest, ()> {
         let request = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id, start_request_time) => {
-                debug!("Request tenure-start block {} at {}", &start_block_id, start_request_time);
+            NakamotoTenureDownloadState::GetTenureStartBlock(
+                start_block_id,
+                start_request_time,
+            ) => {
+                debug!(
+                    "Request tenure-start block {} at {}",
+                    &start_block_id, start_request_time
+                );
                 StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone())
             }
             NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id, start_request_time) => {
-                debug!("Request tenure-end block {} at {}", &end_block_id, start_request_time);
+                debug!(
+                    "Request tenure-end block {} at {}",
+                    &end_block_id, start_request_time
+                );
                 StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone())
             }
             NakamotoTenureDownloadState::GetTenureBlocks(end_block_id, start_request_time) => {
-                debug!("Downloading tenure ending at {} at {}", &end_block_id, start_request_time);
+                debug!(
+                    "Downloading tenure ending at {} at {}",
+                    &end_block_id, start_request_time
+                );
                 StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None)
             }
             NakamotoTenureDownloadState::Done => {
@@ -564,7 +584,8 @@ impl NakamotoTenureDownloader {
             NakamotoTenureDownloadState::GetTenureStartBlock(_block_id, _start_request_time) => {
                 debug!(
                     "Got download response for tenure-start block {} in {}ms",
-                    &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time)
+                    &_block_id,
+                    get_epoch_time_ms().saturating_sub(_start_request_time)
                 );
                 let block = response.decode_nakamoto_block().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
@@ -574,7 +595,11 @@ impl NakamotoTenureDownloader {
                 Ok(None)
             }
             NakamotoTenureDownloadState::GetTenureEndBlock(_block_id, _start_request_time) => {
-                debug!("Got download response to tenure-end block {} in {}ms", &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time));
+                debug!(
+                    "Got download response to tenure-end block {} in {}ms",
+                    &_block_id,
+                    get_epoch_time_ms().saturating_sub(_start_request_time)
+                );
                 let block = response.decode_nakamoto_block().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
                     e
@@ -585,7 +610,8 @@ impl NakamotoTenureDownloader {
             NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id, _start_request_time) => {
                 debug!(
                     "Got download response for tenure blocks ending at {} in {}ms",
-                    &_end_block_id, get_epoch_time_ms().saturating_sub(_start_request_time)
+                    &_end_block_id,
+                    get_epoch_time_ms().saturating_sub(_start_request_time)
                 );
                 let blocks = response.decode_nakamoto_tenure().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs
index 32d45667ccf..160bad309e2 100644
--- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs
@@ -461,7 +461,10 @@ impl NakamotoTenureDownloaderSet {
                 continue;
             };
             if downloader.is_done() {
-                debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash);
+                debug!(
+                    "Downloader for {} on tenure {} is finished",
+                    &naddr, &downloader.tenure_id_consensus_hash
+                );
                 finished.push(naddr.clone());
                 finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
                 continue;
@@ -534,7 +535,10 @@ impl NakamotoTenureDownloaderSet {
             );
             new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks);
             if downloader.is_done() {
-                debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash);
+                debug!(
+                    "Downloader for {} on tenure {} is finished",
+                    &naddr, &downloader.tenure_id_consensus_hash
+                );
                 finished.push(naddr.clone());
                 finished_tenures.push(downloader.tenure_id_consensus_hash.clone());
                 continue;

From dca3d5a7c30f4b7540831d4163ed3229e9f146ce Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 3 Oct 2024 15:11:26 -0400
Subject: [PATCH 752/910] chore: address PR feedback

---
 stackslib/src/main.rs                 | 20 +++++++++++---------
 stackslib/src/net/api/getsortition.rs |  4 ++--
 stackslib/src/net/inv/nakamoto.rs     |  2 +-
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs
index f45eba79f05..98315cffa82 100644
--- a/stackslib/src/main.rs
+++ b/stackslib/src/main.rs
@@ -36,7 +36,7 @@ use std::collections::{BTreeMap, HashMap, HashSet};
 use std::fs::File;
 use std::io::prelude::*;
 use std::io::BufReader;
-use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream, ToSocketAddrs};
+use std::net::{SocketAddr, TcpStream, ToSocketAddrs};
 use std::time::Duration;
 use std::{env, fs, io, process, thread};

@@ -140,14 +140,8 @@ impl P2PSession {
     /// Returns the session handle on success.
     /// Returns error text on failure.
     pub fn begin(peer_addr: SocketAddr, data_port: u16) -> Result<P2PSession, String> {
-        let data_addr = match peer_addr {
-            SocketAddr::V4(v4addr) => {
-                SocketAddr::V4(SocketAddrV4::new(v4addr.ip().clone(), data_port))
-            }
-            SocketAddr::V6(v6addr) => {
-                SocketAddr::V6(SocketAddrV6::new(v6addr.ip().clone(), data_port, 0, 0))
-            }
-        };
+        let mut data_addr = peer_addr.clone();
+        data_addr.set_port(data_port);
 
         // get /v2/info
         let peer_info = send_http_request(
@@ -1143,6 +1137,14 @@ simulating a miner.
     }
 
     if argv[1] == "getnakamotoinv" {
+        if argv.len() < 5 {
+            eprintln!(
+                "Usage: {} getnakamotoinv HOST:PORT DATA_PORT CONSENSUS_HASH",
+                &argv[0]
+            );
+            process::exit(1);
+        }
+
         let peer_addr: SocketAddr = argv[2].to_socket_addrs().unwrap().next().unwrap();
         let data_port: u16 = argv[3].parse().unwrap();
         let ch = ConsensusHash::from_hex(&argv[4]).unwrap();
diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs
index 36e00c4ec59..28298eab422 100644
--- a/stackslib/src/net/api/getsortition.rs
+++ b/stackslib/src/net/api/getsortition.rs
@@ -392,14 +392,14 @@ impl StacksHttpRequest {
         StacksHttpRequest::new_for_peer(
             host,
             "GET".into(),
-            format!("/v3/sortitions/{}/{}", sort_key, sort_value),
+            format!("{}/{}/{}", RPC_SORTITION_INFO_PATH, sort_key, sort_value),
             HttpRequestContents::new(),
         )
         .expect("FATAL: failed to construct request from infallible data")
     }
 
     pub fn new_get_sortition_consensus(host: PeerHost, ch: &ConsensusHash) -> StacksHttpRequest {
-        Self::new_get_sortition(host, "consensus", &format!("{}", ch))
+        Self::new_get_sortition(host, "consensus", &ch.to_string())
     }
 }
 
diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs
index acacc741537..8971a8230f0 100644
--- a/stackslib/src/net/inv/nakamoto.rs
+++ b/stackslib/src/net/inv/nakamoto.rs
@@ -251,7 +251,7 @@ impl InvGenerator {
         // The table removals here are for cache maintenance.
         //
         // Between successive calls to this function, the Stacks tip (identified by
-        // `tip_block_ch` and `tip_block_bh) can advance as more blocks are discovered.
+        // `tip_block_ch` and `tip_block_bh`) can advance as more blocks are discovered.
         // This means that tenures that had previously been treated as absent could now be
         // present.  By evicting cached data for all tenures between (and including) the
         // highest ancestor of the current Stacks tip, and the current Stacks tip, we force

From 6b459a15a8e237629170acdaf4e834f056fac035 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 3 Oct 2024 15:41:17 -0400
Subject: [PATCH 753/910] fix: compile issues in tests, and don't use _

---
 .../download/nakamoto/tenure_downloader.rs    | 18 ++++----
 stackslib/src/net/tests/download/nakamoto.rs  | 41 +++++++++++++++----
 2 files changed, 41 insertions(+), 18 deletions(-)

diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
index c11e9d42dd5..63a622a424a 100644
--- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs
@@ -581,11 +581,11 @@ impl NakamotoTenureDownloader {
         response: StacksHttpResponse,
     ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
         let handle_result = match self.state {
-            NakamotoTenureDownloadState::GetTenureStartBlock(_block_id, _start_request_time) => {
+            NakamotoTenureDownloadState::GetTenureStartBlock(block_id, start_request_time) => {
                 debug!(
                     "Got download response for tenure-start block {} in {}ms",
-                    &_block_id,
-                    get_epoch_time_ms().saturating_sub(_start_request_time)
+                    &block_id,
+                    get_epoch_time_ms().saturating_sub(start_request_time)
                 );
                 let block = response.decode_nakamoto_block().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
@@ -594,11 +594,11 @@ impl NakamotoTenureDownloader {
                 self.try_accept_tenure_start_block(block)?;
                 Ok(None)
             }
-            NakamotoTenureDownloadState::GetTenureEndBlock(_block_id, _start_request_time) => {
+            NakamotoTenureDownloadState::GetTenureEndBlock(block_id, start_request_time) => {
                 debug!(
                     "Got download response to tenure-end block {} in {}ms",
-                    &_block_id,
-                    get_epoch_time_ms().saturating_sub(_start_request_time)
+                    &block_id,
+                    get_epoch_time_ms().saturating_sub(start_request_time)
                 );
                 let block = response.decode_nakamoto_block().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto block: {:?}", &e);
@@ -607,11 +607,11 @@ impl NakamotoTenureDownloader {
                 self.try_accept_tenure_end_block(&block)?;
                 Ok(None)
             }
-            NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id, _start_request_time) => {
+            NakamotoTenureDownloadState::GetTenureBlocks(end_block_id, start_request_time) => {
                 debug!(
                     "Got download response for tenure blocks ending at {} in {}ms",
-                    &_end_block_id,
-                    get_epoch_time_ms().saturating_sub(_start_request_time)
+                    &end_block_id,
+                    get_epoch_time_ms().saturating_sub(start_request_time)
                 );
                 let blocks = response.decode_nakamoto_tenure().map_err(|e| {
                     warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e);
diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs
index cc90d900110..45fa04d8d68 100644
--- a/stackslib/src/net/tests/download/nakamoto.rs
+++ b/stackslib/src/net/tests/download/nakamoto.rs
@@ -50,6 +50,17 @@ use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB};
 use crate::stacks_common::types::Address;
 use crate::util_lib::db::Error as DBError;
 
+impl NakamotoTenureDownloadState {
+    pub fn request_time(&self) -> Option<u128> {
+        match self {
+            Self::GetTenureStartBlock(_, ts) => Some(*ts),
+            Self::GetTenureEndBlock(_, ts) => Some(*ts),
+            Self::GetTenureBlocks(_, ts) => Some(*ts),
+            Self::Done => None,
+        }
+    }
+}
+
 impl NakamotoDownloadStateMachine {
     /// Find the list of wanted tenures for the given reward cycle.  The reward cycle must
     /// be complete already.  Used for testing.
@@ -240,7 +251,10 @@ fn test_nakamoto_tenure_downloader() {
     // must be first block
     assert_eq!(
         td.state,
-        NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block.header.block_id())
+        NakamotoTenureDownloadState::GetTenureStartBlock(
+            tenure_start_block.header.block_id(),
+            td.state.request_time().unwrap()
+        )
     );
     assert!(td
         .try_accept_tenure_start_block(blocks.last().unwrap().clone())
@@ -254,7 +268,7 @@ fn test_nakamoto_tenure_downloader() {
         .try_accept_tenure_start_block(blocks.first().unwrap().clone())
         .is_ok());
 
-    let NakamotoTenureDownloadState::GetTenureEndBlock(block_id) = td.state else {
+    let NakamotoTenureDownloadState::GetTenureEndBlock(block_id, ..)
= td.state else { panic!("wrong state"); }; assert_eq!(block_id, next_tenure_start_block.header.block_id()); @@ -274,7 +288,8 @@ fn test_nakamoto_tenure_downloader() { assert_eq!( td.state, NakamotoTenureDownloadState::GetTenureBlocks( - next_tenure_start_block.header.parent_block_id.clone() + next_tenure_start_block.header.parent_block_id.clone(), + td.state.request_time().unwrap(), ) ); assert_eq!(td.tenure_end_block, Some(next_tenure_start_block.clone())); @@ -299,7 +314,10 @@ fn test_nakamoto_tenure_downloader() { // tail pointer moved assert_eq!( td.state, - NakamotoTenureDownloadState::GetTenureBlocks(block.header.parent_block_id.clone()) + NakamotoTenureDownloadState::GetTenureBlocks( + block.header.parent_block_id.clone(), + td.state.request_time().unwrap() + ) ); } @@ -571,7 +589,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -669,7 +688,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -769,7 +789,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -846,7 +867,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -986,7 +1008,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } From 638b26a0d8419386300b9d08a07b4eee5c552369 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 3 Oct 2024 15:42:41 -0400 Subject: [PATCH 754/910] fix: typo --- stackslib/src/net/api/getsortition.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 28298eab422..9b22d8b82fc 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -383,7 +383,7 @@ impl HttpResponse for GetSortitionHandler { } impl StacksHttpRequest { - /// Make a new getinfo request to this endpoint + /// Make a new getsortition request to this endpoint pub fn new_get_sortition( host: PeerHost, sort_key: &str, From 66904ccd77d3b599c949edc8e2c197983cbf5163 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 3 Oct 2024 12:51:23 -0700 Subject: [PATCH 755/910] CRC: increase the timeout to reach right before epoch 2.5 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 94a3edaadb3..5a45b35e86d 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ 
b/testnet/stacks-node/src/tests/epoch_25.rs @@ -170,7 +170,7 @@ fn microblocks_disabled() { submit_tx(&http_origin, &tx); // wait until just before epoch 2.5 - wait_for(30, || { + wait_for(120, || { let tip_info = get_chain_info(&conf); if tip_info.burn_block_height >= epoch_2_5 - 2 { return Ok(true); From 994ef3b5aae8a34e36e6e087e0b5178000e27946 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 3 Oct 2024 15:02:03 -0500 Subject: [PATCH 756/910] test: add long tx test --- .../src/tests/nakamoto_integrations.rs | 200 +++++++++++++++++- .../src/tests/neon_integrations.rs | 2 +- 2 files changed, 200 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e5a6c87af02..e1880e6e36e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -94,7 +94,7 @@ use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::nakamoto_node::miner::{ - TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; @@ -8926,3 +8926,201 @@ fn v3_signer_api_endpoint() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. 
+/// This test asserts that a long running transaction doesn't get mined, +/// but that the stacks-node continues to make progress +fn skip_mining_long_tx() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.nakamoto_attempt_time_ms = 5_000; + let sender_1_sk = Secp256k1PrivateKey::from_seed(&[30]); + let sender_2_sk = Secp256k1PrivateKey::from_seed(&[31]); + // setup sender + recipient for a test stx transfer + let sender_1_addr = tests::to_addr(&sender_1_sk); + let sender_2_addr = tests::to_addr(&sender_2_sk); + let send_amt = 1000; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_1_addr.clone()).to_string(), + send_amt * 15 + send_fee * 15, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_2_addr.clone()).to_string(), + 10000, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_naka_blocks, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // submit a long running TX and the transfer TX + let input_list: Vec<_> = (1..100u64).into_iter().map(|x| x.to_string()).collect(); + let input_list = input_list.join(" "); + + // Mine a few nakamoto tenures with some interim blocks in them + for i in 0..5 { + let mined_before = mined_naka_blocks.load(Ordering::SeqCst); + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + if i == 0 { + // we trigger the nakamoto miner to evaluate the long running transaction, + // but we disable the block broadcast, so the tx doesn't end up included in a + // confirmed block, even though its been evaluated. + // once we've seen the miner increment the mined counter, we allow it to start + // broadcasting (because at this point, any future blocks produced will skip the long + // running tx because they have an estimate). + wait_for(30, || { + Ok(mined_naka_blocks.load(Ordering::SeqCst) > mined_before) + }) + .unwrap(); + + TEST_SKIP_P2P_BROADCAST.lock().unwrap().replace(true); + let tx = make_contract_publish( + &sender_2_sk, + 0, + 9_000, + "large_contract", + &format!( + "(define-constant INP_LIST (list {input_list})) + (define-private (mapping-fn (input int)) + (begin (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 input))))))))) + 0)) + + (define-private (mapping-fn-2 (input int)) + (begin (map mapping-fn INP_LIST) (map mapping-fn INP_LIST) (map mapping-fn INP_LIST) (map mapping-fn INP_LIST) 0)) + + (begin + (map mapping-fn-2 INP_LIST))" + ), + ); + submit_tx(&http_origin, &tx); + + wait_for(90, || { + Ok(mined_naka_blocks.load(Ordering::SeqCst) > mined_before + 1) + }) + .unwrap(); + + TEST_SKIP_P2P_BROADCAST.lock().unwrap().replace(false); + } else { + let transfer_tx = + make_stacks_transfer(&sender_1_sk, i - 1, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &sender_1_addr).nonce; + Ok(cur_sender_nonce >= i) + }) + .unwrap(); + } + } + + let sender_1_nonce = get_account(&http_origin, &sender_1_addr).nonce; + let sender_2_nonce = get_account(&http_origin, &sender_2_addr).nonce; + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + "sender_1_nonce" => sender_1_nonce, + "sender_2_nonce" => sender_2_nonce, + ); + + assert_eq!(sender_2_nonce, 0); + 
assert_eq!(sender_1_nonce, 4); + + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + test_observer::contains_burn_block_range(220..=bhh).unwrap(); + + check_nakamoto_empty_block_heuristics(); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 2c2055bad90..126f089c359 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -591,7 +591,7 @@ pub mod test_observer { } } let tx_hex = tx_json.get("raw_tx").unwrap().as_str().unwrap(); - let tx_bytes = hex_bytes(tx_hex).unwrap(); + let tx_bytes = hex_bytes(&tx_hex[2..]).unwrap(); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); Some(tx) From 56ae22d80b5925b80759cf35c449dddc27b0ed69 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 3 Oct 2024 13:44:18 -0700 Subject: [PATCH 757/910] Bind ports should not use the same port numbers Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 27 ++++----- .../src/tests/nakamoto_integrations.rs | 46 ++++++++------- .../src/tests/neon_integrations.rs | 58 ++++++++++++++----- 3 files changed, 81 insertions(+), 50 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a7892b9a2db..8829c9782d8 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -23,7 +23,7 @@ use clarity::vm::events::STXEventType; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; -use rand::RngCore; +use rand::Rng; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StacksTransactionEvent; @@ -295,13 +295,14 @@ pub fn new_test_conf() -> Config { // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", // stacksAddress: "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT" let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); + // Use a non-privileged port between 1024 and 65534 + let rpc_port: u16 = rng.gen_range(1024..65533); + let p2p_port = rpc_port + 1; let mut conf = Config::default(); conf.node.working_dir = format!( "/tmp/stacks-node-tests/integrations-neon/{}-{}", - to_hex(&buf), + to_hex(format!("{rpc_port}{p2p_port}").as_bytes()), get_epoch_time_secs() ); conf.node.seed = @@ -313,14 +314,11 @@ pub fn new_test_conf() -> Config { conf.burnchain.epochs = Some(StacksEpoch::all(0, 0, 0)); - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let localhost = "127.0.0.1"; - conf.node.rpc_bind = format!("{}:{}", localhost, rpc_port); - conf.node.p2p_bind = format!("{}:{}", localhost, p2p_port); - conf.node.data_url = format!("http://{}:{}", localhost, rpc_port); - conf.node.p2p_address = format!("{}:{}", localhost, p2p_port); + conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + conf.node.data_url = 
format!("http://{localhost}:{rpc_port}"); + conf.node.p2p_address = format!("{localhost}:{p2p_port}"); conf } @@ -344,10 +342,9 @@ pub fn set_random_binds(config: &mut Config) { .unwrap(); let (rpc_port, p2p_port) = loop { let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let rpc_port: u16 = rng.gen_range(1024..65533); + let p2p_port = rpc_port + 1; if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { break (rpc_port, p2p_port); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 722528e0ab4..17840dcb55b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,7 +29,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; -use rand::RngCore; +use rand::Rng; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -3459,18 +3459,20 @@ fn follower_bootup() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; + let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let mut rpc_port: u16 = rng.gen_range(1024..65533); + while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { + // We should NOT match the miner's rpc bind and subsequently p2p port + rpc_port = rng.gen_range(1024..65533); + } + let p2p_port = rpc_port + 1; - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); @@ -3813,18 +3815,20 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.local_peer_seed = vec![0x02; 32]; follower_conf.node.miner = false; + let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = 
u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let mut rpc_port: u16 = rng.gen_range(1024..65533); + while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { + // We should NOT match the miner's rpc bind and subsequently p2p port + rpc_port = rng.gen_range(1024..65533); + } + let p2p_port = rpc_port + 1; - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ab892f25b75..d34cbffe5b4 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12,7 +12,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; -use rand::{Rng, RngCore}; +use rand::Rng; use rusqlite::params; use serde::Deserialize; use serde_json::json; @@ -986,7 +986,16 @@ fn bitcoind_integration_test() { } let (mut conf, miner_account) = neon_integration_test_conf(); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let localhost = "127.0.0.1"; + let mut rng = rand::thread_rng(); + // Use a non-privileged port between 1024 and 65534 + let mut prom_port = 6000; + let mut prom_bind = format!("{localhost}:{prom_port}"); + while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + // We should NOT match the miner's rpc or p2p binds + prom_port = rng.gen_range(1024..65533); + prom_bind = format!("{localhost}:{prom_port}"); + } conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12466,18 +12475,21 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; + let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let mut rpc_port: u16 = rng.gen_range(1024..65533); + while format!("{localhost}:{rpc_port}") == conf.node.rpc_bind { + // We should NOT match the miner's rpc bind and subsequently p2p port + rpc_port = rng.gen_range(1024..65533); + } + let p2p_port = rpc_port + 1; - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - 
follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); + follower_conf.node.pox_sync_sample_secs = 30; let run_loop_thread = thread::spawn(move || miner_run_loop.start(None, 0)); wait_for_runloop(&miner_blocks_processed); @@ -12800,7 +12812,16 @@ fn listunspent_max_utxos() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let localhost = "127.0.0.1"; + let mut rng = rand::thread_rng(); + // Use a non-privileged port between 1024 and 65534 + let mut prom_port = 6000; + let mut prom_bind = format!("{localhost}:{prom_port}"); + while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + // We should NOT match the miner's rpc or p2p binds + prom_port = rng.gen_range(1024..65533); + prom_bind = format!("{localhost}:{prom_port}"); + } conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12846,7 +12867,16 @@ fn start_stop_bitcoind() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let localhost = "127.0.0.1"; + let mut rng = rand::thread_rng(); + // Use a non-privileged port between 1024 and 65534 + let mut prom_port = 6000; + let mut prom_bind = format!("{localhost}:{prom_port}"); + while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + // We should NOT match the miner's rpc or p2p binds + prom_port = rng.gen_range(1024..65533); + prom_bind = format!("{localhost}:{prom_port}"); + } conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; From 331dc9436e8296513d543f348e9d9b1765fa570f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 4 Oct 2024 11:17:10 -0500 Subject: [PATCH 758/910] feat: wait to build block if min gap won't be met * ci: disable unneeded microblocks tests * test: remove blanket 2 sec wait for nakamoto btc blocks --- .github/workflows/bitcoin-tests.yml | 23 ++++---- .../stacks-node/src/nakamoto_node/miner.rs | 52 +++++++++++++------ .../src/tests/nakamoto_integrations.rs | 1 - 3 files changed, 49 insertions(+), 27 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index cd867340eca..4115118eaf8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -32,7 +32,6 @@ jobs: - tests::bitcoin_regtest::bitcoind_integration_test - tests::integrations::integration_test_get_info - tests::neon_integrations::antientropy_integration_test - - tests::neon_integrations::bad_microblock_pubkey - tests::neon_integrations::bitcoind_forking_test - tests::neon_integrations::bitcoind_integration_test - tests::neon_integrations::block_large_tx_integration_test @@ -43,21 +42,26 @@ jobs: - tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window10 - tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window5 - tests::neon_integrations::liquid_ustx_integration - - tests::neon_integrations::microblock_fork_poison_integration_test - - tests::neon_integrations::microblock_integration_test + # Microblock tests that are no longer needed on every CI run + # (microblocks 
are unsupported starting in Epoch 2.5) + # - tests::neon_integrations::bad_microblock_pubkey + # - tests::neon_integrations::microblock_fork_poison_integration_test + # - tests::neon_integrations::microblock_integration_test + # - tests::neon_integrations::microblock_limit_hit_integration_test + # - tests::neon_integrations::test_problematic_microblocks_are_not_mined + # - tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored + # - tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test + # - tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test + # - tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test + # - tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test # Disable this flaky test. Microblocks are no longer supported anyways. # - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY - - tests::neon_integrations::microblock_limit_hit_integration_test - tests::neon_integrations::miner_submit_twice - tests::neon_integrations::mining_events_integration_test - tests::neon_integrations::pox_integration_test - tests::neon_integrations::push_boot_receipts - - tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test - tests::neon_integrations::should_fix_2771 - tests::neon_integrations::size_check_integration_test - - tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test - - tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test - - tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test - tests::neon_integrations::stx_delegate_btc_integration_test - tests::neon_integrations::stx_transfer_btc_integration_test - tests::neon_integrations::stack_stx_burn_op_test @@ -66,8 +70,6 @@ jobs: - tests::neon_integrations::test_flash_block_skip_tenure - tests::neon_integrations::test_problematic_blocks_are_not_mined - tests::neon_integrations::test_problematic_blocks_are_not_relayed_or_stored - - tests::neon_integrations::test_problematic_microblocks_are_not_mined - - tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored - tests::neon_integrations::test_problematic_txs_are_not_stored - tests::neon_integrations::use_latest_tip_integration_test - tests::neon_integrations::confirm_unparsed_ongoing_ops @@ -90,6 +92,7 @@ jobs: - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored - tests::nakamoto_integrations::nakamoto_attempt_time + - tests::nakamoto_integrations::skip_mining_long_tx - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures - tests::signer::v0::end_of_tenure diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 850fffaab68..af539db5b1d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -40,6 +40,7 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -949,6 +950,29 @@ impl BlockMinerThread { Some(vrf_proof) } + fn 
validate_timestamp_info( + &self, + current_timestamp_secs: u64, + stacks_parent_header: &StacksHeaderInfo, + ) -> bool { + let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() { + Some(naka_header) => naka_header.timestamp, + None => stacks_parent_header.burn_header_timestamp, + }; + let time_since_parent_ms = current_timestamp_secs.saturating_sub(parent_timestamp) * 1000; + if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { + debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; + "current_timestamp" => current_timestamp_secs, + "parent_block_id" => %stacks_parent_header.index_block_hash(), + "parent_block_height" => stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, + ); + false + } else { + true + } + } + /// Check that the provided block is not mined too quickly after the parent block. /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. fn validate_timestamp(&self, x: &NakamotoBlock) -> Result { @@ -970,22 +994,7 @@ impl BlockMinerThread { ); NakamotoNodeError::ParentNotFound })?; - let current_timestamp = x.header.timestamp; - let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() { - Some(naka_header) => naka_header.timestamp, - None => stacks_parent_header.burn_header_timestamp, - }; - let time_since_parent_ms = current_timestamp.saturating_sub(parent_timestamp) * 1000; - if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { - debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; - "current_timestamp" => current_timestamp, - "parent_block_id" => %stacks_parent_header.index_block_hash(), - "parent_block_height" => stacks_parent_header.stacks_block_height, - "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, - ); - return Ok(false); - } - Ok(true) + Ok(self.validate_timestamp_info(x.header.timestamp, &stacks_parent_header)) } // TODO: add tests from mutation testing results #4869 @@ -1042,6 +1051,17 @@ impl BlockMinerThread { let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); + if !self.validate_timestamp_info( + get_epoch_time_secs(), + &parent_block_info.stacks_parent_header, + ) { + // treat a too-soon-to-mine block as an interrupt: this will let the caller sleep and then re-evaluate + // all the pre-mining checks (burnchain tip changes, signal interrupts, etc.) 
+ return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); + } + // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3b8cdecb2ed..79e7ed6424b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -728,7 +728,6 @@ pub fn next_block_and_wait_for_commits( (0..commits_before.len()).map(|_| None).collect(); let mut commit_sent_time: Vec> = (0..commits_before.len()).map(|_| None).collect(); - sleep_ms(2000); // Make sure that the proposed stacks block has a different timestamp than its parent next_block_and(btc_controller, timeout_secs, || { for i in 0..commits_submitted.len() { let commits_sent = commits_submitted[i].load(Ordering::SeqCst); From cf5fd4ceef081436d099793c558d85ae5d32e82b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 09:36:05 -0700 Subject: [PATCH 759/910] Cleanup bind ports Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 26 ++-- .../src/tests/nakamoto_integrations.rs | 74 +++++++-- .../src/tests/neon_integrations.rs | 142 ++++++++++++++---- 3 files changed, 189 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 8829c9782d8..e45c22c1626 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -295,9 +295,13 @@ pub fn new_test_conf() -> Config { // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", // stacksAddress: "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT" let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let rpc_port: u16 = rng.gen_range(1024..65533); - let p2p_port = rpc_port + 1; + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b { + break (a, b); + } + }; let mut conf = Config::default(); conf.node.working_dir = format!( @@ -324,6 +328,7 @@ pub fn new_test_conf() -> Config { /// Randomly change the config's network ports to new ports. 
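/// Ports are drawn from the non-privileged range 1024-65534 and redrawn until the RPC and p2p ports are distinct from each other and from the config's prior binds.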
pub fn set_random_binds(config: &mut Config) { + let mut rng = rand::thread_rng(); let prior_rpc_port: u16 = config .node .rpc_bind @@ -341,12 +346,15 @@ pub fn set_random_binds(config: &mut Config) { .parse() .unwrap(); let (rpc_port, p2p_port) = loop { - let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let rpc_port: u16 = rng.gen_range(1024..65533); - let p2p_port = rpc_port + 1; - if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { - break (rpc_port, p2p_port); + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); } }; let localhost = "127.0.0.1"; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17840dcb55b..e6eabc99d3c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3459,16 +3459,37 @@ fn follower_bootup() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let mut rpc_port: u16 = rng.gen_range(1024..65533); - while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { - // We should NOT match the miner's rpc bind and subsequently p2p port - rpc_port = rng.gen_range(1024..65533); - } - let p2p_port = rpc_port + 1; + let prior_rpc_port: u16 = naka_conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = naka_conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); + } + }; + let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); @@ -3815,16 +3836,37 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.local_peer_seed = vec![0x02; 32]; follower_conf.node.miner = false; - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let mut rpc_port: u16 = rng.gen_range(1024..65533); - while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { - // We should NOT match the miner's rpc bind and subsequently p2p port - rpc_port = rng.gen_range(1024..65533); - } - let p2p_port = rpc_port + 1; + let prior_rpc_port: u16 = naka_conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = naka_conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != 
prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); + } + }; + let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d34cbffe5b4..bef5786fb22 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -986,16 +986,31 @@ fn bitcoind_integration_test() { } let (mut conf, miner_account) = neon_integration_test_conf(); - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 let mut prom_port = 6000; - let mut prom_bind = format!("{localhost}:{prom_port}"); - while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + // Use a non-privileged port between 1024 and 65534 + while prom_port == prior_rpc_port || prom_port == prior_p2p_port { // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..65533); - prom_bind = format!("{localhost}:{prom_port}"); + prom_port = rng.gen_range(1024..u16::MAX); } + let localhost = "127.0.0.1"; + let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12475,16 +12490,37 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let mut rpc_port: u16 = rng.gen_range(1024..65533); - while format!("{localhost}:{rpc_port}") == conf.node.rpc_bind { - // We should NOT match the miner's rpc bind and subsequently p2p port - rpc_port = rng.gen_range(1024..65533); - } - let p2p_port = rpc_port + 1; + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); + } + }; + let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); @@ -12670,11 +12706,31 @@ fn mock_miner_replay() { follower_conf.node.local_peer_seed = vec![0x02; 32]; let mut rng = rand::thread_rng(); - + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); let (rpc_port, p2p_port) = loop { let a = rng.gen_range(1024..u16::MAX); // use a 
non-privileged port between 1024 and 65534 let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b { + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { break (a, b); } }; @@ -12812,16 +12868,31 @@ fn listunspent_max_utxos() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 let mut prom_port = 6000; - let mut prom_bind = format!("{localhost}:{prom_port}"); - while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + // Use a non-privileged port between 1024 and 65534 + while prom_port == prior_rpc_port || prom_port == prior_p2p_port { // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..65533); - prom_bind = format!("{localhost}:{prom_port}"); + prom_port = rng.gen_range(1024..u16::MAX); } + let localhost = "127.0.0.1"; + let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12867,16 +12938,31 @@ fn start_stop_bitcoind() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 let mut prom_port = 6000; - let mut prom_bind = format!("{localhost}:{prom_port}"); - while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + // Use a non-privileged port between 1024 and 65534 + while prom_port == prior_rpc_port || prom_port == prior_p2p_port { // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..65533); - prom_bind = format!("{localhost}:{prom_port}"); + prom_port = rng.gen_range(1024..u16::MAX); } + let localhost = "127.0.0.1"; + let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; From f7cdbaca4028c3773d931dd4a66d469c1631fc59 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 10:40:35 -0700 Subject: [PATCH 760/910] Incorrect path was passed to new_rpc_call_timer calling out of bounds array access Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index c144d0401a7..6168e761069 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -376,10 +376,12 @@ impl StacksClient { "last_sortition" => %last_sortition, ); let path = self.tenure_forking_info_path(chosen_parent, last_sortition); - let timer = crate::monitoring::new_rpc_call_timer( - "/v3/tenures/fork_info/:start/:stop", - &self.http_origin, + // Use a seperate metrics path to allow the same metric for different start and stop hashes + let metrics_path = format!( + 
"{}{RPC_TENURE_FORKING_INFO_PATH}/:start/:stop", + self.http_origin ); + let timer = crate::monitoring::new_rpc_call_timer(&metrics_path, &self.http_origin); let send_request = || { self.stacks_node_client .get(&path) From 5b036df16e938987c58886ba1e0df682c7ee91a3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 10:43:31 -0700 Subject: [PATCH 761/910] Typo for Brice Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6168e761069..5caf9d3f42e 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -376,7 +376,7 @@ impl StacksClient { "last_sortition" => %last_sortition, ); let path = self.tenure_forking_info_path(chosen_parent, last_sortition); - // Use a seperate metrics path to allow the same metric for different start and stop hashes + // Use a separate metrics path to allow the same metric for different start and stop hashes let metrics_path = format!( "{}{RPC_TENURE_FORKING_INFO_PATH}/:start/:stop", self.http_origin From 381a8c789ce83ce0fdd6b5a1bf493d9cb4c908d6 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 4 Oct 2024 11:09:00 -0700 Subject: [PATCH 762/910] fix: better implementation of path label for prom metrics --- stacks-signer/src/monitoring/mod.rs | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index e03b03d47af..621886b9c0a 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -92,13 +92,22 @@ pub fn update_signer_nonce(nonce: u64) { prometheus::SIGNER_NONCE.set(nonce as i64); } +// Allow dead code because this is only used in the `monitoring_prom` feature +// but we want to run it in a test +#[allow(dead_code)] +/// Remove the origin from the full path to avoid duplicate metrics for different origins +fn remove_origin_from_path(full_path: &str, origin: &str) -> String { + let path = full_path.replace(origin, ""); + path +} + /// Start a new RPC call timer. /// The `origin` parameter is the base path of the RPC call, e.g. `http://node.com`. /// The `origin` parameter is removed from `full_path` when storing in prometheus. 
#[cfg(feature = "monitoring_prom")] pub fn new_rpc_call_timer(full_path: &str, origin: &str) -> HistogramTimer { - let path = &full_path[origin.len()..]; - let histogram = prometheus::SIGNER_RPC_CALL_LATENCIES_HISTOGRAM.with_label_values(&[path]); + let path = remove_origin_from_path(full_path, origin); + let histogram = prometheus::SIGNER_RPC_CALL_LATENCIES_HISTOGRAM.with_label_values(&[&path]); histogram.start_timer() } @@ -140,3 +149,16 @@ pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), Stri } Ok(()) } + +#[test] +fn test_remove_origin_from_path() { + let full_path = "http://localhost:20443/v2/info"; + let origin = "http://localhost:20443"; + let path = remove_origin_from_path(full_path, origin); + assert_eq!(path, "/v2/info"); + + let full_path = "/v2/info"; + let origin = "http://localhost:20443"; + let path = remove_origin_from_path(full_path, origin); + assert_eq!(path, "/v2/info"); +} From 30cd8ec965e1103c8618a523853b3aa8cdd7c326 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 12:52:13 -0700 Subject: [PATCH 763/910] Do not abort the runloop unless we were not told to exit Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/run_loop/neon.rs | 40 ++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index b1fa0ff53bb..331e7e597ce 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1314,6 +1314,26 @@ impl RunLoop { // // _this will block if the relayer's buffer is full_ if !node.relayer_sortition_notify() { + // First check if we were supposed to cleanly exit + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. + info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + let peer_network = node.join(); + liveness_thread.join().unwrap(); + + // Data that will be passed to Nakamoto run loop + // Only gets transfered on clean shutdown of neon run loop + let data_to_naka = Neon2NakaData::new(globals, peer_network); + + info!("Exiting stacks-node"); + return Some(data_to_naka); + } // relayer hung up, exit. error!("Runloop: Block relayer and miner hung up, exiting."); return None; @@ -1388,6 +1408,26 @@ impl RunLoop { } if !node.relayer_issue_tenure(ibd) { + // First check if we were supposed to cleanly exit + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. + info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + let peer_network = node.join(); + liveness_thread.join().unwrap(); + + // Data that will be passed to Nakamoto run loop + // Only gets transfered on clean shutdown of neon run loop + let data_to_naka = Neon2NakaData::new(globals, peer_network); + + info!("Exiting stacks-node"); + return Some(data_to_naka); + } // relayer hung up, exit. 
error!("Runloop: Block relayer and miner hung up, exiting."); break None; From fc80812111252eddf611d017d5aa808d504bb285 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 13:48:23 -0700 Subject: [PATCH 764/910] CRC: make a global mutex of used ports in tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 60 +++++--- .../src/tests/nakamoto_integrations.rs | 66 +-------- .../src/tests/neon_integrations.rs | 135 ++---------------- 3 files changed, 51 insertions(+), 210 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index e45c22c1626..883970ef095 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -13,9 +13,9 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::atomic::AtomicU64; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::BurnStateDB; @@ -99,6 +99,34 @@ lazy_static! { ); } +lazy_static! { + static ref USED_PORTS: Mutex> = Mutex::new(HashSet::new()); +} + +/// Generate a random port number between 1024 and 65534 (inclusive) and insert it into the USED_PORTS set. +/// Returns the generated port number. +pub fn gen_random_port() -> u16 { + let mut rng = rand::thread_rng(); + let range_len = (1024..u16::MAX).len(); + loop { + let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if insert_new_port(port) { + return port; + } + assert!( + USED_PORTS.lock().unwrap().len() < range_len, + "No more available ports" + ); + } +} + +// Add a port to the USED_PORTS set. This is used to ensure that we don't try to bind to the same port in tests +// Returns true if the port was inserted, false if it was already in the set. +pub fn insert_new_port(port: u16) -> bool { + let mut ports = USED_PORTS.lock().unwrap(); + ports.insert(port) +} + pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -294,14 +322,8 @@ pub fn new_test_conf() -> Config { // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01", // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", // stacksAddress: "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT" - let mut rng = rand::thread_rng(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let mut conf = Config::default(); conf.node.working_dir = format!( @@ -328,7 +350,7 @@ pub fn new_test_conf() -> Config { /// Randomly change the config's network ports to new ports. 
pub fn set_random_binds(config: &mut Config) { - let mut rng = rand::thread_rng(); + // Just in case prior config was not created with `new_test_conf`, we need to add the prior generated ports let prior_rpc_port: u16 = config .node .rpc_bind @@ -345,18 +367,10 @@ pub fn set_random_binds(config: &mut Config) { .unwrap() .parse() .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + insert_new_port(prior_rpc_port); + insert_new_port(prior_p2p_port); + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e6eabc99d3c..6577d5e9854 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -105,8 +105,8 @@ use crate::tests::neon_integrations::{ test_observer, wait_for_runloop, }; use crate::tests::{ - get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, - to_addr, + gen_random_port, get_chain_info, make_contract_publish, make_contract_publish_versioned, + make_stacks_transfer, to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -3459,35 +3459,8 @@ fn follower_bootup() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = naka_conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = naka_conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); @@ -3836,35 +3809,8 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.local_peer_seed = vec![0x02; 32]; follower_conf.node.miner = false; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = naka_conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = naka_conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = 
"127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index bef5786fb22..7c7cb97e34d 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -89,6 +89,7 @@ use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; use crate::syncctl::PoxSyncWatchdogComms; +use crate::tests::gen_random_port; use crate::tests::nakamoto_integrations::{get_key_for_cycle, wait_for}; use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; @@ -986,29 +987,7 @@ fn bitcoind_integration_test() { } let (mut conf, miner_account) = neon_integration_test_conf(); - let mut rng = rand::thread_rng(); - let mut prom_port = 6000; - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - // Use a non-privileged port between 1024 and 65534 - while prom_port == prior_rpc_port || prom_port == prior_p2p_port { - // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..u16::MAX); - } + let prom_port = gen_random_port(); let localhost = "127.0.0.1"; let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -12490,35 +12469,8 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); @@ -12705,35 +12657,8 @@ fn mock_miner_replay() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); @@ -12868,29 +12793,7 @@ fn listunspent_max_utxos() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - 
let mut rng = rand::thread_rng(); - let mut prom_port = 6000; - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - // Use a non-privileged port between 1024 and 65534 - while prom_port == prior_rpc_port || prom_port == prior_p2p_port { - // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..u16::MAX); - } + let prom_port = gen_random_port(); let localhost = "127.0.0.1"; let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -12938,29 +12841,7 @@ fn start_stop_bitcoind() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let mut rng = rand::thread_rng(); - let mut prom_port = 6000; - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - // Use a non-privileged port between 1024 and 65534 - while prom_port == prior_rpc_port || prom_port == prior_p2p_port { - // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..u16::MAX); - } + let prom_port = gen_random_port(); let localhost = "127.0.0.1"; let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); From d67bdcd8679a149e6e0d785ac86ea13f58e6004a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 14:03:30 -0700 Subject: [PATCH 765/910] CRC: remove unused import Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6577d5e9854..34af301ac89 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,7 +29,6 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; -use rand::Rng; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ From 3690ad98ace9a194b970bdc077bf6f4ce023af46 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 14:13:00 -0700 Subject: [PATCH 766/910] Should check before attempting to insert and return a result Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 883970ef095..ba88584f393 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -109,14 +109,14 @@ pub fn gen_random_port() -> u16 { let mut rng = rand::thread_rng(); let range_len = (1024..u16::MAX).len(); loop { - let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if insert_new_port(port) { - return port; - } assert!( USED_PORTS.lock().unwrap().len() < range_len, "No more available ports" ); + let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if insert_new_port(port) { + return port; + } } } From 
02fa642ed8c411a44ad668b2040f98683e4f7832 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 7 Oct 2024 15:31:10 -0700 Subject: [PATCH 767/910] Fix metric breakage Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 56 ++++++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 28 ++++++---- 2 files changed, 51 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 049df9f83a8..5868111047e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1488,16 +1488,19 @@ fn simple_neon_integration() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + Ok(res.contains(&expected_result)) + }) + .expect("Prometheus metrics did not update"); } info!("Nakamoto miner started..."); @@ -1599,19 +1602,30 @@ fn simple_neon_integration() { let bhh = u64::from(tip.burn_header_height); test_observer::contains_burn_block_range(220..=bhh).unwrap(); - // make sure prometheus returns an updated height + // make sure prometheus returns an updated number of processed blocks #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result_1 = format!( + "stacks_node_stx_blocks_processed_total {}", + tip.stacks_block_height + ); + + let expected_result_2 = format!( + "stacks_node_stacks_tip_height {}", + tip.stacks_block_height - 1 + ); + Ok(res.contains(&expected_result_1) && res.contains(&expected_result_2)) + }) + .expect("Prometheus metrics did not update"); } check_nakamoto_empty_block_heuristics(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6bffea27493..2e1a4483756 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -558,18 +558,22 @@ fn miner_gather_signatures() { // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. 
- let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - let expected_result = format!( - "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", - num_signers - ); - assert!(metrics_response.contains(&expected_result)); + wait_for(30, || { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `10`, even though there are only two blocks proposed. + let expected_result_1 = + format!("stacks_signer_block_proposals_received {}", num_signers * 2); + let expected_result_2 = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers * 2 + ); + Ok(metrics_response.contains(&expected_result_1) + && metrics_response.contains(&expected_result_2)) + }) + .expect("Failed to advance prometheus metrics"); } } From 807c4d4017de02797634c8df495a0d00f5d6b3fc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 8 Oct 2024 12:54:02 -0400 Subject: [PATCH 768/910] feat: make the timeout for event observers configurable --- CHANGELOG.md | 1 + testnet/stacks-node/src/config.rs | 8 +- testnet/stacks-node/src/event_dispatcher.rs | 77 ++++++++++++++++++- testnet/stacks-node/src/tests/epoch_205.rs | 4 + testnet/stacks-node/src/tests/epoch_21.rs | 7 ++ testnet/stacks-node/src/tests/epoch_22.rs | 2 + testnet/stacks-node/src/tests/epoch_23.rs | 1 + testnet/stacks-node/src/tests/epoch_24.rs | 2 + testnet/stacks-node/src/tests/epoch_25.rs | 1 + .../src/tests/nakamoto_integrations.rs | 24 ++++++ .../src/tests/neon_integrations.rs | 39 ++++++++++ testnet/stacks-node/src/tests/signer/mod.rs | 2 + testnet/stacks-node/src/tests/signer/v0.rs | 1 + testnet/stacks-node/src/tests/stackerdb.rs | 2 + 14 files changed, 165 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5c84db9a60..5ccb9b5cac9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-tenure-info?` added - `get-block-info?` removed - Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint +- Added optional `timeout_ms` to `events_observer` configuration ## [2.5.0.0.7] diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3852bf42241..e90d610040f 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1128,11 +1128,10 @@ impl Config { .map(|e| EventKeyType::from_string(e).unwrap()) .collect(); - let endpoint = format!("{}", observer.endpoint); - observers.insert(EventObserverConfig { - endpoint, + endpoint: observer.endpoint, events_keys, + timeout_ms: observer.timeout_ms.unwrap_or(1_000), }); } observers @@ -1146,6 +1145,7 @@ impl Config { events_observers.insert(EventObserverConfig { endpoint: val, events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1_000, }); () } @@ -2921,12 +2921,14 @@ impl AtlasConfigFile { pub struct EventObserverConfigFile { pub endpoint: String, pub events_keys: Vec, + pub timeout_ms: Option, } #[derive(Clone, Default, Debug, Hash, PartialEq, Eq, PartialOrd)] pub struct EventObserverConfig { pub endpoint: String, pub events_keys: Vec, + pub timeout_ms: u64, } #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd)] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 
864a964ee69..faf437c4446 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -69,6 +69,7 @@ use super::config::{EventKeyType, EventObserverConfig}; #[derive(Debug, Clone)] struct EventObserver { endpoint: String, + timeout: Duration, } struct ReceiptPayloadInfo<'a> { @@ -335,8 +336,7 @@ impl EventObserver { .parse() .unwrap_or(PeerHost::DNS(host.to_string(), port)); - let backoff = Duration::from_millis(1000); // 1 second - + let mut backoff = Duration::from_millis(100); loop { let mut request = StacksHttpRequest::new_for_peer( peerhost.clone(), @@ -347,7 +347,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match send_http_request(host, port, request, backoff) { + match send_http_request(host, port, request, self.timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!( @@ -368,6 +368,7 @@ impl EventObserver { } } sleep(backoff); + backoff *= 2; } } @@ -1406,6 +1407,7 @@ impl EventDispatcher { info!("Registering event observer at: {}", conf.endpoint); let event_observer = EventObserver { endpoint: conf.endpoint.clone(), + timeout: Duration::from_millis(conf.timeout_ms), }; let observer_index = self.registered_observers.len() as u16; @@ -1498,6 +1500,7 @@ mod test { fn build_block_processed_event() { let observer = EventObserver { endpoint: "nowhere".to_string(), + timeout: Duration::from_secs(3), }; let filtered_events = vec![]; @@ -1558,6 +1561,7 @@ mod test { fn test_block_processed_event_nakamoto() { let observer = EventObserver { endpoint: "nowhere".to_string(), + timeout: Duration::from_secs(3), }; let filtered_events = vec![]; @@ -1699,6 +1703,7 @@ mod test { let observer = EventObserver { endpoint: format!("127.0.0.1:{}", port), + timeout: Duration::from_secs(3), }; let payload = json!({"key": "value"}); @@ -1749,6 +1754,7 @@ mod test { let observer = EventObserver { endpoint: format!("127.0.0.1:{}", port), + timeout: Duration::from_secs(3), }; let payload = json!({"key": "value"}); @@ -1759,4 +1765,69 @@ mod test { rx.recv_timeout(Duration::from_secs(5)) .expect("Server did not receive request in time"); } + + #[test] + fn test_send_payload_timeout() { + let port = get_random_port(); + let timeout = Duration::from_secs(3); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + let mut _request_holder = None; + while let Ok(request) = server.recv() { + attempt += 1; + if attempt == 1 { + debug!("Mock server received request attempt 1"); + // Do not reply, forcing the sender to timeout and retry, + // but don't drop the request or it will receive a 500 error, + _request_holder = Some(request); + } else { + debug!("Mock server received request attempt 2"); + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + } + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + timeout, + }; + + let payload = json!({"key": "value"}); + + // Record the time before sending the payload + let start_time = Instant::now(); + + // Call the function being 
tested + observer.send_payload(&payload, "/test"); + + // Record the time after the function returns + let elapsed_time = start_time.elapsed(); + + println!("Elapsed time: {:?}", elapsed_time); + assert!( + elapsed_time >= timeout, + "Expected a timeout, but the function returned too quickly" + ); + + assert!( + elapsed_time < timeout + Duration::from_secs(1), + "Expected a timeout, but the function took too long" + ); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 2006abb05e5..6ee96509875 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -112,6 +112,7 @@ fn test_exact_block_costs() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -338,6 +339,7 @@ fn test_dynamic_db_method_costs() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -775,6 +777,7 @@ fn test_cost_limit_switch_version205() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1032,6 +1035,7 @@ fn bigger_microblock_streams_in_2_05() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 2f74ffa7708..dd350026fea 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -75,6 +75,7 @@ fn advance_to_2_1( conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -579,6 +580,7 @@ fn transition_fixes_bitcoin_rigidity() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -1476,6 +1478,7 @@ fn transition_removes_pox_sunset() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -1791,6 +1794,7 @@ fn transition_empty_blocks() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let keychain = Keychain::default(conf.node.seed.clone()); @@ -4740,6 +4744,7 @@ fn 
trait_invocation_cross_epoch() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; @@ -4986,6 +4991,7 @@ fn test_v1_unlock_height_with_current_stackers() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); @@ -5251,6 +5257,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 289d09be642..8b5df5ddaf4 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -138,6 +138,7 @@ fn disable_pox() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); @@ -671,6 +672,7 @@ fn pox_2_unlock_all() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 0452be84766..e3fa85dfc04 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -104,6 +104,7 @@ fn trait_invocation_behavior() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 3fc3b3d5905..cdd0b01560b 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -156,6 +156,7 @@ fn fix_to_pox_contract() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); @@ -795,6 +796,7 @@ fn verify_auto_unlock_behavior() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 5a45b35e86d..dfddcb8464c 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -87,6 +87,7 @@ fn microblocks_disabled() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + 
timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 049df9f83a8..e16332744f0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1436,6 +1436,7 @@ fn simple_neon_integration() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -1669,6 +1670,7 @@ fn flash_blocks_on_epoch_3() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -1936,6 +1938,7 @@ fn mine_multiple_per_tenure_integration() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -2150,6 +2153,7 @@ fn multiple_miners() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -2383,6 +2387,7 @@ fn correct_burn_outs() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -2706,6 +2711,7 @@ fn block_proposal_api_endpoint() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::BlockProposal], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -3078,6 +3084,7 @@ fn miner_writes_proposed_block_to_stackerdb() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3193,6 +3200,7 @@ fn vote_for_aggregate_key_burn_op() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3441,6 +3449,7 @@ fn follower_bootup() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3764,6 +3773,7 @@ fn follower_bootup_across_multiple_cycles() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3990,6 +4000,7 @@ fn burn_ops_integration_test() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), 
events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -4593,6 +4604,7 @@ fn forked_tenure_is_ignored() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -5374,6 +5386,7 @@ fn nakamoto_attempt_time() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::BlockProposal], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -5671,6 +5684,7 @@ fn clarity_burn_state() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -5943,6 +5957,7 @@ fn signer_chainstate() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -6551,6 +6566,7 @@ fn continue_tenure_extend() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -6859,6 +6875,7 @@ fn check_block_times() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -7328,6 +7345,7 @@ fn check_block_info() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -7848,6 +7866,7 @@ fn check_block_info_rewards() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -8194,6 +8213,7 @@ fn mock_mining() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -8461,6 +8481,7 @@ fn utxo_check_on_startup_panic() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); @@ -8541,6 +8562,7 @@ fn utxo_check_on_startup_recover() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); @@ -8622,6 +8644,7 @@ fn v3_signer_api_endpoint() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: 
vec![EventKeyType::BlockProposal], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -8806,6 +8829,7 @@ fn skip_mining_long_tx() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0294876931c..1053852cb9c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1028,6 +1028,7 @@ fn bitcoind_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1141,6 +1142,7 @@ fn confirm_unparsed_ongoing_ops() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1472,6 +1474,7 @@ fn deep_contract() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1576,6 +1579,7 @@ fn bad_microblock_pubkey() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1661,6 +1665,7 @@ fn liquid_ustx_integration() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1789,6 +1794,7 @@ fn lockup_integration() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1905,6 +1911,7 @@ fn stx_transfer_btc_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -2174,6 +2181,7 @@ fn stx_delegate_btc_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -2462,6 +2470,7 @@ fn stack_stx_burn_op_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -2867,6 +2876,7 @@ fn vote_for_aggregate_key_burn_op_test() { 
conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -3469,6 +3479,7 @@ fn microblock_fork_poison_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -3710,6 +3721,7 @@ fn microblock_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -4695,6 +4707,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -4891,6 +4904,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5085,6 +5099,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5351,6 +5366,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5523,6 +5539,7 @@ fn block_replay_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5655,6 +5672,7 @@ fn cost_voting_integration() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -5977,6 +5995,7 @@ fn mining_events_integration_test() { EventKeyType::MinedBlocks, EventKeyType::MinedMicroblocks, ], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -6244,6 +6263,7 @@ fn block_limit_hit_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -6499,6 +6519,7 @@ fn microblock_limit_hit_integration_test() { 
conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -6649,6 +6670,7 @@ fn block_large_tx_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -6787,6 +6809,7 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -6925,6 +6948,7 @@ fn pox_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -7470,6 +7494,7 @@ fn atlas_integration_test() { .insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf_follower_node.node.always_use_affirmation_maps = false; @@ -8010,6 +8035,7 @@ fn antientropy_integration_test() { .insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf_follower_node.node.mine_microblocks = true; @@ -9012,6 +9038,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9192,6 +9219,7 @@ fn use_latest_tip_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9596,6 +9624,7 @@ fn test_problematic_txs_are_not_stored() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9748,6 +9777,7 @@ fn spawn_follower_node( conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances = initial_conf.initial_balances.clone(); @@ -9847,6 +9877,7 @@ fn test_problematic_blocks_are_not_mined() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -10204,6 +10235,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), 
events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -10603,6 +10635,7 @@ fn test_problematic_microblocks_are_not_mined() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -10987,6 +11020,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -11331,6 +11365,7 @@ fn push_boot_receipts() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -11379,6 +11414,7 @@ fn run_with_custom_wallet() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); // custom wallet @@ -11979,6 +12015,7 @@ fn min_txs() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.miner.min_tx_count = 4; @@ -12085,6 +12122,7 @@ fn filter_txs_by_type() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.miner.min_tx_count = 4; @@ -12201,6 +12239,7 @@ fn filter_txs_by_origin() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.miner.min_tx_count = 4; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 454ccde7804..5dcbc9a16a9 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -649,6 +649,7 @@ fn setup_stx_btc_node ()>( EventKeyType::BlockProposal, EventKeyType::BurnchainBlocks, ], + timeout_ms: 1000, }); } @@ -663,6 +664,7 @@ fn setup_stx_btc_node ()>( EventKeyType::MinedBlocks, EventKeyType::BurnchainBlocks, ], + timeout_ms: 1000, }); // The signers need some initial balances in order to pay for epoch 2.5 transaction votes diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6bffea27493..1f946fd7e03 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2981,6 +2981,7 @@ fn signer_set_rollover() { EventKeyType::BlockProposal, EventKeyType::BurnchainBlocks, ], + timeout_ms: 1000, }); } naka_conf.node.rpc_bind = rpc_bind.clone(); diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index e24b5c5c24a..a4dca66ea86 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -116,6 +116,7 @@ fn test_stackerdb_load_store() { conf.events_observers.insert(EventObserverConfig { endpoint: 
format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let privks = vec![ @@ -249,6 +250,7 @@ fn test_stackerdb_event_observer() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::StackerDBChunks], + timeout_ms: 1000, }); let privks = vec![ From 58f16356e43a11843be62fa77b1e46483ef9bd7b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 8 Oct 2024 22:42:36 -0400 Subject: [PATCH 769/910] feat: support re-sending events to event observers across restarts Record events to be sent to event observers in a new sqlite database so that in the event that the node is killed before successfully sending, they can be re-sent on restart. --- CHANGELOG.md | 1 + Cargo.lock | 244 ++++++++++- testnet/stacks-node/Cargo.toml | 2 + testnet/stacks-node/src/config.rs | 12 + testnet/stacks-node/src/event_dispatcher.rs | 416 +++++++++++++++++-- testnet/stacks-node/src/node.rs | 2 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 2 +- testnet/stacks-node/src/run_loop/neon.rs | 2 +- 8 files changed, 619 insertions(+), 62 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ccb9b5cac9..50d149b730d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-block-info?` removed - Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint - Added optional `timeout_ms` to `events_observer` configuration +- Added support for re-sending events to event observers across restarts ## [2.5.0.0.7] diff --git a/Cargo.lock b/Cargo.lock index dc27c931ccb..227cd9d7684 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -190,6 +190,16 @@ dependencies = [ "serde_json", ] +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -626,7 +636,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" name = "clarity" version = "0.0.1" dependencies = [ - "assert-json-diff", + "assert-json-diff 1.1.0", "hashbrown", "integer-sqrt", "lazy_static", @@ -652,6 +662,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "concurrent-queue" version = "2.4.0" @@ -1347,7 +1367,26 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.11", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", "indexmap", "slab", "tokio", @@ -1390,7 +1429,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http", + "http 0.2.11", "httpdate", "mime", "sha1 0.10.6", @@ -1402,7 +1441,7 @@ 
version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.11", ] [[package]] @@ -1457,6 +1496,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -1464,7 +1514,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -1512,9 +1585,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.24", + "http 0.2.11", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -1526,6 +1599,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", +] + [[package]] name = "hyper-rustls" version = "0.24.2" @@ -1533,13 +1626,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "rustls", "tokio", "tokio-rustls", ] +[[package]] +name = "hyper-util" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", + "tokio", +] + [[package]] name = "iana-time-zone" version = "0.1.60" @@ -1725,7 +1833,7 @@ checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ "bitflags 2.4.2", "libc", - "redox_syscall", + "redox_syscall 0.4.1", ] [[package]] @@ -1792,6 +1900,16 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.20" @@ -1892,6 +2010,30 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "mockito" +version = "1.5.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b34bd91b9e5c5b06338d392463e1318d683cf82ec3d3af4014609be6e2108d" +dependencies = [ + "assert-json-diff 2.0.2", + "bytes", + "colored", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "log", + "rand 0.8.5", + "regex", + "serde_json", + "serde_urlencoded", + "similar", + "tokio", +] + [[package]] name = "multer" version = "2.1.0" @@ -1901,7 +2043,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http", + "http 0.2.11", "httparse", "log", "memchr", @@ -2023,6 +2165,29 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.5.7", + "smallvec", + "windows-targets 0.52.0", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -2388,6 +2553,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +dependencies = [ + "bitflags 2.4.2", +] + [[package]] name = "redox_users" version = "0.4.4" @@ -2461,10 +2635,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.24", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-rustls", "ipnet", "js-sys", @@ -2739,6 +2913,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "sct" version = "0.7.1" @@ -2969,6 +3149,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "similar" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" + [[package]] name = "siphasher" version = "0.3.11" @@ -3080,7 +3266,7 @@ dependencies = [ name = "stacks-common" version = "0.0.1" dependencies = [ - "assert-json-diff", + "assert-json-diff 1.1.0", "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", @@ -3124,6 +3310,7 @@ dependencies = [ "lazy_static", "libc", "libsigner", + "mockito", "mutants", "pico-args", "rand 0.8.5", @@ -3140,6 +3327,7 @@ dependencies = [ "stacks-signer", "stackslib", "stx-genesis", + "tempfile", "tikv-jemallocator", "tiny_http", "tokio", @@ -3190,7 +3378,7 @@ dependencies = [ name = "stackslib" version = "0.0.1" dependencies = [ - "assert-json-diff", + "assert-json-diff 1.1.0", "chrono", "clarity", "criterion", @@ -3369,6 +3557,19 @@ dependencies = [ "libc", ] +[[package]] +name = "tempfile" +version = "3.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +dependencies = [ + "cfg-if 1.0.0", + "fastrand 2.0.1", + "once_cell", + "rustix 0.38.31", + "windows-sys 0.52.0", +] + [[package]] name = "term" version = "0.7.0" @@ -3558,6 +3759,7 @@ dependencies = [ "libc", "mio 0.8.10", "num_cpus", + "parking_lot", "pin-project-lite", "socket2 0.5.5", "windows-sys 0.48.0", @@ -3702,7 +3904,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.11", "httparse", "log", "rand 0.8.5", @@ -3865,8 +4067,8 @@ dependencies = [ "futures-channel", "futures-util", "headers", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "mime", "mime_guess", diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 0b9b59a0e72..958820b491a 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -50,6 +50,8 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" +tempfile = "3.3" +mockito = "1.5" [[bin]] name = "stacks-node" diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e90d610040f..5a9be5ab80e 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1188,6 +1188,18 @@ impl Config { }) } + /// Returns the path working directory path, and ensures it exists. + pub fn get_working_dir(&self) -> PathBuf { + let path = PathBuf::from(&self.node.working_dir); + fs::create_dir_all(&path).unwrap_or_else(|_| { + panic!( + "Failed to create working directory at {}", + path.to_string_lossy() + ) + }); + path + } + fn get_burnchain_path(&self) -> PathBuf { let mut path = PathBuf::from(&self.node.working_dir); path.push(&self.burnchain.mode); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index faf437c4446..8c6d66cb8f4 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,6 +16,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Mutex; use std::thread::sleep; @@ -25,6 +26,7 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +use rusqlite::{params, Connection}; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -56,6 +58,7 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; @@ -68,7 +71,12 @@ use super::config::{EventKeyType, EventObserverConfig}; #[derive(Debug, Clone)] struct EventObserver { + /// Path to the database where pending payloads are stored. If `None`, then + /// the database is not used and events are not recoverable across restarts. 
+ db_path: Option, + /// URL to which events will be sent endpoint: String, + /// Timeout for sending events to this observer timeout: Duration, } @@ -314,21 +322,90 @@ impl RewardSetEventPayload { } impl EventObserver { - pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + fn init_db(db_path: &str) -> Result { + let conn = Connection::open(db_path)?; + conn.execute( + "CREATE TABLE IF NOT EXISTS pending_payloads ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT NOT NULL, + payload TEXT NOT NULL, + timeout INTEGER NOT NULL + )", + [], + )?; + Ok(conn) + } + + fn insert_payload( + conn: &Connection, + url: &str, + payload: &serde_json::Value, + timeout: Duration, + ) -> Result<(), db_error> { + let payload_text = payload.to_string(); + let timeout_ms: u64 = timeout.as_millis().try_into().expect("Timeout too large"); + conn.execute( + "INSERT INTO pending_payloads (url, payload, timeout) VALUES (?1, ?2, ?3)", + params![url, payload_text, timeout_ms], + )?; + Ok(()) + } + + fn get_pending_payloads( + conn: &Connection, + ) -> Result, db_error> { + let mut stmt = conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads")?; + let payload_iter = stmt.query_and_then( + [], + |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { + let id: i64 = row.get(0)?; + let url: String = row.get(1)?; + let payload_text: String = row.get(2)?; + let payload: serde_json::Value = serde_json::from_str(&payload_text) + .map_err(|e| db_error::SerializationError(e))?; + let timeout_ms: u64 = row.get(3)?; + Ok((id, url, payload, timeout_ms)) + }, + )?; + payload_iter.collect() + } + + fn delete_payload(conn: &Connection, id: i64) -> Result<(), db_error> { + conn.execute("DELETE FROM pending_payloads WHERE id = ?1", params![id])?; + Ok(()) + } + + fn process_pending_payloads(conn: &Connection) { + let pending_payloads = match Self::get_pending_payloads(conn) { + Ok(payloads) => payloads, + Err(e) => { + error!( + "Event observer: failed to retrieve pending payloads from database"; + "error" => ?e + ); + return; + } + }; + + for (id, url, payload, timeout_ms) in pending_payloads { + let timeout = Duration::from_millis(timeout_ms); + Self::send_payload_directly(&payload, &url, timeout); + if let Err(e) = Self::delete_payload(conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } + } + } + + fn send_payload_directly(payload: &serde_json::Value, full_url: &str, timeout: Duration) { debug!( - "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload + "Event dispatcher: Sending payload"; "url" => %full_url, "payload" => ?payload ); - let url = { - let joined_components = if path.starts_with('/') { - format!("{}{}", &self.endpoint, path) - } else { - format!("{}/{}", &self.endpoint, path) - }; - let url = format!("http://{}", joined_components); - Url::parse(&url) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", url)) - }; + let url = Url::parse(full_url) + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", full_url)); let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); @@ -347,7 +424,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match send_http_request(host, port, request, self.timeout) { + match send_http_request(host, port, request, timeout) { Ok(response) 
=> { if response.preamble().status_code == 200 { debug!( @@ -372,6 +449,55 @@ impl EventObserver { } } + fn new(working_dir: Option, endpoint: String, timeout: Duration) -> Self { + let db_path = if let Some(mut db_path) = working_dir { + db_path.push("event_observers.sqlite"); + + Self::init_db( + db_path + .to_str() + .expect("Failed to convert chainstate path to string"), + ) + .expect("Failed to initialize database for event observer"); + Some(db_path) + } else { + None + }; + + EventObserver { + db_path, + endpoint, + timeout, + } + } + + /// Send the payload to the given URL. + /// Before sending this payload, any pending payloads in the database will be sent first. + pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + // Construct the full URL + let url_str = if path.starts_with('/') { + format!("{}{}", &self.endpoint, path) + } else { + format!("{}/{}", &self.endpoint, path) + }; + let full_url = format!("http://{}", url_str); + + if let Some(db_path) = &self.db_path { + let conn = + Connection::open(db_path).expect("Failed to open database for event observer"); + + // Insert the new payload into the database + Self::insert_payload(&conn, &full_url, payload, self.timeout) + .expect("Failed to insert payload into event observer database"); + + // Process all pending payloads + Self::process_pending_payloads(&conn); + } else { + // No database, just send the payload + Self::send_payload_directly(payload, &full_url, self.timeout); + } + } + fn make_new_mempool_txs_payload(transactions: Vec) -> serde_json::Value { let raw_txs = transactions .into_iter() @@ -1403,12 +1529,13 @@ impl EventDispatcher { } } - pub fn register_observer(&mut self, conf: &EventObserverConfig) { + pub fn register_observer(&mut self, conf: &EventObserverConfig, working_dir: PathBuf) { info!("Registering event observer at: {}", conf.endpoint); - let event_observer = EventObserver { - endpoint: conf.endpoint.clone(), - timeout: Duration::from_millis(conf.timeout_ms), - }; + let event_observer = EventObserver::new( + Some(working_dir), + conf.endpoint.clone(), + Duration::from_millis(conf.timeout_ms), + ); let observer_index = self.registered_observers.len() as u16; @@ -1492,16 +1619,14 @@ mod test { use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; + use tempfile::tempdir; use tiny_http::{Method, Response, Server, StatusCode}; use super::*; #[test] fn build_block_processed_event() { - let observer = EventObserver { - endpoint: "nowhere".to_string(), - timeout: Duration::from_secs(3), - }; + let observer = EventObserver::new(None, "nowhere".to_string(), Duration::from_secs(3)); let filtered_events = vec![]; let block = StacksBlock::genesis_block(); @@ -1559,10 +1684,7 @@ mod test { #[test] fn test_block_processed_event_nakamoto() { - let observer = EventObserver { - endpoint: "nowhere".to_string(), - timeout: Duration::from_secs(3), - }; + let observer = EventObserver::new(None, "nowhere".to_string(), Duration::from_secs(3)); let filtered_events = vec![]; let mut block_header = NakamotoBlockHeader::empty(); @@ -1679,6 +1801,231 @@ mod test { listener.local_addr().unwrap().port() } + #[test] + fn test_init_db() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_init_db.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + // Call init_db + let conn_result = EventObserver::init_db(db_path_str); + assert!(conn_result.is_ok(), "Failed to initialize the database"); + 
+ // Check that the database file exists + assert!(db_path.exists(), "Database file was not created"); + + // Check that the table exists + let conn = conn_result.unwrap(); + let mut stmt = conn + .prepare( + "SELECT name FROM sqlite_master WHERE type='table' AND name='pending_payloads'", + ) + .unwrap(); + let table_exists = stmt.exists([]).unwrap(); + assert!(table_exists, "Table 'pending_payloads' does not exist"); + } + + #[test] + fn test_insert_and_get_pending_payloads() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_payloads.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let url = "http://example.com/api"; + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + // Insert payload + let insert_result = EventObserver::insert_payload(&conn, url, &payload, timeout); + assert!(insert_result.is_ok(), "Failed to insert payload"); + + // Get pending payloads + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); + + let (_id, retrieved_url, retrieved_payload, timeout_ms) = &pending_payloads[0]; + assert_eq!(retrieved_url, url, "URL does not match"); + assert_eq!(retrieved_payload, &payload, "Payload does not match"); + assert_eq!( + *timeout_ms, + timeout.as_millis() as u64, + "Timeout does not match" + ); + } + + #[test] + fn test_delete_payload() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_delete_payload.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let url = "http://example.com/api"; + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + // Insert payload + EventObserver::insert_payload(&conn, url, &payload, timeout) + .expect("Failed to insert payload"); + + // Get pending payloads + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); + + let (id, _, _, _) = pending_payloads[0]; + + // Delete payload + let delete_result = EventObserver::delete_payload(&conn, id); + assert!(delete_result.is_ok(), "Failed to delete payload"); + + // Verify that the pending payloads list is empty + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); + } + + #[test] + fn test_process_pending_payloads() { + use mockito::Matcher; + + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_process_payloads.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + // Create a mock server + let mut server = mockito::Server::new(); + let _m = server + .mock("POST", "/api") + .match_header("content-type", Matcher::Regex("application/json.*".into())) + .match_body(Matcher::Json(payload.clone())) + .with_status(200) + .create(); + + let url = &format!("{}/api", &server.url()); + + // Insert payload + EventObserver::insert_payload(&conn, url, &payload, timeout) + .expect("Failed to insert payload"); + + // Process 
pending payloads + EventObserver::process_pending_payloads(&conn); + + // Verify that the pending payloads list is empty + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); + + // Verify that the mock was called + _m.assert(); + } + + #[test] + fn test_new_event_observer_with_db() { + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + + let endpoint = "http://example.com".to_string(); + let timeout = Duration::from_secs(5); + + let observer = EventObserver::new(Some(working_dir.clone()), endpoint.clone(), timeout); + + // Verify fields + assert_eq!(observer.endpoint, endpoint); + assert_eq!(observer.timeout, timeout); + + // Verify that the database was initialized + let mut db_path = working_dir; + db_path.push("event_observers.sqlite"); + assert!(db_path.exists(), "Database file was not created"); + } + + #[test] + fn test_new_event_observer_without_db() { + let endpoint = "http://example.com".to_string(); + let timeout = Duration::from_secs(5); + + let observer = EventObserver::new(None, endpoint.clone(), timeout); + + // Verify fields + assert_eq!(observer.endpoint, endpoint); + assert_eq!(observer.timeout, timeout); + assert!(observer.db_path.is_none(), "Expected db_path to be None"); + } + + #[test] + fn test_send_payload_with_db() { + use mockito::Matcher; + + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + let payload = json!({"key": "value"}); + + // Create a mock server + let mut server = mockito::Server::new(); + let _m = server + .mock("POST", "/test") + .match_header("content-type", Matcher::Regex("application/json.*".into())) + .match_body(Matcher::Json(payload.clone())) + .with_status(200) + .create(); + + let endpoint = server.url().strip_prefix("http://").unwrap().to_string(); + let timeout = Duration::from_secs(5); + + let observer = EventObserver::new(Some(working_dir.clone()), endpoint, timeout); + + // Call send_payload + observer.send_payload(&payload, "/test"); + + // Verify that the payload was sent and database is empty + _m.assert(); + + // Verify that the database is empty + let db_path = observer.db_path.unwrap(); + let db_path_str = db_path.to_str().unwrap(); + let conn = Connection::open(db_path_str).expect("Failed to open database"); + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); + } + + #[test] + fn test_send_payload_without_db() { + use mockito::Matcher; + + let timeout = Duration::from_secs(5); + let payload = json!({"key": "value"}); + + // Create a mock server + let mut server = mockito::Server::new(); + let _m = server + .mock("POST", "/test") + .match_header("content-type", Matcher::Regex("application/json.*".into())) + .match_body(Matcher::Json(payload.clone())) + .with_status(200) + .create(); + + let endpoint = server.url().strip_prefix("http://").unwrap().to_string(); + + let observer = EventObserver::new(None, endpoint, timeout); + + // Call send_payload + observer.send_payload(&payload, "/test"); + + // Verify that the payload was sent + _m.assert(); + } + #[test] fn test_send_payload_success() { let port = get_random_port(); @@ -1701,10 +2048,8 @@ mod test { tx.send(()).unwrap(); }); - let observer = EventObserver { - endpoint: format!("127.0.0.1:{}", port), - timeout: Duration::from_secs(3), - }; + let observer = + 
EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -1752,10 +2097,8 @@ mod test { } }); - let observer = EventObserver { - endpoint: format!("127.0.0.1:{}", port), - timeout: Duration::from_secs(3), - }; + let observer = + EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -1799,10 +2142,7 @@ mod test { } }); - let observer = EventObserver { - endpoint: format!("127.0.0.1:{}", port), - timeout, - }; + let observer = EventObserver::new(None, format!("127.0.0.1:{}", port), timeout); let payload = json!({"key": "value"}); diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 39095a51d5d..1895912ba52 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -340,7 +340,7 @@ impl Node { let mut event_dispatcher = EventDispatcher::new(); for observer in &config.events_observers { - event_dispatcher.register_observer(observer); + event_dispatcher.register_observer(observer, config.get_working_dir()); } let burnchain_config = config.get_burnchain(); diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 6e3222d99d4..04afdd79eed 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -93,7 +93,7 @@ impl RunLoop { let mut event_dispatcher = EventDispatcher::new(); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer); + event_dispatcher.register_observer(observer, config.get_working_dir()); } Self { diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 331e7e597ce..a18a61988ba 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -236,7 +236,7 @@ impl RunLoop { let mut event_dispatcher = EventDispatcher::new(); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer); + event_dispatcher.register_observer(observer, config.get_working_dir()); } Self { From 1a08c136875e884a1242a2f39caa5ae987cd3e8a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 9 Oct 2024 09:51:37 -0500 Subject: [PATCH 770/910] docs: set 3.0 primary testnet activation height --- testnet/stacks-node/conf/testnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/testnet-miner-conf.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index de0973f2c70..54814c610c8 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -62,4 +62,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 +start_height = 56_457 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 9b0d88ad422..39af98b0919 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -73,4 +73,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 +start_height = 56_457 From c3edb2073aca6279b6a120d885b50abe015985a4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 9 Oct 2024 09:53:59 -0500 Subject: [PATCH 771/910] chore: bump peer version epoch to 3.0 --- stacks-common/src/libcommon.rs | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 5059f6f049d..1a13aa02ed6 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -84,7 +84,7 @@ pub mod consts { /// this should be updated to the latest network epoch version supported by /// this node. this will be checked by the `validate_epochs()` method. - pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_5 as u32; + pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; /// set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; From 1c3302ac1a344c2dd0c23cc194278126ee7a27ff Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 9 Oct 2024 12:33:56 -0400 Subject: [PATCH 772/910] refactor: add `test_observer::register` --- testnet/stacks-node/src/tests/epoch_205.rs | 27 +- testnet/stacks-node/src/tests/epoch_21.rs | 47 +--- testnet/stacks-node/src/tests/epoch_22.rs | 7 +- testnet/stacks-node/src/tests/epoch_23.rs | 7 +- testnet/stacks-node/src/tests/epoch_24.rs | 14 +- testnet/stacks-node/src/tests/epoch_25.rs | 7 +- .../src/tests/nakamoto_integrations.rs | 174 ++---------- .../src/tests/neon_integrations.rs | 264 ++++-------------- testnet/stacks-node/src/tests/stackerdb.rs | 12 +- 9 files changed, 105 insertions(+), 454 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 6ee96509875..0ad70006310 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -109,11 +109,10 @@ fn test_exact_block_costs() { .collect(); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register( + &mut conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -336,11 +335,7 @@ fn test_dynamic_db_method_costs() { }; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -774,11 +769,7 @@ fn test_cost_limit_switch_version205() { }); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1032,11 +1023,7 @@ fn bigger_microblock_streams_in_2_05() { conf.burnchain.pox_2_activation = Some(10_003); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index dd350026fea..4490fa5b073 100644 --- 
a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -71,12 +71,7 @@ fn advance_to_2_1( conf.burnchain.peer_host = "localhost".to_string(); conf.initial_balances.append(&mut initial_balances); conf.miner.block_reward_recipient = block_reward_recipient; - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; @@ -577,11 +572,7 @@ fn transition_fixes_bitcoin_rigidity() { ]; conf.initial_balances.append(&mut initial_balances); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; @@ -1474,12 +1465,7 @@ fn transition_removes_pox_sunset() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -1790,12 +1776,7 @@ fn transition_empty_blocks() { conf.burnchain.epochs = Some(epochs); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let keychain = Keychain::default(conf.node.seed.clone()); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -4741,11 +4722,7 @@ fn trait_invocation_cross_epoch() { amount: 200_000_000, }]; conf.initial_balances.append(&mut initial_balances); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; epochs[2].start_height = epoch_2_05; @@ -4987,12 +4964,7 @@ fn test_v1_unlock_height_with_current_stackers() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -5253,12 +5225,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git 
a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 8b5df5ddaf4..fecf5c46524 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -134,12 +134,7 @@ fn disable_pox() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index e3fa85dfc04..7d0a5216a0f 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -100,12 +100,7 @@ fn trait_invocation_behavior() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index cdd0b01560b..9c57a732d05 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -152,12 +152,7 @@ fn fix_to_pox_contract() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -792,12 +787,7 @@ fn verify_auto_unlock_behavior() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index dfddcb8464c..345aec4557e 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -83,12 +83,7 @@ fn microblocks_disabled() { conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ae1257f9337..b3d2959f9c1 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ 
b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1432,12 +1432,7 @@ fn simple_neon_integration() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -1680,12 +1675,7 @@ fn flash_blocks_on_epoch_3() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -1948,12 +1938,7 @@ fn mine_multiple_per_tenure_integration() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -2163,12 +2148,7 @@ fn multiple_miners() { ); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -2397,12 +2377,7 @@ fn correct_burn_outs() { let signers = TestSigners::new(vec![sender_signer_sk]); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -2721,12 +2696,7 @@ fn block_proposal_api_endpoint() { // only subscribe to the block proposal events test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::BlockProposal], - timeout_ms: 1000, - }); + test_observer::register(&mut conf, &[EventKeyType::BlockProposal]); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3094,12 +3064,10 @@ fn miner_writes_proposed_block_to_stackerdb() { let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register( + &mut naka_conf, + &[EventKeyType::AnyEvent, 
EventKeyType::MinedBlocks], + ); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -3210,12 +3178,7 @@ fn vote_for_aggregate_key_burn_op() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -3459,12 +3422,7 @@ fn follower_bootup() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -3783,12 +3741,7 @@ fn follower_bootup_across_multiple_cycles() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -4010,12 +3963,7 @@ fn burn_ops_integration_test() { ); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -4614,12 +4562,10 @@ fn forked_tenure_is_ignored() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register( + &mut naka_conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -5396,12 +5342,7 @@ fn nakamoto_attempt_time() { // only subscribe to the block proposal events test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::BlockProposal], - timeout_ms: 1000, - }); + test_observer::register(&mut naka_conf, &[EventKeyType::BlockProposal]); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -5694,12 +5635,7 @@ fn clarity_burn_state() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: 
vec![EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -5967,12 +5903,7 @@ fn signer_chainstate() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -6576,12 +6507,7 @@ fn continue_tenure_extend() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -6885,12 +6811,7 @@ fn check_block_times() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -7355,12 +7276,7 @@ fn check_block_info() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -7876,12 +7792,7 @@ fn check_block_info_rewards() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -8223,12 +8134,7 @@ fn mock_mining() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -8491,12 +8397,7 @@ fn utxo_check_on_startup_panic() { naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - 
timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); let (last, rest) = epochs.split_last_mut().unwrap(); @@ -8572,12 +8473,7 @@ fn utxo_check_on_startup_recover() { naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); let (last, rest) = epochs.split_last_mut().unwrap(); @@ -8654,12 +8550,7 @@ fn v3_signer_api_endpoint() { // only subscribe to the block proposal events test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::BlockProposal], - timeout_ms: 1000, - }); + test_observer::register(&mut conf, &[EventKeyType::BlockProposal]); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -8839,12 +8730,7 @@ fn skip_mining_long_tx() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 1053852cb9c..38b34d124d3 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -206,7 +206,9 @@ pub mod test_observer { use warp::Filter; use {tokio, warp}; + use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent}; + use crate::Config; pub const EVENT_OBSERVER_PORT: u16 = 50303; @@ -631,6 +633,18 @@ pub mod test_observer { Err(format!("Missing the following burn blocks: {missing:?}")) } } + + pub fn register(config: &mut Config, event_keys: &[EventKeyType]) { + config.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{EVENT_OBSERVER_PORT}"), + events_keys: event_keys.to_vec(), + timeout_ms: 1000, + }); + } + + pub fn register_any(config: &mut Config) { + self::register(config, &[EventKeyType::AnyEvent]); + } } const PANIC_TIMEOUT_SECS: u64 = 600; @@ -1024,12 +1038,7 @@ fn bitcoind_integration_test() { conf.burnchain.max_rbf = 1000000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1138,12 +1147,7 @@ fn confirm_unparsed_ongoing_ops() { conf.burnchain.max_rbf = 1000000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - 
timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1470,12 +1474,7 @@ fn deep_contract() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1575,12 +1574,7 @@ fn bad_microblock_pubkey() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1661,12 +1655,7 @@ fn liquid_ustx_integration() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1790,12 +1779,7 @@ fn lockup_integration() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1908,11 +1892,7 @@ fn stx_transfer_btc_integration_test() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -2178,11 +2158,7 @@ fn stx_delegate_btc_integration_test() { conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -2467,11 +2443,7 @@ fn stack_stx_burn_op_test() { conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -2873,11 +2845,7 @@ fn vote_for_aggregate_key_burn_op_test() { conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: 
format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3475,12 +3443,7 @@ fn microblock_fork_poison_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3717,12 +3680,7 @@ fn microblock_integration_test() { conf.node.wait_time_for_microblocks = 0; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4704,11 +4662,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4901,11 +4855,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5096,11 +5046,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5363,11 +5309,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.burnchain.epochs = Some(epochs); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5535,12 +5477,7 @@ fn block_replay_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = 
BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5668,12 +5605,7 @@ fn cost_voting_integration() { conf.node.wait_time_for_blocks = 1_000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -5987,16 +5919,14 @@ fn mining_events_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![ + test_observer::register( + &mut conf, + &[ EventKeyType::AnyEvent, EventKeyType::MinedBlocks, EventKeyType::MinedMicroblocks, ], - timeout_ms: 1000, - }); + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6259,12 +6189,7 @@ fn block_limit_hit_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6515,12 +6440,7 @@ fn microblock_limit_hit_integration_test() { conf.burnchain.pox_2_activation = Some(10_003); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6666,12 +6586,7 @@ fn block_large_tx_integration_test() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: spender_addr.clone().into(), @@ -6805,12 +6720,7 @@ fn microblock_large_tx_integration_test_FLAKY() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: addr.clone().into(), @@ -6941,16 +6851,11 @@ fn pox_integration_test() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); + test_observer::register_any(&mut conf); // required for testing post-sunset behavior conf.node.always_use_affirmation_maps = false; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); - let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let second_bal = 2_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let third_bal = 2_000_000_000 * 
(core::MICROSTACKS_PER_STACKS as u64); @@ -9035,11 +8940,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value amount: 10000000000, }); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -9215,12 +9116,7 @@ fn use_latest_tip_integration_test() { conf.node.microblock_frequency = 1_000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -9620,12 +9516,7 @@ fn test_problematic_txs_are_not_stored() { conf.burnchain.ast_precheck_size_height = Some(0); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9774,11 +9665,7 @@ fn spawn_follower_node( conf.burnchain.peer_version, ); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); @@ -9873,12 +9760,7 @@ fn test_problematic_blocks_are_not_mined() { conf.burnchain.ast_precheck_size_height = Some(210); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -10231,12 +10113,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { conf.burnchain.ast_precheck_size_height = Some(210); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -10631,12 +10508,7 @@ fn test_problematic_microblocks_are_not_mined() { conf.node.wait_time_for_microblocks = 0; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -11016,12 +10888,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { conf.connection_options.inv_sync_interval = 3; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", 
test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -11362,11 +11229,7 @@ fn push_boot_receipts() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -11411,11 +11274,7 @@ fn run_with_custom_wallet() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); // custom wallet conf.burnchain.wallet_name = "test_with_custom_wallet".to_string(); @@ -12011,12 +11870,7 @@ fn min_txs() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; @@ -12118,12 +11972,7 @@ fn filter_txs_by_type() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; @@ -12235,12 +12084,7 @@ fn filter_txs_by_origin() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index a4dca66ea86..fbc47e0c3c6 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -113,11 +113,7 @@ fn test_stackerdb_load_store() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let privks = vec![ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R @@ -247,11 +243,7 @@ fn test_stackerdb_event_observer() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::StackerDBChunks], - timeout_ms: 1000, - }); + test_observer::register(&mut conf, &[EventKeyType::StackerDBChunks]); let privks = vec![ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R From b8432739a3368be4f9d517f9697bc4c5ffb76716 Mon Sep 
17 00:00:00 2001
From: Hank Stoever
Date: Wed, 9 Oct 2024 10:02:46 -0700
Subject: [PATCH 773/910] fix: use O(n) instead of O(mn) when checking pox bitvec

---
 stackslib/src/chainstate/nakamoto/mod.rs | 28 ++++++++++++++----------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs
index a8708b0a8db..e000c9c582f 100644
--- a/stackslib/src/chainstate/nakamoto/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/mod.rs
@@ -3810,6 +3810,15 @@ impl NakamotoChainState {
         active_reward_set: &RewardSet,
     ) -> Result<(), ChainstateError> {
         if !tenure_block_commit.treatment.is_empty() {
+            let address_to_indices: HashMap<_, Vec<_>> = active_reward_set
+                .rewarded_addresses
+                .iter()
+                .enumerate()
+                .fold(HashMap::new(), |mut map, (ix, addr)| {
+                    map.entry(addr).or_insert_with(Vec::new).push(ix);
+                    map
+                });
+
             // our block commit issued a punishment, check the reward set and bitvector
             // to ensure that this was valid.
             for treated_addr in tenure_block_commit.treatment.iter() {
@@ -3820,24 +3829,19 @@ impl NakamotoChainState {
                 }
                 // otherwise, we need to find the indices in the rewarded_addresses
                 // corresponding to this address.
-                let address_indices = active_reward_set
-                    .rewarded_addresses
-                    .iter()
-                    .enumerate()
-                    .filter_map(|(ix, addr)| {
-                        if addr == treated_addr.deref() {
-                            Some(ix)
-                        } else {
-                            None
-                        }
-                    });
+                let empty_vec = vec![];
+                let address_indices = address_to_indices
+                    .get(treated_addr.deref())
+                    .unwrap_or(&empty_vec);
+
                 // if any of them are 0, punishment is okay.
                 // if all of them are 1, punishment is not okay.
                 // if all of them are 0, *must* have punished
                 let bitvec_values: Result<Vec<_>, ChainstateError> = address_indices
+                    .iter()
                     .map(|ix| {
-                        let ix = u16::try_from(ix)
+                        let ix = u16::try_from(*ix)
                             .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?;
                         let bitvec_value = block_bitvec.get(ix)
                             .unwrap_or_else(|| {

From 8dfd1cdcbac1aa1b13d19f87381584b68bba8a2b Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Wed, 9 Oct 2024 13:21:33 -0700
Subject: [PATCH 774/910] Do not error if block submit fails in mock mining case in Nakamoto

Signed-off-by: Jacinta Ferrant
---
 .../stacks-node/src/nakamoto_node/relayer.rs | 29 ++++++++++---------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs
index dd6fd02a716..ef01f67f4b0 100644
--- a/testnet/stacks-node/src/nakamoto_node/relayer.rs
+++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs
@@ -1066,20 +1066,23 @@ impl RelayerThread {
         // sign and broadcast
         let mut op_signer = self.keychain.generate_op_signer();
-        let txid = self
-            .bitcoin_controller
-            .submit_operation(
-                last_committed.get_epoch_id().clone(),
-                BlockstackOperationType::LeaderBlockCommit(
-                    last_committed.get_block_commit().clone(),
-                ),
-                &mut op_signer,
-                1,
-            )
-            .map_err(|e| {
+        let res = self.bitcoin_controller.submit_operation(
+            last_committed.get_epoch_id().clone(),
+            BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()),
+            &mut op_signer,
+            1,
+        );
+        let txid = match res {
+            Ok(txid) => txid,
+            Err(e) => {
+                if self.config.node.mock_mining {
+                    debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction");
+                    return Ok(());
+                }
                 warn!("Failed to submit block-commit bitcoin transaction: {e}");
-                NakamotoNodeError::BurnchainSubmissionFailed(e)
-            })?;
+                return Err(NakamotoNodeError::BurnchainSubmissionFailed(e));
+            }
+        };
         info!(
             "Relayer: Submitted block-commit";

From e7a8fff17ca1799aae44f11e3ae6f4647bd0a623 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 10 Oct 2024 09:40:36 -0700
Subject: [PATCH 775/910] Continually get stacker_set in FIRST block of prepare phase and set pox_sync_sample_secs to positive int

Signed-off-by: Jacinta Ferrant
---
 .../src/tests/nakamoto_integrations.rs | 38 ++++++++++++-------
 .../src/tests/neon_integrations.rs     | 11 ------
 2 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 5868111047e..64f2960c689 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -255,7 +255,7 @@ pub fn check_nakamoto_empty_block_heuristics() {
     }
 }

-pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse {
+pub fn get_stacker_set(http_origin: &str, cycle: u64) -> Result<GetStackersResponse, String> {
     let client = reqwest::blocking::Client::new();
     let path = format!("{http_origin}/v3/stacker_set/{cycle}");
     let res = client
         .get(&path)
         .send()
         .unwrap()
         .json::<serde_json::Value>()
-        .unwrap();
+        .map_err(|e| format!("{e}"))?;
     info!("Stacker set response: {res}");
-    let res = serde_json::from_value(res).unwrap();
-    res
+    serde_json::from_value(res).map_err(|e| format!("{e}"))
 }

 pub fn get_stackerdb_slot_version(
@@ -886,19 +885,21 @@ pub fn boot_to_epoch_3(
         signers.signer_keys = signer_sks.to_vec();
     }

-    let prepare_phase_start = btc_regtest_controller
+    // the reward set is generally calculated in the first block of the prepare phase, hence the + 1
+    let reward_set_calculation = btc_regtest_controller
         .get_burnchain()
         .pox_constants
         .prepare_phase_start(
             btc_regtest_controller.get_burnchain().first_block_height,
             reward_cycle,
-        );
+        )
+        + 1;

     // Run until the prepare phase
     run_until_burnchain_height(
         btc_regtest_controller,
         &blocks_processed,
-        prepare_phase_start,
+        reward_set_calculation,
         &naka_conf,
     );

@@ -909,7 +910,11 @@ pub fn boot_to_epoch_3(
     let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key)
         .expect("Failed to serialize aggregate public key");
     let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect();
-    let signer_set = get_stacker_set(&http_origin, reward_cycle + 1);
+    wait_for(30, || {
+        Ok(get_stacker_set(&http_origin, reward_cycle + 1).is_ok())
+    })
+    .expect("Timed out waiting for stacker set");
+    let signer_set = get_stacker_set(&http_origin, reward_cycle + 1).unwrap();
     // Vote on the aggregate public key
     for signer_sk in signer_sks_unique.values() {
         let signer_index =
@@ -1040,19 +1045,21 @@ pub fn boot_to_pre_epoch_3_boundary(
         signers.signer_keys = signer_sks.to_vec();
     }

-    let prepare_phase_start = btc_regtest_controller
+    // the reward set is generally calculated in the first block of the prepare phase, hence the + 1
+    let reward_set_calculation = btc_regtest_controller
         .get_burnchain()
         .pox_constants
         .prepare_phase_start(
             btc_regtest_controller.get_burnchain().first_block_height,
             reward_cycle,
-        );
+        )
+        + 1;

     // Run until the prepare phase
     run_until_burnchain_height(
         btc_regtest_controller,
         &blocks_processed,
-        prepare_phase_start,
+        reward_set_calculation,
         &naka_conf,
     );

@@ -1063,7 +1070,11 @@ pub fn boot_to_pre_epoch_3_boundary(
     let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key)
.expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); - let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + wait_for(30, || { + Ok(get_stacker_set(&http_origin, reward_cycle + 1).is_ok()) + }) + .expect("Timed out waiting for stacker set"); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1).unwrap(); // Vote on the aggregate public key for signer_sk in signer_sks_unique.values() { let signer_index = @@ -2566,7 +2577,7 @@ fn correct_burn_outs() { info!("first_epoch_3_cycle: {:?}", first_epoch_3_cycle); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle); + let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle).unwrap(); assert!(stacker_response.stacker_set.signers.is_some()); assert_eq!( stacker_response.stacker_set.signers.as_ref().unwrap().len(), @@ -8168,6 +8179,7 @@ fn mock_mining() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0294876931c..143ad89b8f0 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -53,7 +53,6 @@ use stacks::net::api::getaccount::AccountEntryResponse; use stacks::net::api::getcontractsrc::ContractSrcResponse; use stacks::net::api::getinfo::RPCPeerInfoData; use stacks::net::api::getpoxinfo::RPCPoxInfoData; -use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::gettransaction_unconfirmed::UnconfirmedTransactionResponse; use stacks::net::api::postblock::StacksBlockAcceptedData; use stacks::net::api::postfeerate::RPCFeeEstimateResponse; @@ -1437,16 +1436,6 @@ pub fn get_contract_src( } } -pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { - let client = reqwest::blocking::Client::new(); - let path = format!("{}/v3/stacker_set/{}", http_origin, reward_cycle); - let res = client.get(&path).send().unwrap(); - - info!("Got stacker_set response {:?}", &res); - let res = res.json::().unwrap(); - res -} - #[test] #[ignore] fn deep_contract() { From 08abd6a032738ce6db5c13c181ea7c32776f395d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 14:03:03 -0400 Subject: [PATCH 776/910] feat: make `chain_id` configurable --- testnet/stacks-node/src/config.rs | 559 +++++++++++++++++------------- 1 file changed, 320 insertions(+), 239 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3852bf42241..35fb1c0231b 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -106,240 +106,6 @@ pub struct LegacyMstxConfigFile { pub mstx_balance: Option>, } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_config_file() { - assert_eq!( - format!("Invalid path: No such file or directory (os error 2)"), - ConfigFile::from_path("some_path").unwrap_err() - ); - assert_eq!( - format!("Invalid toml: unexpected character found: `/` at line 1 column 1"), - ConfigFile::from_str("//[node]").unwrap_err() - ); - 
assert!(ConfigFile::from_str("").is_ok()); - } - - #[test] - fn test_config() { - assert_eq!( - format!("node.seed should be a hex encoded string"), - Config::from_config_file( - ConfigFile::from_str( - r#" - [node] - seed = "invalid-hex-value" - "#, - ) - .unwrap(), - false - ) - .unwrap_err() - ); - - assert_eq!( - format!("node.local_peer_seed should be a hex encoded string"), - Config::from_config_file( - ConfigFile::from_str( - r#" - [node] - local_peer_seed = "invalid-hex-value" - "#, - ) - .unwrap(), - false - ) - .unwrap_err() - ); - - let expected_err_prefix = - "Invalid burnchain.peer_host: failed to lookup address information:"; - let actual_err_msg = Config::from_config_file( - ConfigFile::from_str( - r#" - [burnchain] - peer_host = "bitcoin2.blockstack.com" - "#, - ) - .unwrap(), - false, - ) - .unwrap_err(); - assert_eq!( - expected_err_prefix, - &actual_err_msg[..expected_err_prefix.len()] - ); - - assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); - } - - #[test] - fn should_load_legacy_mstx_balances_toml() { - let config = ConfigFile::from_str( - r#" - [[ustx_balance]] - address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" - amount = 10000000000000000 - - [[ustx_balance]] - address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" - amount = 10000000000000000 - - [[mstx_balance]] # legacy property name - address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" - amount = 10000000000000000 - - [[mstx_balance]] # legacy property name - address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" - amount = 10000000000000000 - "#, - ); - let config = config.unwrap(); - assert!(config.ustx_balance.is_some()); - let balances = config - .ustx_balance - .expect("Failed to parse stx balances from toml"); - assert_eq!(balances.len(), 4); - assert_eq!( - balances[0].address, - "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" - ); - assert_eq!( - balances[1].address, - "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" - ); - assert_eq!( - balances[2].address, - "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" - ); - assert_eq!( - balances[3].address, - "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" - ); - } - - #[test] - fn should_load_auth_token() { - let config = Config::from_config_file( - ConfigFile::from_str( - r#" - [connection_options] - auth_token = "password" - "#, - ) - .unwrap(), - false, - ) - .expect("Expected to be able to parse block proposal token from file"); - - assert_eq!( - config.connection_options.auth_token, - Some("password".to_string()) - ); - } - - #[test] - fn should_load_affirmation_map() { - let affirmation_string = "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; - let affirmation = - AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); - let config = Config::from_config_file( - ConfigFile::from_str(&format!( - r#" - [[burnchain.affirmation_overrides]] - reward_cycle = 413 - affirmation = "{affirmation_string}" - "# - )) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - - assert_eq!(config.burnchain.affirmation_overrides.len(), 1); - 
assert_eq!(config.burnchain.affirmation_overrides.get(&0), None); - assert_eq!( - config.burnchain.affirmation_overrides.get(&413), - Some(&affirmation) - ); - } - - #[test] - fn should_fail_to_load_invalid_affirmation_map() { - let bad_affirmation_string = "bad_map"; - let file = ConfigFile::from_str(&format!( - r#" - [[burnchain.affirmation_overrides]] - reward_cycle = 1 - affirmation = "{bad_affirmation_string}" - "# - )) - .expect("Expected to be able to parse config file from string"); - - assert!(Config::from_config_file(file, false).is_err()); - } - - #[test] - fn should_load_empty_affirmation_map() { - let config = Config::from_config_file( - ConfigFile::from_str(r#""#) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - - assert!(config.burnchain.affirmation_overrides.is_empty()); - } - - #[test] - fn should_include_xenon_default_affirmation_overrides() { - let config = Config::from_config_file( - ConfigFile::from_str( - r#" - [burnchain] - chain = "bitcoin" - mode = "xenon" - "#, - ) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - // Should default add xenon affirmation overrides - assert_eq!(config.burnchain.affirmation_overrides.len(), 5); - } - - #[test] - fn should_override_xenon_default_affirmation_overrides() { - let affirmation_string = "aaapnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; - let affirmation = - AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); - - let config = Config::from_config_file( - ConfigFile::from_str(&format!( - r#" - [burnchain] - chain = "bitcoin" - mode = "xenon" - - [[burnchain.affirmation_overrides]] - reward_cycle = 413 - affirmation = "{affirmation_string}" - "#, - )) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - // Should default add xenon affirmation overrides, but overwrite with the configured one above - assert_eq!(config.burnchain.affirmation_overrides.len(), 5); - assert_eq!(config.burnchain.affirmation_overrides[&413], affirmation); - } -} - impl ConfigFile { pub fn from_path(path: &str) -> Result { let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; @@ -1554,8 +1320,9 @@ pub struct AffirmationOverride { #[derive(Clone, Deserialize, Default, Debug)] pub struct BurnchainConfigFile { pub chain: Option, - pub burn_fee_cap: Option, pub mode: Option, + pub chain_id: Option, + pub burn_fee_cap: Option, pub commit_anchor_block_within: Option, pub peer_host: Option, pub peer_port: Option, @@ -1702,10 +1469,22 @@ impl BurnchainConfigFile { let mut config = BurnchainConfig { chain: self.chain.unwrap_or(default_burnchain_config.chain), - chain_id: if is_mainnet { - CHAIN_ID_MAINNET - } else { - CHAIN_ID_TESTNET + chain_id: match self.chain_id { + Some(chain_id) => { + if is_mainnet && chain_id != CHAIN_ID_MAINNET { + return Err(format!( + "Attempted to run mainnet node with chain_id {chain_id}", + )); + } + chain_id + } 
+ None => { + if is_mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + } + } }, peer_version: if is_mainnet { PEER_VERSION_MAINNET @@ -1841,6 +1620,7 @@ impl BurnchainConfigFile { Ok(config) } } + #[derive(Clone, Debug)] pub struct NodeConfig { pub name: String, @@ -3022,3 +2802,304 @@ pub struct InitialBalanceFile { pub address: String, pub amount: u64, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_config_file() { + assert_eq!( + format!("Invalid path: No such file or directory (os error 2)"), + ConfigFile::from_path("some_path").unwrap_err() + ); + assert_eq!( + format!("Invalid toml: unexpected character found: `/` at line 1 column 1"), + ConfigFile::from_str("//[node]").unwrap_err() + ); + assert!(ConfigFile::from_str("").is_ok()); + } + + #[test] + fn test_config() { + assert_eq!( + format!("node.seed should be a hex encoded string"), + Config::from_config_file( + ConfigFile::from_str( + r#" + [node] + seed = "invalid-hex-value" + "#, + ) + .unwrap(), + false + ) + .unwrap_err() + ); + + assert_eq!( + format!("node.local_peer_seed should be a hex encoded string"), + Config::from_config_file( + ConfigFile::from_str( + r#" + [node] + local_peer_seed = "invalid-hex-value" + "#, + ) + .unwrap(), + false + ) + .unwrap_err() + ); + + let expected_err_prefix = + "Invalid burnchain.peer_host: failed to lookup address information:"; + let actual_err_msg = Config::from_config_file( + ConfigFile::from_str( + r#" + [burnchain] + peer_host = "bitcoin2.blockstack.com" + "#, + ) + .unwrap(), + false, + ) + .unwrap_err(); + assert_eq!( + expected_err_prefix, + &actual_err_msg[..expected_err_prefix.len()] + ); + + assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); + } + + #[test] + fn should_load_legacy_mstx_balances_toml() { + let config = ConfigFile::from_str( + r#" + [[ustx_balance]] + address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" + amount = 10000000000000000 + + [[ustx_balance]] + address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" + amount = 10000000000000000 + + [[mstx_balance]] # legacy property name + address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" + amount = 10000000000000000 + + [[mstx_balance]] # legacy property name + address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" + amount = 10000000000000000 + "#, + ); + let config = config.unwrap(); + assert!(config.ustx_balance.is_some()); + let balances = config + .ustx_balance + .expect("Failed to parse stx balances from toml"); + assert_eq!(balances.len(), 4); + assert_eq!( + balances[0].address, + "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" + ); + assert_eq!( + balances[1].address, + "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" + ); + assert_eq!( + balances[2].address, + "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" + ); + assert_eq!( + balances[3].address, + "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" + ); + } + + #[test] + fn should_load_auth_token() { + let config = Config::from_config_file( + ConfigFile::from_str( + r#" + [connection_options] + auth_token = "password" + "#, + ) + .unwrap(), + false, + ) + .expect("Expected to be able to parse block proposal token from file"); + + assert_eq!( + config.connection_options.auth_token, + Some("password".to_string()) + ); + } + + #[test] + fn should_load_affirmation_map() { + let affirmation_string = 
"nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; + let affirmation = + AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); + let config = Config::from_config_file( + ConfigFile::from_str(&format!( + r#" + [[burnchain.affirmation_overrides]] + reward_cycle = 413 + affirmation = "{affirmation_string}" + "# + )) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + + assert_eq!(config.burnchain.affirmation_overrides.len(), 1); + assert_eq!(config.burnchain.affirmation_overrides.get(&0), None); + assert_eq!( + config.burnchain.affirmation_overrides.get(&413), + Some(&affirmation) + ); + } + + #[test] + fn should_fail_to_load_invalid_affirmation_map() { + let bad_affirmation_string = "bad_map"; + let file = ConfigFile::from_str(&format!( + r#" + [[burnchain.affirmation_overrides]] + reward_cycle = 1 + affirmation = "{bad_affirmation_string}" + "# + )) + .expect("Expected to be able to parse config file from string"); + + assert!(Config::from_config_file(file, false).is_err()); + } + + #[test] + fn should_load_empty_affirmation_map() { + let config = Config::from_config_file( + ConfigFile::from_str(r#""#) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + + assert!(config.burnchain.affirmation_overrides.is_empty()); + } + + #[test] + fn should_include_xenon_default_affirmation_overrides() { + let config = Config::from_config_file( + ConfigFile::from_str( + r#" + [burnchain] + chain = "bitcoin" + mode = "xenon" + "#, + ) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + // Should default add xenon affirmation overrides + assert_eq!(config.burnchain.affirmation_overrides.len(), 5); + } + + #[test] + fn should_override_xenon_default_affirmation_overrides() { + let affirmation_string = "aaapnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; + let affirmation = + AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); + + let config = Config::from_config_file( + ConfigFile::from_str(&format!( + r#" + [burnchain] + chain = "bitcoin" + mode = "xenon" + + [[burnchain.affirmation_overrides]] + reward_cycle = 413 + affirmation = "{affirmation_string}" + "#, + )) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + // Should default add xenon affirmation overrides, but overwrite with the configured one above + assert_eq!(config.burnchain.affirmation_overrides.len(), 5); + assert_eq!(config.burnchain.affirmation_overrides[&413], 
affirmation); + } + + #[test] + fn test_into_config_default_chain_id() { + // Helper function to create BurnchainConfigFile with mode and optional chain_id + fn make_burnchain_config_file(mainnet: bool, chain_id: Option) -> BurnchainConfigFile { + let mut config = BurnchainConfigFile::default(); + if mainnet { + config.mode = Some("mainnet".to_string()); + } + config.chain_id = chain_id; + config + } + let default_burnchain_config = BurnchainConfig::default(); + + // **Case 1a:** Should panic when `is_mainnet` is true and `chain_id` != `CHAIN_ID_MAINNET` + { + let config_file = make_burnchain_config_file(true, Some(CHAIN_ID_TESTNET)); + + let result = config_file.into_config_default(default_burnchain_config.clone()); + + assert!( + result.is_err(), + "Expected error when chain_id != CHAIN_ID_MAINNET on mainnet" + ); + } + + // **Case 1b:** Should not panic when `is_mainnet` is true and `chain_id` == `CHAIN_ID_MAINNET` + { + let config_file = make_burnchain_config_file(true, Some(CHAIN_ID_MAINNET)); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, CHAIN_ID_MAINNET); + } + + // **Case 1c:** Should not panic when `is_mainnet` is false; chain_id should be as provided + { + let chain_id = 123456; + let config_file = make_burnchain_config_file(false, Some(chain_id)); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, chain_id); + } + + // **Case 2a:** Should not panic when `chain_id` is None and `is_mainnet` is true + { + let config_file = make_burnchain_config_file(true, None); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, CHAIN_ID_MAINNET); + } + + // **Case 2b:** Should not panic when `chain_id` is None and `is_mainnet` is false + { + let config_file = make_burnchain_config_file(false, None); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, CHAIN_ID_TESTNET); + } + } +} From 8f9efb9e80871d93328afd3ffc3f6fad7d5078ea Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 14:31:57 -0400 Subject: [PATCH 777/910] feat: check for unknown fields in config file Error if an unknown field is found in a node's config file. This is helpful to catch errors in the config file. 
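Concretely, the enforcement added below is serde's `deny_unknown_fields` container attribute on each config-file struct: deserialization now fails when a TOML key does not match any known field, instead of the key being silently dropped. A minimal, self-contained sketch of that behavior follows; the `NodeSection` struct and its fields are hypothetical stand-ins for the real config structs, and the example assumes the `serde` crate (with the `derive` feature) and the `toml` crate:

```rust
// Sketch of the serde behavior relied on here; `NodeSection` and its
// fields are hypothetical stand-ins for the structs in config.rs.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
struct NodeSection {
    rpc_bind: Option<String>,
    p2p_bind: Option<String>,
}

fn main() {
    // `rpc_bnd` is a deliberate typo. Without `deny_unknown_fields`,
    // serde would ignore it and `rpc_bind` would quietly stay `None`.
    let toml_src = r#"
rpc_bnd = "0.0.0.0:20443"
p2p_bind = "0.0.0.0:20444"
"#;
    let err = toml::from_str::<NodeSection>(toml_src).unwrap_err();
    // The message names the offending key and the accepted ones, e.g.
    // unknown field `rpc_bnd`, expected `rpc_bind` or `p2p_bind`
    println!("{err}");
}
```

This serde-generated message is what the new unit tests below assert on, behind the "Invalid toml: unknown field ..." prefix that config parsing adds when the TOML fails to deserialize.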
--- testnet/stacks-node/src/config.rs | 127 ++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 35fb1c0231b..e203dadb164 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -89,6 +89,7 @@ const INV_REWARD_CYCLES_TESTNET: u64 = 6; const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct ConfigFile { pub __path: Option, // Only used for config file reloads pub burnchain: Option, @@ -1318,6 +1319,7 @@ pub struct AffirmationOverride { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct BurnchainConfigFile { pub chain: Option, pub mode: Option, @@ -2200,6 +2202,7 @@ impl Default for MinerConfig { } #[derive(Clone, Default, Deserialize, Debug)] +#[serde(deny_unknown_fields)] pub struct ConnectionOptionsFile { pub inbox_maxlen: Option, pub outbox_maxlen: Option, @@ -2383,6 +2386,7 @@ impl ConnectionOptionsFile { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct NodeConfigFile { pub name: Option, pub seed: Option, @@ -2517,6 +2521,7 @@ impl NodeConfigFile { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct FeeEstimationConfigFile { pub cost_estimator: Option, pub fee_estimator: Option, @@ -2528,6 +2533,7 @@ pub struct FeeEstimationConfigFile { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct MinerConfigFile { pub first_attempt_time_ms: Option, pub subsequent_attempt_time_ms: Option, @@ -2670,6 +2676,7 @@ impl MinerConfigFile { } } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct AtlasConfigFile { pub attachments_max_size: Option, pub max_uninstantiated_attachments: Option, @@ -2698,6 +2705,7 @@ impl AtlasConfigFile { } #[derive(Clone, Deserialize, Default, Debug, Hash, PartialEq, Eq, PartialOrd)] +#[serde(deny_unknown_fields)] pub struct EventObserverConfigFile { pub endpoint: String, pub events_keys: Vec, @@ -2798,6 +2806,7 @@ pub struct InitialBalance { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct InitialBalanceFile { pub address: String, pub amount: u64, @@ -2873,6 +2882,124 @@ mod tests { assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); } + #[test] + fn test_deny_unknown_fields() { + { + let err = ConfigFile::from_str( + r#" + [node] + name = "test" + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [burnchain] + chain_id = 0x00000500 + unknown_field = "test" + chain = "bitcoin" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [node] + rpc_bind = "0.0.0.0:20443" + unknown_field = "test" + p2p_bind = "0.0.0.0:20444" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [[ustx_balance]] + address = "ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0" + amount = 10000000000000000 + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [[events_observer]] + 
endpoint = "localhost:30000" + unknown_field = "test" + events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [connection_options] + inbox_maxlen = 100 + outbox_maxlen = 200 + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [fee_estimation] + cost_estimator = "foo" + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [miner] + first_attempt_time_ms = 180_000 + unknown_field = "test" + subsequent_attempt_time_ms = 360_000 + "#, + ) + .unwrap_err(); + println!("{}", err); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [atlas] + attachments_max_size = 100 + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + } + #[test] fn should_load_legacy_mstx_balances_toml() { let config = ConfigFile::from_str( From 9b86c5080e29d69951302ced5b5b06db842f7226 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 14:55:33 -0400 Subject: [PATCH 778/910] test: parse all example configs in unit tests --- testnet/stacks-node/src/config.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e203dadb164..e1742f34659 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2814,6 +2814,8 @@ pub struct InitialBalanceFile { #[cfg(test)] mod tests { + use std::path::Path; + use super::*; #[test] @@ -3000,6 +3002,26 @@ mod tests { } } + #[test] + fn test_example_confs() { + // For each config file in the ../conf/ directory, we should be able to parse it + let conf_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("conf"); + println!("Reading config files from: {:?}", conf_dir); + let conf_files = fs::read_dir(conf_dir).unwrap(); + + for entry in conf_files { + let entry = entry.unwrap(); + let path = entry.path(); + if path.is_file() { + let file_name = path.file_name().unwrap().to_str().unwrap(); + if file_name.ends_with(".toml") { + let _config = ConfigFile::from_path(path.to_str().unwrap()).unwrap(); + debug!("Parsed config file: {}", file_name); + } + } + } + } + #[test] fn should_load_legacy_mstx_balances_toml() { let config = ConfigFile::from_str( From 788a14925d2079bbdf6e524d0320826e2f2d6600 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 15:16:40 -0400 Subject: [PATCH 779/910] chore: remove `retry_count` in commented lines --- testnet/stacks-node/conf/local-follower-conf.toml | 1 - testnet/stacks-node/conf/mainnet-follower-conf.toml | 1 - testnet/stacks-node/conf/mocknet-follower-conf.toml | 1 - testnet/stacks-node/conf/regtest-follower-conf.toml | 1 - testnet/stacks-node/conf/testnet-follower-conf.toml | 1 - 5 files changed, 5 deletions(-) diff --git a/testnet/stacks-node/conf/local-follower-conf.toml b/testnet/stacks-node/conf/local-follower-conf.toml index c828c183730..8186b57f54c 100644 --- a/testnet/stacks-node/conf/local-follower-conf.toml +++ b/testnet/stacks-node/conf/local-follower-conf.toml @@ -15,7 +15,6 @@ peer_port = 18444 # Used for sending events to a local 
stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 2ecbc806862..6f6bab70d88 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -16,5 +16,4 @@ peer_port = 8333 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] diff --git a/testnet/stacks-node/conf/mocknet-follower-conf.toml b/testnet/stacks-node/conf/mocknet-follower-conf.toml index 3cb9beb5d78..e9a0e7a6430 100644 --- a/testnet/stacks-node/conf/mocknet-follower-conf.toml +++ b/testnet/stacks-node/conf/mocknet-follower-conf.toml @@ -13,7 +13,6 @@ mode = "mocknet" # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] diff --git a/testnet/stacks-node/conf/regtest-follower-conf.toml b/testnet/stacks-node/conf/regtest-follower-conf.toml index a2a71c8acb9..151446fbaf3 100644 --- a/testnet/stacks-node/conf/regtest-follower-conf.toml +++ b/testnet/stacks-node/conf/regtest-follower-conf.toml @@ -17,7 +17,6 @@ peer_port = 18444 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index cb23477b27d..5fe717bfb14 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -17,7 +17,6 @@ peer_port = 18333 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] From bae605d0a0a1e401b56d5ac603b2b761ff126053 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 10 Oct 2024 11:40:47 -0700 Subject: [PATCH 780/910] Add pox_sync_sample_secs to follower_bootup_across_multiple_cycles and fix prom monitoring issues Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 65 +++++++++++-------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5868111047e..34c60406b88 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3750,6 +3750,7 @@ fn follower_bootup_across_multiple_cycles() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 5; naka_conf.burnchain.max_rbf = 10_000_000; let sender_sk = Secp256k1PrivateKey::new(); @@ -6009,15 +6010,18 @@ fn signer_chainstate() { .unwrap() .stacks_block_height; let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let client = 
reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + Ok(res.contains(&expected_result)) + }) + .expect("Failed waiting for prometheus metrics to update") } info!("Nakamoto miner started..."); @@ -6619,15 +6623,18 @@ fn continue_tenure_extend() { #[cfg(feature = "monitoring_prom")] { let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + Ok(res.contains(&expected_result)) + }) + .expect("Prometheus metrics did not update"); } info!("Nakamoto miner started..."); @@ -6815,15 +6822,19 @@ fn continue_tenure_extend() { #[cfg(feature = "monitoring_prom")] { let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = + format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); + Ok(res.contains(&expected_result)) + }) + .expect("Prometheus metrics did not update"); } coord_channel From a3f32303e4ef7ad112f2737e4d990f6c98796f1e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 17:07:43 -0400 Subject: [PATCH 781/910] fix: handled deprecated `mstx_balance` correctly --- testnet/stacks-node/src/config.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e1742f34659..7bdffa76b66 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -95,6 +95,8 @@ pub struct ConfigFile { pub burnchain: Option, pub node: Option, pub ustx_balance: Option>, + /// Deprecated: use `ustx_balance` instead + pub mstx_balance: Option>, pub events_observer: Option>, pub connection_options: Option, pub fee_estimation: Option, @@ -102,11 +104,6 @@ pub struct ConfigFile { pub atlas: Option, } -#[derive(Clone, Deserialize, Default)] -pub struct LegacyMstxConfigFile { - pub mstx_balance: Option>, -} - impl ConfigFile { pub fn from_path(path: &str) -> Result { let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; @@ -118,13 +115,16 @@ impl ConfigFile { pub fn from_str(content: &str) -> Result { let mut config: ConfigFile = toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; - let legacy_config: LegacyMstxConfigFile = toml::from_str(content).unwrap(); - if let Some(mstx_balance) = legacy_config.mstx_balance { - warn!("'mstx_balance' inside toml config is deprecated, replace with 'ustx_balance'"); - config.ustx_balance = match config.ustx_balance { - Some(balance) => Some([balance, 
mstx_balance].concat()), - None => Some(mstx_balance), - }; + if let Some(mstx_balance) = config.mstx_balance.take() { + warn!("'mstx_balance' in the config is deprecated; please use 'ustx_balance' instead."); + match config.ustx_balance { + Some(ref mut ustx_balance) => { + ustx_balance.extend(mstx_balance); + } + None => { + config.ustx_balance = Some(mstx_balance); + } + } } Ok(config) } From 31bc50569aefc5e1184fa1aa4d5e9355e917eb03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:49:55 -0400 Subject: [PATCH 782/910] chore: log neighbors unconditionally in tests --- stackslib/src/net/p2p.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d7ea9684f38..eb224f3e80c 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4998,7 +4998,7 @@ impl PeerNetwork { /// Log our neighbors. /// Used for testing and debuggin fn log_neighbors(&mut self) { - if self.get_connection_opts().log_neighbors_freq == 0 { + if !cfg!(test) && self.get_connection_opts().log_neighbors_freq == 0 { return; } From d4a06198edfbfc464372045d774802cfc86d5abd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:50:09 -0400 Subject: [PATCH 783/910] feat: track stackerdb peer eviction time --- stackslib/src/net/stackerdb/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 40fbc7711a0..57d1a427dcc 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -443,6 +443,8 @@ pub struct StackerDBSync { rounds: u128, /// Round when we last pushed push_round: u128, + /// time we last deliberately evicted a peer + last_eviction_time: u64, } impl StackerDBSyncResult { From ce9a1611693c42c26cdf97e9bc2bffb82f6e13ef Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:50:29 -0400 Subject: [PATCH 784/910] fix: (1) connect_begin() succeeds when at least one replica is connected. Subsequent calls to connect_begin() in subsequent passes will connect and keep connected more and more peers. (2) Evict peers periodically so we get some churn. (3) Always, always, always try to send up to request_capacity messages (doing a full cycle through the push schedule). --- stackslib/src/net/stackerdb/sync.rs | 69 ++++++++++++++++++++++++----- 1 file changed, 57 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 08e6e978eab..d6610c20fcd 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -79,6 +79,7 @@ impl StackerDBSync { num_attempted_connections: 0, rounds: 0, push_round: 0, + last_eviction_time: get_epoch_time_secs(), }; dbsync.reset(None, config); dbsync @@ -217,10 +218,32 @@ impl StackerDBSync { self.expected_versions.clear(); self.downloaded_chunks.clear(); - // reset comms, but keep all connected replicas pinned + // reset comms, but keep all connected replicas pinned. + // Randomly evict one every so often. 
self.comms.reset(); if let Some(network) = network { - for naddr in self.replicas.iter() { + let mut eviction_index = None; + if self.last_eviction_time + 60 < get_epoch_time_secs() { + self.last_eviction_time = get_epoch_time_secs(); + if self.replicas.len() > 0 { + eviction_index = Some(thread_rng().gen::<usize>() % self.replicas.len()); + } + } + + let mut remove_naddr = None; + for (i, naddr) in self.replicas.iter().enumerate() { + if let Some(eviction_index) = eviction_index.as_ref() { + if *eviction_index == i { + debug!( + "{:?}: {}: don't reuse connection for replica {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + ); + remove_naddr = Some(naddr.clone()); + continue; + } + } if let Some(event_id) = network.get_event_id(&naddr.to_neighbor_key(network)) { self.comms.pin_connection(event_id); debug!( @@ -232,6 +255,9 @@ impl StackerDBSync { ); } } + if let Some(naddr) = remove_naddr.take() { + self.replicas.remove(&naddr); + } } // reload from config @@ -668,7 +694,8 @@ impl StackerDBSync { /// We might not be connected to any yet. /// Clears self.replicas, and fills in self.connected_replicas with already-connected neighbors /// Returns Ok(true) if we can proceed to sync - /// Returns Ok(false) if we have no known peers + /// Returns Ok(false) if we should try this again + /// Returns Err(NoSuchNeighbor) if we don't have anyone to talk to /// Returns Err(..) on DB query error pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result<bool, net_error> { if self.replicas.len() == 0 { @@ -686,7 +713,7 @@ impl StackerDBSync { ); if self.replicas.len() == 0 { // nothing to do - return Ok(false); + return Err(net_error::NoSuchNeighbor); } let naddrs = mem::replace(&mut self.replicas, HashSet::new()); @@ -729,11 +756,12 @@ impl StackerDBSync { ); self.num_attempted_connections += 1; self.num_connections += 1; + self.connected_replicas.insert(naddr); } Ok(false) => { // need to retry - self.replicas.insert(naddr); self.num_attempted_connections += 1; + self.replicas.insert(naddr); } Err(_e) => { debug!( @@ -746,7 +774,7 @@ impl StackerDBSync { } } } - Ok(self.replicas.len() == 0) + Ok(self.connected_replicas.len() > 0) } /// Finish up connecting to our replicas.
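(Editor's note on the eviction hunk above: the change drops at most one randomly chosen pinned replica per 60-second window, so connections are normally reused but the peer set still churns. Below is a minimal, self-contained sketch of that pattern, assuming the `rand` crate; `PeerChurn` and its fields are hypothetical names, not the node's actual API.)

```rust
use std::collections::HashSet;
use std::time::{SystemTime, UNIX_EPOCH};

use rand::Rng;

/// Hypothetical helper capturing the churn pattern: at most one random
/// eviction per `period_secs`, so connection reuse stays the common case.
struct PeerChurn {
    last_eviction_time: u64,
    period_secs: u64,
}

fn epoch_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before UNIX epoch")
        .as_secs()
}

impl PeerChurn {
    fn maybe_evict(&mut self, peers: &mut HashSet<String>) {
        if epoch_secs() < self.last_eviction_time + self.period_secs || peers.is_empty() {
            return;
        }
        self.last_eviction_time = epoch_secs();
        // HashSet has no indexed access; sample an index, then walk to it.
        let idx = rand::thread_rng().gen_range(0..peers.len());
        let victim = peers.iter().nth(idx).cloned().expect("index is in range");
        peers.remove(&victim);
    }
}

fn main() {
    let mut churn = PeerChurn { last_eviction_time: 0, period_secs: 60 };
    let mut peers: HashSet<String> = ["a", "b", "c"].iter().map(|s| s.to_string()).collect();
    churn.maybe_evict(&mut peers); // evicts exactly one of the three
    assert_eq!(peers.len(), 2);
}
```

Tying eviction to wall-clock time rather than to every `reset()` call is what yields "some churn" without thrashing the connection set.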
@@ -1154,7 +1182,8 @@ impl StackerDBSync { ); // fill up our comms with $capacity requests - for _i in 0..self.request_capacity { + let mut num_sent = 0; + for _i in 0..self.chunk_push_priorities.len() { if self.comms.count_inflight() >= self.request_capacity { break; } @@ -1173,6 +1202,9 @@ impl StackerDBSync { chunk_push.chunk_data.slot_id, chunk_push.chunk_data.slot_version, ); + + // next-prioritized chunk + cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); continue; }; @@ -1213,6 +1245,11 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); + + num_sent += 1; + if num_sent > self.request_capacity { + break; + } } self.next_chunk_push_priority = cur_priority; Ok(self @@ -1370,14 +1407,22 @@ impl StackerDBSync { let mut blocked = true; match self.state { StackerDBSyncState::ConnectBegin => { - let done = self.connect_begin(network)?; + let done = match self.connect_begin(network) { + Ok(done) => done, + Err(net_error::NoSuchNeighbor) => { + // nothing to do + self.state = StackerDBSyncState::Finished; + blocked = false; + false + } + Err(e) => { + return Err(e); + } + }; if done { self.state = StackerDBSyncState::ConnectFinish; - } else { - // no replicas; try again - self.state = StackerDBSyncState::Finished; + blocked = false; } - blocked = false; } StackerDBSyncState::ConnectFinish => { let done = self.connect_try_finish(network)?; From 96cf7d567cc5e046b1bfdc644ad67fac0424b097 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:52:18 -0400 Subject: [PATCH 785/910] fix: fix broken unit test --- stackslib/src/net/stackerdb/tests/sync.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 565a97f4222..746a3f09634 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -183,7 +183,12 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { fn check_sync_results(network_sync: &NetworkResult) { for res in network_sync.stacker_db_sync_results.iter() { - assert!(res.num_connections >= res.num_attempted_connections); + assert!( + res.num_connections <= res.num_attempted_connections, + "{} < {}", + res.num_connections, + res.num_attempted_connections + ); } } From 6e0afa8b03e0ab1ea75c9df8a0ff25949b36db63 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 10 Oct 2024 13:10:34 -0700 Subject: [PATCH 786/910] Increase pox_sync_sample_secs to 30 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 7 +++---- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b9145d298cc..e25b7799a9f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2108,7 +2108,7 @@ fn multiple_miners() { let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 5; + naka_conf.node.pox_sync_sample_secs = 30; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -3728,7 +3728,7 @@ fn 
follower_bootup_across_multiple_cycles() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 5; + naka_conf.node.pox_sync_sample_secs = 30; naka_conf.burnchain.max_rbf = 10_000_000; let sender_sk = Secp256k1PrivateKey::new(); @@ -3852,7 +3852,6 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); - follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( @@ -8120,7 +8119,7 @@ fn mock_mining() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 5; + naka_conf.node.pox_sync_sample_secs = 30; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8c48eda5e89..d1ceedfebf6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1472,7 +1472,7 @@ fn multiple_miners() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1767,7 +1767,7 @@ fn miner_forking() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -3444,7 +3444,7 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3707,7 +3707,7 @@ fn partial_tenure_fork() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); From 3b5101fb012ceeba6d1a2402788021e9aa23acd7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 11 Oct 2024 08:21:50 -0400 Subject: [PATCH 787/910] chore: cleanup unused imports --- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 2 +- testnet/stacks-node/src/tests/epoch_23.rs | 2 +- testnet/stacks-node/src/tests/epoch_24.rs | 2 +- 
testnet/stacks-node/src/tests/epoch_25.rs | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/stackerdb.rs | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0ad70006310..0cf567058dc 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{ use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 4490fa5b073..ba95cbd55a4 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -35,7 +35,7 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; use crate::burnchains::bitcoin_regtest_controller::UTXO; -use crate::config::{Config, EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{Config, InitialBalance}; use crate::neon::RunLoopCounter; use crate::operations::BurnchainOpSigner; use crate::stacks_common::address::AddressHashMode; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 7d0a5216a0f..8028e3b92c3 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -22,7 +22,7 @@ use stacks::core; use stacks::core::STACKS_EPOCH_MAX; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::*; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 9c57a732d05..8a959aaf872 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -35,7 +35,7 @@ use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::InitialBalance; use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 345aec4557e..be95a65003d 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -21,7 +21,7 @@ use stacks::core; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b9145d298cc..52ee1383e83 100644 --- 
a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index fbc47e0c3c6..f7089c3f33c 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -25,7 +25,7 @@ use {reqwest, serde_json}; use super::bitcoin_regtest::BitcoinCoreController; use crate::burnchains::BurnchainController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::tests::neon_integrations::{ neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; From e3e656b993b619784e6f2e5685c278f508b69c74 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 11:26:04 -0400 Subject: [PATCH 788/910] fix: log reward cycle of reward set, and use mod 0 indexing to locate the reward set to use to validate blocks (instead of the mod 1 indexing) --- stackslib/src/net/download/nakamoto/tenure.rs | 15 ++++++--------- .../download/nakamoto/tenure_downloader_set.rs | 6 ++++++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 98f102969a1..53f91051565 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -325,15 +325,12 @@ impl TenureStartEnd { wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, - downloader_block_height_to_reward_cycle( - pox_constants, - first_burn_height, - wt_start.burn_height, - ) - .expect(&format!( - "FATAL: tenure from before system start ({} <= {})", - wt_start.burn_height, first_burn_height - )), + pox_constants + .block_height_to_reward_cycle(first_burn_height, wt_start.burn_height) + .expect(&format!( + "FATAL: tenure from before system start ({} <= {})", + wt_start.burn_height, first_burn_height + )), wt.processed, ); tenure_start_end.fetch_end_block = true; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 160bad309e2..88fdf77c7af 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -407,6 +407,12 @@ impl NakamotoTenureDownloaderSet { continue; }; + info!("Download tenure {}", &ch; + "tenure_start_block" => %tenure_info.start_block_id, + "tenrue_end_block" => %tenure_info.end_block_id, + "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, + "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); + debug!( "Download tenure {} (start={}, end={}) (rc {},{})", &ch, From 04b9392dfbbfee7c63b9a1e5fdc1e013cda79b03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 11:26:41 -0400 Subject: [PATCH 789/910] fix: log getnakamotoinv nack remote peer --- stackslib/src/net/inv/nakamoto.rs | 1 + 1 file changed, 1 insertion(+) diff 
--git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 8971a8230f0..3f4fcb61655 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -679,6 +679,7 @@ impl NakamotoTenureInv { } StacksMessageType::Nack(nack_data) => { info!("{:?}: remote peer NACKed our GetNakamotoInv", network.get_local_peer(); + "remote_peer" => %self.neighbor_address, "error_code" => nack_data.error_code); if nack_data.error_code != NackErrorCodes::NoSuchBurnchainBlock { From 616345bf20a6e3f8cbe1f389a94a7db8eb8733c4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 14:26:47 -0400 Subject: [PATCH 790/910] fix: typo --- stackslib/src/net/download/nakamoto/tenure_downloader_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 88fdf77c7af..49b32c26343 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -409,7 +409,7 @@ impl NakamotoTenureDownloaderSet { info!("Download tenure {}", &ch; "tenure_start_block" => %tenure_info.start_block_id, - "tenrue_end_block" => %tenure_info.end_block_id, + "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); From 787e3e9628f55e5fd0a4f35a8aea0eb63400e3c0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 11 Oct 2024 14:24:34 -0400 Subject: [PATCH 791/910] fix: always use chain_id from config when building txs --- testnet/stacks-node/src/tests/epoch_205.rs | 147 +++-- testnet/stacks-node/src/tests/epoch_21.rs | 80 ++- testnet/stacks-node/src/tests/epoch_22.rs | 25 +- testnet/stacks-node/src/tests/epoch_23.rs | 19 + testnet/stacks-node/src/tests/epoch_24.rs | 12 + testnet/stacks-node/src/tests/epoch_25.rs | 18 +- testnet/stacks-node/src/tests/integrations.rs | 184 ++++-- testnet/stacks-node/src/tests/mempool.rs | 149 ++++- testnet/stacks-node/src/tests/mod.rs | 51 +- .../src/tests/nakamoto_integrations.rs | 247 ++++++-- .../src/tests/neon_integrations.rs | 589 +++++++++++++----- testnet/stacks-node/src/tests/signer/v0.rs | 222 +++++-- testnet/stacks-node/src/tests/stackerdb.rs | 18 +- 13 files changed, 1358 insertions(+), 403 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0cf567058dc..076a5f61f36 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -78,8 +78,14 @@ fn test_exact_block_costs() { (ok 1))) "; - let contract_publish_tx = - make_contract_publish(&spender_sk, 0, 210_000, contract_name, contract_content); + let contract_publish_tx = make_contract_publish( + &spender_sk, + 0, + 210_000, + conf.burnchain.chain_id, + contract_name, + contract_content, + ); // make txs that alternate between let txs: Vec<_> = (1..transactions_to_broadcast + 1) @@ -89,6 +95,7 @@ fn test_exact_block_costs() { &spender_sk, nonce, 200_000, + conf.burnchain.chain_id, &spender_addr_c32, contract_name, "db-get2", @@ -99,6 +106,7 @@ fn test_exact_block_costs() { &spender_sk, nonce, 200_000, + conf.burnchain.chain_id, &spender_addr_c32, contract_name, "db-get2", @@ -307,14 +315,22 @@ fn test_dynamic_db_method_costs() { amount: 200_000_000, }); - let contract_publish_tx = - make_contract_publish(&spender_sk, 0, 210_000, contract_name, contract_content); 
+ let contract_publish_tx = make_contract_publish( + &spender_sk, + 0, + 210_000, + conf.burnchain.chain_id, + contract_name, + contract_content, + ); + let chain_id = conf.burnchain.chain_id; let make_db_get1_call = |nonce| { make_contract_call( &spender_sk, nonce, 200_000, + chain_id, &spender_addr_c32, contract_name, "db-get1", @@ -327,6 +343,7 @@ fn test_dynamic_db_method_costs() { &spender_sk, nonce, 200_000, + chain_id, &spender_addr_c32, contract_name, "db-get2", @@ -800,6 +817,7 @@ fn test_cost_limit_switch_version205() { &creator_sk, 0, 1100000, + conf.burnchain.chain_id, "increment-contract", &giant_contract, ), @@ -828,6 +846,7 @@ fn test_cost_limit_switch_version205() { &alice_sk, 0, 1000, + conf.burnchain.chain_id, &creator_addr.into(), "increment-contract", "increment-many", @@ -862,6 +881,7 @@ fn test_cost_limit_switch_version205() { &bob_sk, 0, 1000, + conf.burnchain.chain_id, &creator_addr.into(), "increment-contract", "increment-many", @@ -902,65 +922,6 @@ fn bigger_microblock_streams_in_2_05() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - // almost fills a whole block - make_contract_publish_microblock_only( - spender_sk, - 0, - 1049230, - &format!("large-{}", ix), - &format!(" - ;; a single one of these transactions consumes over half the runtime budget - (define-constant BUFF_TO_BYTE (list - 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f - 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f - 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f - 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f - 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f - 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f - 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f - 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f - 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f - 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f - 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf - 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf - 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf - 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf - 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef - 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff - )) - (define-private (crash-me-folder (input (buff 1)) (ctr uint)) - (begin - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (+ u1 ctr) - ) - ) - (define-public (crash-me (name (string-ascii 128))) - (begin - (fold crash-me-folder BUFF_TO_BYTE u0) - (print name) - (ok u0) - ) - ) - (begin - (crash-me \"{}\")) - ", - &format!("large-contract-{}", &ix) - ) - ) - }) - .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); 
for spender_addr in spender_addrs.iter() { @@ -1022,6 +983,66 @@ fn bigger_microblock_streams_in_2_05() { ]); conf.burnchain.pox_2_activation = Some(10_003); + let txs: Vec> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + // almost fills a whole block + make_contract_publish_microblock_only( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + &format!("large-{}", ix), + &format!(" + ;; a single one of these transactions consumes over half the runtime budget + (define-constant BUFF_TO_BYTE (list + 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f + 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f + 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f + 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f + 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f + 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f + 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f + 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f + 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f + 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f + 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf + 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf + 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf + 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf + 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef + 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff + )) + (define-private (crash-me-folder (input (buff 1)) (ctr uint)) + (begin + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (+ u1 ctr) + ) + ) + (define-public (crash-me (name (string-ascii 128))) + (begin + (fold crash-me-folder BUFF_TO_BYTE u0) + (print name) + (ok u0) + ) + ) + (begin + (crash-me \"{}\")) + ", + &format!("large-contract-{}", &ix) + ) + ) + }) + .collect(); + test_observer::spawn(); test_observer::register_any(&mut conf); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index ba95cbd55a4..8f6c4663187 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -360,6 +360,7 @@ fn transition_adds_burn_block_height() { &spender_sk, 0, (2 * contract.len()) as u64, + conf.burnchain.chain_id, "test-burn-headers", contract, ); @@ -373,6 +374,7 @@ fn transition_adds_burn_block_height() { &spender_sk, 1, (2 * contract.len()) as u64, + conf.burnchain.chain_id, &spender_addr_c32, "test-burn-headers", "test-burn-headers", @@ -1107,6 +1109,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sk, 0, 300, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1147,6 +1150,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sk, 0, 300, + conf.burnchain.chain_id, 
&StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1184,6 +1188,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sks[0], 1, (2 * contract.len()) as u64, + conf.burnchain.chain_id, "test-get-pox-addrs", contract, ); @@ -1209,6 +1214,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sks[0], 2, (2 * contract.len()) as u64, + conf.burnchain.chain_id, &spender_addr_c32, "test-get-pox-addrs", "test-get-pox-addrs", @@ -1578,6 +1584,7 @@ fn transition_removes_pox_sunset() { &spender_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -1640,6 +1647,7 @@ fn transition_removes_pox_sunset() { &spender_sk, 1, 260 * 2, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -2273,6 +2281,7 @@ fn test_pox_reorgs_three_flaps() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -2296,7 +2305,9 @@ fn test_pox_reorgs_three_flaps() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -2810,6 +2821,7 @@ fn test_pox_reorg_one_flap() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -2833,7 +2845,9 @@ fn test_pox_reorg_one_flap() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -3235,6 +3249,7 @@ fn test_pox_reorg_flap_duel() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -3258,7 +3273,9 @@ fn test_pox_reorg_flap_duel() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -3670,6 +3687,7 @@ fn test_pox_reorg_flap_reward_cycles() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -3693,7 +3711,9 @@ fn test_pox_reorg_flap_reward_cycles() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -4099,6 +4119,7 @@ fn test_pox_missing_five_anchor_blocks() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -4122,7 +4143,9 @@ fn test_pox_missing_five_anchor_blocks() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -4500,6 +4523,7 @@ fn test_sortition_divergence_pre_21() { pk, 0, 1360, + 
conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -4523,7 +4547,9 @@ fn test_sortition_divergence_pre_21() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -4798,13 +4824,34 @@ fn trait_invocation_cross_epoch() { let tip_info = get_chain_info(&conf); assert_eq!(tip_info.burn_block_height, epoch_2_05 + 1); - let tx = make_contract_publish(&spender_sk, 0, 10_000, "simple-trait", trait_contract); + let tx = make_contract_publish( + &spender_sk, + 0, + 10_000, + conf.burnchain.chain_id, + "simple-trait", + trait_contract, + ); let trait_txid = submit_tx(&http_origin, &tx); - let tx = make_contract_publish(&spender_sk, 1, 10_000, "impl-simple", impl_contract); + let tx = make_contract_publish( + &spender_sk, + 1, + 10_000, + conf.burnchain.chain_id, + "impl-simple", + impl_contract, + ); let impl_txid = submit_tx(&http_origin, &tx); - let tx = make_contract_publish(&spender_sk, 2, 10_000, "use-simple", use_contract); + let tx = make_contract_publish( + &spender_sk, + 2, + 10_000, + conf.burnchain.chain_id, + "use-simple", + use_contract, + ); let use_txid = submit_tx(&http_origin, &tx); // mine the transactions and advance to epoch 2.1 @@ -4815,7 +4862,14 @@ fn trait_invocation_cross_epoch() { let tip_info = get_chain_info(&conf); assert_eq!(tip_info.burn_block_height, epoch_2_1 + 1); - let tx = make_contract_publish(&spender_sk, 3, 10_000, "invoke-simple", invoke_contract); + let tx = make_contract_publish( + &spender_sk, + 3, + 10_000, + conf.burnchain.chain_id, + "invoke-simple", + invoke_contract, + ); let invoke_txid = submit_tx(&http_origin, &tx); for _ in 0..2 { @@ -4826,6 +4880,7 @@ fn trait_invocation_cross_epoch() { &spender_sk, 4, 10_000, + conf.burnchain.chain_id, &spender_addr_c32, "invoke-simple", "invocation-1", @@ -4837,6 +4892,7 @@ fn trait_invocation_cross_epoch() { &spender_sk, 5, 10_000, + conf.burnchain.chain_id, &spender_addr_c32, "invoke-simple", "invocation-2", @@ -5042,6 +5098,7 @@ fn test_v1_unlock_height_with_current_stackers() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -5078,6 +5135,7 @@ fn test_v1_unlock_height_with_current_stackers() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -5306,6 +5364,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -5354,6 +5413,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index fecf5c46524..9bffca7c8a6 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -227,6 +227,7 @@ fn disable_pox() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -275,6 +276,7 @@ fn disable_pox() { &spender_sk, 1, 3000, 
+ conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -293,6 +295,7 @@ fn disable_pox() { &spender_2_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -324,6 +327,7 @@ fn disable_pox() { &spender_sk, 2, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -350,6 +354,7 @@ fn disable_pox() { &spender_sk, aborted_increase_nonce, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -761,6 +766,7 @@ fn pox_2_unlock_all() { &spender_sk, 0, tx_fee, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -811,6 +817,7 @@ fn pox_2_unlock_all() { &spender_sk, 1, tx_fee, + conf.burnchain.chain_id, "unlock-height", "(define-public (unlock-height (x principal)) (ok (get unlock-height (stx-account x))))", ); @@ -820,6 +827,7 @@ fn pox_2_unlock_all() { &spender_sk, 2, tx_fee, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -839,6 +847,7 @@ fn pox_2_unlock_all() { &spender_2_sk, 0, tx_fee, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -869,6 +878,7 @@ fn pox_2_unlock_all() { &spender_sk, 3, tx_fee, + conf.burnchain.chain_id, &to_addr(&spender_sk), "unlock-height", "unlock-height", @@ -888,6 +898,7 @@ fn pox_2_unlock_all() { &spender_sk, 4, tx_fee, + conf.burnchain.chain_id, &to_addr(&spender_sk), "unlock-height", "unlock-height", @@ -977,7 +988,14 @@ fn pox_2_unlock_all() { ); // perform a transfer - let tx = make_stacks_transfer(&spender_sk, 5, tx_fee, &spender_3_addr, 1_000_000); + let tx = make_stacks_transfer( + &spender_sk, + 5, + tx_fee, + conf.burnchain.chain_id, + &spender_3_addr, + 1_000_000, + ); info!("Submit stack transfer tx to {:?}", &http_origin); submit_tx(&http_origin, &tx); @@ -1508,6 +1526,7 @@ fn test_pox_reorg_one_flap() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1531,7 +1550,9 @@ fn test_pox_reorg_one_flap() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 8028e3b92c3..2355f7521d8 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -180,6 +180,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "simple-trait", trait_contract, ); @@ -190,6 +191,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "impl-simple", impl_contract, ); @@ -200,6 +202,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "use-simple", use_contract, ); @@ -210,6 +213,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "invoke-simple", invoke_contract, ); @@ -241,6 +245,7 @@ fn trait_invocation_behavior() { 
&spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -253,6 +258,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", @@ -274,6 +280,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -286,6 +293,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", @@ -312,6 +320,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -324,6 +333,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", @@ -344,6 +354,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "wrap-simple", wrapper_contract, ); @@ -359,6 +370,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-1", @@ -371,6 +383,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-2", @@ -397,6 +410,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-1", @@ -409,6 +423,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-2", @@ -431,6 +446,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-1", @@ -443,6 +459,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-2", @@ -464,6 +481,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -476,6 +494,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 8a959aaf872..26ad007ca79 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -249,6 +249,7 @@ fn fix_to_pox_contract() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -298,6 +299,7 @@ fn fix_to_pox_contract() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -332,6 +334,7 @@ fn fix_to_pox_contract() { &spender_sk, aborted_increase_nonce_2_2, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -357,6 +360,7 @@ fn fix_to_pox_contract() { &spender_sk, aborted_increase_nonce_2_3, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -395,6 +399,7 @@ fn fix_to_pox_contract() { 
&spender_sk, 4, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -414,6 +419,7 @@ fn fix_to_pox_contract() { &spender_2_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -445,6 +451,7 @@ fn fix_to_pox_contract() { &spender_sk, 5, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-increase", @@ -889,6 +896,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -938,6 +946,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1023,6 +1032,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 2, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -1042,6 +1052,7 @@ fn verify_auto_unlock_behavior() { &spender_2_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -1123,6 +1134,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 3, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-increase", diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index be95a65003d..6af1bee626d 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -162,7 +162,14 @@ fn microblocks_disabled() { // push us to block 205 next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tx = make_stacks_transfer_mblock_only(&spender_1_sk, 0, 500, &spender_2_addr, 500); + let tx = make_stacks_transfer_mblock_only( + &spender_1_sk, + 0, + 500, + conf.burnchain.chain_id, + &spender_2_addr, + 500, + ); submit_tx(&http_origin, &tx); // wait until just before epoch 2.5 @@ -194,7 +201,14 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - let tx = make_stacks_transfer_mblock_only(&spender_1_sk, 1, 500, &spender_2_addr, 500); + let tx = make_stacks_transfer_mblock_only( + &spender_1_sk, + 1, + 500, + conf.burnchain.chain_id, + &spender_2_addr, + 500, + ); submit_tx(&http_origin, &tx); let mut last_block_height = get_chain_info(&conf).burn_block_height; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 694d27ca155..236d76b0002 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -25,7 +25,7 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ - StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; @@ -211,8 +211,14 @@ fn integration_test_get_info() { if round == 1 { // block-height = 2 eprintln!("Tenure in 1 started!"); - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "get-info", GET_INFO_CONTRACT); + let publish_tx = make_contract_publish( + 
&contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "get-info", + GET_INFO_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -225,8 +231,14 @@ fn integration_test_get_info() { &StacksEpochId::Epoch21, ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 1, 10, "other", OTHER_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 1, + 10, + CHAIN_ID_TESTNET, + "other", + OTHER_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -239,8 +251,14 @@ fn integration_test_get_info() { &StacksEpochId::Epoch21, ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 2, 10, "main", CALL_READ_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 2, + 10, + CHAIN_ID_TESTNET, + "main", + CALL_READ_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -272,6 +290,7 @@ fn integration_test_get_info() { &contract_sk, 3, 10, + CHAIN_ID_TESTNET, "impl-trait-contract", IMPL_TRAIT_CONTRACT, ); @@ -294,6 +313,7 @@ fn integration_test_get_info() { &principal_sk, (round - 3).into(), 10, + CHAIN_ID_TESTNET, &to_addr(&contract_sk), "get-info", "update-info", @@ -319,6 +339,7 @@ fn integration_test_get_info() { &spender_sk, (round - 1).into(), 10, + CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 100, ); @@ -797,8 +818,13 @@ fn integration_test_get_info() { eprintln!("Test: POST {} (valid)", path); // tx_xfer is 180 bytes long - let tx_xfer = make_stacks_transfer(&spender_sk, round.into(), 200, - &StacksAddress::from_string(ADDR_4).unwrap().into(), 123); + let tx_xfer = make_stacks_transfer( + &spender_sk, + round.into(), + 200, + CHAIN_ID_TESTNET, + &StacksAddress::from_string(ADDR_4).unwrap().into(), + 123); let res: String = client.post(&path) .header("Content-Type", "application/octet-stream") @@ -829,7 +855,8 @@ fn integration_test_get_info() { eprintln!("Test: POST {} (invalid)", path); // tx_xfer_invalid is 180 bytes long - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, // bad nonce + // bad nonce + let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -1114,8 +1141,14 @@ fn contract_stx_transfer() { if round == 1 { // block-height = 2 - let xfer_to_contract = - make_stacks_transfer(&sk_3, 0, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk_3, + 0, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1130,8 +1163,14 @@ fn contract_stx_transfer() { .unwrap(); } else if round == 2 { // block-height > 2 - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -1146,8 +1185,14 @@ fn contract_stx_transfer() { .unwrap(); } else if round == 3 { // try to publish again - let publish_tx = - make_contract_publish(&contract_sk, 1, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 1, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, @@ -1170,6 +1215,7 @@ fn contract_stx_transfer() { &sk_2, 0, 10, + CHAIN_ID_TESTNET, &to_addr(&contract_sk), "faucet", "spout", @@ -1194,6 +1240,7 @@ fn 
contract_stx_transfer() { &sk_3, 1 + i, 200, + CHAIN_ID_TESTNET, &contract_identifier.clone().into(), 1000, ); @@ -1215,8 +1262,14 @@ fn contract_stx_transfer() { .unwrap(); } // this one should fail because the nonce is already in the mempool - let xfer_to_contract = - make_stacks_transfer(&sk_3, 3, 190, &contract_identifier.clone().into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk_3, + 3, + 190, + CHAIN_ID_TESTNET, + &contract_identifier.clone().into(), + 1000, + ); let xfer_to_contract = StacksTransaction::consensus_deserialize(&mut &xfer_to_contract[..]).unwrap(); match tenure @@ -1446,8 +1499,14 @@ fn mine_transactions_out_of_order() { if round == 1 { // block-height = 2 - let xfer_to_contract = - make_stacks_transfer(&sk, 1, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk, + 1, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1462,7 +1521,8 @@ fn mine_transactions_out_of_order() { .unwrap(); } else if round == 2 { // block-height > 2 - let publish_tx = make_contract_publish(&sk, 2, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = + make_contract_publish(&sk, 2, 10, CHAIN_ID_TESTNET, "faucet", FAUCET_CONTRACT); tenure .mem_pool .submit_raw( @@ -1476,8 +1536,14 @@ fn mine_transactions_out_of_order() { ) .unwrap(); } else if round == 3 { - let xfer_to_contract = - make_stacks_transfer(&sk, 3, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk, + 3, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1491,8 +1557,14 @@ fn mine_transactions_out_of_order() { ) .unwrap(); } else if round == 4 { - let xfer_to_contract = - make_stacks_transfer(&sk, 0, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk, + 0, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1593,8 +1665,14 @@ fn mine_contract_twice() { if round == 1 { // block-height = 2 - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1691,8 +1769,14 @@ fn bad_contract_tx_rollback() { if round == 1 { // block-height = 2 - let xfer_to_contract = - make_stacks_transfer(&sk_3, 0, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk_3, + 0, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1711,7 +1795,8 @@ fn bad_contract_tx_rollback() { .unwrap(); } else if round == 2 { // block-height = 3 - let xfer_to_contract = make_stacks_transfer(&sk_3, 1, 10, &addr_2.into(), 1000); + let xfer_to_contract = + make_stacks_transfer(&sk_3, 1, 10, CHAIN_ID_TESTNET, &addr_2.into(), 1000); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1730,7 +1815,8 @@ fn bad_contract_tx_rollback() { .unwrap(); // doesn't consistently get mined by the StacksBlockBuilder, because order matters! 
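// Editorial sketch, not part of the patch: every hunk in this file makes the
// same mechanical change, threading an explicit `chain_id: u32` through the
// test transaction builders right after the fee argument. Assuming the
// helpers and the `CHAIN_ID_TESTNET` import these tests already have in
// scope, the rebuilt transfer call looks like this:
fn example_transfer(sk: &StacksPrivateKey, dest: &PrincipalData) -> Vec<u8> {
    // nonce 2, fee 10 uSTX, explicit testnet chain id, 3000 uSTX transferred
    make_stacks_transfer(sk, 2, 10, CHAIN_ID_TESTNET, dest, 3000)
}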
- let xfer_to_contract = make_stacks_transfer(&sk_3, 2, 10, &addr_2.into(), 3000); + let xfer_to_contract = + make_stacks_transfer(&sk_3, 2, 10, CHAIN_ID_TESTNET, &addr_2.into(), 3000); tenure .mem_pool .submit_raw( @@ -1744,8 +1830,14 @@ fn bad_contract_tx_rollback() { ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -1759,8 +1851,14 @@ fn bad_contract_tx_rollback() { ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 1, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 1, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -2014,6 +2112,7 @@ fn block_limit_runtime_test() { &contract_sk, 0, 10, + CHAIN_ID_TESTNET, "hello-contract", EXPENSIVE_CONTRACT.as_str(), ); @@ -2042,6 +2141,7 @@ fn block_limit_runtime_test() { sk, 0, 10, + CHAIN_ID_TESTNET, &to_addr(&contract_sk), "hello-contract", "do-it", @@ -2132,8 +2232,14 @@ fn mempool_errors() { if round == 1 { // block-height = 2 - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "get-info", GET_INFO_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "get-info", + GET_INFO_CONTRACT, + ); eprintln!("Tenure in 1 started!"); tenure .mem_pool @@ -2176,6 +2282,7 @@ fn mempool_errors() { &spender_sk, 30, // bad nonce -- too much chaining 200, + CHAIN_ID_TESTNET, &send_to, 456, ); @@ -2217,6 +2324,7 @@ fn mempool_errors() { &spender_sk, 0, 1, // bad fee + CHAIN_ID_TESTNET, &send_to, 456, ); @@ -2250,6 +2358,7 @@ fn mempool_errors() { &contract_sk, 1, 2000, // not enough funds! + CHAIN_ID_TESTNET, &send_to, 456, ); @@ -2294,6 +2403,7 @@ fn mempool_errors() { 1 + MAXIMUM_MEMPOOL_TX_CHAINING, 1, 2000, + CHAIN_ID_TESTNET, &send_to, 1000, ); diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 6221c6cf11f..b701e70a151 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -112,8 +112,14 @@ fn mempool_setup_chainstate() { if round == 1 { eprintln!("Tenure in 1 started!"); - let publish_tx1 = - make_contract_publish(&contract_sk, 0, 100, "foo_contract", FOO_CONTRACT); + let publish_tx1 = make_contract_publish( + &contract_sk, + 0, + 100, + CHAIN_ID_TESTNET, + "foo_contract", + FOO_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -127,8 +133,14 @@ fn mempool_setup_chainstate() { ) .unwrap(); - let publish_tx2 = - make_contract_publish(&contract_sk, 1, 100, "trait-contract", TRAIT_CONTRACT); + let publish_tx2 = make_contract_publish( + &contract_sk, + 1, + 100, + CHAIN_ID_TESTNET, + "trait-contract", + TRAIT_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -146,6 +158,7 @@ fn mempool_setup_chainstate() { &contract_sk, 2, 100, + CHAIN_ID_TESTNET, "use-trait-contract", USE_TRAIT_CONTRACT, ); @@ -166,6 +179,7 @@ fn mempool_setup_chainstate() { &contract_sk, 3, 100, + CHAIN_ID_TESTNET, "implement-trait-contract", IMPLEMENT_TRAIT_CONTRACT, ); @@ -186,6 +200,7 @@ fn mempool_setup_chainstate() { &contract_sk, 4, 100, + CHAIN_ID_TESTNET, "bad-trait-contract", BAD_TRAIT_CONTRACT, ); @@ -234,8 +249,14 @@ fn mempool_setup_chainstate() { // let's throw some transactions at it. 
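// Editorial sketch, not part of the patch: contract publishes follow the same
// pattern, with the chain id as the fourth argument of `make_contract_publish`
// (the helper itself is updated in the tests/mod.rs hunks later in this diff).
// The contract name and body here are placeholder values:
fn example_publish(sk: &StacksPrivateKey) -> Vec<u8> {
    make_contract_publish(sk, 5, 1000, CHAIN_ID_TESTNET, "demo", "(define-read-only (f) (ok u1))")
}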
// first a couple valid ones: - let tx_bytes = - make_contract_publish(&contract_sk, 5, 1000, "bar_contract", FOO_CONTRACT); + let tx_bytes = make_contract_publish( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + "bar_contract", + FOO_CONTRACT, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state @@ -252,6 +273,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "foo_contract", "bar", @@ -269,7 +291,8 @@ fn mempool_setup_chainstate() { ) .unwrap(); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 200, &other_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 200, CHAIN_ID_TESTNET, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state @@ -321,6 +344,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &bad_addr, "foo_contract", "bar", @@ -354,7 +378,8 @@ fn mempool_setup_chainstate() { .unwrap() .into(); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 200, &bad_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 200, CHAIN_ID_TESTNET, &bad_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -373,7 +398,8 @@ fn mempool_setup_chainstate() { }); // bad fees - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 0, &other_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 0, CHAIN_ID_TESTNET, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -393,7 +419,8 @@ fn mempool_setup_chainstate() { }); // bad nonce - let tx_bytes = make_stacks_transfer(&contract_sk, 0, 200, &other_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 0, 200, CHAIN_ID_TESTNET, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -413,7 +440,14 @@ fn mempool_setup_chainstate() { }); // not enough funds - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 110000, &other_addr, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 110000, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -434,7 +468,14 @@ fn mempool_setup_chainstate() { // sender == recipient let contract_princ = PrincipalData::from(contract_addr.clone()); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 300, &contract_princ, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 300, + CHAIN_ID_TESTNET, + &contract_princ, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -457,7 +498,14 @@ fn mempool_setup_chainstate() { let mut mainnet_recipient = to_addr(&other_sk); mainnet_recipient.version = C32_ADDRESS_VERSION_MAINNET_SINGLESIG; let mainnet_princ = mainnet_recipient.into(); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 300, &mainnet_princ, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 300, + CHAIN_ID_TESTNET, + &mainnet_princ, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -488,6 +536,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 300, + CHAIN_ID_TESTNET, TransactionAnchorMode::OnChainOnly, TransactionVersion::Mainnet, ); @@ -510,7 +559,8 @@ fn 
mempool_setup_chainstate() { }); // send amount must be positive - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 300, &other_addr, 0); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 300, CHAIN_ID_TESTNET, &other_addr, 0); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -530,7 +580,14 @@ fn mempool_setup_chainstate() { }); // not enough funds - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 110000, &other_addr, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 110000, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -549,7 +606,14 @@ fn mempool_setup_chainstate() { false }); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 99700, &other_addr, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 99700, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -572,6 +636,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "bar_contract", "bar", @@ -599,6 +664,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "foo_contract", "foobar", @@ -626,6 +692,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "foo_contract", "bar", @@ -649,8 +716,14 @@ fn mempool_setup_chainstate() { false }); - let tx_bytes = - make_contract_publish(&contract_sk, 5, 1000, "foo_contract", FOO_CONTRACT); + let tx_bytes = make_contract_publish( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + "foo_contract", + FOO_CONTRACT, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -685,7 +758,14 @@ fn mempool_setup_chainstate() { signature: MessageSignature([1; 65]), }; - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -716,7 +796,14 @@ fn mempool_setup_chainstate() { signature: MessageSignature([0; 65]), }; - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -750,7 +837,14 @@ fn mempool_setup_chainstate() { microblock_1.sign(&other_sk).unwrap(); microblock_2.sign(&other_sk).unwrap(); - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -765,7 +859,7 @@ fn mempool_setup_chainstate() { eprintln!("Err: {:?}", e); assert!(matches!(e, MemPoolRejection::Other(_))); - let tx_bytes = make_coinbase(&contract_sk, 5, 1000); + let tx_bytes = make_coinbase(&contract_sk, 5, 1000, CHAIN_ID_TESTNET); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -823,7 +917,14 @@ fn mempool_setup_chainstate() { 
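// Editorial sketch, not part of the patch: the poison-microblock builder used
// in the hunks below gets the same treatment, with the chain id slotted
// between the fee and the two conflicting microblock headers:
fn example_poison(sk: &StacksPrivateKey, h1: StacksMicroblockHeader, h2: StacksMicroblockHeader) -> Vec<u8> {
    make_poison(sk, 5, 1000, CHAIN_ID_TESTNET, h1, h2)
}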
microblock_1.sign(&secret_key).unwrap(); microblock_2.sign(&secret_key).unwrap(); - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -848,6 +949,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 250, + CHAIN_ID_TESTNET, &contract_addr, "use-trait-contract", "baz", @@ -875,6 +977,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 250, + CHAIN_ID_TESTNET, &contract_addr, "use-trait-contract", "baz", diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index ba88584f393..cfa1653287f 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -94,6 +94,7 @@ lazy_static! { .unwrap(), 0, 10, + CHAIN_ID_TESTNET, "store", STORE_CONTRACT ); @@ -134,6 +135,7 @@ pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( sender_nonce: u64, payer_nonce: u64, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, ) -> Vec<u8> { @@ -144,6 +146,7 @@ pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( sender_nonce, Some(payer_nonce), tx_fee, + chain_id, anchor_mode, version, ) @@ -154,12 +157,14 @@ pub fn serialize_sign_standard_single_sig_tx( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, ) -> Vec<u8> { serialize_sign_standard_single_sig_tx_anchor_mode( payload, sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OnChainOnly, ) } @@ -169,6 +174,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, ) -> Vec<u8> { serialize_sign_standard_single_sig_tx_anchor_mode_version( @@ -176,6 +182,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode( sender, nonce, tx_fee, + chain_id, anchor_mode, TransactionVersion::Testnet, ) @@ -186,6 +193,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, ) -> Vec<u8> { @@ -196,6 +204,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( nonce, None, tx_fee, + chain_id, anchor_mode, version, ) @@ -208,6 +217,7 @@ pub fn serialize_sign_tx_anchor_mode_version( sender_nonce: u64, payer_nonce: Option<u64>, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, ) -> Vec<u8> { @@ -234,7 +244,7 @@ pub fn serialize_sign_tx_anchor_mode_version( let mut unsigned_tx = StacksTransaction::new(version, auth, payload); unsigned_tx.anchor_mode = anchor_mode; unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; - unsigned_tx.chain_id = CHAIN_ID_TESTNET; + unsigned_tx.chain_id = chain_id; let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); tx_signer.sign_origin(sender).unwrap(); @@ -255,6 +265,7 @@ pub fn make_contract_publish_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, version: Option<ClarityVersion>, @@ -265,23 +276,33 @@ pub fn make_contract_publish_versioned( let payload = TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload, sender, nonce,
tx_fee, chain_id) } pub fn make_contract_publish( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, ) -> Vec<u8> { - make_contract_publish_versioned(sender, nonce, tx_fee, contract_name, contract_content, None) + make_contract_publish_versioned( + sender, + nonce, + tx_fee, + chain_id, + contract_name, + contract_content, + None, + ) } pub fn make_contract_publish_microblock_only_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, version: Option<ClarityVersion>, @@ -297,6 +318,7 @@ pub fn make_contract_publish_microblock_only_versioned( sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OffChainOnly, ) } @@ -305,6 +327,7 @@ pub fn make_contract_publish_microblock_only( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, ) -> Vec<u8> { @@ -312,6 +335,7 @@ pub fn make_contract_publish_microblock_only( sender, nonce, tx_fee, + chain_id, contract_name, contract_content, None, ) @@ -392,12 +416,13 @@ pub fn make_stacks_transfer( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, recipient: &PrincipalData, amount: u64, ) -> Vec<u8> { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } pub fn make_sponsored_stacks_transfer_on_testnet( @@ -406,6 +431,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( sender_nonce: u64, payer_nonce: u64, tx_fee: u64, + chain_id: u32, recipient: &PrincipalData, amount: u64, ) -> Vec<u8> { @@ -418,6 +444,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( sender_nonce, payer_nonce, tx_fee, + chain_id, TransactionAnchorMode::OnChainOnly, TransactionVersion::Testnet, ) @@ -427,6 +454,7 @@ pub fn make_stacks_transfer_mblock_only( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, recipient: &PrincipalData, amount: u64, ) -> Vec<u8> { @@ -437,6 +465,7 @@ pub fn make_stacks_transfer_mblock_only( sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OffChainOnly, ) } @@ -445,22 +474,24 @@ pub fn make_poison( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, header_1: StacksMicroblockHeader, header_2: StacksMicroblockHeader, ) -> Vec<u8> { let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } -pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64) -> Vec<u8> { +pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> { let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } pub fn make_contract_call( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_addr: &StacksAddress, contract_name: &str, function_name: &str, @@ -476,13 +507,14 @@ pub fn make_contract_call( function_args: function_args.iter().map(|x| x.clone()).collect(), }; - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce,
tx_fee, chain_id) } pub fn make_contract_call_mblock_only( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_addr: &StacksAddress, contract_name: &str, function_name: &str, @@ -503,6 +535,7 @@ pub fn make_contract_call_mblock_only( sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OffChainOnly, ) } @@ -921,7 +954,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { 1 => { // On round 1, publish the KV contract let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let publish_contract = make_contract_publish(&contract_sk, 0, 10, "store", STORE_CONTRACT); + let publish_contract = make_contract_publish(&contract_sk, 0, 10, CHAIN_ID_TESTNET, "store", STORE_CONTRACT); tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,publish_contract, &ExecutionCost::max_value(), &StacksEpochId::Epoch20, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 52ee1383e83..eb76ac3e7a2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -78,7 +78,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ use stacks_common::address::AddressHashMode; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; +use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, @@ -848,7 +848,7 @@ pub fn boot_to_epoch_3( &signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, 12_u128, u128::MAX, 1, @@ -862,6 +862,7 @@ pub fn boot_to_epoch_3( &stacker_sk, 0, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -924,6 +925,7 @@ pub fn boot_to_epoch_3( signer_sk, 0, 300, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), SIGNERS_VOTING_NAME, SIGNERS_VOTING_FUNCTION_NAME, @@ -1008,7 +1010,7 @@ pub fn boot_to_pre_epoch_3_boundary( &signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, 12_u128, u128::MAX, 1, @@ -1022,6 +1024,7 @@ pub fn boot_to_pre_epoch_3_boundary( &stacker_sk, 0, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -1084,6 +1087,7 @@ pub fn boot_to_pre_epoch_3_boundary( signer_sk, 0, 300, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), SIGNERS_VOTING_NAME, SIGNERS_VOTING_FUNCTION_NAME, @@ -1245,7 +1249,7 @@ pub fn setup_epoch_3_reward_set( &signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, lock_period, u128::MAX, 1, @@ -1258,6 +1262,7 @@ pub fn setup_epoch_3_reward_set( &stacker_sk, 0, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -1527,7 +1532,14 @@ fn simple_neon_integration() { } // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -1787,7 +1799,14 @@ fn 
flash_blocks_on_epoch_3() { } // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -2025,8 +2044,14 @@ fn mine_multiple_per_tenure_integration() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -2277,8 +2302,14 @@ fn multiple_miners() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); wait_for(20, || { @@ -2484,7 +2515,7 @@ fn correct_burn_outs() { &sender_signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, 1_u128, u128::MAX, 1, @@ -2496,6 +2527,7 @@ fn correct_burn_outs() { &account.0, account.2.nonce, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -2837,6 +2869,7 @@ fn block_proposal_api_endpoint() { &account_keys[0], 0, 100, + conf.burnchain.chain_id, &to_addr(&account_keys[1]).into(), 10000, ); @@ -3514,7 +3547,7 @@ fn follower_bootup() { &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind ), - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, PEER_VERSION_TESTNET, ); @@ -3574,8 +3607,14 @@ fn follower_bootup() { let sender_nonce = account .nonce .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); last_nonce = Some(sender_nonce); @@ -3861,7 +3900,7 @@ fn follower_bootup_across_multiple_cycles() { &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind ), - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, PEER_VERSION_TESTNET, ); @@ -4153,6 +4192,7 @@ fn burn_ops_integration_test() { &signer_sk_1, 1, 500, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "set-signer-key-authorization", @@ -4355,8 +4395,14 @@ fn burn_ops_integration_test() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, 200, &stacker_addr_1.into(), 10000); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + 200, + naka_conf.burnchain.chain_id, + &stacker_addr_1.into(), + 10000, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -4766,8 +4812,14 @@ fn 
forked_tenure_is_ignored() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in Tenure C to mine a second block"); @@ -4965,6 +5017,7 @@ fn check_block_heights() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -5050,6 +5103,7 @@ fn check_block_heights() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -5066,6 +5120,7 @@ fn check_block_heights() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -5173,8 +5228,14 @@ fn check_block_heights() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -5465,8 +5526,14 @@ fn nakamoto_attempt_time() { let mut sender_nonce = account.nonce; for _ in 0..txs_per_block { - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, tx_fee, &recipient, amount); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &recipient, + amount, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); } @@ -5558,8 +5625,14 @@ fn nakamoto_attempt_time() { 'submit_txs: loop { let acct = &mut account[acct_idx]; for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { - let transfer_tx = - make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + let transfer_tx = make_stacks_transfer( + &acct.privk, + acct.nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &recipient, + amount, + ); submit_tx(&http_origin, &transfer_tx); tx_total_size += transfer_tx.len(); tx_count += 1; @@ -5709,6 +5782,7 @@ fn clarity_burn_state() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract_name, contract, ); @@ -5740,6 +5814,7 @@ fn clarity_burn_state() { &sender_sk, sender_nonce, tx_fee, + naka_conf.burnchain.chain_id, &sender_addr, contract_name, "bar", @@ -5828,6 +5903,7 @@ fn clarity_burn_state() { &sender_sk, sender_nonce, tx_fee, + naka_conf.burnchain.chain_id, &sender_addr, contract_name, "bar", @@ -6123,8 +6199,14 @@ fn signer_chainstate() { // submit a tx to trigger an intermediate block let sender_nonce = i; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); let timer = Instant::now(); @@ -6638,7 +6720,14 @@ fn continue_tenure_extend() { .unwrap(); // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let transfer_tx_hex = 
format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -6870,6 +6959,7 @@ fn check_block_times() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -6913,6 +7003,7 @@ fn check_block_times() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -6930,6 +7021,7 @@ fn check_block_times() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -7025,8 +7117,14 @@ fn check_block_times() { info!("Mining Nakamoto block"); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -7106,8 +7204,14 @@ fn check_block_times() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -7353,6 +7457,7 @@ fn check_block_info() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -7391,6 +7496,7 @@ fn check_block_info() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -7423,6 +7529,7 @@ fn check_block_info() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -7529,8 +7636,14 @@ fn check_block_info() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -7636,8 +7749,14 @@ fn check_block_info() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -7862,6 +7981,7 @@ fn check_block_info_rewards() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -7900,6 +8020,7 @@ fn check_block_info_rewards() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -7925,6 +8046,7 @@ fn check_block_info_rewards() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -7945,8 +8067,14 @@ fn check_block_info_rewards() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - 
make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -7972,8 +8100,14 @@ fn check_block_info_rewards() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -8234,7 +8368,7 @@ fn mock_mining() { &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind ), - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, PEER_VERSION_TESTNET, ); @@ -8299,8 +8433,14 @@ fn mock_mining() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -8679,8 +8819,14 @@ fn v3_signer_api_endpoint() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra stacks block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); wait_for(30, || { @@ -8834,6 +8980,7 @@ fn skip_mining_long_tx() { &sender_2_sk, 0, 9_000, + naka_conf.burnchain.chain_id, "large_contract", &format!( "(define-constant INP_LIST (list {input_list})) @@ -8857,8 +9004,14 @@ fn skip_mining_long_tx() { TEST_SKIP_P2P_BROADCAST.lock().unwrap().replace(false); } else { - let transfer_tx = - make_stacks_transfer(&sender_1_sk, i - 1, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_1_sk, + i - 1, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); wait_for(30, || { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 14ec15447fe..d6373a3b444 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1517,6 +1517,7 @@ fn deep_contract() { &spender_sk, 0, 1000, + conf.burnchain.chain_id, "test-publish", &exceeds_stack_depth_list, ); @@ -1694,11 +1695,25 @@ fn liquid_ustx_integration() { let _sort_height = channel.get_sortitions_processed(); - let publish = make_contract_publish(&spender_sk, 0, 1000, "caller", caller_src); + let publish = make_contract_publish( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ); let replaced_txid = submit_tx(&http_origin, &publish); - let publish = make_contract_publish(&spender_sk, 0, 1100, "caller", caller_src); + let publish = make_contract_publish( + &spender_sk, + 0, + 1100, + conf.burnchain.chain_id, + "caller", + caller_src, + ); submit_tx(&http_origin, &publish); let dropped_txs = test_observer::get_memtx_drops(); @@ 
-1715,6 +1730,7 @@ fn liquid_ustx_integration() { &spender_sk, 1, 1000, + conf.burnchain.chain_id, &spender_addr, "caller", "execute", @@ -2274,6 +2290,7 @@ fn stx_delegate_btc_integration_test() { &recipient_sk, 0, 293, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "delegate-stack-stx", @@ -2562,6 +2579,7 @@ fn stack_stx_burn_op_test() { &signer_sk_1, 0, 500, + conf.burnchain.chain_id, &boot_code_addr(false), POX_4_NAME, "set-signer-key-authorization", @@ -2923,6 +2941,7 @@ fn vote_for_aggregate_key_burn_op_test() { &spender_sk, 0, 500, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-4", "stack-stx", @@ -3145,6 +3164,7 @@ fn bitcoind_resubmission_test() { &spender_sk, 0, 100, + conf.burnchain.chain_id, &PrincipalData::from(StacksAddress::burn_address(false)), 1000, ); @@ -3488,12 +3508,24 @@ fn microblock_fork_poison_integration_test() { info!("Test microblock"); let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000); + let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); let unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); - let second_unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&second_spender_sk, 0, 1000, &recipient.into(), 1500); + let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &second_spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1500, + ); let second_unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); @@ -3722,7 +3754,14 @@ fn microblock_integration_test() { // okay, let's push a transaction that is marked microblock only! 
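// Editorial sketch, not part of the patch: the neon integration tests pass
// `conf.burnchain.chain_id` instead of a constant, so the same helper signs
// correctly for whatever burnchain mode the test configures. `Config` below is
// the stacks-node config type returned by `neon_integration_test_conf()`:
fn example_mblock_only_transfer(conf: &Config, sk: &StacksPrivateKey, dest: &PrincipalData) -> Vec<u8> {
    make_stacks_transfer_mblock_only(sk, 0, 1000, conf.burnchain.chain_id, dest, 1000)
}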
let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000); + let tx = make_stacks_transfer_mblock_only( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); submit_tx(&http_origin, &tx); info!("Try to mine a microblock-only tx"); @@ -3752,12 +3791,24 @@ fn microblock_integration_test() { // push another two transactions that are marked microblock only let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&spender_sk, 1, 1000, &recipient.into(), 1000); + let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); let unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); - let second_unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&second_spender_sk, 0, 1000, &recipient.into(), 1500); + let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &second_spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1500, + ); let second_unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); @@ -4080,6 +4131,7 @@ fn microblock_integration_test() { &spender_sk, next_nonce, 1000, + conf.burnchain.chain_id, &recipient.into(), 1000, ); @@ -4163,6 +4215,14 @@ fn filter_low_fee_tx_integration_test() { .collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, _) = neon_integration_test_conf(); + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1049230, + }); + } + let txs: Vec<_> = spender_sks .iter() .enumerate() .map(|(ix, spender_sk)| { @@ -4171,22 +4231,28 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee - make_stacks_transfer(&spender_sk, 0, 1000 + (ix as u64), &recipient.into(), 1000) + make_stacks_transfer( + &spender_sk, + 0, + 1000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) } else { // high-fee - make_stacks_transfer(&spender_sk, 0, 2000 + (ix as u64), &recipient.into(), 1000) + make_stacks_transfer( + &spender_sk, + 0, + 2000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) } }) .collect(); - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -4257,15 +4323,6 @@ fn filter_long_runtime_tx_integration_test() { .collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - make_stacks_transfer(&spender_sk, 0, 1000 + (ix as u64), &recipient.into(), 1000) - }) - .collect(); - let (mut conf, _) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { conf.initial_balances.push(InitialBalance { @@ -4278,6 +4335,22 @@ fn filter_long_runtime_tx_integration_test() { conf.miner.first_attempt_time_ms = 0; conf.miner.subsequent_attempt_time_ms = 0; + let txs: Vec<_> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + let recipient =
StacksAddress::from_string(ADDR_4).unwrap(); + make_stacks_transfer( + &spender_sk, + 0, + 1000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) + }) + .collect(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -4343,8 +4416,6 @@ fn miner_submit_twice() { (define-private (bar) (foo 56)) "; - let tx_1 = make_contract_publish(&spender_sk, 0, 50_000, "first-contract", contract_content); - let tx_2 = make_contract_publish(&spender_sk, 1, 50_000, "second-contract", contract_content); let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -4357,6 +4428,23 @@ fn miner_submit_twice() { conf.miner.first_attempt_time_ms = 20; conf.miner.subsequent_attempt_time_ms = 30_000; + let tx_1 = make_contract_publish( + &spender_sk, + 0, + 50_000, + conf.burnchain.chain_id, + "first-contract", + contract_content, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 50_000, + conf.burnchain.chain_id, + "second-contract", + contract_content, + ); + // note: this test depends on timing of how long it takes to assemble a block, // but it won't flake if the miner behaves correctly: a correct miner should // always be able to mine both transactions by the end of this test. an incorrect @@ -4435,18 +4523,28 @@ fn size_check_integration_test() { .collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); + // make a bunch of txs that will only fit one per block. let txs: Vec<_> = spender_sks .iter() .enumerate() .map(|(ix, spender_sk)| { if ix % 2 == 0 { - make_contract_publish(spender_sk, 0, 1049230, "large-0", &giant_contract) + make_contract_publish( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + "large-0", + &giant_contract, + ) } else { let tx = make_contract_publish_microblock_only( spender_sk, 0, 1049230, + conf.burnchain.chain_id, "large-0", &giant_contract, ); @@ -4457,8 +4555,6 @@ fn size_check_integration_test() { }) .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -4603,6 +4699,8 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { .collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); + let txs: Vec<Vec<_>> = spender_sks .iter() .enumerate() @@ -4613,6 +4711,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { spender_sk, 0, 1100000, + conf.burnchain.chain_id, "large-0", &giant_contract, )] @@ -4623,6 +4722,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { spender_sk, i as u64, 1100000, + conf.burnchain.chain_id, &format!("small-{}", i), &small_contract, ); @@ -4633,8 +4733,6 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { }) .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -4810,20 +4908,6 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { .collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec<_> = spender_sks - .iter() - .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( - spender_sk, - 0, - 600000, - "small",
&small_contract, - ); - tx - }) - .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { @@ -4843,6 +4927,21 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let txs: Vec<_> = spender_sks + .iter() + .map(|spender_sk| { + let tx = make_contract_publish_microblock_only( + spender_sk, + 0, + 600000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + tx + }) + .collect(); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -4997,20 +5096,6 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { .collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec<Vec<u8>> = spender_sks - .iter() - .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( - spender_sk, - 0, - 1149230, - "small", - &small_contract, - ); - tx - }) - .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { @@ -5027,6 +5112,21 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; + let txs: Vec<Vec<u8>> = spender_sks + .iter() + .map(|spender_sk| { + let tx = make_contract_publish_microblock_only( + spender_sk, + 0, + 1149230, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + tx + }) + .collect(); + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; conf.burnchain.epochs = Some(epochs); @@ -5160,6 +5260,27 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { let spender_addrs_c32: Vec<StacksAddress> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); + + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1049230, + }); + } + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 15000; + conf.miner.microblock_attempt_time_ms = 120_000; + + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; + conf.burnchain.epochs = Some(epochs); + let txs: Vec<Vec<_>> = spender_sks .iter() .enumerate() @@ -5170,6 +5291,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { spender_sk, 0, 1049230, + conf.burnchain.chain_id, &format!("large-{}", ix), &format!(" ;; a single one of these transactions consumes over half the runtime budget @@ -5224,6 +5346,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { spender_sk, i as u64, 210000, + conf.burnchain.chain_id, &format!("small-{}-{}", ix, i), &format!(" ;; a single one of these transactions consumes over half the runtime budget @@ -5276,27 +5399,6 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { }) .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); - - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 15000; -
conf.miner.microblock_attempt_time_ms = 120_000; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; - conf.burnchain.epochs = Some(epochs); - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -5514,7 +5616,14 @@ fn block_replay_integration_test() { assert_eq!(account.nonce, 0); let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer(&spender_sk, 0, 1000, &recipient.into(), 1000); + let tx = make_stacks_transfer( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); submit_tx(&http_origin, &tx); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -5652,9 +5761,30 @@ fn cost_voting_integration() { assert_eq!(res.nonce, 0); let transactions = vec![ - make_contract_publish(&spender_sk, 0, 1000, "cost-definer", cost_definer_src), - make_contract_publish(&spender_sk, 1, 1000, "caller", caller_src), - make_contract_publish(&spender_sk, 2, 1000, "voter", power_vote_src), + make_contract_publish( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + "cost-definer", + cost_definer_src, + ), + make_contract_publish( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ), + make_contract_publish( + &spender_sk, + 2, + 1000, + conf.burnchain.chain_id, + "voter", + power_vote_src, + ), ]; for tx in transactions.into_iter() { @@ -5668,6 +5798,7 @@ fn cost_voting_integration() { &spender_sk, 3, 1000, + conf.burnchain.chain_id, &spender_addr, "voter", "propose-vote-confirm", @@ -5678,6 +5809,7 @@ fn cost_voting_integration() { &spender_sk, 4, 1000, + conf.burnchain.chain_id, &spender_addr, "caller", "execute-2", @@ -5729,6 +5861,7 @@ fn cost_voting_integration() { &spender_sk, 5, 1000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "cost-voting", "confirm-miners", @@ -5779,6 +5912,7 @@ fn cost_voting_integration() { &spender_sk, 6, 1000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "cost-voting", "confirm-miners", @@ -5823,6 +5957,7 @@ fn cost_voting_integration() { &spender_sk, 7, 1000, + conf.burnchain.chain_id, &spender_addr, "caller", "execute-2", @@ -5884,11 +6019,6 @@ fn mining_events_integration_test() { let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let addr_2 = to_addr(&spender_sk_2); - let tx = make_contract_publish(&spender_sk, 0, 600000, "small", &small_contract); - let tx_2 = make_contract_publish(&spender_sk, 1, 610000, "small", &small_contract); - let mb_tx = - make_contract_publish_microblock_only(&spender_sk_2, 0, 620000, "small", &small_contract); - let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -5907,6 +6037,31 @@ fn mining_events_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let tx = make_contract_publish( + &spender_sk, + 0, + 600000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 610000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let mb_tx = make_contract_publish_microblock_only( + &spender_sk_2, + 0, + 620000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + test_observer::spawn(); test_observer::register( &mut 
conf, @@ -6146,15 +6301,6 @@ fn block_limit_hit_integration_test() { let third_spender_sk = StacksPrivateKey::new(); let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); - // included in first block - let tx = make_contract_publish(&spender_sk, 0, 555_000, "over", &oversize_contract_src); - // contract limit hit; included in second block - let tx_2 = make_contract_publish(&spender_sk, 1, 555_000, "over-2", &oversize_contract_src); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish(&second_spender_sk, 0, 150_000, "max", &max_contract_src); - // included in first block - let tx_4 = make_stacks_transfer(&third_spender_sk, 0, 180, &PrincipalData::from(addr), 100); - let (mut conf, _miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -6177,6 +6323,43 @@ fn block_limit_hit_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + // included in first block + let tx = make_contract_publish( + &spender_sk, + 0, + 555_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + // contract limit hit; included in second block + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 555_000, + conf.burnchain.chain_id, + "over-2", + &oversize_contract_src, + ); + // skipped over since contract limit was hit; included in second block + let tx_3 = make_contract_publish( + &second_spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "max", + &max_contract_src, + ); + // included in first block + let tx_4 = make_stacks_transfer( + &third_spender_sk, + 0, + 180, + conf.burnchain.chain_id, + &PrincipalData::from(addr), + 100, + ); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -6330,39 +6513,6 @@ fn microblock_limit_hit_integration_test() { let third_spender_sk = StacksPrivateKey::new(); let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); - // included in the first block - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 555_000, - "over", - &oversize_contract_src, - ); - // contract limit hit; included in second block - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 555_000, - "over-2", - &oversize_contract_src, - ); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish_microblock_only( - &second_spender_sk, - 0, - 150_000, - "max", - &max_contract_src, - ); - // included in first block - let tx_4 = make_stacks_transfer_mblock_only( - &third_spender_sk, - 0, - 180, - &PrincipalData::from(addr), - 100, - ); - let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -6428,6 +6578,43 @@ fn microblock_limit_hit_integration_test() { ]); conf.burnchain.pox_2_activation = Some(10_003); + // included in the first block + let tx = make_contract_publish_microblock_only( + &spender_sk, + 0, + 555_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + // contract limit hit; included in second block + let tx_2 = make_contract_publish_microblock_only( + &spender_sk, + 1, + 555_000, + conf.burnchain.chain_id, + "over-2", + &oversize_contract_src, + ); + // skipped over since contract limit was hit; included in second block + let tx_3 = make_contract_publish_microblock_only( + &second_spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "max", + &max_contract_src, + ); + // included in first block + let 
tx_4 = make_stacks_transfer_mblock_only( + &third_spender_sk, + 0, + 180, + conf.burnchain.chain_id, + &PrincipalData::from(addr), + 100, + ); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -6569,10 +6756,6 @@ fn block_large_tx_integration_test() { let spender_sk = StacksPrivateKey::new(); let spender_addr = to_addr(&spender_sk); - // higher fee for tx means it will get mined first - let tx = make_contract_publish(&spender_sk, 0, 671_000, "small", &small_contract_src); - let tx_2 = make_contract_publish(&spender_sk, 1, 670_000, "over", &oversize_contract_src); - let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); test_observer::register_any(&mut conf); @@ -6593,6 +6776,24 @@ fn block_large_tx_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + // higher fee for tx means it will get mined first + let tx = make_contract_publish( + &spender_sk, + 0, + 671_000, + conf.burnchain.chain_id, + "small", + &small_contract_src, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 670_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -6691,21 +6892,6 @@ fn microblock_large_tx_integration_test_FLAKY() { let spender_sk = StacksPrivateKey::new(); let addr = to_addr(&spender_sk); - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 150_000, - "small", - &small_contract_src, - ); - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 670_000, - "over", - &oversize_contract_src, - ); - let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); @@ -6728,6 +6914,23 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; + let tx = make_contract_publish_microblock_only( + &spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "small", + &small_contract_src, + ); + let tx_2 = make_contract_publish_microblock_only( + &spender_sk, + 1, + 670_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -6981,6 +7184,7 @@ fn pox_integration_test() { &spender_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -7096,6 +7300,7 @@ fn pox_integration_test() { &spender_2_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -7119,6 +7324,7 @@ fn pox_integration_test() { &spender_3_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -7461,6 +7667,7 @@ fn atlas_integration_test() { &user_1, 0, 260, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-preorder", @@ -7520,6 +7727,7 @@ fn atlas_integration_test() { &user_1, 1, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-reveal", @@ -7582,6 +7790,7 @@ fn atlas_integration_test() { &user_1, 2, 500, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-import", @@ -7699,6 
+7908,7 @@ fn atlas_integration_test() { }; let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); + let chain_id = conf_follower_node.burnchain.chain_id; let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); eprintln!("Chain bootstrapped..."); @@ -7777,6 +7987,7 @@ fn atlas_integration_test() { &user_1, 2 + i, 500, + chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-import", @@ -8245,6 +8456,7 @@ fn atlas_stress_integration_test() { &user_1, 0, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-preorder", @@ -8304,6 +8516,7 @@ fn atlas_stress_integration_test() { &user_1, 1, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-reveal", @@ -8392,6 +8605,7 @@ fn atlas_stress_integration_test() { &user_1, 2 + (batch_size * i + j) as u64, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-import", @@ -8461,6 +8675,7 @@ fn atlas_stress_integration_test() { &user_1, 2 + (batch_size as u64) * (batches as u64), 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-ready", @@ -8521,6 +8736,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 0, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-preorder", @@ -8580,6 +8796,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 1, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-register", @@ -8643,6 +8860,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 2, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-update", @@ -8705,6 +8923,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 3, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-renewal", @@ -8960,6 +9179,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value &spender_sk, 0, 110000, + conf.burnchain.chain_id, "increment-contract", &max_contract_src, ), @@ -8977,6 +9197,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value &spender_sk, i, // nonce i * 100000, // payment + conf.burnchain.chain_id, &spender_addr.into(), "increment-contract", "increment-many", @@ -9172,15 +9393,27 @@ fn use_latest_tip_integration_test() { // Make microblock with two transactions. 
    let recipient = StacksAddress::from_string(ADDR_4).unwrap();
-    let transfer_tx =
-        make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000);
+    let transfer_tx = make_stacks_transfer_mblock_only(
+        &spender_sk,
+        0,
+        1000,
+        conf.burnchain.chain_id,
+        &recipient.into(),
+        1000,
+    );
 
     let caller_src = "
     (define-public (execute)
        (ok stx-liquid-supply))
     ";
-    let publish_tx =
-        make_contract_publish_microblock_only(&spender_sk, 1, 1000, "caller", caller_src);
+    let publish_tx = make_contract_publish_microblock_only(
+        &spender_sk,
+        1,
+        1000,
+        conf.burnchain.chain_id,
+        "caller",
+        caller_src,
+    );
 
     let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
     let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap();
@@ -9527,6 +9760,7 @@ fn test_problematic_txs_are_not_stored() {
         &spender_sk_1,
         0,
         (tx_edge_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-edge",
         &tx_edge_body,
     );
@@ -9544,6 +9778,7 @@
         &spender_sk_2,
         0,
         (tx_exceeds_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-exceeds",
         &tx_exceeds_body,
     );
@@ -9561,6 +9796,7 @@
         &spender_sk_3,
         0,
         (tx_high_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-high",
         &tx_high_body,
     );
@@ -9770,6 +10006,7 @@ fn test_problematic_blocks_are_not_mined() {
         &spender_sk_2,
         0,
         (tx_exceeds_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-exceeds",
         &tx_exceeds_body,
     );
@@ -9787,6 +10024,7 @@
         &spender_sk_3,
         0,
         (tx_high_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-high",
         &tx_high_body,
     );
@@ -10123,6 +10361,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() {
         &spender_sk_2,
         0,
         (tx_exceeds_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-exceeds",
         &tx_exceeds_body,
     );
@@ -10139,6 +10378,7 @@
         &spender_sk_3,
         0,
         (tx_high_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-high",
         &tx_high_body,
     );
@@ -10518,6 +10758,7 @@ fn test_problematic_microblocks_are_not_mined() {
         &spender_sk_2,
         0,
         (tx_exceeds_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-exceeds",
         &tx_exceeds_body,
     );
@@ -10536,6 +10777,7 @@
         &spender_sk_3,
         0,
         (tx_high_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-high",
         &tx_high_body,
     );
@@ -10898,6 +11140,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() {
         &spender_sk_2,
         0,
         (tx_exceeds_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-exceeds",
         &tx_exceeds_body,
     );
@@ -10916,6 +11159,7 @@
         &spender_sk_3,
         0,
         (tx_high_body.len() * 100) as u64,
+        conf.burnchain.chain_id,
         "test-high",
         &tx_high_body,
     );
@@ -11389,6 +11633,7 @@ enum TxChainStrategy {
 pub fn make_expensive_tx_chain(
     privk: &StacksPrivateKey,
     fee_plus: u64,
+    chain_id: u32,
     mblock_only: bool,
 ) -> Vec<Vec<u8>> {
     let addr = to_addr(&privk);
@@ -11403,6 +11648,7 @@
             privk,
             nonce,
             1049230 + nonce + fee_plus,
+            chain_id,
             &contract_name,
             &make_runtime_sized_contract(256, nonce, &addr_prefix),
         )
@@ -11411,6 +11657,7 @@
             privk,
             nonce,
             1049230 + nonce + fee_plus,
+            chain_id,
             &contract_name,
             &make_runtime_sized_contract(256, nonce, &addr_prefix),
         )
@@ -11423,6 +11670,7 @@ pub fn make_random_tx_chain(
     privk: &StacksPrivateKey,
     fee_plus: u64,
+    chain_id: u32,
     mblock_only: bool,
 ) -> Vec<Vec<u8>> {
     let addr = to_addr(&privk);
@@ -11448,6 +11696,7 @@ pub fn make_random_tx_chain(
             privk,
             nonce,
             1049230 + nonce + fee_plus + random_extra_fee,
+            chain_id,
             &contract_name,
             &make_runtime_sized_contract(random_iters, nonce, &addr_prefix),
         )
@@ -11456,6 +11705,7 @@
             privk,
             nonce,
             1049230 + nonce + fee_plus + random_extra_fee,
+            chain_id,
             &contract_name,
             &make_runtime_sized_contract(random_iters, nonce, &addr_prefix),
         )
@@ -11465,7 +11715,7 @@ pub fn make_random_tx_chain(
     chain
 }
 
-fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64) -> Vec<Vec<u8>> {
+fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec<Vec<u8>> {
     let addr = to_addr(&privk);
 
     let mut chain = vec![];
@@ -11488,6 +11738,7 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64) -> Vec<Vec<u8>>
             privk,
             nonce,
             1049230 + nonce + fee_plus + random_extra_fee,
+            chain_id,
             &contract_name,
             &make_runtime_sized_contract(1, nonce, &addr_prefix),
         );
@@ -11547,6 +11798,8 @@ fn test_competing_miners_build_on_same_chain(
         confs.push(conf);
     }
 
+    let chain_id = confs[0].burnchain.chain_id;
+
     let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed);
     for i in 1..num_miners {
         let chain_id = confs[0].burnchain.chain_id;
@@ -11674,8 +11927,12 @@ fn test_competing_miners_build_on_same_chain(
         .iter()
         .enumerate()
         .map(|(i, pk)| match chain_strategy {
-            TxChainStrategy::Expensive => make_expensive_tx_chain(pk, (25 * i) as u64, mblock_only),
-            TxChainStrategy::Random => make_random_tx_chain(pk, (25 * i) as u64, mblock_only),
+            TxChainStrategy::Expensive => {
+                make_expensive_tx_chain(pk, (25 * i) as u64, chain_id, mblock_only)
+            }
+            TxChainStrategy::Random => {
+                make_random_tx_chain(pk, (25 * i) as u64, chain_id, mblock_only)
+            }
         })
         .collect();
     let mut cnt = 0;
@@ -11755,6 +12012,7 @@ fn test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain() {
 #[ignore]
 fn microblock_miner_multiple_attempts() {
     let (mut conf, miner_account) = neon_integration_test_conf();
+    let chain_id = conf.burnchain.chain_id;
 
     conf.node.mine_microblocks = true;
     conf.miner.microblock_attempt_time_ms = 2_000;
@@ -11823,7 +12081,7 @@ fn microblock_miner_multiple_attempts() {
     let all_txs: Vec<_> = privks
         .iter()
         .enumerate()
-        .map(|(i, pk)| make_mblock_tx_chain(pk, (25 * i) as u64))
+        .map(|(i, pk)| make_mblock_tx_chain(pk, (25 * i) as u64, chain_id))
         .collect();
 
     let _handle = thread::spawn(move || {
@@ -11923,6 +12181,7 @@ fn min_txs() {
             &spender_sk,
             i as u64,
             1000,
+            conf.burnchain.chain_id,
             &format!("test-publish-{}", &i),
             &code,
         );
@@ -12026,6 +12285,7 @@ fn filter_txs_by_type() {
             &spender_sk,
             i as u64,
             1000,
+            conf.burnchain.chain_id,
             &format!("test-publish-{}", &i),
             &code,
         );
@@ -12136,6 +12396,7 @@ fn filter_txs_by_origin() {
             &spender_sk,
             i as u64,
             1000,
+            conf.burnchain.chain_id,
             &format!("test-publish-{}", &i),
             &code,
         );
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 8c48eda5e89..347c7c6c589 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -139,6 +139,7 @@ impl SignerTest<SpawnedSigner> {
         &stacker_sk,
         0,
         1000,
+        self.running_nodes.conf.burnchain.chain_id,
         &StacksAddress::burn_address(false),
         "pox-4",
         "stack-stx",
@@ -1140,8 +1141,14 @@ fn forked_tenure_testing(
     let start_time = Instant::now();
     // submit a tx so that the miner will mine an extra block
     let sender_nonce = 0;
-    let transfer_tx =
-
make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in Tenure C to mine a second block"); while mined_blocks.load(Ordering::SeqCst) <= blocks_before { @@ -2083,8 +2090,14 @@ fn end_of_tenure() { let start_height = info.stacks_tip_height; // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); @@ -2194,8 +2207,14 @@ fn retry_on_rejection() { let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine the first Nakamoto block"); @@ -2231,8 +2250,14 @@ fn retry_on_rejection() { .load(Ordering::SeqCst); // submit a tx so that the miner will mine a block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); @@ -2338,8 +2363,14 @@ fn signers_broadcast_signed_blocks() { // submit a tx so that the miner will mine a blockn let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); debug!("Transaction sent; waiting for block-mining"); @@ -2484,8 +2515,14 @@ fn empty_sortition() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); @@ -3040,8 +3077,14 @@ fn signer_set_rollover() { info!("---- Mining a block to trigger the signer set -----"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); 
signer_test.mine_nakamoto_block(short_timeout); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); @@ -3091,6 +3134,7 @@ fn signer_set_rollover() { &stacker_sk, 0, 1000, + signer_test.running_nodes.conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -3163,8 +3207,14 @@ fn signer_set_rollover() { info!("---- Mining a block to verify new signer set -----"); let sender_nonce = 1; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); @@ -3248,8 +3298,14 @@ fn min_gap_between_blocks() { // Submit a tx so that the miner will mine a block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block to be processed. Ensure it does not arrive before the gap is exceeded"); @@ -3574,8 +3630,14 @@ fn multiple_miners_with_nakamoto_blocks() { let blocks_processed_before = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -3915,8 +3977,14 @@ fn partial_tenure_fork() { // submit a tx so that the miner will mine an extra block let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); // This may fail if the forking miner wins too many tenures and this account's // nonces get too high (TooMuchChaining) match submit_tx_fallible(&http_origin, &transfer_tx) { @@ -4100,8 +4168,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let blocks_before = mined_blocks.load(Ordering::SeqCst); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); wait_for(short_timeout_secs, || { @@ -4143,8 +4217,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = 
make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} to mine block N+1"); @@ -4171,8 +4251,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(Vec::new()); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1'"); @@ -4271,8 +4357,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); @@ -4325,8 +4417,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N+1"); @@ -4381,8 +4479,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .replace(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); wait_for(30, || { @@ -4474,8 +4578,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); @@ -4516,8 +4626,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { test_observer::clear(); // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); @@ -4684,8 +4800,14 
@@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); @@ -4727,8 +4849,14 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .get_peer_info() .expect("Failed to get peer info"); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; let tx = submit_tx(&http_origin, &transfer_tx); @@ -4890,8 +5018,14 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } // Induce block N+2 to get mined - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+2"); diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index f7089c3f33c..aa620d349b3 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -206,7 +206,14 @@ fn test_stackerdb_load_store() { let http_origin = format!("http://{}", &conf.node.rpc_bind); eprintln!("Send contract-publish..."); - let tx = make_contract_publish(&privks[0], 0, 10_000, "hello-world", stackerdb_contract); + let tx = make_contract_publish( + &privks[0], + 0, + 10_000, + conf.burnchain.chain_id, + "hello-world", + stackerdb_contract, + ); submit_tx(&http_origin, &tx); // mine it @@ -336,7 +343,14 @@ fn test_stackerdb_event_observer() { let http_origin = format!("http://{}", &conf.node.rpc_bind); eprintln!("Send contract-publish..."); - let tx = make_contract_publish(&privks[0], 0, 10_000, "hello-world", stackerdb_contract); + let tx = make_contract_publish( + &privks[0], + 0, + 10_000, + conf.burnchain.chain_id, + "hello-world", + stackerdb_contract, + ); submit_tx(&http_origin, &tx); // mine it From e6d54fbe3193d05d809d82594d4e13e229e0c5cd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 11 Oct 2024 14:25:02 -0400 Subject: [PATCH 792/910] test: add integration test verifying `chain_id` config --- .../src/tests/nakamoto_integrations.rs | 332 ++++++++++++++++++ 1 file changed, 332 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index eb76ac3e7a2..4b331121151 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3958,6 +3958,338 @@ fn follower_bootup_across_multiple_cycles() { follower_thread.join().unwrap(); } +/// Boot up a node and a follower with a non-default chain id +#[test] +#[ignore] +fn follower_bootup_custom_chain_id() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = 
naka_neon_integration_conf(None); + naka_conf.burnchain.chain_id = 0x87654321; + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + let mut follower_conf = naka_conf.clone(); + follower_conf.node.miner = false; + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); + 
follower_conf.node.pox_sync_sample_secs = 30; + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + naka_conf.burnchain.chain_id, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_nonce = None; + + debug!( + "follower_bootup: Miner mines interum blocks for tenure {}", + tenure_ix + ); + + // mine the interim blocks + for _ in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let account = loop { + // submit a tx so that the miner will mine an extra block + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + debug!("follower_bootup: Failed to load miner account"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + break account; + }; + + let sender_nonce = account + .nonce + .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + last_nonce = Some(sender_nonce); + + let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); + + debug!("follower_bootup: Miner account: {:?}", &account); + debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + + let now = get_epoch_time_secs(); + while get_epoch_time_secs() < now + 10 { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: Could not get miner chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + if follower_info.burn_block_height < info.burn_block_height { + debug!("follower_bootup: Follower is behind miner's burnchain view"); + thread::sleep(Duration::from_millis(100)); + continue; + } + + if info.stacks_tip == last_tip { + debug!( + "follower_bootup: Miner stacks tip hasn't changed ({})", + &info.stacks_tip + ); + thread::sleep(Duration::from_millis(100)); + continue; + } + + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + if blocks_processed > blocks_processed_before { + break; + } + + debug!("follower_bootup: No blocks 
processed yet"); + thread::sleep(Duration::from_millis(100)); + } + + // compare chain tips + loop { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: failed to load tip info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + if info.stacks_tip == follower_info.stacks_tip { + debug!( + "follower_bootup: Follower has advanced to miner's tip {}", + &info.stacks_tip + ); + } else { + debug!( + "follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", + &info.stacks_tip, follower_info.stacks_tip + ); + } + + last_tip = info.stacks_tip; + break; + } + } + + debug!("follower_bootup: Wait for next block-commit"); + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + debug!("follower_bootup: Block commit submitted"); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + // wait for follower to reach the chain tip + loop { + sleep_ms(1000); + let follower_node_info = get_chain_info(&follower_conf); + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash() + { + break; + } + } + + // Verify both nodes have the correct chain id + let miner_info = get_chain_info(&naka_conf); + assert_eq!(miner_info.network_id, 0x87654321); + + let follower_info = get_chain_info(&follower_conf); + assert_eq!(follower_info.network_id, 0x87654321); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} + #[test] #[ignore] /// Test out various burn operations being processed in Nakamoto. 
From 7b238b5a5217d19a9e50a0f0206679cf5ce98198 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Fri, 11 Oct 2024 15:04:42 -0400
Subject: [PATCH 793/910] chore: address PR feedback and fix failing unit test

---
 stackslib/src/net/stackerdb/sync.rs       | 36 ++++++++++++-----------
 stackslib/src/net/stackerdb/tests/sync.rs |  9 +++++-
 2 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs
index d6610c20fcd..237f582d262 100644
--- a/stackslib/src/net/stackerdb/sync.rs
+++ b/stackslib/src/net/stackerdb/sync.rs
@@ -226,24 +226,29 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
         if self.last_eviction_time + 60 < get_epoch_time_secs() {
             self.last_eviction_time = get_epoch_time_secs();
             if self.replicas.len() > 0 {
-                eviction_index = Some(thread_rng().gen::<usize>() % self.replicas.len());
+                eviction_index = Some(thread_rng().gen_range(0..self.replicas.len()));
             }
         }
 
-        let mut remove_naddr = None;
-        for (i, naddr) in self.replicas.iter().enumerate() {
-            if let Some(eviction_index) = eviction_index.as_ref() {
-                if *eviction_index == i {
-                    debug!(
-                        "{:?}: {}: don't reuse connection for replica {:?}",
-                        network.get_local_peer(),
-                        &self.smart_contract_id,
-                        &naddr,
-                    );
-                    remove_naddr = Some(naddr.clone());
-                    continue;
-                }
+        let remove_naddr = eviction_index.and_then(|idx| {
+            let removed = self.replicas.iter().nth(idx).cloned();
+            if let Some(naddr) = removed.as_ref() {
+                debug!(
+                    "{:?}: {}: don't reuse connection for replica {:?}",
+                    network.get_local_peer(),
+                    &self.smart_contract_id,
+                    &naddr,
+                );
             }
+            removed
+        });
+
+        if let Some(naddr) = remove_naddr {
+            self.replicas.remove(&naddr);
+        }
+
+        // retain the remaining replica connections
+        for naddr in self.replicas.iter() {
             if let Some(event_id) = network.get_event_id(&naddr.to_neighbor_key(network)) {
                 self.comms.pin_connection(event_id);
                 debug!(
@@ -255,9 +260,6 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
                 );
             }
         }
-        if let Some(naddr) = remove_naddr.take() {
-            self.replicas.remove(&naddr);
-        }
     }
 
     // reload from config
diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs
index 746a3f09634..5f6e8a7beda 100644
--- a/stackslib/src/net/stackerdb/tests/sync.rs
+++ b/stackslib/src/net/stackerdb/tests/sync.rs
@@ -199,7 +199,14 @@ fn test_reconnect(network: &mut PeerNetwork) {
         .expect("FATAL: did not replace stacker dbs");
 
     for (_sc, stacker_db_sync) in stacker_db_syncs.iter_mut() {
-        stacker_db_sync.connect_begin(network).unwrap();
+        match stacker_db_sync.connect_begin(network) {
+            Ok(x) => {}
+            Err(net_error::PeerNotConnected) => {}
+            Err(net_error::NoSuchNeighbor) => {}
+            Err(e) => {
+                panic!("Failed to connect_begin: {:?}", &e);
+            }
+        }
     }
 
     network.stacker_db_syncs = Some(stacker_db_syncs);

From c4b4635da08cc817dd41ea65dce257e1de7c2311 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Fri, 11 Oct 2024 20:27:03 -0700
Subject: [PATCH 794/910] Do not issue a BurnchainTipChanged error unless
 there is a new sortition

Signed-off-by: Jacinta Ferrant

---
 testnet/stacks-node/src/nakamoto_node/miner.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index af539db5b1d..dad7719c44e 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -1201,11 +1201,14 @@ impl BlockMinerThread {
     }
 
     /// Check if the tenure needs to change -- if so, return a BurnchainTipChanged error
+    /// The tenure should change if there is a new
burnchain tip with a valid sortition fn check_burn_tip_changed(&self, sortdb: &SortitionDB) -> Result<(), NakamotoNodeError> { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash { + if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash + && cur_burn_chain_tip.sortition_id != self.burn_block.sortition_id + { info!("Miner: Cancel block assembly; burnchain tip has changed"); self.globals.counters.bump_missed_tenures(); Err(NakamotoNodeError::BurnchainTipChanged) From 98f187c1aef54a6c706acc95e68bf20c5f042c8d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sat, 12 Oct 2024 13:16:01 -0700 Subject: [PATCH 795/910] feat: update continue_tenure_extend to ensure naka blocks after TenureExtend --- .../src/tests/nakamoto_integrations.rs | 39 ++++++++++++++++--- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e25b7799a9f..eb0ba196480 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -6506,10 +6506,10 @@ fn continue_tenure_extend() { // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; - let send_fee = 100; + let send_fee = 200; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - send_amt * 2 + send_fee, + (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -6519,6 +6519,7 @@ fn continue_tenure_extend() { ); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); + let mut transfer_nonce = 0; test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -6637,7 +6638,8 @@ fn continue_tenure_extend() { .unwrap(); // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = + make_stacks_transfer(&sender_sk, transfer_nonce, send_fee, &recipient, send_amt); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -6694,6 +6696,20 @@ fn continue_tenure_extend() { }) .unwrap(); + // Mine 3 nakamoto tenures + for i in 0..3 { + info!("Triggering Nakamoto blocks after extend ({})", i + 1); + transfer_nonce += 1; + let transfer_tx = + make_stacks_transfer(&sender_sk, transfer_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + wait_for(10, || { + let sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(sender_nonce >= transfer_nonce) + }) + .expect("Timed out waiting for transfer TX to confirm"); + } + info!("Resuming commit ops to mine regular tenures."); 
test_skip_commit_op.0.lock().unwrap().replace(false); @@ -6731,7 +6747,9 @@ fn continue_tenure_extend() { let mut tenure_extends = vec![]; let mut tenure_block_founds = vec![]; let mut transfer_tx_included = false; + let mut last_block_had_extend = false; for block in test_observer::get_blocks() { + let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); if raw_tx == &transfer_tx_hex { @@ -6745,12 +6763,21 @@ fn continue_tenure_extend() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); match &parsed.payload { TransactionPayload::TenureChange(payload) => match payload.cause { - TenureChangeCause::Extended => tenure_extends.push(parsed), - TenureChangeCause::BlockFound => tenure_block_founds.push(parsed), + TenureChangeCause::Extended => { + has_extend = true; + tenure_extends.push(parsed); + } + TenureChangeCause::BlockFound => { + if last_block_had_extend { + panic!("Expected a Nakamoto block to happen after tenure extend block"); + } + tenure_block_founds.push(parsed); + } }, _ => {} }; } + last_block_had_extend = has_extend; } assert!( !tenure_extends.is_empty(), From 738db8d1fa0ff020034a191e4026fea41ca34243 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sat, 12 Oct 2024 15:14:06 -0700 Subject: [PATCH 796/910] feat: signer test for naka blocks after TenureExtend --- .../stacks-node/src/nakamoto_node/miner.rs | 6 +- .../src/nakamoto_node/sign_coordinator.rs | 8 +- testnet/stacks-node/src/tests/signer/v0.rs | 125 +++++++++++++++++- 3 files changed, 133 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index dad7719c44e..a27f617fffb 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -383,7 +383,7 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - return Err(e); + continue; } _ => { error!("Error while gathering signatures: {e:?}. Will try mining again."; @@ -1247,7 +1247,9 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash + && burn_chain_tip.sortition_id != check_burn_block.sortition_id + { info!( "New canonical burn chain tip detected. 
Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index f570009be54..cf0eaa67699 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -251,11 +251,13 @@ impl SignCoordinator { } /// Check if the tenure needs to change - fn check_burn_tip_changed(sortdb: &SortitionDB, consensus_hash: &ConsensusHash) -> bool { + fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != *consensus_hash { + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash + && cur_burn_chain_tip.sortition_id != burn_block.sortition_id + { info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); true } else { @@ -365,7 +367,7 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { + if Self::check_burn_tip_changed(&sortdb, &burn_tip) { debug!("SignCoordinator: Exiting due to new burnchain tip"); return Err(NakamotoNodeError::BurnchainTipChanged); } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d1ceedfebf6..52427daf463 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -34,6 +34,7 @@ use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoC use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; @@ -42,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; +use stacks::util::hash::{hex_bytes, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -4942,6 +4943,128 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { assert_ne!(block_n_2, block_n); } +#[test] +#[ignore] +/// Test that we can mine a tenure extend and then continue mining afterwards. 
+fn continue_after_tenure_extend() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let send_amt = 100;
+    let send_fee = 180;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr.clone(), (send_amt + send_fee) * 5)],
+    );
+    let timeout = Duration::from_secs(200);
+    let coord_channel = signer_test.running_nodes.coord_channel.clone();
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    signer_test.boot_to_epoch_3();
+
+    info!("------------------------- Mine Normal Tenure -------------------------");
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+
+    info!("------------------------- Extend Tenure -------------------------");
+    signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .0
+        .lock()
+        .unwrap()
+        .replace(true);
+
+    // It's possible that we have a pending block commit already.
+    // Mine two BTC blocks to "flush" this commit.
+
+    let mut blocks_processed_before = coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .get_stacks_blocks_processed();
+
+    for i in 0..2 {
+        info!(
+            "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------",
+            i + 1
+        );
+
+        blocks_processed_before = coord_channel
+            .lock()
+            .expect("Mutex poisoned")
+            .get_stacks_blocks_processed();
+        signer_test
+            .running_nodes
+            .btc_regtest_controller
+            .build_next_block(1);
+
+        wait_for(60, || {
+            let blocks_processed_after = coord_channel
+                .lock()
+                .expect("Mutex poisoned")
+                .get_stacks_blocks_processed();
+            Ok(blocks_processed_after > blocks_processed_before)
+        })
+        .expect("Timed out waiting for tenure extend block");
+    }
+
+    // The last block should have a single instruction in it, the tenure extend
+    let blocks = test_observer::get_blocks();
+    let last_block = blocks.last().unwrap();
+    let transactions = last_block["transactions"].as_array().unwrap();
+    let tx = transactions.first().expect("No transactions in block");
+    let raw_tx = tx["raw_tx"].as_str().unwrap();
+    let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+    let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+    match &parsed.payload {
+        TransactionPayload::TenureChange(payload)
+            if payload.cause == TenureChangeCause::Extended => {}
+        _ => panic!("Expected tenure extend transaction, got {:?}", parsed),
+    };
+
+    // Verify that the miner can continue mining in the tenure with the tenure extend
+    info!("------------------------- Mine After Tenure Extend -------------------------");
+    let mut sender_nonce = 0;
+    blocks_processed_before = coord_channel
+        .lock()
+        .expect("Mutex poisoned")
+        .get_stacks_blocks_processed();
+    for _ in 0..5 {
+        // submit a tx so that the miner will mine an extra block
+        let transfer_tx =
+            make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
+        sender_nonce += 1;
+        submit_tx(&http_origin, &transfer_tx);
+
+        info!("Submitted transfer tx and waiting for block proposal");
+        wait_for(30, || {
+            let blocks_processed_after = coord_channel
+                .lock()
+                .expect("Mutex poisoned")
+                .get_stacks_blocks_processed();
+            Ok(blocks_processed_after > blocks_processed_before)
+        })
+        .expect("Timed out waiting for block proposal");
+        blocks_processed_before = coord_channel
+            .lock()
+            .expect("Mutex poisoned")
+            .get_stacks_blocks_processed();
+        info!("Block {blocks_processed_before} processed, continuing");
+    }
+
+    signer_test.shutdown();
+}
+
 #[test]
 #[ignore]
 /// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle

From ff8f55d7657695cf656438f2376c7772fbb2839a Mon Sep 17 00:00:00 2001
From: Hank Stoever
Date: Sat, 12 Oct 2024 15:43:14 -0700
Subject: [PATCH 797/910] fix: add new signer test to bitcoin-tests

---
 .github/workflows/bitcoin-tests.yml        | 1 +
 testnet/stacks-node/src/tests/signer/v0.rs | 9 ++-------
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index 4115118eaf8..7ad1014cdea 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -118,6 +118,7 @@ jobs:
           - tests::signer::v0::mine_2_nakamoto_reward_cycles
           - tests::signer::v0::signer_set_rollover
           - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle
+          - tests::signer::v0::continue_after_tenure_extend
           - tests::nakamoto_integrations::burn_ops_integration_test
           - tests::nakamoto_integrations::check_block_heights
           - tests::nakamoto_integrations::clarity_burn_state
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 52427daf463..6336cc5d873 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -4988,18 +4988,13 @@ fn continue_after_tenure_extend() {
     // It's possible that we have a pending block commit already.
     // Mine two BTC blocks to "flush" this commit.

-    let mut blocks_processed_before = coord_channel
-        .lock()
-        .expect("Mutex poisoned")
-        .get_stacks_blocks_processed();
-
     for i in 0..2 {
         info!(
             "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------",
             i + 1
         );

-        blocks_processed_before = coord_channel
+        let mut blocks_processed_before = coord_channel
             .lock()
             .expect("Mutex poisoned")
             .get_stacks_blocks_processed();
@@ -5035,7 +5030,7 @@ fn continue_after_tenure_extend() {
     // Verify that the miner can continue mining in the tenure with the tenure extend
     info!("------------------------- Mine After Tenure Extend -------------------------");
     let mut sender_nonce = 0;
-    blocks_processed_before = coord_channel
+    let mut blocks_processed_before = coord_channel
         .lock()
         .expect("Mutex poisoned")
         .get_stacks_blocks_processed();

From db9d22e4463971452793acfa66ec622096237a59 Mon Sep 17 00:00:00 2001
From: Adriano Di Luzio
Date: Mon, 14 Oct 2024 13:32:42 +0200
Subject: [PATCH 798/910] Return `tenure_height` in `v2/info`

---
 stacks-signer/src/client/mod.rs  |  1 +
 stackslib/src/net/api/getinfo.rs | 46 +++++++++++++++++++++++-----
 2 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs
index 3fe9f09354e..2cb8155f61a 100644
--- a/stacks-signer/src/client/mod.rs
+++ b/stacks-signer/src/client/mod.rs
@@ -322,6 +322,7 @@ pub(crate) mod tests {
             stacks_tip_consensus_hash: generate_random_consensus_hash(),
             unanchored_tip: None,
             unanchored_seq: Some(0),
+            tenure_height: None,
             exit_at_block_height: None,
             is_fully_synced: false,
             genesis_chainstate_hash: Sha256Sum::zero(),
diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs
index 237205f63a8..cae71908490 100644
--- a/stackslib/src/net/api/getinfo.rs
+++ b/stackslib/src/net/api/getinfo.rs @@ -27,11 +27,12 @@ use stacks_common::util::hash::{Hash160, Sha256Sum}; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::core::mempool::MemPoolDB; use crate::net::http::{ parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, }; use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, @@ -81,6 +82,7 @@ pub struct RPCPeerInfoData { pub genesis_chainstate_hash: Sha256Sum, pub unanchored_tip: Option, pub unanchored_seq: Option, + pub tenure_height: Option, pub exit_at_block_height: Option, pub is_fully_synced: bool, #[serde(default)] @@ -106,6 +108,7 @@ impl RPCPeerInfoData { chainstate: &StacksChainState, exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, + coinbase_height: Option, ibd: bool, ) -> RPCPeerInfoData { let server_version = version_string( @@ -148,7 +151,7 @@ impl RPCPeerInfoData { stacks_tip_consensus_hash: network.stacks_tip.consensus_hash.clone(), unanchored_tip: unconfirmed_tip, unanchored_seq: unconfirmed_seq, - exit_at_block_height: exit_at_block_height, + exit_at_block_height, is_fully_synced, genesis_chainstate_hash: genesis_chainstate_hash.clone(), node_public_key: Some(public_key_buf), @@ -169,6 +172,7 @@ impl RPCPeerInfoData { .map(|cid| format!("{}", cid)) .collect(), ), + tenure_height: coinbase_height, } } } @@ -217,16 +221,46 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { let ibd = node.ibd; - let rpc_peer_info = - node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { - RPCPeerInfoData::from_network( + + let rpc_peer_info: Result = + node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { + let header = self + .get_stacks_chain_tip(&preamble, sortdb, chainstate) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)), + ) + })?; + + let coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &header.index_block_hash(), + ) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e)), + ) + })?; + + Ok(RPCPeerInfoData::from_network( network, chainstate, rpc_args.exit_at_block_height.clone(), &rpc_args.genesis_chainstate_hash, + coinbase_height, ibd, - ) + )) }); + + let rpc_peer_info = match rpc_peer_info { + Ok(rpc_peer_info) => rpc_peer_info, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); let body = HttpResponseContents::try_from_json(&rpc_peer_info)?; From 53018c16e070b776fce5b4b12e2f380b4314de37 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:33:32 +0200 Subject: [PATCH 799/910] Add unit tests --- stackslib/src/net/api/tests/getinfo.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index 7d8aeff01c8..2a0ae5eaf93 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -50,7 +50,7 @@ fn test_try_parse_request() { .try_parse_request(&parsed_preamble.expect_request(), &bytes[offset..]) .unwrap(); - // parsed request consumes headers that would not be in a constructed reqeuest + // parsed request consumes headers that would not be in a constructed request parsed_request.clear_headers(); parsed_request.add_header( "X-Canonical-Stacks-Tip-Height".to_string(), @@ -66,7 +66,7 @@ fn test_getinfo_compat() { let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false}"#; let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; - let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 
0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf", "tenure_height": 2423}"#; // they all parse for json_obj in &[ @@ -102,4 +102,6 @@ fn test_try_make_response() { Some(1) ); let resp = response.decode_peer_info().unwrap(); + + assert_eq!(resp.tenure_height, Some(1)); } From f5d818fcdfd85aea005c746b4e278b16fee514f2 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:33:47 +0200 Subject: [PATCH 800/910] Add integration tests --- .../src/tests/nakamoto_integrations.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e25b7799a9f..5c058ffee43 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -5014,6 +5014,11 @@ fn check_block_heights() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); + + // With the first Nakamoto block, the chain tip and the number of tenures + // must be the same (before Nakamoto every block counts as a tenure) + assert_eq!(info.tenure_height.unwrap(), info.stacks_tip_height); + let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; let mut last_tenure_height = last_stacks_block_height as u128; @@ -5145,6 +5150,9 @@ fn check_block_heights() { ); last_tenure_height = bh1; + let 
info = get_chain_info_result(&naka_conf).unwrap(); + assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + let sbh = heights3 .get("stacks-block-height") .unwrap() @@ -5247,6 +5255,9 @@ fn check_block_heights() { "Tenure height should not have changed" ); + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + let sbh = heights3 .get("stacks-block-height") .unwrap() @@ -5287,6 +5298,12 @@ fn check_block_heights() { "Should have mined 1 + (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_eq!( + info.tenure_height.unwrap(), + block_height_pre_3_0 + tenure_count + ); + coord_channel .lock() .expect("Mutex poisoned") From e415848c46f6e0b576cabf6e8e9b446612cdc2f4 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:34:01 +0200 Subject: [PATCH 801/910] Cleanup unused imports --- stackslib/src/net/api/tests/gettenureinfo.rs | 1 - stackslib/src/net/tests/relay/nakamoto.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/stackslib/src/net/api/tests/gettenureinfo.rs b/stackslib/src/net/api/tests/gettenureinfo.rs index db53a5daca2..51a8a117852 100644 --- a/stackslib/src/net/api/tests/gettenureinfo.rs +++ b/stackslib/src/net/api/tests/gettenureinfo.rs @@ -25,7 +25,6 @@ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use super::test_rpc; -use crate::net::api::getinfo::RPCPeerInfoData; use crate::net::api::tests::TestRPC; use crate::net::api::*; use crate::net::connection::ConnectionOptions; diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index fb9db70d5b6..606f1f3fb2d 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -54,7 +54,6 @@ use crate::chainstate::stacks::tests::{ use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityConnection; use crate::core::*; -use crate::net::api::getinfo::RPCPeerInfoData; use crate::net::asn::*; use crate::net::chat::*; use crate::net::codec::*; From ad16188d029fa82da82c4a46137308322c7e032d Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:34:41 +0200 Subject: [PATCH 802/910] Update OpenAPI specs --- docs/rpc/api/core-node/get-info.example.json | 1 + docs/rpc/api/core-node/get-info.schema.json | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/docs/rpc/api/core-node/get-info.example.json b/docs/rpc/api/core-node/get-info.example.json index afc42e6f686..77ece128c37 100644 --- a/docs/rpc/api/core-node/get-info.example.json +++ b/docs/rpc/api/core-node/get-info.example.json @@ -11,6 +11,7 @@ "stacks_tip": "b1807a2d3f7f8c7922f7c1d60d7c34145ade05d789640dc7dc9ec1021e07bb54", "stacks_tip_consensus_hash": "17f76e597bab45646956f38dd39573085d72cbc0", "unanchored_tip": "0000000000000000000000000000000000000000000000000000000000000000", + "tenure_height": 523, "exit_at_block_height": null, "is_fully_synced": false } diff --git a/docs/rpc/api/core-node/get-info.schema.json b/docs/rpc/api/core-node/get-info.schema.json index 16b560ed5ef..e997a2d19c7 100644 --- a/docs/rpc/api/core-node/get-info.schema.json +++ b/docs/rpc/api/core-node/get-info.schema.json @@ -17,6 +17,7 @@ "stacks_tip", "stacks_tip_consensus_hash", "unanchored_tip", + "tenure_height", "exit_at_block_height", "is_fully_synced" ], @@ -69,6 +70,10 @@ "type": "string", "description": "the latest microblock hash if any microblocks were processed. 
if no microblock has been processed for the current block, a 000.., hex array is returned" }, + "tenure_height": { + "type": "integer", + "description": "the latest Stacks tenure height" + }, "exit_at_block_height": { "type": "integer", "description": "the block height at which the testnet network will be reset. not applicable for mainnet" From 30354d75b15b5bd0aa3ab89ed0f25f4fce786d2c Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:42:43 +0200 Subject: [PATCH 803/910] Update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ccb9b5cac9..eeb514f6b86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-tenure-info?` added - `get-block-info?` removed - Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint +- Added `tenure_height` to `/v2/info` endpoint - Added optional `timeout_ms` to `events_observer` configuration ## [2.5.0.0.7] From cc89b148491ea624f04ac264f1a1be7f607aa606 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 08:48:07 -0700 Subject: [PATCH 804/910] Fix occasional port bind error happening in mock mining by checking if in feature == testing and reserving test observer port Signed-off-by: Jacinta Ferrant --- stackslib/src/net/poll.rs | 2 +- testnet/stacks-node/src/tests/mod.rs | 7 +++++- .../src/tests/nakamoto_integrations.rs | 24 +++++++++---------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index bdda12e6d42..0362745f900 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -89,7 +89,7 @@ impl NetworkState { } fn bind_address(addr: &SocketAddr) -> Result { - if !cfg!(test) { + if !cfg!(test) && !cfg!(feature = "testing") { mio_net::TcpListener::bind(addr).map_err(|e| { error!("Failed to bind to {:?}: {:?}", addr, e); net_error::BindError diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index ba88584f393..c054be72450 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -23,6 +23,7 @@ use clarity::vm::events::STXEventType; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; +use neon_integrations::test_observer::EVENT_OBSERVER_PORT; use rand::Rng; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::db::StacksChainState; @@ -109,11 +110,15 @@ pub fn gen_random_port() -> u16 { let mut rng = rand::thread_rng(); let range_len = (1024..u16::MAX).len(); loop { + // Note it needs to be +1 because we reserve one port for the event observer assert!( - USED_PORTS.lock().unwrap().len() < range_len, + USED_PORTS.lock().unwrap().len() + 1 < range_len, "No more available ports" ); let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if port == EVENT_OBSERVER_PORT { + continue; + } if insert_new_port(port) { return port; } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e25b7799a9f..4cf9736559c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8132,16 +8132,16 @@ fn mock_mining() { let send_amt = 100; let send_fee = 180; - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - 
let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; - naka_conf.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - naka_conf.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - naka_conf.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - naka_conf.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + naka_conf.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + naka_conf.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + naka_conf.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + naka_conf.node.p2p_address = format!("{localhost}:{node_1_p2p}"); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.add_initial_balance( @@ -8221,10 +8221,10 @@ fn mock_mining() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - follower_conf.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - follower_conf.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - follower_conf.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - follower_conf.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + follower_conf.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + follower_conf.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + follower_conf.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + follower_conf.node.p2p_address = format!("{localhost}:{node_2_p2p}"); let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( From d526f07b665759121401bbd035cc48495b44a32a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 09:17:36 -0700 Subject: [PATCH 805/910] Initialize the USED_PORTS with the EVENT_OBSERVER_PORT Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 9 +++++---- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index c054be72450..a9e36c55dfb 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -101,7 +101,11 @@ lazy_static! { } lazy_static! { - static ref USED_PORTS: Mutex> = Mutex::new(HashSet::new()); + static ref USED_PORTS: Mutex> = Mutex::new({ + let mut set = HashSet::new(); + set.insert(EVENT_OBSERVER_PORT); + set + }); } /// Generate a random port number between 1024 and 65534 (inclusive) and insert it into the USED_PORTS set. 
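Outside the hunk context, the shape this patch converges on (the next hunk drops the per-draw check from gen_random_port) is: reserve any statically known port once, when USED_PORTS is constructed, so the generator loop needs no special cases. A minimal sketch of that end state, assuming a placeholder value for EVENT_OBSERVER_PORT and folding the repo's insert_new_port helper into a direct HashSet::insert:

use std::collections::HashSet;
use std::sync::Mutex;

use lazy_static::lazy_static;
use rand::Rng;

// Placeholder value for illustration; the real constant lives in the
// test observer module.
const EVENT_OBSERVER_PORT: u16 = 50303;

lazy_static! {
    static ref USED_PORTS: Mutex<HashSet<u16>> = Mutex::new({
        let mut set = HashSet::new();
        // Reserved once at construction, so gen_random_port() can never
        // hand this port out.
        set.insert(EVENT_OBSERVER_PORT);
        set
    });
}

/// Draw non-privileged ports until one that is not yet taken is found.
fn gen_random_port() -> u16 {
    let mut rng = rand::thread_rng();
    loop {
        let port = rng.gen_range(1024..u16::MAX);
        // HashSet::insert returns false when the port was already used.
        if USED_PORTS.lock().unwrap().insert(port) {
            return port;
        }
    }
}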
@@ -116,9 +120,6 @@ pub fn gen_random_port() -> u16 { "No more available ports" ); let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if port == EVENT_OBSERVER_PORT { - continue; - } if insert_new_port(port) { return port; } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4cf9736559c..02484405cb1 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; From 2ea393699899af2d4885434125f4a19007400c71 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 09:21:41 -0700 Subject: [PATCH 806/910] Fix range check Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a9e36c55dfb..4393b0ab90d 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -116,7 +116,7 @@ pub fn gen_random_port() -> u16 { loop { // Note it needs to be +1 because we reserve one port for the event observer assert!( - USED_PORTS.lock().unwrap().len() + 1 < range_len, + USED_PORTS.lock().unwrap().len() < range_len, "No more available ports" ); let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 From daeed4cdeb5db514fb2ce39c10813a3147a1f7e8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 10:03:20 -0700 Subject: [PATCH 807/910] Remove outdated comment Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 4393b0ab90d..d2d0760b708 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -114,7 +114,6 @@ pub fn gen_random_port() -> u16 { let mut rng = rand::thread_rng(); let range_len = (1024..u16::MAX).len(); loop { - // Note it needs to be +1 because we reserve one port for the event observer assert!( USED_PORTS.lock().unwrap().len() < range_len, "No more available ports" From 3412534e22194d906be042afb9690c7de17a04e6 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 14 Oct 2024 14:42:42 -0400 Subject: [PATCH 808/910] feat: ensure pending payloads are retrieved in order --- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 8c6d66cb8f4..cd9053caaa0 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -354,7 +354,8 @@ impl EventObserver { fn get_pending_payloads( conn: &Connection, ) -> Result, db_error> { - let mut stmt = conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads")?; + let mut stmt = + conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads ORDER BY 
id")?; let payload_iter = stmt.query_and_then( [], |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { From be32137b097777f9dd80123a10d2bb5fdba8e83a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 14 Oct 2024 15:44:32 -0400 Subject: [PATCH 809/910] test: add test to verify payload resend --- testnet/stacks-node/src/event_dispatcher.rs | 112 ++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index cd9053caaa0..771df60318d 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -321,6 +321,10 @@ impl RewardSetEventPayload { } } +#[cfg(test)] +static TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD: std::sync::Mutex> = + std::sync::Mutex::new(None); + impl EventObserver { fn init_db(db_path: &str) -> Result { let conn = Connection::open(db_path)?; @@ -377,6 +381,16 @@ impl EventObserver { } fn process_pending_payloads(conn: &Connection) { + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Fault injection: skipping retry of payload"); + return; + } + let pending_payloads = match Self::get_pending_payloads(conn) { Ok(payloads) => payloads, Err(e) => { @@ -2171,4 +2185,102 @@ mod test { rx.recv_timeout(Duration::from_secs(5)) .expect("Server did not receive request in time"); } + + #[test] + fn test_send_payload_with_db_force_restart() { + let port = get_random_port(); + let timeout = Duration::from_secs(3); + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + info!("Starting mock server on port {}", port); + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + let mut _request_holder = None; + while let Ok(mut request) = server.recv() { + attempt += 1; + match attempt { + 1 => { + debug!("Mock server received request attempt 1"); + // Do not reply, forcing the sender to timeout and retry, + // but don't drop the request or it will receive a 500 error, + _request_holder = Some(request); + } + 2 => { + debug!("Mock server received request attempt 2"); + + // Verify the payload + let mut payload = String::new(); + request.as_reader().read_to_string(&mut payload).unwrap(); + let expected_payload = r#"{"key":"value"}"#; + assert_eq!(payload, expected_payload); + + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + } + 3 => { + debug!("Mock server received request attempt 3"); + + // Verify the payload + let mut payload = String::new(); + request.as_reader().read_to_string(&mut payload).unwrap(); + let expected_payload = r#"{"key":"value2"}"#; + assert_eq!(payload, expected_payload); + + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // When we receive attempt 3 (message 1, re-sent message 1, message 2), + // notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + _ => panic!("Unexpected request attempt"), + } + } + }); + + let observer = EventObserver::new( + Some(working_dir.clone()), + format!("127.0.0.1:{}", port), + timeout, + ); + + let payload = json!({"key": "value"}); + 
let payload2 = json!({"key": "value2"}); + + // Disable retrying so that it sends the payload only once + // and that payload will be ignored by the test server. + TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + .lock() + .unwrap() + .replace(true); + + info!("Sending payload 1"); + + // Send the payload + observer.send_payload(&payload, "/test"); + + // Re-enable retrying + TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + .lock() + .unwrap() + .replace(false); + + info!("Sending payload 2"); + + // Send another payload + observer.send_payload(&payload2, "/test"); + + // Wait for the server to process the requests + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } From f3562118b92667b8e898ae7d8ea5053716340182 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:47:10 -0700 Subject: [PATCH 810/910] adding some sample configs for signer event_observer --- .../conf/mainnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/mainnet-signer.toml | 24 ++++++ .../conf/testnet-follower-conf.toml | 15 ++++ testnet/stacks-node/conf/testnet-signer.toml | 80 +++++++++++++++++++ 4 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 testnet/stacks-node/conf/mainnet-signer.toml create mode 100644 testnet/stacks-node/conf/testnet-signer.toml diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 291f3335230..941b3490349 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -11,5 +11,5 @@ peer_host = "127.0.0.1" # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] +# timeout_ms = 60_000 diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/testnet/stacks-node/conf/mainnet-signer.toml new file mode 100644 index 00000000000..226fcae806c --- /dev/null +++ b/testnet/stacks-node/conf/mainnet-signer.toml @@ -0,0 +1,24 @@ +[node] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* +rpc_bind = "0.0.0.0:20443" +p2p_bind = "0.0.0.0:20444" +prometheus_bind = "0.0.0.0:9153" + +[burnchain] +mode = "mainnet" +peer_host = "127.0.0.1" + +# Used for sending events to a local stacks-blockchain-api service +# [[events_observer]] +# endpoint = "localhost:3700" +# events_keys = ["*"] +# timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 54814c610c8..80226c5b89b 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -12,6 +12,21 @@ peer_port = 18444 pox_prepare_length = 100 pox_reward_length = 900 +# Used for sending events to a local stacks-blockchain-api service +# [[events_observer]] +# endpoint = "localhost:3700" +# events_keys = ["*"] +# timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# 
[connection_options] +# auth_token = "" # fill with a unique password + [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" amount = 10000000000000000 diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/testnet/stacks-node/conf/testnet-signer.toml new file mode 100644 index 00000000000..80226c5b89b --- /dev/null +++ b/testnet/stacks-node/conf/testnet-signer.toml @@ -0,0 +1,80 @@ +[node] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* +rpc_bind = "0.0.0.0:20443" +p2p_bind = "0.0.0.0:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" +prometheus_bind = "0.0.0.0:9153" + +[burnchain] +mode = "krypton" +peer_host = "bitcoin.regtest.hiro.so" +peer_port = 18444 +pox_prepare_length = 100 +pox_reward_length = 900 + +# Used for sending events to a local stacks-blockchain-api service +# [[events_observer]] +# endpoint = "localhost:3700" +# events_keys = ["*"] +# timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password + +[[ustx_balance]] +address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" +amount = 10000000000000000 + +[[burnchain.epochs]] +epoch_name = "1.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.05" +start_height = 1 + +[[burnchain.epochs]] +epoch_name = "2.1" +start_height = 2 + +[[burnchain.epochs]] +epoch_name = "2.2" +start_height = 3 + +[[burnchain.epochs]] +epoch_name = "2.3" +start_height = 4 + +[[burnchain.epochs]] +epoch_name = "2.4" +start_height = 5 + +[[burnchain.epochs]] +epoch_name = "2.5" +start_height = 6 + +[[burnchain.epochs]] +epoch_name = "3.0" +start_height = 56_457 From e2bd98230483a3ee18d03cbfccfc795dbdcb2004 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 14 Oct 2024 17:17:40 -0700 Subject: [PATCH 811/910] feat: add chain_id to signer config --- stacks-signer/src/client/stackerdb.rs | 1 + stacks-signer/src/client/stacks_client.rs | 28 +++++--- stacks-signer/src/config.rs | 71 ++++++++++++++++--- stacks-signer/src/main.rs | 2 +- stacks-signer/src/tests/chainstate.rs | 2 + .../tests/conf/signer-custom-chain-id.toml | 7 ++ .../src/tests/nakamoto_integrations.rs | 3 +- testnet/stacks-node/src/tests/signer/mod.rs | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 1 + 9 files changed, 97 insertions(+), 19 deletions(-) create mode 100644 stacks-signer/src/tests/conf/signer-custom-chain-id.toml diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index b3f6528232b..0fc43350db9 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -255,6 +255,7 @@ mod tests { Some(100_000), None, Some(9000), + None, ); let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); let signer_config = generate_signer_config(&config, 5); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 
5caf9d3f42e..cae6a210b7f 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -45,7 +45,7 @@ use serde::Deserialize; use serde_json::json; use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; +use stacks_common::consts::CHAIN_ID_MAINNET; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; @@ -99,7 +99,7 @@ impl From<&GlobalConfig> for StacksClient { stacks_address: config.stacks_address, http_origin: format!("http://{}", config.node_host), tx_version: config.network.to_transaction_version(), - chain_id: config.network.to_chain_id(), + chain_id: config.to_chain_id(), stacks_node_client: reqwest::blocking::Client::new(), mainnet: config.network.is_mainnet(), auth_password: config.auth_password.clone(), @@ -114,6 +114,7 @@ impl StacksClient { node_host: String, auth_password: String, mainnet: bool, + chain_id: u32, ) -> Self { let pubkey = StacksPublicKey::from_private(&stacks_private_key); let tx_version = if mainnet { @@ -121,11 +122,6 @@ impl StacksClient { } else { TransactionVersion::Testnet }; - let chain_id = if mainnet { - CHAIN_ID_MAINNET - } else { - CHAIN_ID_TESTNET - }; let stacks_address = StacksAddress::p2pkh(mainnet, &pubkey); Self { stacks_private_key, @@ -145,7 +141,13 @@ impl StacksClient { node_host: String, auth_password: String, ) -> Result { - let mut stacks_client = Self::new(stacks_private_key, node_host, auth_password, true); + let mut stacks_client = Self::new( + stacks_private_key, + node_host, + auth_password, + true, + CHAIN_ID_MAINNET, + ); let pubkey = StacksPublicKey::from_private(&stacks_private_key); let info = stacks_client.get_peer_info()?; if info.network_id == CHAIN_ID_MAINNET { @@ -154,7 +156,7 @@ impl StacksClient { stacks_client.tx_version = TransactionVersion::Mainnet; } else { stacks_client.mainnet = false; - stacks_client.chain_id = CHAIN_ID_TESTNET; + stacks_client.chain_id = info.network_id; stacks_client.tx_version = TransactionVersion::Testnet; } stacks_client.stacks_address = StacksAddress::p2pkh(stacks_client.mainnet, &pubkey); @@ -1219,4 +1221,12 @@ mod tests { write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), reward_cycle as u128); } + + #[test] + fn get_chain_id_from_config() { + let mock = MockServerClient::from_config( + GlobalConfig::load_from_file("./src/tests/conf/signer-custom-chain-id.toml").unwrap(), + ); + assert_eq!(mock.client.chain_id, 0x80000100); + } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 9f72e171e56..375ed1a171d 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -76,14 +76,6 @@ impl std::fmt::Display for Network { } impl Network { - /// Converts a Network enum variant to a corresponding chain id - pub const fn to_chain_id(&self) -> u32 { - match self { - Self::Mainnet => CHAIN_ID_MAINNET, - Self::Testnet | Self::Mocknet => CHAIN_ID_TESTNET, - } - } - /// Convert a Network enum variant to a corresponding address version pub const fn to_address_version(&self) -> u8 { match self { @@ -163,6 +155,8 @@ pub struct GlobalConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// An optional custom Chain ID + chain_id: Option, } /// Internal struct for loading up the config file @@ -190,6 +184,8 @@ struct 
RawConfigFile { pub first_proposal_burn_block_timing_secs: Option, /// How much time to wait for a miner to propose a block following a sortition in milliseconds pub block_proposal_timeout_ms: Option, + /// An optional custom Chain ID + pub chain_id: Option, } impl RawConfigFile { @@ -278,6 +274,7 @@ impl TryFrom for GlobalConfig { metrics_endpoint, first_proposal_burn_block_timing, block_proposal_timeout, + chain_id: raw_data.chain_id, }) } } @@ -308,6 +305,7 @@ impl GlobalConfig { Some(endpoint) => endpoint.to_string(), None => "None".to_string(), }; + let chain_id = format!("{:x}", self.to_chain_id()); format!( r#" Stacks node host: {node_host} @@ -315,6 +313,7 @@ Signer endpoint: {endpoint} Stacks address: {stacks_address} Public key: {public_key} Network: {network} +Chain ID: 0x{chain_id} Database path: {db_path} Metrics endpoint: {metrics_endpoint} "#, @@ -329,6 +328,14 @@ Metrics endpoint: {metrics_endpoint} metrics_endpoint = metrics_endpoint, ) } + + /// Get the chain ID for the network + pub fn to_chain_id(&self) -> u32 { + self.chain_id.unwrap_or_else(|| match self.network { + Network::Mainnet => CHAIN_ID_MAINNET, + Network::Testnet | Network::Mocknet => CHAIN_ID_TESTNET, + }) + } } impl Display for GlobalConfig { @@ -356,6 +363,7 @@ pub fn build_signer_config_tomls( max_tx_fee_ustx: Option, tx_fee_ustx: Option, mut metrics_port_start: Option, + chain_id: Option, ) -> Vec { let mut signer_config_tomls = vec![]; @@ -421,6 +429,15 @@ metrics_endpoint = "{metrics_endpoint}" metrics_port_start = Some(metrics_port + 1); } + if let Some(chain_id) = chain_id { + signer_config_toml = format!( + r#" +{signer_config_toml} +chain_id = {chain_id} +"# + ) + } + signer_config_tomls.push(signer_config_toml); } @@ -453,6 +470,7 @@ mod tests { None, None, Some(4000), + None, ); let config = @@ -460,6 +478,8 @@ mod tests { assert_eq!(config.auth_password, "melon"); assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); + let global_config = GlobalConfig::try_from(config).unwrap(); + assert_eq!(global_config.to_chain_id(), CHAIN_ID_TESTNET); } #[test] @@ -473,8 +493,10 @@ Signer endpoint: 127.0.0.1:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet +Chain ID: 0x80000000 Database path: :memory: Metrics endpoint: 0.0.0.0:9090 +Chain ID: 2147483648 "#; let expected_str_v6 = r#" @@ -483,6 +505,7 @@ Signer endpoint: [::1]:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet +Chain ID: 0x80000000 Database path: :memory: Metrics endpoint: 0.0.0.0:9090 "#; @@ -531,5 +554,37 @@ db_path = ":memory:" ); let config = GlobalConfig::load_from_str(&config_toml).unwrap(); assert_eq!(config.stacks_address.to_string(), expected_addr); + assert_eq!(config.to_chain_id(), CHAIN_ID_MAINNET); + } + + #[test] + fn test_custom_chain_id() { + let pk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let node_host = "localhost"; + let network = Network::Testnet; + let password = "melon"; + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + None, + None, + Some(4000), + Some(0x80000100), + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + assert_eq!(config.chain_id, Some(0x80000100)); + let 
global_config = GlobalConfig::try_from(config).unwrap(); + assert_eq!(global_config.to_chain_id(), 0x80000100); } } diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 520d4552584..bb680aae0bc 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -121,7 +121,7 @@ fn handle_generate_stacking_signature( &private_key, // args.reward_cycle.into(), args.method.topic(), - config.network.to_chain_id(), + config.to_chain_id(), args.period.into(), args.max_amount, args.auth_id, diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 432325daf25..886480f0630 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -32,6 +32,7 @@ use clarity::util::vrf::VRFProof; use libsigner::BlockProposal; use slog::slog_info; use stacks_common::bitvec::BitVec; +use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::info; use stacks_common::types::chainstate::{ ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, @@ -96,6 +97,7 @@ fn setup_test_environment( SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).to_string(), "FOO".into(), false, + CHAIN_ID_TESTNET, ); let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; diff --git a/stacks-signer/src/tests/conf/signer-custom-chain-id.toml b/stacks-signer/src/tests/conf/signer-custom-chain-id.toml new file mode 100644 index 00000000000..1d1de36f1f6 --- /dev/null +++ b/stacks-signer/src/tests/conf/signer-custom-chain-id.toml @@ -0,0 +1,7 @@ +stacks_private_key = "126e916e77359ccf521e168feea1fcb9626c59dc375cae00c7464303381c7dff01" +node_host = "127.0.0.1:20444" +endpoint = "localhost:30001" +network = "testnet" +auth_password = "12345" +db_path = ":memory:" +chain_id = 0x80000100 diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 690a47f71db..93f4ac41065 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -78,7 +78,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ use stacks_common::address::AddressHashMode; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, @@ -6400,6 +6400,7 @@ fn signer_chainstate() { .clone() .unwrap_or("".into()), false, + CHAIN_ID_TESTNET, ); wait_for_first_naka_block_commit(60, &commits_submitted); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5dcbc9a16a9..2e67234285a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -173,6 +173,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = (0..new_num_signers) From 8e1e73161e015ff7a6e4f77e78d3655222d47df3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 14 Oct 2024 21:24:43 -0400 Subject: [PATCH 812/910] test: improve fault injection in event dispatcher --- testnet/stacks-node/src/event_dispatcher.rs | 42 ++++++++++++--------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 771df60318d..43714f3573d 100644 --- 
a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -322,8 +322,7 @@ impl RewardSetEventPayload { } #[cfg(test)] -static TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD: std::sync::Mutex> = - std::sync::Mutex::new(None); +static TEST_EVENT_OBSERVER_SKIP_RETRY: std::sync::Mutex> = std::sync::Mutex::new(None); impl EventObserver { fn init_db(db_path: &str) -> Result { @@ -381,16 +380,6 @@ impl EventObserver { } fn process_pending_payloads(conn: &Connection) { - #[cfg(test)] - if TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD - .lock() - .unwrap() - .unwrap_or(false) - { - warn!("Fault injection: skipping retry of payload"); - return; - } - let pending_payloads = match Self::get_pending_payloads(conn) { Ok(payloads) => payloads, Err(e) => { @@ -405,6 +394,17 @@ impl EventObserver { for (id, url, payload, timeout_ms) in pending_payloads { let timeout = Duration::from_millis(timeout_ms); Self::send_payload_directly(&payload, &url, timeout); + + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_RETRY + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Fault injection: delete_payload"); + return; + } + if let Err(e) = Self::delete_payload(conn, id) { error!( "Event observer: failed to delete pending payload from database"; @@ -459,6 +459,17 @@ impl EventObserver { ); } } + + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_RETRY + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Fault injection: skipping retry of payload"); + return; + } + sleep(backoff); backoff *= 2; } @@ -2258,10 +2269,7 @@ mod test { // Disable retrying so that it sends the payload only once // and that payload will be ignored by the test server. - TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD - .lock() - .unwrap() - .replace(true); + TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(true); info!("Sending payload 1"); @@ -2269,7 +2277,7 @@ mod test { observer.send_payload(&payload, "/test"); // Re-enable retrying - TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + TEST_EVENT_OBSERVER_SKIP_RETRY .lock() .unwrap() .replace(false); From d253dd7f325ac1f2064aa58482430d186c259f78 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 15 Oct 2024 10:10:11 -0400 Subject: [PATCH 813/910] feat: support custom chain ids in blockstack-cli `--testnet` will use the default testnet chain id and `--testnet=0x80000100` will set it to `0x80000100`. 
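The flag handling described above, distilled into a standalone helper; this is a sketch for readability rather than the shipped CLI code (the chain-ID constants mirror stacks-common, and the helper name is illustrative):

const CHAIN_ID_MAINNET: u32 = 0x00000001;
const CHAIN_ID_TESTNET: u32 = 0x80000000;

fn chain_id_from_argv(argv: &mut Vec<String>) -> Result<u32, std::num::ParseIntError> {
    if let Some(ix) = argv.iter().position(|x| x.starts_with("--testnet")) {
        let flag = argv.remove(ix);
        // "--testnet=0x80000100": parse everything after the first '=' as hex
        if let Some(custom) = flag.split('=').nth(1) {
            return u32::from_str_radix(custom.trim_start_matches("0x"), 16);
        }
        // Bare "--testnet" selects the default testnet chain ID
        return Ok(CHAIN_ID_TESTNET);
    }
    // No flag at all: mainnet
    Ok(CHAIN_ID_MAINNET)
}

Under this rule, `--testnet` yields 0x80000000 (2147483648) and `--testnet=0x12345678` yields 305419896, which is what the new custom_chain_id test below asserts on the decoded transactions.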
--- stackslib/src/blockstack_cli.rs | 75 +++++++++++++++++++++++++++------ 1 file changed, 63 insertions(+), 12 deletions(-) diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs index 6fb9f45ed60..dbecb0393d2 100644 --- a/stackslib/src/blockstack_cli.rs +++ b/stackslib/src/blockstack_cli.rs @@ -73,7 +73,9 @@ For usage information on those methods, call `blockstack-cli [method] -h` `blockstack-cli` accepts flag options as well: - --testnet instruct the transaction generator to use a testnet version byte instead of MAINNET (default) + --testnet[=chain-id] + instruct the transaction generator to use a testnet version byte instead of MAINNET (default) + optionally, you can specify a custom chain ID to use for the transaction "; @@ -185,6 +187,7 @@ enum CliError { ClarityGeneralError(ClarityError), Message(String), Usage, + InvalidChainId(std::num::ParseIntError), } impl std::error::Error for CliError { @@ -204,6 +207,7 @@ impl std::fmt::Display for CliError { CliError::ClarityGeneralError(e) => write!(f, "Clarity error: {}", e), CliError::Message(e) => write!(f, "{}", e), CliError::Usage => write!(f, "{}", USAGE), + CliError::InvalidChainId(e) => write!(f, "Invalid chain ID: {}", e), } } } @@ -848,18 +852,26 @@ fn main() { } fn main_handler(mut argv: Vec) -> Result { - let tx_version = if let Some(ix) = argv.iter().position(|x| x == "--testnet") { - argv.remove(ix); - TransactionVersion::Testnet - } else { - TransactionVersion::Mainnet - }; + let mut tx_version = TransactionVersion::Mainnet; + let mut chain_id = CHAIN_ID_MAINNET; + + // Look for the `--testnet` flag + if let Some(ix) = argv.iter().position(|x| x.starts_with("--testnet")) { + let flag = argv.remove(ix); + + // Check if `--testnet=` is used + if let Some(custom_chain_id) = flag.split('=').nth(1) { + // Attempt to parse the custom chain ID from hex + chain_id = u32::from_str_radix(custom_chain_id.trim_start_matches("0x"), 16) + .map_err(|err| CliError::InvalidChainId(err))?; + } else { + // Use the default testnet chain ID + chain_id = CHAIN_ID_TESTNET; + } - let chain_id = if tx_version == TransactionVersion::Testnet { - CHAIN_ID_TESTNET - } else { - CHAIN_ID_MAINNET - }; + // Set the transaction version to Testnet + tx_version = TransactionVersion::Testnet; + } if let Some((method, args)) = argv.split_first() { match method.as_str() { @@ -1220,4 +1232,43 @@ mod test { let result = main_handler(to_string_vec(&header_args)).unwrap(); eprintln!("result:\n{}", result); } + + #[test] + fn custom_chain_id() { + // Standard chain id + let tt_args = [ + "--testnet", + "token-transfer", + "043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3", + "1", + "0", + "ST1A14RBKJ289E3DP89QAZE2RRHDPWP5RHMYFRCHV", + "10", + ]; + + let result = main_handler(to_string_vec(&tt_args)); + assert!(result.is_ok()); + + let result = result.unwrap(); + let tx = decode_transaction(&[result], TransactionVersion::Testnet).unwrap(); + assert!(tx.contains("chain_id\":2147483648")); + + // Custom chain id + let tt_args = [ + "--testnet=0x12345678", + "token-transfer", + "043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3", + "1", + "0", + "ST1A14RBKJ289E3DP89QAZE2RRHDPWP5RHMYFRCHV", + "10", + ]; + + let result = main_handler(to_string_vec(&tt_args)); + assert!(result.is_ok()); + + let result = result.unwrap(); + let tx = decode_transaction(&[result], TransactionVersion::Testnet).unwrap(); + assert!(tx.contains("chain_id\":305419896")); + } } From e64de155feb7ae89f7e67ee65e6858af15794daf Mon Sep 17 00:00:00 2001 
From: Jacinta Ferrant Date: Tue, 15 Oct 2024 08:54:45 -0700 Subject: [PATCH 814/910] Only return BurnchainTipChanged error iff the sortition id also changed Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a27f617fffb..ffc3c49fabf 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -383,7 +383,7 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - continue; + return Err(e); } _ => { error!("Error while gathering signatures: {e:?}. Will try mining again."; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b3..f192f889695 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4324,7 +4324,9 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash + && burn_chain_tip.sortition_id != check_burn_block.sortition_id + { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, From b6c2e5572166161fd14bd98831ac47021b59fa5f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 15 Oct 2024 15:03:31 -0700 Subject: [PATCH 815/910] Use the ACTUAL burnchain tip in run_sign_v0 instead of the election block snapshot Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 26 +++---------------- .../src/nakamoto_node/sign_coordinator.rs | 4 +-- testnet/stacks-node/src/neon_node.rs | 4 +-- testnet/stacks-node/src/tests/signer/v0.rs | 16 +++++++++++- 4 files changed, 20 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ffc3c49fabf..a08c0ab353d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -531,22 +531,6 @@ impl BlockMinerThread { )) })?; - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to retrieve chain tip: {:?}", - e - )) - }) - .and_then(|result| { - result.ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) - }) - })?; - let reward_set = self.load_signer_set()?; if self.config.get_node_config(false).mock_mining { @@ -574,7 +558,7 @@ impl BlockMinerThread { let signature = coordinator.run_sign_v0( new_block, - &tip, + &self.burn_block, &self.burnchain, &sort_db, &mut chain_state, @@ -1206,9 +1190,7 @@ impl BlockMinerThread { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash - && cur_burn_chain_tip.sortition_id != self.burn_block.sortition_id - { + if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash { 
info!("Miner: Cancel block assembly; burnchain tip has changed"); self.globals.counters.bump_missed_tenures(); Err(NakamotoNodeError::BurnchainTipChanged) @@ -1247,9 +1229,7 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash - && burn_chain_tip.sortition_id != check_burn_block.sortition_id - { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index cf0eaa67699..2694d1d9ca8 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -255,9 +255,7 @@ impl SignCoordinator { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash - && cur_burn_chain_tip.sortition_id != burn_block.sortition_id - { + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); true } else { diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index f192f889695..dcfa855c9b3 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4324,9 +4324,7 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash - && burn_chain_tip.sortition_id != check_burn_block.sortition_id - { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d888d41c700..e063279f97b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5121,7 +5121,11 @@ fn continue_after_tenure_extend() { // It's possible that we have a pending block commit already. // Mine two BTC blocks to "flush" this commit. 
- + let burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; for i in 0..2 { info!( "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", @@ -5147,6 +5151,16 @@ fn continue_after_tenure_extend() { .expect("Timed out waiting for tenure extend block"); } + wait_for(30, || { + let new_burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + Ok(new_burn_height == burn_height + 2) + }) + .expect("Timed out waiting for burnchain to advance"); + // The last block should have a single instruction in it, the tenure extend let blocks = test_observer::get_blocks(); let last_block = blocks.last().unwrap(); From 07a06c0ccddd9d8e8e844bbfa4d8b3cbba0efbdc Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:20:22 +0100 Subject: [PATCH 816/910] Update stackslib/src/net/api/getinfo.rs Co-authored-by: Aaron Blankstein --- stackslib/src/net/api/getinfo.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index cae71908490..3ffc9b69081 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -235,7 +235,7 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { let coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), - &header.index_block_hash(), + &StacksBlockId::new(&network.stacks_tip.consensus_hash, &network.stacks_tip.block_hash), ) .map_err(|e| { StacksHttpResponse::new_error( From 6e4db07ebf5516f83fc28a1150ab0262c65ba4e7 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:22:18 +0100 Subject: [PATCH 817/910] chore: reuse cached state also in `postmempoolquery.rs` --- stackslib/src/net/api/getinfo.rs | 14 ++++---------- stackslib/src/net/api/postmempoolquery.rs | 15 +++++++++------ 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 3ffc9b69081..5f08044242d 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -224,18 +224,12 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { let rpc_peer_info: Result = node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { - let header = self - .get_stacks_chain_tip(&preamble, sortdb, chainstate) - .map_err(|e| { - StacksHttpResponse::new_error( - &preamble, - &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)), - ) - })?; - let coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), - &StacksBlockId::new(&network.stacks_tip.consensus_hash, &network.stacks_tip.block_hash), + &StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, + ), ) .map_err(|e| { StacksHttpResponse::new_error( diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 21558632208..c6a830569c1 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -116,9 +116,9 @@ impl StacksMemPoolStream { Self { tx_query, - last_randomized_txid: last_randomized_txid, + last_randomized_txid, num_txs: 0, - max_txs: max_txs, + max_txs, coinbase_height, corked: false, finished: false, @@ -276,10 +276,13 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let page_id = self.page_id.take(); let stream_res = 
node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { - let header = self.get_stacks_chain_tip(&preamble, sortdb, chainstate) - .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e))))?; - - let coinbase_height = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &header.index_block_hash()) + let coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash + ), + ) .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e))))? .unwrap_or(0); From f5e71b3da64022c312856f036ff7560563455197 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:23:29 +0100 Subject: [PATCH 818/910] feat: make the deserializer more backwards compatible Co-authored-by: Aaron Blankstein --- stackslib/src/net/api/getinfo.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 5f08044242d..846e5a18d23 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -82,6 +82,8 @@ pub struct RPCPeerInfoData { pub genesis_chainstate_hash: Sha256Sum, pub unanchored_tip: Option, pub unanchored_seq: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub tenure_height: Option, pub exit_at_block_height: Option, pub is_fully_synced: bool, From 2497d07c5a2715b85e5bec589e82f0ef508f1466 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:27:18 +0100 Subject: [PATCH 819/910] chore: formatting --- stackslib/src/net/api/postmempoolquery.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index c6a830569c1..3710db7dc81 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -279,7 +279,7 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), &StacksBlockId::new( - &network.stacks_tip.consensus_hash, + &network.stacks_tip.consensus_hash, &network.stacks_tip.block_hash ), ) From 9c11fd6af24a66389d745ea14e21c067d60eabe0 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 16 Oct 2024 09:53:39 -0700 Subject: [PATCH 820/910] feat: better exposing of signer version --- stacks-signer/src/cli.rs | 27 ++------------------------ stacks-signer/src/lib.rs | 11 +++++++++++ stacks-signer/src/main.rs | 7 ++++++- stacks-signer/src/monitoring/server.rs | 2 ++ 4 files changed, 21 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 3b74635cbcb..97829b69777 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -29,7 +29,6 @@ use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; -use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, @@ -39,31 +38,9 @@ use stacks_common::address::{ use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; -extern crate alloc; +use crate::VERSION_STRING; -const GIT_BRANCH: Option<&'static str> = 
option_env!("GIT_BRANCH"); -const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); -#[cfg(debug_assertions)] -const BUILD_TYPE: &str = "debug"; -#[cfg(not(debug_assertions))] -const BUILD_TYPE: &str = "release"; - -lazy_static! { - static ref VERSION_STRING: String = { - let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); - let git_branch = GIT_BRANCH.unwrap_or(""); - let git_commit = GIT_COMMIT.unwrap_or(""); - format!( - "{} ({}:{}, {} build, {} [{}])", - pkg_version, - git_branch, - git_commit, - BUILD_TYPE, - std::env::consts::OS, - std::env::consts::ARCH - ) - }; -} +extern crate alloc; #[derive(Parser, Debug)] #[command(author, version, about)] diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 20c2bc2ca87..3555435eaa7 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -48,8 +48,10 @@ mod tests; use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; +use blockstack_lib::version_string; use chainstate::SortitionsView; use config::GlobalConfig; +use lazy_static::lazy_static; use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; use runloop::SignerResult; use slog::{slog_info, slog_warn}; @@ -59,6 +61,14 @@ use crate::client::StacksClient; use crate::config::SignerConfig; use crate::runloop::RunLoop; +lazy_static! { + /// The version string for the signer + pub static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + version_string("stacks-signer", pkg_version) + }; +} + /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { /// Create a new `Signer` instance @@ -113,6 +123,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner /// Create a new spawned signer pub fn new(config: GlobalConfig) -> Self { let endpoint = config.endpoint; + info!("Stacks signer version {:?}", VERSION_STRING.as_str()); info!("Starting signer with config: {:?}", config); warn!( "Reminder: The signer is primarily designed for use with a local or subnet network stacks node. 
\ diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index bb680aae0bc..56f322b1853 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -47,6 +47,7 @@ use stacks_signer::config::GlobalConfig; use stacks_signer::monitor_signers::SignerMonitor; use stacks_signer::utils::stackerdb_session; use stacks_signer::v0::SpawnedSigner; +use stacks_signer::VERSION_STRING; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -157,7 +158,11 @@ fn handle_generate_stacking_signature( fn handle_check_config(args: RunSignerArgs) { let config = GlobalConfig::try_from(&args.config).unwrap(); - println!("Config: {}", config); + println!( + "Signer version: {}\nConfig: \n{}", + VERSION_STRING.to_string(), + config + ); } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index ffde008c9ff..f5e3cceef15 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -28,6 +28,7 @@ use crate::client::{ClientError, StacksClient}; use crate::config::{GlobalConfig, Network}; use crate::monitoring::prometheus::gather_metrics_string; use crate::monitoring::{update_signer_nonce, update_stacks_tip_height}; +use crate::VERSION_STRING; #[derive(thiserror::Error, Debug)] /// Monitoring server errors @@ -215,6 +216,7 @@ impl MonitoringServer { "signerPublicKey": to_hex(&self.public_key.to_bytes_compressed()), "network": self.network.to_string(), "stxAddress": self.stacks_client.get_signer_address().to_string(), + "version": VERSION_STRING.to_string(), })) .expect("Failed to serialize JSON") } From d7f0ba2404313eae7eb5f0782244dad93b2614e4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 12:27:52 -0500 Subject: [PATCH 821/910] feat: interpret block heights as tenure heights in Clarity 2 contracts executing post-3.0 --- clarity/src/vm/database/clarity_db.rs | 31 ++ clarity/src/vm/docs/mod.rs | 8 + clarity/src/vm/functions/database.rs | 18 ++ clarity/src/vm/test_util/mod.rs | 8 + stackslib/src/chainstate/nakamoto/mod.rs | 2 +- .../chainstate/stacks/boot/contract_tests.rs | 8 + stackslib/src/clarity_cli.rs | 8 + stackslib/src/clarity_vm/database/mod.rs | 110 +++++++ stackslib/src/cli.rs | 285 +++++++++++++++++- 9 files changed, 475 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index cdf411fc3ed..4f64bc08bb4 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -127,6 +127,11 @@ pub trait HeadersDB { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option; + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option; } pub trait BurnStateDB { @@ -285,6 +290,13 @@ impl HeadersDB for NullHeadersDB { ) -> Option { None } + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + _tenure_height: u32, + ) -> Option { + None + } } #[allow(clippy::panic)] @@ -915,6 +927,25 @@ impl<'a> ClarityDatabase<'a> { } } + pub fn get_block_height_for_tenure_height( + &mut self, + tenure_height: u32, + ) -> Result> { + let current_tenure_height = self.get_tenure_height()?; + if current_tenure_height < tenure_height { + return Ok(None); + } + if current_tenure_height == tenure_height { + return Ok(Some(self.get_current_block_height())); + } + let current_height = self.get_current_block_height(); + // query from the 
parent + let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; + Ok(self + .headers_db + .get_stacks_height_for_tenure_height(&query_tip, tenure_height.into())) + } + /// Get the last-known burnchain block height. /// Note that this is _not_ the burnchain height in which this block was mined! /// This is the burnchain block height of the parent of the Stacks block at the current Stacks diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 65b08e3102a..d718ff5366a 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2863,6 +2863,14 @@ mod test { ) -> Option { Some(12000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } struct DocBurnStateDB {} diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index f048a595367..eecb5e2ba0f 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -769,6 +769,24 @@ pub fn special_get_block_info( _ => return Ok(Value::none()), }; + let height_value = if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity3 { + if env.global_context.epoch_id < StacksEpochId::Epoch30 { + height_value + } else { + // interpretting height_value as a tenure height + let height_opt = env + .global_context + .database + .get_block_height_for_tenure_height(height_value)?; + match height_opt { + Some(x) => x, + None => return Ok(Value::none()), + } + } + } else { + height_value + }; + let current_block_height = env.global_context.database.get_current_block_height(); if height_value >= current_block_height { return Ok(Value::none()); diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 2df79766a24..f2b6d4dd09b 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -229,6 +229,14 @@ impl HeadersDB for UnitTestHeaderDB { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } impl BurnStateDB for UnitTestBurnStateDB { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e000c9c582f..8abbe058f53 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3886,7 +3886,7 @@ impl NakamotoChainState { /// Append a Nakamoto Stacks block to the Stacks chain state. /// NOTE: This does _not_ set the block as processed! The caller must do this. 
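// Context, not an upstream diff line: the visibility bump on `append_block`
// below exists so that the block-replay helper this patch adds to
// stackslib/src/cli.rs can invoke it directly.
//
// A standalone sketch (simplified, with assumed types and names) of the
// height reinterpretation added to `special_get_block_info` above:
// pre-Clarity-3 contracts executing in epoch 3.0+ treat the
// `get-block-info?` height argument as a tenure height and map it back to a
// Stacks block height before the lookup, while Clarity 3 contracts pass the
// height through unchanged.
fn effective_lookup_height(
    clarity_version: u8,     // 1, 2, or 3
    in_nakamoto_epoch: bool, // executing at or after epoch 3.0?
    requested: u32,          // height argument supplied by the contract
    tenure_to_block_height: impl Fn(u32) -> Option<u32>,
) -> Option<u32> {
    if clarity_version < 3 && in_nakamoto_epoch {
        // interpret `requested` as a tenure height, as Clarity 2 callers expect
        tenure_to_block_height(requested)
    } else {
        Some(requested)
    }
}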
- fn append_block<'a>( + pub(crate) fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, burn_dbconn: &mut SortitionHandleConn, diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 04b74ba2e90..4d4e875ba37 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -649,6 +649,14 @@ impl HeadersDB for TestSimHeadersDB { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } #[test] diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 21cf55dea6d..f23be191ff1 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -766,6 +766,14 @@ impl HeadersDB for CLIHeadersDB { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 3000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } fn get_eval_input(invoked_by: &str, args: &[String]) -> EvalInput { diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 81f0bac43c3..b12c4470efa 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -47,7 +47,22 @@ pub trait GetTenureStartId { tip: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, DBError>; + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError>; fn conn(&self) -> &Connection; + fn get_tenure_block_id_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let Some(tenure_ch) = self.get_tenure_ch_at_cb_height(tip, coinbase_height)? else { + return Ok(None); + }; + self.get_tenure_block_id(tip, &tenure_ch) + } } impl GetTenureStartId for StacksDBConn<'_> { @@ -66,6 +81,21 @@ impl GetTenureStartId for StacksDBConn<'_> { .map(|block_id| TenureBlockId::from(block_id))) } + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let opt_out = self + .get_indexed( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? + .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .flatten(); + Ok(opt_out) + } + fn conn(&self) -> &Connection { self.sqlite() } @@ -87,6 +117,21 @@ impl GetTenureStartId for StacksDBTx<'_> { .map(|block_id| TenureBlockId::from(block_id))) } + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let opt_out = self + .get_indexed_ref( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? 
+ .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .flatten(); + Ok(opt_out) + } + fn conn(&self) -> &Connection { self.sqlite() } @@ -105,6 +150,15 @@ impl GetTenureStartId for MARF { fn conn(&self) -> &Connection { self.sqlite_conn() } + + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let dbconn = StacksDBConn::new(self, ()); + dbconn.get_tenure_ch_at_cb_height(tip, coinbase_height) + } } pub struct HeadersDBConn<'a>(pub StacksDBConn<'a>); @@ -188,6 +242,22 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { }) } + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + let tenure_block_id = + GetTenureStartId::get_tenure_block_id_at_cb_height(&self.0, tip, tenure_height.into()) + .expect("FATAL: bad DB data for tenure height lookups")?; + get_stacks_header_column(self.0.conn(), &tenure_block_id.0, "block_height", |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }) + } + fn get_vrf_seed_for_block( &self, id_bhh: &StacksBlockId, @@ -417,6 +487,25 @@ impl<'a> HeadersDB for ChainstateTx<'a> { let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); get_matured_reward(self.deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } + + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + let tenure_block_id = GetTenureStartId::get_tenure_block_id_at_cb_height( + self.deref(), + tip, + tenure_height.into(), + ) + .expect("FATAL: bad DB data for tenure height lookups")?; + get_stacks_header_column(self.deref(), &tenure_block_id.0, "block_height", |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }) + } } impl HeadersDB for MARF { @@ -572,6 +661,27 @@ impl HeadersDB for MARF { let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); get_matured_reward(self, &tenure_id_bhh, epoch).map(|x| x.total().into()) } + + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + let tenure_block_id = + GetTenureStartId::get_tenure_block_id_at_cb_height(self, tip, tenure_height.into()) + .expect("FATAL: bad DB data for tenure height lookups")?; + get_stacks_header_column( + self.sqlite_conn(), + &tenure_block_id.0, + "block_height", + |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }, + ) + } } /// Select a specific column from the headers table, specifying whether to use diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 493ab18de58..9ff6e556441 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -22,6 +22,7 @@ use std::time::Instant; use std::{env, fs, io, process, thread}; use clarity::types::chainstate::SortitionId; +use db::blocks::DummyEventDispatcher; use db::ChainstateTx; use regex::Regex; use rusqlite::{Connection, OpenFlags}; @@ -30,12 +31,16 @@ use stacks_common::types::sqlite::NO_PARAMS; use crate::burnchains::db::BurnchainDB; use crate::burnchains::PoxConstants; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleContext}; +use crate::chainstate::burn::db::sortdb::{ + get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleContext, +}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use 
crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::miner::*; -use crate::chainstate::stacks::*; +use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; use crate::core::*; use crate::util_lib::db::IndexDBTx; @@ -519,3 +524,279 @@ fn replay_block( } }; } + +fn replay_block_nakamoto( + sort_db: &mut SortitionDB, + stacks_chain_state: &mut StacksChainState, + mut chainstate_tx: ChainstateTx, + clarity_instance: &mut ClarityInstance, + block: &NakamotoBlock, + block_size: u64, +) -> Result<(), ChainstateError> { + // find corresponding snapshot + let next_ready_block_snapshot = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &block.header.consensus_hash)? + .unwrap_or_else(|| { + panic!( + "CORRUPTION: staging Nakamoto block {}/{} does not correspond to a burn block", + &block.header.consensus_hash, + &block.header.block_hash() + ) + }); + + debug!("Process staging Nakamoto block"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash + ); + + let elected_height = sort_db + .get_consensus_hash_height(&block.header.consensus_hash)? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + let elected_in_cycle = sort_db + .pox_constants + .block_height_to_reward_cycle(sort_db.first_block_height, elected_height) + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock( + "Elected in block height before first_block_height".into(), + ) + })?; + let active_reward_set = OnChainRewardSetProvider::(None) + .read_reward_set_nakamoto_of_cycle( + elected_in_cycle, + stacks_chain_state, + sort_db, + &block.header.parent_block_id, + true, + ) + .map_err(|e| { + warn!( + "Cannot process Nakamoto block: could not load reward set that elected the block"; + "err" => ?e, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id, + ); + ChainstateError::NoSuchBlockError + })?; + let (mut chainstate_tx, clarity_instance) = stacks_chain_state.chainstate_tx_begin()?; + + // find parent header + let Some(parent_header_info) = + NakamotoChainState::get_block_header(&chainstate_tx.tx, &block.header.parent_block_id)? 
+ else { + // no parent; cannot process yet + info!("Cannot process Nakamoto block: missing parent header"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + return Ok(()); + }; + + // sanity check -- must attach to parent + let parent_block_id = StacksBlockId::new( + &parent_header_info.consensus_hash, + &parent_header_info.anchored_header.block_hash(), + ); + if parent_block_id != block.header.parent_block_id { + drop(chainstate_tx); + + let msg = "Discontinuous Nakamoto Stacks block"; + warn!("{}", &msg; + "child parent_block_id" => %block.header.parent_block_id, + "expected parent_block_id" => %parent_block_id, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + return Err(ChainstateError::InvalidStacksBlock(msg.into())); + } + + // set the sortition handle's pointer to the block's burnchain view. + // this is either: + // (1) set by the tenure change tx if one exists + // (2) the same as parent block id + + let burnchain_view = if let Some(tenure_change) = block.get_tenure_tx_payload() { + if let Some(ref parent_burn_view) = parent_header_info.burn_view { + // check that the tenure_change's burn view descends from the parent + let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + parent_burn_view, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock( + "Failed to load burn view of parent block ID".into(), + ) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id( + &handle, + parent_burn_view_sn.block_height, + &handle.context.chain_tip, + )? 
+ .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock( + "Failed to load burn view of parent block ID".into(), + ) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } + tenure_change.burn_view_consensus_hash + } else { + parent_header_info.burn_view.clone().ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? + }; + let Some(burnchain_view_sn) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? + else { + // This should be checked already during block acceptance and parent block processing + // - The check for expected burns returns `NoSuchBlockError` if the burnchain view + // could not be found for a block with a tenure tx. + // We error here anyways, but the check during block acceptance makes sure that the staging + // db doesn't get into a situation where it continuously tries to retry such a block (because + // such a block shouldn't land in the staging db). + warn!( + "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "burn_view_consensus_hash" => %burnchain_view, + ); + return Ok(()); + }; + + // find commit and sortition burns if this is a tenure-start block + let Ok(new_tenure) = block.is_wellformed_tenure_start_block() else { + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid tenure change tx(s)".into(), + )); + }; + + let (commit_burn, sortition_burn) = if new_tenure { + // find block-commit to get commit-burn + let block_commit = SortitionDB::get_block_commit( + sort_db.conn(), + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? + .expect("FATAL: no block-commit for tenure-start block"); + + let sort_burn = + SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; + (block_commit.burn_fee, sort_burn) + } else { + (0, 0) + }; + + // attach the block to the chain state and calculate the next chain tip. + let pox_constants = sort_db.pox_constants.clone(); + + // NOTE: because block status is updated in a separate transaction, we need `chainstate_tx` + // and `clarity_instance` to go out of scope before we can issue the it (since we need a + // mutable reference to `stacks_chain_state` to start it). 
This means ensuring that, in the + // `Ok(..)` case, the `clarity_commit` gets dropped beforehand. In order to do this, we first + // run `::append_block()` here, and capture both the Ok(..) and Err(..) results as + // Option<..>'s. Then, if we errored, we can explicitly drop the `Ok(..)` option (even + // though it will always be None), which gets the borrow-checker to believe that it's safe + // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient so + // simply commit the block before beginning the second transaction to mark it processed. + + let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); + let (ok_opt, err_opt) = match NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burn_view_handle, + &burnchain_view, + &pox_constants, + &parent_header_info, + &next_ready_block_snapshot.burn_header_hash, + next_ready_block_snapshot + .block_height + .try_into() + .expect("Failed to downcast u64 to u32"), + next_ready_block_snapshot.burn_header_timestamp, + &block, + block_size, + commit_burn, + sortition_burn, + &active_reward_set, + ) { + Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), + Err(e) => (None, Some(e)), + }; + + if let Some(e) = err_opt { + // force rollback + drop(ok_opt); + drop(chainstate_tx); + + warn!( + "Failed to append {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &e; + "stacks_block_id" => %block.header.block_id() + ); + + // as a separate transaction, mark this block as processed and orphaned. + // This is done separately so that the staging blocks DB, which receives writes + // from the network to store blocks, will be available for writes while a block is + // being processed. Therefore, it's *very important* that block-processing happens + // within the same, single thread. Also, it's *very important* that this update + // succeeds, since *we have already processed* the block. + return Err(e); + }; + + let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + + assert_eq!( + receipt.header.anchored_header.block_hash(), + block.header.block_hash() + ); + assert_eq!(receipt.header.consensus_hash, block.header.consensus_hash); + + info!( + "Advanced to new tip! 
{}/{}", + &receipt.header.consensus_hash, + &receipt.header.anchored_header.block_hash() + ); + Ok(()) +} From 9abf6f1ea0ef1b47d28cf3c99c241d3f955140df Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 16 Oct 2024 14:47:41 -0400 Subject: [PATCH 822/910] test: update `check_block_times` integration test --- .../src/tests/nakamoto_integrations.rs | 533 ++++++++++-------- 1 file changed, 289 insertions(+), 244 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 93f4ac41065..094b5478241 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7220,6 +7220,155 @@ fn continue_tenure_extend() { run_loop_thread.join().unwrap(); } +fn get_block_times( + naka_conf: &Config, + sender_addr: &StacksAddress, + block_height: u128, + tenure_height: u128, +) -> (u128, u128, u128, u128, u128, u128, u128) { + let contract0_name = "test-contract-0"; + let contract1_name = "test-contract-1"; + let contract3_name = "test-contract-3"; + + info!("Getting block times at block {block_height}, {tenure_height}..."); + + let time0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(block_height)], + ); + let time0 = time0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time_now0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-last-time", + vec![], + ); + let time0_now = time_now0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(block_height)], + ); + let time1 = time1_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time1_now_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-last-time", + vec![], + ); + let time1_now = time1_now_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time3_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-time", + vec![&clarity::vm::Value::UInt(tenure_height)], + ); + let time3_tenure = time3_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time3_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(block_height)], + ); + let time3_block = time3_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time3_now_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-last-tenure-time", + vec![], + ); + let time3_now_tenure = time3_now_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + info!("Reported times:"; + "time0" => time0, + "time0_now" => time0_now, + "time1" => time1, + "time1_now" => time1_now, + "time3_block" => time3_block, + "time3_tenure" => time3_tenure, + "time3_now_tenure" => time3_now_tenure + ); + + assert_eq!( + time0, time1, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + assert_eq!( + time0_now, time1_now, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + assert_eq!( + time0, time3_tenure, + "Tenure time should match Clarity 2 block time" + ); + assert_eq!(time0_now, time1_now, "Time should 
match across contracts"); + assert_eq!( + time0_now, time3_now_tenure, + "Clarity 3 tenure time should match Clarity 2 block time" + ); + + ( + time0, + time0_now, + time1, + time1_now, + time3_tenure, + time3_block, + time3_now_tenure, + ) +} + #[test] #[ignore] /// Verify the timestamps using `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. @@ -7267,7 +7416,6 @@ fn check_block_times() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); @@ -7284,8 +7432,10 @@ fn check_block_times() { // Deploy this version with the Clarity 1 / 2 before epoch 3 let contract0_name = "test-contract-0"; - let contract_clarity1 = - "(define-read-only (get-time (height uint)) (get-block-info? time height))"; + let contract_clarity1 = r#" + (define-read-only (get-time (height uint)) (get-block-info? time height)) + (define-read-only (get-last-time) (get-block-info? time (- block-height u1))) + "#; let contract_tx0 = make_contract_publish( &sender_sk, @@ -7312,6 +7462,19 @@ fn check_block_times() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start = epoch_3.start_height; + let mut last_stacks_block_height = 0; + let mut last_tenure_height = 0; + next_block_and(&mut btc_regtest_controller, 60, || { + let info = get_chain_info_result(&naka_conf).unwrap(); + last_stacks_block_height = info.stacks_tip_height as u128; + last_tenure_height = last_stacks_block_height; + Ok(info.burn_block_height == epoch_3_start) + }) + .unwrap(); + let time0_value = call_read_only( &naka_conf, &sender_addr, @@ -7327,8 +7490,6 @@ fn check_block_times() { .unwrap(); info!("Time from pre-epoch 3.0: {}", time0); - wait_for_first_naka_block_commit(60, &commits_submitted); - // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; let contract_tx1 = make_contract_publish_versioned( @@ -7345,9 +7506,11 @@ fn check_block_times() { // This version uses the Clarity 3 functions let contract3_name = "test-contract-3"; - let contract_clarity3 = - "(define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) - (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height))"; + let contract_clarity3 = r#" + (define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) + (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height)) + (define-read-only (get-last-tenure-time) (get-tenure-info? 
time (- tenure-height u1))) + "#; let contract_tx3 = make_contract_publish( &sender_sk, @@ -7360,258 +7523,140 @@ fn check_block_times() { submit_tx(&http_origin, &contract_tx3); sender_nonce += 1; - // sleep to ensure seconds have changed - thread::sleep(Duration::from_secs(3)); - - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) - .unwrap(); - - // make sure that the contracts are published - wait_for(30, || { + let mut stacks_block_height = 0; + wait_for(60, || { let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; - Ok(cur_sender_nonce >= sender_nonce) + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) }) .expect("Timed out waiting for contracts to publish"); + last_stacks_block_height = stacks_block_height; - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info.stacks_tip_height); - let last_stacks_block_height = info.stacks_tip_height as u128; - let last_tenure_height = last_stacks_block_height as u128; - - let time0_value = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let time0 = time0_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - - let time1_value = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let time1 = time1_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0, time1, - "Time from pre- and post-epoch 3.0 contracts should match" - ); - - let time3_tenure_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-time", - vec![&clarity::vm::Value::UInt(last_tenure_height - 2)], - ); - let time3_tenure = time3_tenure_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0, time3_tenure, - "Tenure time should match Clarity 2 block time" - ); - - let time3_block_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let time3_block = time3_block_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() + // Repeat these tests for 5 tenures + for _ in 0..5 { + next_block_and(&mut btc_regtest_controller, 60, || { + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height) + }) .unwrap(); + last_stacks_block_height = stacks_block_height; + last_tenure_height += 1; + info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(2)); - - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - naka_conf.burnchain.chain_id, - &recipient, - send_amt, - ); - sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx); + let (time0, _time0_now, _time1, _time1_now, _time3_tenure, time3_block, _time3_now_tenure) = + get_block_times( + &naka_conf, + &sender_addr, + last_stacks_block_height - 1, + last_tenure_height 
- 1, + ); - // make sure that the contracts are published - wait_for(30, || { - let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; - Ok(cur_sender_nonce >= sender_nonce) - }) - .expect("Timed out waiting for transfer to complete"); + // Mine a Nakamoto block + info!("Mining Nakamoto block"); - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info.stacks_tip_height); - let last_stacks_block_height = info.stacks_tip_height as u128; - - let time0a_value = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let time0a = time0a_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert!( - time0a - time0 >= 1, - "get-block-info? time should have changed. time_0 = {time0}. time_0_a = {time0a}" - ); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); - let time1a_value = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let time1a = time1a_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0a, time1a, - "Time from pre- and post-epoch 3.0 contracts should match" - ); + // wait for the block to be mined + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) + }) + .expect("Timed out waiting for block"); + last_stacks_block_height = stacks_block_height; + + info!("New Stacks block {last_stacks_block_height} in tenure {last_tenure_height}"); + + let ( + time0a, + _time0a_now, + _time1a, + _time1a_now, + _time3a_tenure, + time3a_block, + _time3a_now_tenure, + ) = get_block_times( + &naka_conf, + &sender_addr, + last_stacks_block_height - 1, + last_tenure_height - 1, + ); - let time3a_block_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let time3a_block = time3a_block_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert!( - time3a_block - time3_block >= 1, - "get-stacks-block-info? time should have changed" - ); + assert!( + time0a - time0 >= 1, + "get-block-info? time should have changed. time_0 = {time0}. time_0_a = {time0a}" + ); + assert!( + time3a_block - time3_block >= 1, + "get-stacks-block-info? 
time should have changed" + ); - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); + // Mine a Nakamoto block + info!("Mining Nakamoto block"); - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - naka_conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + // wait for the block to be mined + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) + }) + .expect("Timed out waiting for block"); + last_stacks_block_height = stacks_block_height; + + let ( + time0b, + _time0b_now, + time1b, + _time1b_now, + _time3b_tenure, + time3b_block, + _time3b_now_tenure, + ) = get_block_times( + &naka_conf, + &sender_addr, + last_stacks_block_height - 1, + last_tenure_height - 1, + ); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); + assert_eq!( + time0a, time0b, + "get-block-info? time should not have changed" + ); + assert_eq!( + time0b, time1b, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + assert!( + time3b_block - time3a_block >= 1, + "get-stacks-block-info? time should have changed" + ); } - let time0b_value = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height)], - ); - let time0b = time0b_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0a, time0b, - "get-block-info? time should not have changed" - ); - - let time1b_value = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height)], - ); - let time1b = time1b_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0b, time1b, - "Time from pre- and post-epoch 3.0 contracts should match" - ); - - let time3b_block_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height)], - ); - let time3b_block = time3b_block_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - - assert!( - time3b_block - time3a_block >= 1, - "get-stacks-block-info? 
time should have changed" - ); - coord_channel .lock() .expect("Mutex poisoned") From 249136c0133b6f20c64b5b1a0a5c798b959886a6 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 16 Oct 2024 15:01:04 -0400 Subject: [PATCH 823/910] fix: use tenure height for Clarity 2 contracts --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 094b5478241..50385ead8b8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7230,14 +7230,14 @@ fn get_block_times( let contract1_name = "test-contract-1"; let contract3_name = "test-contract-3"; - info!("Getting block times at block {block_height}, {tenure_height}..."); + info!("Getting block times at block {block_height}, tenure {tenure_height}..."); let time0_value = call_read_only( &naka_conf, &sender_addr, contract0_name, "get-time", - vec![&clarity::vm::Value::UInt(block_height)], + vec![&clarity::vm::Value::UInt(tenure_height)], ); let time0 = time0_value .expect_optional() @@ -7265,7 +7265,7 @@ fn get_block_times( &sender_addr, contract1_name, "get-time", - vec![&clarity::vm::Value::UInt(block_height)], + vec![&clarity::vm::Value::UInt(tenure_height)], ); let time1 = time1_value .expect_optional() From 5f3197c8da91807232e151b08b6538882a48a18c Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 14:53:08 -0500 Subject: [PATCH 824/910] use tenure-height as block-height for 2.x blocks, fix interpretation of marf key --- clarity/src/vm/database/clarity_db.rs | 10 +- stackslib/src/chainstate/nakamoto/keys.rs | 2 +- stackslib/src/clarity_vm/database/mod.rs | 53 +++---- .../src/tests/nakamoto_integrations.rs | 129 ++++++++---------- 4 files changed, 86 insertions(+), 108 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4f64bc08bb4..3a37edf75c6 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -935,10 +935,14 @@ impl<'a> ClarityDatabase<'a> { if current_tenure_height < tenure_height { return Ok(None); } - if current_tenure_height == tenure_height { - return Ok(Some(self.get_current_block_height())); - } let current_height = self.get_current_block_height(); + // check if we're querying a 2.x block + let id_bhh = self.get_index_block_header_hash(tenure_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; + if !epoch.uses_nakamoto_blocks() { + return Ok(Some(tenure_height)); + } + // query from the parent let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; Ok(self diff --git a/stackslib/src/chainstate/nakamoto/keys.rs b/stackslib/src/chainstate/nakamoto/keys.rs index 2944c70affa..bf33c4448e1 100644 --- a/stackslib/src/chainstate/nakamoto/keys.rs +++ b/stackslib/src/chainstate/nakamoto/keys.rs @@ -23,7 +23,7 @@ pub fn ongoing_tenure_id() -> &'static str { "nakamoto::tenures::ongoing_tenure_id" } -/// MARF key to map the coinbase height of a tenure to its consensus hash +/// MARF key to map the coinbase height of a tenure to its first block ID pub fn ongoing_tenure_coinbase_height(coinbase_height: u64) -> String { format!( "nakamoto::tenures::ongoing_tenure_coinbase_height::{}", diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index b12c4470efa..e26f9be6ba3 100644 --- 
a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -47,22 +47,12 @@ pub trait GetTenureStartId { tip: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, DBError>; - fn get_tenure_ch_at_cb_height( - &self, - tip: &StacksBlockId, - coinbase_height: u64, - ) -> Result, DBError>; - fn conn(&self) -> &Connection; fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { - let Some(tenure_ch) = self.get_tenure_ch_at_cb_height(tip, coinbase_height)? else { - return Ok(None); - }; - self.get_tenure_block_id(tip, &tenure_ch) - } + ) -> Result, DBError>; + fn conn(&self) -> &Connection; } impl GetTenureStartId for StacksDBConn<'_> { @@ -81,17 +71,17 @@ impl GetTenureStartId for StacksDBConn<'_> { .map(|block_id| TenureBlockId::from(block_id))) } - fn get_tenure_ch_at_cb_height( + fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { + ) -> Result, DBError> { let opt_out = self .get_indexed( tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? - .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .map(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)) .flatten(); Ok(opt_out) } @@ -117,17 +107,17 @@ impl GetTenureStartId for StacksDBTx<'_> { .map(|block_id| TenureBlockId::from(block_id))) } - fn get_tenure_ch_at_cb_height( + fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { + ) -> Result, DBError> { let opt_out = self .get_indexed_ref( tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? - .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .map(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)) .flatten(); Ok(opt_out) } @@ -151,13 +141,13 @@ impl GetTenureStartId for MARF { self.sqlite_conn() } - fn get_tenure_ch_at_cb_height( + fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { + ) -> Result, DBError> { let dbconn = StacksDBConn::new(self, ()); - dbconn.get_tenure_ch_at_cb_height(tip, coinbase_height) + dbconn.get_tenure_block_id_at_cb_height(tip, coinbase_height) } } @@ -250,7 +240,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { let tenure_block_id = GetTenureStartId::get_tenure_block_id_at_cb_height(&self.0, tip, tenure_height.into()) .expect("FATAL: bad DB data for tenure height lookups")?; - get_stacks_header_column(self.0.conn(), &tenure_block_id.0, "block_height", |r| { + get_stacks_header_column(self.0.conn(), &tenure_block_id, "block_height", |r| { u64::from_row(r) .expect("FATAL: malformed block_height") .try_into() @@ -499,7 +489,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { tenure_height.into(), ) .expect("FATAL: bad DB data for tenure height lookups")?; - get_stacks_header_column(self.deref(), &tenure_block_id.0, "block_height", |r| { + get_stacks_header_column(self.deref(), &tenure_block_id, "block_height", |r| { u64::from_row(r) .expect("FATAL: malformed block_height") .try_into() @@ -670,17 +660,12 @@ impl HeadersDB for MARF { let tenure_block_id = GetTenureStartId::get_tenure_block_id_at_cb_height(self, tip, tenure_height.into()) .expect("FATAL: bad DB data for tenure height lookups")?; - get_stacks_header_column( - self.sqlite_conn(), - &tenure_block_id.0, - "block_height", - |r| { - u64::from_row(r) - .expect("FATAL: malformed block_height") - .try_into() - .expect("FATAL: blockchain too long") - }, - ) + 
get_stacks_header_column(self.sqlite_conn(), &tenure_block_id, "block_height", |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 50385ead8b8..c81946cf66c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -70,6 +70,7 @@ use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; +use stacks::types::chainstate::StacksBlockId; use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -7671,6 +7672,8 @@ fn assert_block_info( miner: &Value, miner_spend: &clarity::vm::Value, ) { + info!("block info tuple data: {tuple0:#?}"); + assert!(tuple0 .get("burnchain-header-hash") .unwrap() @@ -7816,7 +7819,7 @@ fn check_block_info() { // Deploy this version with the Clarity 1 / 2 before epoch 3 let contract0_name = "test-contract-0"; - let contract_clarity1 = "(define-read-only (get-info (height uint)) + let contract_clarity1 = "(define-read-only (get-block-info (height uint)) { burnchain-header-hash: (get-block-info? burnchain-header-hash height), id-header-hash: (get-block-info? id-header-hash height), @@ -7859,7 +7862,7 @@ fn check_block_info() { &naka_conf, &sender_addr, contract0_name, - "get-info", + "get-block-info", vec![&clarity::vm::Value::UInt(1)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; @@ -7929,25 +7932,36 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); + + let get_block_info = |contract_name: &str, query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + + let tuple0 = get_block_info(contract0_name, last_tenure_height - 1); assert_block_info(&tuple0, &miner, &miner_spend); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height - 1); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -7981,14 +7995,8 @@ fn check_block_info() { tuple0.get("miner-spend-winner") ); - let result3_block = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-info", - 
vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple3_block1 = result3_block.expect_tuple().unwrap().data_map; + // this will point to the last block in the prior tenure (which should have been a 2.x block) + let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); assert_eq!( tuple3_block1.get("id-header-hash"), tuple0.get("id-header-hash") @@ -8038,25 +8046,17 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, last_tenure_height); assert_block_info(&tuple0, &miner, &miner_spend); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8102,11 +8102,15 @@ fn check_block_info() { let tuple3_block2 = result3_block.expect_tuple().unwrap().data_map; // There should have been a block change, so these should be different. assert_ne!(tuple3_block1, tuple3_block2); + + // tuple 0 fetches the id-header-hash for the first block of the tenure (block1) + + let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); assert_eq!( - tuple3_block2.get("id-header-hash"), + tuple3_block1.get("id-header-hash"), tuple0.get("id-header-hash") ); - assert_eq!(tuple3_block2.get("header-hash"), tuple0.get("header-hash")); + assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); assert!(tuple3_block2 .get("time") .unwrap() @@ -8150,25 +8154,17 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, last_tenure_height); assert_block_info(&tuple0, &miner, &miner_spend); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8181,21 +8177,14 @@ fn check_block_info() { let tuple3_tenure1a = result3_tenure.expect_tuple().unwrap().data_map; assert_eq!(tuple3_tenure1, 
tuple3_tenure1a); - let result3_block = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple3_block3 = result3_block.expect_tuple().unwrap().data_map; + let tuple3_block3 = get_block_info(contract3_name, last_stacks_block_height - 1); // There should have been a block change, so these should be different. assert_ne!(tuple3_block3, tuple3_block2); assert_eq!( - tuple3_block3.get("id-header-hash"), + tuple3_block1.get("id-header-hash"), tuple0.get("id-header-hash") ); - assert_eq!(tuple3_block3.get("header-hash"), tuple0.get("header-hash")); + assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); assert!(tuple3_block3 .get("time") .unwrap() From 1789dccc1b3aaaf955cae9a1b1460b504514b110 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 16 Oct 2024 13:52:41 -0700 Subject: [PATCH 825/910] Add multiple_miners_with_custom_chain_id test to test chain id in the signer in an integration test Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/config.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 282 +++++++++++++++++++++ 3 files changed, 284 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 4115118eaf8..be3d562d46b 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -118,6 +118,7 @@ jobs: - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle + - tests::signer::v0::multiple_miners_with_custom_chain_id - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 375ed1a171d..3392906682a 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -156,7 +156,7 @@ pub struct GlobalConfig { /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, /// An optional custom Chain ID - chain_id: Option, + pub chain_id: Option, } /// Internal struct for loading up the config file diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 89192d274da..448bd4c6447 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5170,3 +5170,285 @@ fn signing_in_0th_tenure_of_reward_cycle() { } assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); } + +/// This test involves two miners with a custom chain id, each mining tenures with 6 blocks each. +/// Half of the signers are attached to each miner, so the test also verifies that +/// the signers' messages successfully make their way to the active miner. 
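The invariant this scenario exercises is that a node and its signers must agree on the (custom) chain id, since the signers validate messages against it. A hedged sketch of the wiring, reduced to the two fields involved (the struct names are simplified stand-ins for the real node and signer configs, and the mismatch case is not exercised here):

const CUSTOM_CHAIN_ID: u32 = 0x8765_4321; // the value used in the test below

struct NodeConfig {
    chain_id: u32,
}

struct SignerConfig {
    chain_id: Option<u32>, // None means "use the network default"
}

fn wire_custom_chain_id(node: &mut NodeConfig, signer: &mut SignerConfig) {
    // Both sides must be set to the same value for the signers'
    // messages to be accepted by the node, and vice versa.
    node.chain_id = CUSTOM_CHAIN_ID;
    signer.chain_id = Some(CUSTOM_CHAIN_ID);
}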
+#[test] +#[ignore] +fn multiple_miners_with_custom_chain_id() { + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + let chain_id = 0x87654321; + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr.clone(), + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.chain_id = Some(chain_id) + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.chain_id = chain_id; + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + 
conf_node_2.events_observers.extend(node_2_listeners); + + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 1; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + let mut sender_nonce = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + 
sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!( + "Mined interim block {}:{}", + btc_blocks_mined, interim_block_ix + ); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash.clone()); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!( + "Miner 1 tenures: {}, Miner 2 tenures: {}", + miner_1_tenures, miner_2_tenures + ); + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + ); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + // Verify both nodes have the correct chain id + let miner1_info = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(miner1_info.network_id, chain_id); + + let miner2_info = get_chain_info(&conf_node_2); + assert_eq!(miner2_info.network_id, chain_id); + + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} From d0418f1476c4e7d7c52ff6fab12aebf75ba45814 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 16:14:25 -0500 Subject: [PATCH 826/910] test: update the check_block_info_rewards test --- .../src/tests/nakamoto_integrations.rs | 147 ++++++++---------- 1 file changed, 62 insertions(+), 85 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c81946cf66c..2a7a6fb4977 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7932,7 +7932,6 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let (chainstate, _) = StacksChainState::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, @@ -7941,6 +7940,7 @@ fn check_block_info() { ) .unwrap(); + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); 
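The recurring pattern in these test updates — open the chainstate, build the tip's StacksBlockId, then call NakamotoChainState::get_coinbase_height — exists because, after Nakamoto, a tip's Stacks block height and its tenure (coinbase) height diverge: only tenure-start blocks advance the latter. A toy model of the two counters, for intuition (illustrative only, not the stacks-core API):

struct Tip {
    stacks_block_height: u64,
    tenure_height: u64,
}

fn apply_block(tip: &mut Tip, is_tenure_start: bool) {
    tip.stacks_block_height += 1;
    // Pre-Nakamoto, `is_tenure_start` was true for every block, so the two
    // counters were interchangeable; post-Nakamoto they drift apart.
    if is_tenure_start {
        tip.tenure_height += 1;
    }
}

This is why the Clarity 1/2 contracts in these tests are now queried with tenure-height-relative values while the Clarity 3 contract keeps using the Stacks block height.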
let last_tenure_height: u128 = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) .unwrap() @@ -8070,19 +8070,13 @@ fn check_block_info() { // There should have been a tenure change, so these should be different. assert_ne!(tuple3_tenure0, tuple3_tenure1); assert_eq!( - tuple3_tenure1.get("burnchain-header-hash"), - tuple0.get("burnchain-header-hash") - ); - assert_eq!( - tuple3_tenure1.get("miner-address"), - tuple0.get("miner-address") - ); - assert_eq!(tuple3_tenure1.get("time"), tuple0.get("time")); - assert_eq!(tuple3_tenure1.get("vrf-seed"), tuple0.get("vrf-seed")); - assert_eq!( - tuple3_tenure1.get("block-reward"), - tuple0.get("block-reward") + tuple3_tenure1["burnchain-header-hash"], + tuple0["burnchain-header-hash"] ); + assert_eq!(tuple3_tenure1["miner-address"], tuple0["miner-address"]); + assert_eq!(tuple3_tenure1["time"], tuple0["time"]); + assert_eq!(tuple3_tenure1["vrf-seed"], tuple0["vrf-seed"]); + assert_eq!(tuple3_tenure1["block-reward"], tuple0["block-reward"]); assert_eq!( tuple3_tenure1.get("miner-spend-total"), tuple0.get("miner-spend-total") @@ -8106,14 +8100,9 @@ fn check_block_info() { // tuple 0 fetches the id-header-hash for the first block of the tenure (block1) let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); - assert_eq!( - tuple3_block1.get("id-header-hash"), - tuple0.get("id-header-hash") - ); - assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); - assert!(tuple3_block2 - .get("time") - .unwrap() + assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); + assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); + assert!(tuple3_block2["time"] .clone() .expect_optional() .unwrap() @@ -8180,14 +8169,9 @@ fn check_block_info() { let tuple3_block3 = get_block_info(contract3_name, last_stacks_block_height - 1); // There should have been a block change, so these should be different. assert_ne!(tuple3_block3, tuple3_block2); - assert_eq!( - tuple3_block1.get("id-header-hash"), - tuple0.get("id-header-hash") - ); - assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); - assert!(tuple3_block3 - .get("time") - .unwrap() + assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); + assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); + assert!(tuple3_block3["time"] .clone() .expect_optional() .unwrap() @@ -8329,7 +8313,7 @@ fn check_block_info_rewards() { // Deploy this version with the Clarity 1 / 2 before epoch 3 let contract0_name = "test-contract-0"; - let contract_clarity1 = "(define-read-only (get-info (height uint)) + let contract_clarity1 = "(define-read-only (get-block-info (height uint)) { burnchain-header-hash: (get-block-info? burnchain-header-hash height), id-header-hash: (get-block-info? 
id-header-hash height), @@ -8354,6 +8338,17 @@ fn check_block_info_rewards() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx0); + let get_block_info = |contract_name: &str, query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -8368,14 +8363,7 @@ fn check_block_info_rewards() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, 1); info!("Info from pre-epoch 3.0: {:?}", tuple0); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8489,8 +8477,22 @@ fn check_block_info_rewards() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let last_stacks_block_height = info.stacks_tip_height as u128; let last_nakamoto_block = last_stacks_block_height; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_nakamoto_block_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); // Mine more than 2 burn blocks to get the last block's reward matured // (only 2 blocks maturation time in tests) @@ -8511,36 +8513,32 @@ fn check_block_info_rewards() { let last_stacks_block_height = info.stacks_tip_height as u128; let blocks = test_observer::get_blocks(); + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); + // Check the block reward is now matured in one of the tenure-change blocks let mature_height = last_stacks_block_height - 4; let expected_reward = get_expected_reward_for_height(&blocks, mature_height); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(mature_height)], + let tuple0 = get_block_info(contract0_name, last_tenure_height - 4); + info!( + "block rewards"; + "fetched" => %tuple0["block-reward"], + "expected" => expected_reward, ); - let tuple0 = result0.expect_tuple().unwrap().data_map; assert_eq!( - tuple0 - .get("block-reward") - .unwrap() + tuple0["block-reward"] .clone() .expect_optional() .unwrap() .unwrap(), - Value::UInt(expected_reward as u128) + Value::UInt(expected_reward) ); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(mature_height)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height - 4); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8551,41 +8549,23 @@ fn check_block_info_rewards() { vec![&clarity::vm::Value::UInt(mature_height)], ); let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!( - tuple3_tenure.get("block-reward"), - 
tuple0.get("block-reward") - ); + assert_eq!(tuple3_tenure["block-reward"], tuple0["block-reward"]); // Check the block reward is now matured in one of the Nakamoto blocks let expected_reward = get_expected_reward_for_height(&blocks, last_nakamoto_block); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_nakamoto_block)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, last_nakamoto_block_tenure_height); + assert_eq!( - tuple0 - .get("block-reward") - .unwrap() + tuple0["block-reward"] .clone() .expect_optional() .unwrap() .unwrap(), - Value::UInt(expected_reward as u128) + Value::UInt(expected_reward) ); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_nakamoto_block)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_nakamoto_block_tenure_height); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8596,10 +8576,7 @@ fn check_block_info_rewards() { vec![&clarity::vm::Value::UInt(last_nakamoto_block)], ); let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!( - tuple3_tenure.get("block-reward"), - tuple0.get("block-reward") - ); + assert_eq!(tuple3_tenure["block-reward"], tuple0["block-reward"]); coord_channel .lock() From 5202e880b84b233cc1a7a63cab5d912269811061 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 16:30:12 -0500 Subject: [PATCH 827/910] fix: check current_height vs supplied tenure_height --- clarity/src/vm/database/clarity_db.rs | 4 +++- clarity/src/vm/tests/contracts.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 3a37edf75c6..2ee31a8b41e 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -936,13 +936,15 @@ impl<'a> ClarityDatabase<'a> { return Ok(None); } let current_height = self.get_current_block_height(); + if current_height <= tenure_height { + return Ok(None); + } // check if we're querying a 2.x block let id_bhh = self.get_index_block_header_hash(tenure_height)?; let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; if !epoch.uses_nakamoto_blocks() { return Ok(Some(tenure_height)); } - // query from the parent let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; Ok(self diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 3c4dc14b2e0..9cb5aea4b14 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -139,7 +139,7 @@ fn test_get_block_info_eval( .unwrap(); let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); - + eprintln!("{}", contracts[i]); let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); match expected[i] { // any (some UINT) is okay for checking get-block-info? 
time From b9e1bb99635f3df32ef3eb6847d4a53250cb85dd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 16 Oct 2024 18:37:56 -0400 Subject: [PATCH 828/910] test: resolve issues with `check_block_times` --- .../src/tests/nakamoto_integrations.rs | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2a7a6fb4977..0aab59c4b9b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7294,7 +7294,7 @@ fn get_block_times( &sender_addr, contract3_name, "get-tenure-time", - vec![&clarity::vm::Value::UInt(tenure_height)], + vec![&clarity::vm::Value::UInt(block_height)], ); let time3_tenure = time3_tenure_value .expect_optional() @@ -7349,15 +7349,7 @@ fn get_block_times( time0_now, time1_now, "Time from pre- and post-epoch 3.0 contracts should match" ); - assert_eq!( - time0, time3_tenure, - "Tenure time should match Clarity 2 block time" - ); assert_eq!(time0_now, time1_now, "Time should match across contracts"); - assert_eq!( - time0_now, time3_now_tenure, - "Clarity 3 tenure time should match Clarity 2 block time" - ); ( time0, @@ -7394,7 +7386,7 @@ fn check_block_times() { let deploy_fee = 3000; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - 3 * deploy_fee + (send_amt + send_fee) * 2, + 3 * deploy_fee + (send_amt + send_fee) * 12, ); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), @@ -7471,7 +7463,7 @@ fn check_block_times() { next_block_and(&mut btc_regtest_controller, 60, || { let info = get_chain_info_result(&naka_conf).unwrap(); last_stacks_block_height = info.stacks_tip_height as u128; - last_tenure_height = last_stacks_block_height; + last_tenure_height = last_stacks_block_height + 1; Ok(info.burn_block_height == epoch_3_start) }) .unwrap(); @@ -7510,7 +7502,7 @@ fn check_block_times() { let contract_clarity3 = r#" (define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height)) - (define-read-only (get-last-tenure-time) (get-tenure-info? time (- tenure-height u1))) + (define-read-only (get-last-tenure-time) (get-tenure-info? time (- stacks-block-height u1))) "#; let contract_tx3 = make_contract_publish( @@ -7546,7 +7538,7 @@ fn check_block_times() { last_tenure_height += 1; info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); - let (time0, _time0_now, _time1, _time1_now, _time3_tenure, time3_block, _time3_now_tenure) = + let (time0, time0_now, _time1, _time1_now, time3_tenure, time3_block, time3_now_tenure) = get_block_times( &naka_conf, &sender_addr, @@ -7554,6 +7546,15 @@ fn check_block_times() { last_tenure_height - 1, ); + assert_eq!( + time0, time3_tenure, + "Tenure time should match Clarity 2 block time" + ); + assert_eq!( + time0_now, time3_now_tenure, + "Clarity 3 tenure time should match Clarity 2 block time in the first block of a tenure" + ); + // Mine a Nakamoto block info!("Mining Nakamoto block"); @@ -7588,7 +7589,7 @@ fn check_block_times() { _time1a_now, _time3a_tenure, time3a_block, - _time3a_now_tenure, + time3a_now_tenure, ) = get_block_times( &naka_conf, &sender_addr, @@ -7596,9 +7597,9 @@ fn check_block_times() { last_tenure_height - 1, ); - assert!( - time0a - time0 >= 1, - "get-block-info? time should have changed. 
time_0 = {time0}. time_0_a = {time0a}" + assert_eq!( + time0a, time0, + "get-block-info? time should not have changed" ); assert!( time3a_block - time3_block >= 1, @@ -7618,6 +7619,7 @@ fn check_block_times() { send_amt, ); submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; // wait for the block to be mined wait_for(30, || { @@ -7632,11 +7634,11 @@ fn check_block_times() { let ( time0b, _time0b_now, - time1b, + _time1b, _time1b_now, _time3b_tenure, time3b_block, - _time3b_now_tenure, + time3b_now_tenure, ) = get_block_times( &naka_conf, &sender_addr, @@ -7645,17 +7647,17 @@ fn check_block_times() { ); assert_eq!( - time0a, time0b, + time0b, time0a, "get-block-info? time should not have changed" ); - assert_eq!( - time0b, time1b, - "Time from pre- and post-epoch 3.0 contracts should match" - ); assert!( time3b_block - time3a_block >= 1, "get-stacks-block-info? time should have changed" ); + assert_eq!( + time3b_now_tenure, time3a_now_tenure, + "get-tenure-info? time should not have changed" + ); } coord_channel From ec5a03a37ca0b998fe4773db7686a10691a636ef Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 20:53:16 -0500 Subject: [PATCH 829/910] test: stricter (and more clear?) assertions in check_block_info --- .../src/tests/nakamoto_integrations.rs | 543 +++++++++++------- 1 file changed, 321 insertions(+), 222 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0aab59c4b9b..99a65ed4cc6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7750,6 +7750,17 @@ fn assert_block_info( ); } +fn parse_block_id(optional_buff32: &Value) -> StacksBlockId { + let bytes = optional_buff32 + .clone() + .expect_optional() + .unwrap() + .unwrap() + .expect_buff(32) + .unwrap(); + StacksBlockId::from_vec(&bytes).unwrap() +} + #[test] #[ignore] /// Verify all properties in `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. @@ -7782,6 +7793,7 @@ fn check_block_info() { ); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); + let contract3_name = "test-contract-3"; test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -7812,6 +7824,36 @@ fn check_block_info() { let mut sender_nonce = 0; + let get_block_info = |contract_name: &str, query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + + let get_tenure_info = |query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let miner = clarity::vm::Value::Principal( PrincipalData::parse_standard_principal("ST25WA53N4PWF8XZGQH2J5A4CGCWV4JADPM8MHTRV") .unwrap() @@ -7834,6 +7876,25 @@ fn check_block_info() { miner-spend-winner: (get-block-info? miner-spend-winner height), } )"; + // This version uses the Clarity 3 functions + let contract_clarity3 = "(define-read-only (get-block-info (height uint)) + { + id-header-hash: (get-stacks-block-info? 
id-header-hash height), + header-hash: (get-stacks-block-info? header-hash height), + time: (get-stacks-block-info? time height), + } + ) + (define-read-only (get-tenure-info (height uint)) + { + burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), + miner-address: (get-tenure-info? miner-address height), + time: (get-tenure-info? time height), + vrf-seed: (get-tenure-info? vrf-seed height), + block-reward: (get-tenure-info? block-reward height), + miner-spend-total: (get-tenure-info? miner-spend-total height), + miner-spend-winner: (get-tenure-info? miner-spend-winner height), + } + )"; let contract_tx0 = make_contract_publish( &sender_sk, @@ -7855,20 +7916,13 @@ fn check_block_info() { &mut btc_regtest_controller, ); - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + let info = get_chain_info(&naka_conf); + let last_pre_nakamoto_block_height = info.stacks_tip_height.into(); - info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; - info!("Info from pre-epoch 3.0: {:?}", tuple0); + let c0_block_ht_1_pre_3 = get_block_info(contract0_name, 1); + info!("Info from pre-epoch 3.0: {:?}", c0_block_ht_1_pre_3); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -7886,27 +7940,6 @@ fn check_block_info() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx1); - // This version uses the Clarity 3 functions - let contract3_name = "test-contract-3"; - let contract_clarity3 = "(define-read-only (get-block-info (height uint)) - { - id-header-hash: (get-stacks-block-info? id-header-hash height), - header-hash: (get-stacks-block-info? header-hash height), - time: (get-stacks-block-info? time height), - } - ) - (define-read-only (get-tenure-info (height uint)) - { - burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), - miner-address: (get-tenure-info? miner-address height), - time: (get-tenure-info? time height), - vrf-seed: (get-tenure-info? vrf-seed height), - block-reward: (get-tenure-info? block-reward height), - miner-spend-total: (get-tenure-info? miner-spend-total height), - miner-spend-winner: (get-tenure-info? miner-spend-winner height), - } - )"; - let contract_tx3 = make_contract_publish( &sender_sk, sender_nonce, @@ -7919,8 +7952,6 @@ fn check_block_info() { submit_tx(&http_origin, &contract_tx3); // sleep to ensure seconds have changed - thread::sleep(Duration::from_secs(3)); - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -7931,98 +7962,148 @@ fn check_block_info() { }) .expect("Timed out waiting for contracts to publish"); - let info = get_chain_info_result(&naka_conf).unwrap(); + // the first test we want to do is around the behavior of + // looking up 2.x blocks. 
+ + // look up block height 1 with all 3 contracts after nakamoto activates + let c0_block_ht_1_post_3 = get_block_info(contract0_name, 1); + let c1_block_ht_1_post_3 = get_block_info(contract1_name, 1); + let c3_block_ht_1_post_3 = get_block_info(contract3_name, 1); + assert_eq!(c0_block_ht_1_post_3, c0_block_ht_1_pre_3); + assert_eq!(c0_block_ht_1_post_3, c1_block_ht_1_post_3); + for (key, value) in c3_block_ht_1_post_3.iter() { + assert_eq!(&c0_block_ht_1_post_3[key], value); + } + + // look up last 2.x height with all 3 contracts + let c0_last_2x_block = get_block_info(contract0_name, last_pre_nakamoto_block_height); + let c1_last_2x_block = get_block_info(contract1_name, last_pre_nakamoto_block_height); + let c3_last_2x_block = get_block_info(contract3_name, last_pre_nakamoto_block_height); + assert_eq!(c0_last_2x_block, c1_last_2x_block); + for (key, value) in c3_last_2x_block.iter() { + assert_eq!(&c0_last_2x_block[key], value); + } + + // now we want to test the behavior of the first block in a tenure + // so, we'll issue a bitcoin block, and not submit any transactions + // (which will keep the miner from issuing any blocks after the first + // one in the tenure) + + let info = get_chain_info(&naka_conf); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; - let (chainstate, _) = StacksChainState::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) .unwrap() .unwrap() .into(); + let last_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &last_stacks_tip, + &info.stacks_tip_consensus_hash, + ) + .unwrap() + .unwrap(); + let last_tenure_start_block_id = last_tenure_start_block_header.index_block_hash(); + let last_tenure_start_block_ht = last_tenure_start_block_header.stacks_block_height.into(); - let get_block_info = |contract_name: &str, query_height: u128| { - let result = call_read_only( - &naka_conf, - &sender_addr, - contract_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(query_height)], - ); - result.expect_tuple().unwrap().data_map - }; - - let tuple0 = get_block_info(contract0_name, last_tenure_height - 1); - assert_block_info(&tuple0, &miner, &miner_spend); - - let tuple1 = get_block_info(contract1_name, last_tenure_height - 1); - assert_eq!(tuple0, tuple1); - - let result3_tenure = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple3_tenure0 = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!( - tuple3_tenure0.get("burnchain-header-hash"), - tuple0.get("burnchain-header-hash") - ); - assert_eq!( - tuple3_tenure0.get("miner-address"), - tuple0.get("miner-address") - ); - assert_eq!(tuple3_tenure0.get("time"), tuple0.get("time")); - assert_eq!(tuple3_tenure0.get("vrf-seed"), tuple0.get("vrf-seed")); - assert_eq!( - tuple3_tenure0.get("block-reward"), - tuple0.get("block-reward") - ); - assert_eq!( - tuple3_tenure0.get("miner-spend-total"), - tuple0.get("miner-spend-total") - ); - assert_eq!( - tuple3_tenure0.get("miner-spend-winner"), - tuple0.get("miner-spend-winner") - ); + // lets issue the next bitcoin block + 
next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); - // this will point to the last block in the prior tenure (which should have been a 2.x block) - let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); - assert_eq!( - tuple3_block1.get("id-header-hash"), - tuple0.get("id-header-hash") - ); - assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); - assert!(tuple3_block1 - .get("time") + let info = get_chain_info(&naka_conf); + info!("Chain info: {:?}", info); + let cur_stacks_block_height = info.stacks_tip_height as u128; + let cur_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let cur_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &cur_stacks_tip) + .unwrap() + .unwrap() + .into(); + let cur_tenure_start_block_id = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &cur_stacks_tip, + &info.stacks_tip_consensus_hash, + ) + .unwrap() + .unwrap() + .index_block_hash(); + + assert_eq!(cur_tenure_start_block_id, cur_stacks_tip); + assert_eq!(cur_stacks_block_height, last_stacks_block_height + 1); + assert_eq!(cur_tenure_height, last_tenure_height + 1); + + // first checks: get-block-info with the current tenure height should return None + let c0_cur_tenure = get_block_info(contract0_name, cur_tenure_height); + let c1_cur_tenure = get_block_info(contract1_name, cur_tenure_height); + // contract 3 uses the current stacks block height rather than current tenure. + let c3_cur_tenure = get_block_info(contract3_name, cur_stacks_block_height); + let c3_cur_tenure_ti = get_tenure_info(cur_stacks_block_height); + assert!(c0_cur_tenure["id-header-hash"] + .clone() + .expect_optional() .unwrap() + .is_none()); + assert!(c1_cur_tenure["id-header-hash"] .clone() .expect_optional() .unwrap() - .is_some()); - - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); - - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + .is_none()); + assert!(c3_cur_tenure["id-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); + assert!(c3_cur_tenure_ti["burnchain-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); + + // second checks: get-block-info with prior tenure height should return Some + let c0_last_tenure = get_block_info(contract0_name, last_tenure_height); + let c1_last_tenure = get_block_info(contract1_name, last_tenure_height); + // contract 3 uses the current stacks block height rather than current tenure. 
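The None assertions above (and the Some assertions that follow) hinge on one rule from the `clarity_db.rs` changes earlier in this series: a lookup is only answerable when the queried height is strictly below the current height. A one-function sketch of that guard (hypothetical helper, for illustration):

/// Mirrors the `current_height <= tenure_height => None` check added to
/// `get_block_height_for_tenure_height`: data for the current height is
/// not yet committed from the evaluating block's perspective.
fn is_answerable(query_height: u64, current_height: u64) -> bool {
    query_height < current_height
}

fn main() {
    assert!(!is_answerable(10, 10)); // current tenure: lookup yields None
    assert!(is_answerable(9, 10));   // prior tenure: lookup yields Some(...)
}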
+ let c3_last_tenure_bi = get_block_info(contract3_name, last_stacks_block_height); + let c3_last_tenure_ti = get_tenure_info(last_stacks_block_height); + let c3_last_tenure_start_bi = get_block_info(contract3_name, last_tenure_start_block_ht); + + // assert that c0 and c1 returned some data + assert_block_info(&c0_last_tenure, &miner, &miner_spend); + assert_block_info(&c1_last_tenure, &miner, &miner_spend); + assert_eq!(c0_last_tenure, c1_last_tenure); + + let c3_fetched_id_hash = parse_block_id(&c3_last_tenure_bi["id-header-hash"]); + assert_eq!(c3_fetched_id_hash, last_stacks_tip); + + // c0 and c1 should have different block info data than c3 + assert_ne!( + c0_last_tenure["header-hash"], + c3_last_tenure_bi["header-hash"] + ); + assert_ne!( + c0_last_tenure["id-header-hash"], + c3_last_tenure_bi["id-header-hash"] + ); + assert_ne!(c0_last_tenure["time"], c3_last_tenure_bi["time"]); + // c0 and c1 should have the same burn data as the *tenure info* lookup in c3 + for (key, value) in c3_last_tenure_ti.iter() { + assert_eq!(&c0_last_tenure[key], value); + } + // c0 and c1 should have the same header hash data as the *block info* lookup in c3 using last tenure start block ht + for key in ["header-hash", "id-header-hash"] { + assert_eq!(&c0_last_tenure[key], &c3_last_tenure_start_bi[key]); + } + // c0 should have the same index hash as last_tenure start block id + assert_eq!( + parse_block_id(&c0_last_tenure["id-header-hash"]), + last_tenure_start_block_id + ); - // submit a tx so that the miner will mine an extra block + // Now we want to test the behavior of a new nakamoto block within the same tenure + // We'll force a nakamoto block by submitting a transfer, then waiting for the nonce to bump + info!("Mining an interim nakamoto block"); let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -8034,93 +8115,94 @@ fn check_block_info() { sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + wait_for(30, || { + thread::sleep(Duration::from_secs(1)); + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Failed to process the submitted transfer tx in a new nakamoto block"); - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); - let last_stacks_block_height = info.stacks_tip_height as u128; - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); - let last_tenure_height: u128 = - NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + let info = get_chain_info(&naka_conf); + let interim_stacks_block_height = info.stacks_tip_height as u128; + let interim_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let interim_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &interim_stacks_tip) .unwrap() .unwrap() .into(); - - let tuple0 = get_block_info(contract0_name, last_tenure_height); - assert_block_info(&tuple0, &miner, &miner_spend); - - let tuple1 = get_block_info(contract1_name, last_tenure_height); - assert_eq!(tuple0, tuple1); - - let result3_tenure = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-info", - 
vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple3_tenure1 = result3_tenure.expect_tuple().unwrap().data_map; - // There should have been a tenure change, so these should be different. - assert_ne!(tuple3_tenure0, tuple3_tenure1); + let interim_tenure_start_block_id = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &interim_stacks_tip, + &info.stacks_tip_consensus_hash, + ) + .unwrap() + .unwrap() + .index_block_hash(); + assert_eq!(interim_tenure_height, cur_tenure_height); + assert_eq!(interim_tenure_start_block_id, cur_tenure_start_block_id); + assert_eq!(interim_stacks_block_height, cur_stacks_block_height + 1); + + // querying the same block heights that returned data before should yield the identical result assert_eq!( - tuple3_tenure1["burnchain-header-hash"], - tuple0["burnchain-header-hash"] + c0_last_tenure, + get_block_info(contract0_name, last_tenure_height) ); - assert_eq!(tuple3_tenure1["miner-address"], tuple0["miner-address"]); - assert_eq!(tuple3_tenure1["time"], tuple0["time"]); - assert_eq!(tuple3_tenure1["vrf-seed"], tuple0["vrf-seed"]); - assert_eq!(tuple3_tenure1["block-reward"], tuple0["block-reward"]); assert_eq!( - tuple3_tenure1.get("miner-spend-total"), - tuple0.get("miner-spend-total") + c1_last_tenure, + get_block_info(contract1_name, last_tenure_height) ); assert_eq!( - tuple3_tenure1.get("miner-spend-winner"), - tuple0.get("miner-spend-winner") + c3_last_tenure_bi, + get_block_info(contract3_name, last_stacks_block_height) ); - - let result3_block = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + assert_eq!(c3_last_tenure_ti, get_tenure_info(last_stacks_block_height)); + assert_eq!( + c3_last_tenure_start_bi, + get_block_info(contract3_name, last_tenure_start_block_ht) + ); + + // querying for the current tenure should work now though + let c0_cur_tenure = get_block_info(contract0_name, cur_tenure_height); + let c1_cur_tenure = get_block_info(contract1_name, cur_tenure_height); + // contract 3 uses the current stacks block height rather than current tenure. + let c3_cur_tenure = get_block_info(contract3_name, cur_stacks_block_height); + let c3_cur_tenure_ti = get_tenure_info(cur_stacks_block_height); + assert_block_info(&c0_cur_tenure, &miner, &miner_spend); + assert_block_info(&c1_cur_tenure, &miner, &miner_spend); + assert_eq!(c0_cur_tenure, c1_cur_tenure); + + // c0 and c1 should have the same header hash data as the *block info* lookup in c3 using cur_stacks_block + // (because cur_stacks_tip == cur_tenure_start_block_id, as was asserted before) + for key in ["header-hash", "id-header-hash"] { + assert_eq!(&c0_cur_tenure[key], &c3_cur_tenure[key]); + } + // c0 should have the same index hash as cur_tenure start block id + assert_eq!( + parse_block_id(&c0_cur_tenure["id-header-hash"]), + cur_tenure_start_block_id, + "c0 should have the same index hash as cur_tenure_start_block_id" ); - let tuple3_block2 = result3_block.expect_tuple().unwrap().data_map; - // There should have been a block change, so these should be different. 
- assert_ne!(tuple3_block1, tuple3_block2); - - // tuple 0 fetches the id-header-hash for the first block of the tenure (block1) + // c0 and c1 should have the same burn data as the *tenure info* lookup in c3 + for (key, value) in c3_cur_tenure_ti.iter() { + assert_eq!(&c0_cur_tenure[key], value); + } - let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); - assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); - assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); - assert!(tuple3_block2["time"] + let c3_interim_bi = get_block_info(contract3_name, interim_stacks_block_height); + let c3_interim_ti = get_tenure_info(interim_stacks_block_height); + assert!(c3_interim_bi["id-header-hash"] .clone() .expect_optional() .unwrap() - .is_some()); - - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); - - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + .is_none()); + assert!(c3_interim_ti["burnchain-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); - // submit a tx so that the miner will mine an extra block + // Now we'll mine one more interim block so that we can test that the stacks-block-info outputs update + // again. + info!("Mining a second interim nakamoto block"); let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -8129,55 +8211,72 @@ fn check_block_info() { &recipient, send_amt, ); + sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } - - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); - let last_stacks_block_height = info.stacks_tip_height as u128; - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); - let last_tenure_height: u128 = - NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) - .unwrap() - .unwrap() - .into(); - - let tuple0 = get_block_info(contract0_name, last_tenure_height); - assert_block_info(&tuple0, &miner, &miner_spend); + wait_for(30, || { + thread::sleep(Duration::from_secs(1)); + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Failed to process the submitted transfer tx in a new nakamoto block"); - let tuple1 = get_block_info(contract1_name, last_tenure_height); - assert_eq!(tuple0, tuple1); + let info = get_chain_info(&naka_conf); + assert_eq!( + info.stacks_tip_height as u128, + interim_stacks_block_height + 1 + ); - let result3_tenure = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + // querying for the current tenure should work the same as before + assert_eq!( + c0_cur_tenure, + get_block_info(contract0_name, cur_tenure_height) ); - let tuple3_tenure1a = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!(tuple3_tenure1, tuple3_tenure1a); + assert_eq!( + c1_cur_tenure, + get_block_info(contract1_name, cur_tenure_height) + ); + // contract 3 uses the current stacks block height rather than current tenure. 
+ assert_eq!( + c3_cur_tenure, + get_block_info(contract3_name, cur_stacks_block_height) + ); + assert_eq!(c3_cur_tenure_ti, get_tenure_info(cur_stacks_block_height)); + + // querying using the first interim's block height should now work in contract 3 + let c3_interim_bi = get_block_info(contract3_name, interim_stacks_block_height); + let c3_interim_ti = get_tenure_info(interim_stacks_block_height); - let tuple3_block3 = get_block_info(contract3_name, last_stacks_block_height - 1); - // There should have been a block change, so these should be different. - assert_ne!(tuple3_block3, tuple3_block2); - assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); - assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); - assert!(tuple3_block3["time"] + // it will *not* work in contracts 1 and 2 + let c0_interim = get_block_info(contract0_name, interim_stacks_block_height); + let c1_interim = get_block_info(contract1_name, interim_stacks_block_height); + assert!(c0_interim["id-header-hash"] .clone() .expect_optional() .unwrap() - .is_some()); + .is_none()); + assert!(c1_interim["id-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); + + assert_eq!(c3_interim_ti, c3_cur_tenure_ti, "Tenure info should be the same whether queried using the starting block or the interim block height"); + + // c0 and c1 should have different block info data than the interim block + assert_ne!(c0_cur_tenure["header-hash"], c3_interim_bi["header-hash"]); + assert_ne!( + c0_cur_tenure["id-header-hash"], + c3_interim_bi["id-header-hash"] + ); + assert_ne!(c0_cur_tenure["time"], c3_interim_bi["time"]); + + // c3 should have gotten the interim's tip + assert_eq!( + parse_block_id(&c3_interim_bi["id-header-hash"]), + interim_stacks_tip, + "Contract 3 should be able to fetch the StacksBlockId of the tip" + ); coord_channel .lock() From 55d502d683e5a5b64dfbd3171360d7b05bb1a55c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 10:29:08 -0400 Subject: [PATCH 830/910] feat: add reward cycle check for 3.0 activation 3.0 can only activate in reward cycle 2 or later. --- testnet/stacks-node/src/config.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f4750c62426..02fb2493060 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -550,6 +550,16 @@ impl Config { &burnchain.pox_constants ); } + let activation_reward_cycle = burnchain + .block_height_to_reward_cycle(epoch_30.start_height) + .expect("FATAL: Epoch 3.0 starts before the first burnchain block"); + if activation_reward_cycle < 2 { + panic!( + "FATAL: Epoch 3.0 must start at or after the second reward cycle. Epoch 3.0 start set to: {}. 
PoX Parameters: {:?}", + epoch_30.start_height, + &burnchain.pox_constants + ); + } } /// Connect to the MempoolDB using the configured cost estimation From 7144bb71a7fe5ee6e4cd2f9ee19a1ca4391e7459 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 07:45:51 -0700 Subject: [PATCH 831/910] Fix comment Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3b619a46bdf..009797a3765 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7117,7 +7117,7 @@ fn continue_tenure_extend() { }) .unwrap(); - // Mine 3 nakamoto tenures + // Mine 3 nakamoto blocks for i in 0..3 { info!("Triggering Nakamoto blocks after extend ({})", i + 1); transfer_nonce += 1; From 097c4db2e376f56794b0585347cb692585758bbe Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Thu, 17 Oct 2024 16:44:44 +0100 Subject: [PATCH 832/910] feat: cache `coinbase_height` --- stackslib/src/net/api/getinfo.rs | 16 ++------------- stackslib/src/net/api/postmempoolquery.rs | 12 ++--------- stackslib/src/net/p2p.rs | 25 +++++++++++++++++++++-- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 846e5a18d23..52f07e937e5 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -225,20 +225,8 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { let ibd = node.ibd; let rpc_peer_info: Result = - node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { - let coinbase_height = NakamotoChainState::get_coinbase_height( - &mut chainstate.index_conn(), - &StacksBlockId::new( - &network.stacks_tip.consensus_hash, - &network.stacks_tip.block_hash, - ), - ) - .map_err(|e| { - StacksHttpResponse::new_error( - &preamble, - &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e)), - ) - })?; + node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { + let coinbase_height = network.stacks_tip.coinbase_height; Ok(RPCPeerInfoData::from_network( network, diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 3710db7dc81..8e0c6f459cc 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -275,16 +275,8 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { .ok_or(NetError::SendError("`mempool_query` not set".into()))?; let page_id = self.page_id.take(); - let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { - let coinbase_height = NakamotoChainState::get_coinbase_height( - &mut chainstate.index_conn(), - &StacksBlockId::new( - &network.stacks_tip.consensus_hash, - &network.stacks_tip.block_hash - ), - ) - .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e))))? 
- .unwrap_or(0); + let stream_res = node.with_node_state(|network, _sortdb, _chainstate, mempool, _rpc_args| { + let coinbase_height = network.stacks_tip.coinbase_height.unwrap_or(0); let max_txs = network.connection_opts.mempool_max_tx_query; debug!( diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index eb224f3e80c..05f6c1ac290 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -246,6 +246,7 @@ pub struct StacksTipInfo { pub consensus_hash: ConsensusHash, pub block_hash: BlockHeaderHash, pub height: u64, + pub coinbase_height: Option, pub is_nakamoto: bool, } @@ -255,6 +256,7 @@ impl StacksTipInfo { consensus_hash: ConsensusHash([0u8; 20]), block_hash: BlockHeaderHash([0u8; 32]), height: 0, + coinbase_height: None, is_nakamoto: false, } } @@ -4218,14 +4220,25 @@ impl PeerNetwork { let parent_tenure_start_header = NakamotoChainState::get_tenure_start_block_header(&mut chainstate.index_conn(), stacks_tip_block_id, &parent_header.consensus_hash)? .ok_or_else(|| { - debug!("{:?}: get_parent_stacks_tip: No tenure-start block for parent tenure {} off of child {} (parnet {})", self.get_local_peer(), &parent_header.consensus_hash, stacks_tip_block_id, &parent_block_id); + debug!("{:?}: get_parent_stacks_tip: No tenure-start block for parent tenure {} off of child {} (parent {})", self.get_local_peer(), &parent_header.consensus_hash, stacks_tip_block_id, &parent_block_id); net_error::DBError(db_error::NotFoundError) })?; + // TODO: Test this! + let parent_stacks_tip_block_hash = parent_tenure_start_header.anchored_header.block_hash(); + let parent_tenure_start_header_cbh = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &StacksBlockId::new( + &parent_tenure_start_header.consensus_hash, + &parent_stacks_tip_block_hash, + ), + )?; + let parent_stacks_tip = StacksTipInfo { consensus_hash: parent_tenure_start_header.consensus_hash, - block_hash: parent_tenure_start_header.anchored_header.block_hash(), + block_hash: parent_stacks_tip_block_hash, height: parent_tenure_start_header.anchored_header.height(), + coinbase_height: parent_tenure_start_header_cbh, is_nakamoto: parent_tenure_start_header .anchored_header .as_stacks_nakamoto() @@ -4377,6 +4390,12 @@ impl PeerNetwork { self.stacks_tip.is_nakamoto }; + // TODO: Test this! 
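+ // (Rationale for caching here: the coinbase height is computed once when the
+ // canonical Stacks tip is refreshed, so RPC handlers such as `/v2/info` and the
+ // mempool query can read `network.stacks_tip.coinbase_height` directly instead
+ // of walking the chainstate index on every request.)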
+ let stacks_tip_cbh = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &new_stacks_tip_block_id, + )?; + let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed @@ -4415,6 +4434,7 @@ impl PeerNetwork { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), block_hash: FIRST_STACKS_BLOCK_HASH.clone(), height: 0, + coinbase_height: None, is_nakamoto: false, } } @@ -4610,6 +4630,7 @@ impl PeerNetwork { consensus_hash: stacks_tip_ch, block_hash: stacks_tip_bhh, height: stacks_tip_height, + coinbase_height: stacks_tip_cbh, is_nakamoto: stacks_tip_is_nakamoto, }; self.parent_stacks_tip = parent_stacks_tip; From 8c0f9675ccfe913829fee62e532e453f2b2a7f33 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Thu, 17 Oct 2024 16:45:22 +0100 Subject: [PATCH 833/910] nit: field init shorthand --- stackslib/src/net/p2p.rs | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 05f6c1ac290..5f249ae12cd 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -105,7 +105,7 @@ struct NetworkHandleServer { impl NetworkHandle { pub fn new(chan_in: SyncSender) -> NetworkHandle { - NetworkHandle { chan_in: chan_in } + NetworkHandle { chan_in } } /// Send out a command to the p2p thread. Do not bother waiting for the response. @@ -175,7 +175,7 @@ impl NetworkHandle { impl NetworkHandleServer { pub fn new(chan_in: Receiver) -> NetworkHandleServer { - NetworkHandleServer { chan_in: chan_in } + NetworkHandleServer { chan_in } } pub fn pair(bufsz: usize) -> (NetworkHandleServer, NetworkHandle) { @@ -483,11 +483,11 @@ impl PeerNetwork { } let mut network = PeerNetwork { - peer_version: peer_version, - epochs: epochs, + peer_version, + epochs, - local_peer: local_peer, - chain_view: chain_view, + local_peer, + chain_view, chain_view_stable_consensus_hash: ConsensusHash([0u8; 20]), ast_rules: ASTRules::Typical, heaviest_affirmation_map: AffirmationMap::empty(), @@ -506,8 +506,8 @@ impl PeerNetwork { tenure_start_block_id: StacksBlockId([0x00; 32]), current_reward_sets: BTreeMap::new(), - peerdb: peerdb, - atlasdb: atlasdb, + peerdb, + atlasdb, peers: PeerMap::new(), sockets: HashMap::new(), @@ -523,8 +523,8 @@ impl PeerNetwork { p2p_network_handle: 0, http_network_handle: 0, - burnchain: burnchain, - connection_opts: connection_opts, + burnchain, + connection_opts, work_state: PeerNetworkWorkState::GetPublicIP, nakamoto_work_state: PeerNetworkWorkState::GetPublicIP, @@ -555,8 +555,8 @@ impl PeerNetwork { attachments_downloader: None, stacker_db_syncs: Some(stacker_db_sync_map), - stacker_db_configs: stacker_db_configs, - stackerdbs: stackerdbs, + stacker_db_configs, + stackerdbs, prune_outbound_counts: HashMap::new(), prune_inbound_counts: HashMap::new(), @@ -3493,7 +3493,7 @@ impl PeerNetwork { } let microblocks_data = MicroblocksData { index_anchor_block: anchor_block_id.clone(), - microblocks: microblocks, + microblocks, }; debug!( @@ -4041,7 +4041,7 @@ impl PeerNetwork { peer_version: nk.peer_version, network_id: nk.network_id, ts: get_epoch_time_secs(), - pubkey: pubkey, + pubkey, }, ); @@ -5205,7 +5205,7 @@ mod test { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x01, ]), - port: port, + port, }, public_key: Secp256k1PublicKey::from_hex( "02fa66b66f8971a8cd4d20ffded09674e030f0f33883f337f34b95ad4935bac0e3", From 
4172711a50ed9ef07fc124651662616fa96bd50b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 17 Oct 2024 11:03:31 -0500 Subject: [PATCH 834/910] feat: gate block-info behavior on chain_id (leave inactive in primary testnet) --- clarity/src/vm/functions/database.rs | 36 +++++++++++-------- .../src/tests/nakamoto_integrations.rs | 4 +++ 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index eecb5e2ba0f..ff14507ead7 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -16,6 +16,7 @@ use std::cmp; +use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; @@ -769,22 +770,27 @@ pub fn special_get_block_info( _ => return Ok(Value::none()), }; - let height_value = if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity3 { - if env.global_context.epoch_id < StacksEpochId::Epoch30 { - height_value - } else { - // interpretting height_value as a tenure height - let height_opt = env - .global_context - .database - .get_block_height_for_tenure_height(height_value)?; - match height_opt { - Some(x) => x, - None => return Ok(Value::none()), - } - } - } else { + // interpret height as a tenure height IFF + // * clarity version is less than Clarity3 + // * the evaluated epoch is geq 3.0 + // * we are not on (classic) primary testnet + let interpret_height_as_tenure_height = env.contract_context.get_clarity_version() + < &ClarityVersion::Clarity3 + && env.global_context.epoch_id >= StacksEpochId::Epoch30 + && env.global_context.chain_id != CHAIN_ID_TESTNET; + + let height_value = if !interpret_height_as_tenure_height { height_value + } else { + // interpreting height_value as a tenure height + let height_opt = env + .global_context + .database + .get_block_height_for_tenure_height(height_value)?; + match height_opt { + Some(x) => x, + None => return Ok(Value::none()), + } }; let current_block_height = env.global_context.database.get_current_block_height(); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 99a65ed4cc6..eec06c12915 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7374,6 +7374,7 @@ fn check_block_times() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -7771,6 +7772,8 @@ fn check_block_info() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + // change the chain id so that it isn't the same as primary testnet + naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -8362,6 +8365,7 @@ fn check_block_info_rewards() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = 
Duration::from_secs(1); + naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); From 60ed0cf0458b56836dd464714f44bf94bdf83bc1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 10:00:50 -0700 Subject: [PATCH 835/910] Make the ports random in the v0 tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 82 +++++++++++----------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 448bd4c6447..da2c4c36630 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -78,7 +78,7 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{self, make_stacks_transfer}; +use crate::tests::{self, gen_random_port, make_stacks_transfer}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -1449,10 +1449,10 @@ fn multiple_miners() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -1735,13 +1735,14 @@ fn miner_forking() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); - let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); - let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); // partition the signer set so that ~half are listening and using node 1 for RPC and events, @@ -1764,11 +1765,10 @@ fn miner_forking() { Duration::from_secs(first_proposal_burn_block_timing); }, |config| { - let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -2744,13 +2744,14 @@ fn multiple_miners_mock_sign_epoch_25() { let btc_miner_1_pk = 
Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); - let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); - let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); // partition the signer set so that ~half are listening and using node 1 for RPC and events, @@ -2768,11 +2769,10 @@ fn multiple_miners_mock_sign_epoch_25() { signer_config.node_host = node_host.to_string(); }, |config| { - let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3469,10 +3469,10 @@ fn multiple_miners_with_nakamoto_blocks() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -3746,14 +3746,14 @@ fn partial_tenure_fork() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; - - let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + // All signers are listening to node 1 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -5193,10 +5193,10 @@ fn multiple_miners_with_custom_chain_id() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); From f17721160a613078db39e4ee09763045d9fd89cf Mon Sep 17 
00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 10:51:20 -0700 Subject: [PATCH 836/910] CRC: coinbase height always exists even if in pre nakamoto Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 2 +- stackslib/src/net/api/getinfo.rs | 6 +-- stackslib/src/net/api/postmempoolquery.rs | 3 +- stackslib/src/net/api/tests/getinfo.rs | 2 +- stackslib/src/net/p2p.rs | 51 ++++++++++++++----- .../src/tests/nakamoto_integrations.rs | 11 ++-- 6 files changed, 48 insertions(+), 27 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 2cb8155f61a..9885182d982 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -322,7 +322,7 @@ pub(crate) mod tests { stacks_tip_consensus_hash: generate_random_consensus_hash(), unanchored_tip: None, unanchored_seq: Some(0), - tenure_height: None, + tenure_height: thread_rng().next_u64(), exit_at_block_height: None, is_fully_synced: false, genesis_chainstate_hash: Sha256Sum::zero(), diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 52f07e937e5..d95b94803a8 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -82,9 +82,7 @@ pub struct RPCPeerInfoData { pub genesis_chainstate_hash: Sha256Sum, pub unanchored_tip: Option, pub unanchored_seq: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub tenure_height: Option, + pub tenure_height: u64, pub exit_at_block_height: Option, pub is_fully_synced: bool, #[serde(default)] @@ -110,7 +108,7 @@ impl RPCPeerInfoData { chainstate: &StacksChainState, exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, - coinbase_height: Option, + coinbase_height: u64, ibd: bool, ) -> RPCPeerInfoData { let server_version = version_string( diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 8e0c6f459cc..25da52a66de 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -276,8 +276,7 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let page_id = self.page_id.take(); let stream_res = node.with_node_state(|network, _sortdb, _chainstate, mempool, _rpc_args| { - let coinbase_height = network.stacks_tip.coinbase_height.unwrap_or(0); - + let coinbase_height = network.stacks_tip.coinbase_height; let max_txs = network.connection_opts.mempool_max_tx_query; debug!( "Begin mempool query"; diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index 2a0ae5eaf93..173918145ed 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -103,5 +103,5 @@ fn test_try_make_response() { ); let resp = response.decode_peer_info().unwrap(); - assert_eq!(resp.tenure_height, Some(1)); + assert_eq!(resp.tenure_height, 1); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 5f249ae12cd..054fefaf1dc 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -246,7 +246,7 @@ pub struct StacksTipInfo { pub consensus_hash: ConsensusHash, pub block_hash: BlockHeaderHash, pub height: u64, - pub coinbase_height: Option, + pub coinbase_height: u64, pub is_nakamoto: bool, } @@ -256,7 +256,7 @@ impl StacksTipInfo { consensus_hash: ConsensusHash([0u8; 20]), block_hash: BlockHeaderHash([0u8; 32]), height: 0, - coinbase_height: None, + coinbase_height: 0, is_nakamoto: false, } } @@ -4224,21 +4224,35 @@ impl PeerNetwork { 
net_error::DBError(db_error::NotFoundError) })?; - // TODO: Test this! let parent_stacks_tip_block_hash = parent_tenure_start_header.anchored_header.block_hash(); - let parent_tenure_start_header_cbh = NakamotoChainState::get_coinbase_height( + let parent_stacks_tip_block_id = StacksBlockId::new( + &parent_tenure_start_header.consensus_hash, + &parent_stacks_tip_block_hash, + ); + let parent_coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), - &StacksBlockId::new( - &parent_tenure_start_header.consensus_hash, - &parent_stacks_tip_block_hash, - ), + &parent_stacks_tip_block_id, )?; + let coinbase_height = match parent_coinbase_height { + Some(cbh) => cbh, + None => { + if parent_tenure_start_header.is_epoch_2_block() { + // The coinbase height is the same as the stacks block height as + // every block contains a coinbase in epoch 2.x + parent_tenure_start_header.stacks_block_height + } else { + debug!("{:?}: get_parent_stacks_tip: No coinbase height found for nakamoto block {parent_stacks_tip_block_id}", self.get_local_peer()); + return Err(net_error::DBError(db_error::NotFoundError)); + } + } + }; + let parent_stacks_tip = StacksTipInfo { consensus_hash: parent_tenure_start_header.consensus_hash, block_hash: parent_stacks_tip_block_hash, height: parent_tenure_start_header.anchored_header.height(), - coinbase_height: parent_tenure_start_header_cbh, + coinbase_height, is_nakamoto: parent_tenure_start_header .anchored_header .as_stacks_nakamoto() @@ -4390,12 +4404,25 @@ impl PeerNetwork { self.stacks_tip.is_nakamoto }; - // TODO: Test this! let stacks_tip_cbh = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), &new_stacks_tip_block_id, )?; + let coinbase_height = match stacks_tip_cbh { + Some(cbh) => cbh, + None => { + if !stacks_tip_is_nakamoto { + // The coinbase height is the same as the stacks block height as + // every block contains a coinbase in epoch 2.x + stacks_tip_height + } else { + debug!("{:?}: No coinbase height found for nakamoto block {new_stacks_tip_block_id}", self.get_local_peer()); + return Err(net_error::DBError(db_error::NotFoundError)); + } + } + }; + let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed @@ -4434,7 +4461,7 @@ impl PeerNetwork { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), block_hash: FIRST_STACKS_BLOCK_HASH.clone(), height: 0, - coinbase_height: None, + coinbase_height: 0, is_nakamoto: false, } } @@ -4630,7 +4657,7 @@ impl PeerNetwork { consensus_hash: stacks_tip_ch, block_hash: stacks_tip_bhh, height: stacks_tip_height, - coinbase_height: stacks_tip_cbh, + coinbase_height, is_nakamoto: stacks_tip_is_nakamoto, }; self.parent_stacks_tip = parent_stacks_tip; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5fdf79dcaea..37b7a6c3298 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5402,7 +5402,7 @@ fn check_block_heights() { // With the first Nakamoto block, the chain tip and the number of tenures // must be the same (before Nakamoto every block counts as a tenure) - assert_eq!(info.tenure_height.unwrap(), info.stacks_tip_height); + assert_eq!(info.tenure_height, info.stacks_tip_height); let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; @@ -5538,7 +5538,7 @@ fn 
check_block_heights() { last_tenure_height = bh1; let info = get_chain_info_result(&naka_conf).unwrap(); - assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + assert_eq!(info.tenure_height, bh3 as u64); let sbh = heights3 .get("stacks-block-height") @@ -5649,7 +5649,7 @@ fn check_block_heights() { ); let info = get_chain_info_result(&naka_conf).unwrap(); - assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + assert_eq!(info.tenure_height, bh3 as u64); let sbh = heights3 .get("stacks-block-height") @@ -5692,10 +5692,7 @@ fn check_block_heights() { ); let info = get_chain_info_result(&naka_conf).unwrap(); - assert_eq!( - info.tenure_height.unwrap(), - block_height_pre_3_0 + tenure_count - ); + assert_eq!(info.tenure_height, block_height_pre_3_0 + tenure_count); coord_channel .lock() From f78c778e6f81ce46e15bbe07cc9bef12cecd5196 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 17 Oct 2024 12:58:18 -0500 Subject: [PATCH 837/910] test: add TestPeer coverage for get-block-info? * add support to TestPeer for changing network_id/chain_id * test get-block-info? behavior in Clarity1 with primary testnet chain id and a different chain id --- stackslib/src/burnchains/tests/mod.rs | 16 +- .../chainstate/nakamoto/coordinator/tests.rs | 538 +++++++++++++++++- .../src/chainstate/nakamoto/tests/node.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 50 +- stackslib/src/chainstate/stacks/tests/mod.rs | 6 +- stackslib/src/net/mod.rs | 1 + stackslib/src/net/tests/mod.rs | 24 +- 7 files changed, 614 insertions(+), 25 deletions(-) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 31e29c0b26e..e7fa51a89cc 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -123,11 +123,13 @@ pub struct TestMiner { pub nonce: u64, pub spent_at_nonce: HashMap, // how much uSTX this miner paid in a given tx's nonce pub test_with_tx_fees: bool, // set to true to make certain helper methods attach a pre-defined tx fee + pub chain_id: u32, } pub struct TestMinerFactory { pub key_seed: [u8; 32], pub next_miner_id: usize, + pub chain_id: u32, } impl TestMiner { @@ -136,6 +138,7 @@ impl TestMiner { privks: &Vec, num_sigs: u16, hash_mode: &AddressHashMode, + chain_id: u32, ) -> TestMiner { TestMiner { burnchain: burnchain.clone(), @@ -150,6 +153,7 @@ impl TestMiner { nonce: 0, spent_at_nonce: HashMap::new(), test_with_tx_fees: true, + chain_id, } } @@ -314,15 +318,7 @@ impl TestMinerFactory { TestMinerFactory { key_seed: [0u8; 32], next_miner_id: 1, - } - } - - pub fn from_u16(seed: u16) -> TestMinerFactory { - let mut bytes = [0u8; 32]; - (&mut bytes[0..2]).copy_from_slice(&seed.to_be_bytes()); - TestMinerFactory { - key_seed: bytes, - next_miner_id: seed as usize, + chain_id: CHAIN_ID_TESTNET, } } @@ -346,7 +342,7 @@ impl TestMinerFactory { } test_debug!("New miner: {:?} {}:{:?}", &hash_mode, num_sigs, &keys); - let mut m = TestMiner::new(burnchain, &keys, num_sigs, &hash_mode); + let mut m = TestMiner::new(burnchain, &keys, num_sigs, &hash_mode, self.chain_id); m.id = self.next_miner_id; self.next_miner_id += 1; m diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0a59c1a67bb..23146bb943f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -17,9 +17,12 @@ use std::collections::{HashMap, HashSet}; use std::sync::Mutex; +use clarity::consts::CHAIN_ID_TESTNET; use 
clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::PrincipalData; -use clarity::vm::Value; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::clarity_db::NullBurnStateDB; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::{ClarityVersion, Value}; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; @@ -35,6 +38,7 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; +use crate::burnchains::tests::TestMiner; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{ @@ -64,7 +68,7 @@ use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, - TransactionPayload, TransactionVersion, + TransactionPayload, TransactionSmartContract, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; @@ -76,6 +80,7 @@ use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{query_rows, u64_to_sql}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; +use crate::util_lib::strings::StacksString; impl<'a> NakamotoStagingBlocksConnRef<'a> { pub fn get_blocks_at_height(&self, height: u64) -> Vec<NakamotoBlock> { @@ -285,7 +290,7 @@ pub fn make_token_transfer( TokenTransferMemo([0x00; 34]), ), ); - stx_transfer.chain_id = 0x80000000; + stx_transfer.chain_id = chainstate.chain_id; stx_transfer.anchor_mode = TransactionAnchorMode::OnChainOnly; stx_transfer.set_tx_fee(fee); stx_transfer.auth.set_origin_nonce(nonce); @@ -297,6 +302,37 @@ pub fn make_token_transfer( stx_transfer_signed } +/// Make a contract-publish transaction from a private key +pub fn make_contract( + chainstate: &mut StacksChainState, + name: &str, + code: &str, + private_key: &StacksPrivateKey, + version: ClarityVersion, + nonce: u64, + fee: u64, +) -> StacksTransaction { + let mut stx_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(private_key).unwrap(), + TransactionPayload::SmartContract( + TransactionSmartContract { + name: name.into(), + code_body: StacksString::from_str(code).unwrap(), + }, + Some(version), + ), + ); + stx_tx.chain_id = chainstate.chain_id; + stx_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + stx_tx.set_tx_fee(fee); + stx_tx.auth.set_origin_nonce(nonce); + + let mut tx_signer = StacksTransactionSigner::new(&stx_tx); + tx_signer.sign_origin(&private_key).unwrap(); + tx_signer.get_tx().unwrap() +} + /// Given the blocks and block-commits for a reward cycle, replay the sortitions on the given /// TestPeer, always processing the first block of the reward cycle before processing all /// subsequent blocks in random order. 
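The "process the first block in order, shuffle the rest" replay strategy described in the comment above is easy to get wrong at the boundary. A minimal standalone sketch of the intended ordering (a hypothetical `replay_order` helper, not part of this patch; it assumes only the `rand` crate that this test module already imports):

```rust
use rand::seq::SliceRandom;
use rand::thread_rng;

/// Keep the reward cycle's first block first; randomize the remainder.
fn replay_order<T>(mut blocks: Vec<T>) -> Vec<T> {
    if blocks.len() > 1 {
        // shuffle everything after the cycle's first block
        blocks[1..].shuffle(&mut thread_rng());
    }
    blocks
}
```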
@@ -612,6 +648,67 @@ impl<'a> TestPeer<'a> { block } + pub fn mine_tenure<F>(&mut self, block_builder: F) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + &[(NakamotoBlock, u64, ExecutionCost)], + ) -> Vec<StacksTransaction>, + { + let (burn_ops, mut tenure_change, miner_key) = + self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (burn_height, _, consensus_hash) = self.next_burnchain_block(burn_ops.clone()); + let pox_constants = self.sortdb().pox_constants.clone(); + let first_burn_height = self.sortdb().first_block_height; + let mut test_signers = self.config.test_signers.clone().unwrap(); + + info!( + "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_naka_signing_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let nakamoto_tip = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + nakamoto_parent_tenure.last().as_ref().unwrap().block_id() + } else { + let tip = { + let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + tip.index_block_hash() + }; + + let miner_addr = self.miner.origin_address().unwrap(); + let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); + + let tenure_change_tx = self + .miner + .make_nakamoto_tenure_change_with_nonce(tenure_change.clone(), miner_acct.nonce); + + let coinbase_tx = + self.miner + .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); + + self.make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_| {}, + block_builder, + |_| true, + ) + } + pub fn single_block_tenure( + &mut self, + sender_key: &StacksPrivateKey, @@ -764,6 +861,439 @@ fn block_descendant() { ); } +#[test] +fn block_info_primary_testnet() { + block_info_tests(true) +} + +#[test] +fn block_info_other_testnet() { + block_info_tests(false) +} + +fn block_info_tests(use_primary_testnet: bool) { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_addr: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + max_amount: None, + }) + .collect::<Vec<_>>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + 
pox_constants.pox_4_activation_height = 28; + + let chain_id = if use_primary_testnet { + CHAIN_ID_TESTNET + } else { + CHAIN_ID_TESTNET + 1 + }; + let mut boot_plan = + NakamotoBootPlan::new(&format!("{}.{use_primary_testnet}", function_name!())) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key) + .with_network_id(chain_id); + boot_plan.pox_constants = pox_constants; + + // Supply an empty vec to make sure we have no nakamoto blocks when this test begins + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + + let clar1_contract = " + (define-read-only (get-info (height uint)) (get-block-info? id-header-hash height)) + "; + let clar3_contract = " + (define-read-only (get-info (height uint)) (get-stacks-block-info? id-header-hash height)) + "; + + let clar1_contract_name = "clar1"; + let clar3_contract_name = "clar3"; + + let clar1_contract_id = QualifiedContractIdentifier { + issuer: addr.clone().into(), + name: clar1_contract_name.into(), + }; + let clar3_contract_id = QualifiedContractIdentifier { + issuer: addr.clone().into(), + name: clar3_contract_name.into(), + }; + + let get_tip_info = |peer: &mut TestPeer| { + peer.with_db_state(|sortdb, _, _, _| { + let (tip_ch, tip_bh, tip_height) = + SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn()).unwrap(); + let tip_block_id = StacksBlockId::new(&tip_ch, &tip_bh); + Ok((tip_block_id, tip_height)) + }) + .unwrap() + }; + + let get_info = |peer: &mut TestPeer, + version: ClarityVersion, + query_ht: u64, + tip_block_id: &StacksBlockId| { + let contract_id = match version { + ClarityVersion::Clarity1 => &clar1_contract_id, + ClarityVersion::Clarity2 => panic!(), + ClarityVersion::Clarity3 => &clar3_contract_id, + }; + peer.with_db_state(|sortdb, chainstate, _, _| { + let sortdb_handle = sortdb.index_handle_at_tip(); + let output = chainstate + .clarity_eval_read_only( + &sortdb_handle, + &tip_block_id, + contract_id, + &format!("(get-info u{query_ht})"), + ) + .expect_optional() + .unwrap() + .map(|value| StacksBlockId::from_vec(&value.expect_buff(32).unwrap()).unwrap()); + + info!("At stacks block {tip_block_id}, {contract_id} returned {output:?}"); + + Ok(output) + }) + .unwrap() + }; + + let (last_2x_block_id, last_2x_block_ht) = get_tip_info(&mut peer); + + peer.mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() > 0 { + return vec![]; + } + info!("Producing first nakamoto block, publishing our three contracts"); + let account = get_account(chainstate, sortdb, &addr); + let tx_0 = make_contract( + chainstate, + clar1_contract_name, + clar1_contract, + &private_key, + ClarityVersion::Clarity1, + account.nonce, + 1000, + ); + let tx_1 = make_contract( + chainstate, + clar3_contract_name, + clar3_contract, + &private_key, + ClarityVersion::Clarity3, + account.nonce + 1, + 1000, + ); + + vec![tx_0, tx_1] + }); + + let (tenure_1_start_block_id, tenure_1_block_ht) = get_tip_info(&mut peer); + assert_eq!( + get_info( + &mut peer, + ClarityVersion::Clarity1, + last_2x_block_ht, + &tenure_1_start_block_id + ) + .unwrap(), + last_2x_block_id, + ); + assert_eq!( + get_info( + &mut peer, + ClarityVersion::Clarity3, + last_2x_block_ht, + &tenure_1_start_block_id + ) + .unwrap(), + last_2x_block_id, + ); + assert!(get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_1_block_ht, + &tenure_1_start_block_id + ) + .is_none()); + assert!(get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_1_block_ht, + 
&tenure_1_start_block_id + ) + .is_none()); + + let recipient_addr = StacksAddress::p2pkh( + false, + &StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 1, 2])), + ); + + let tenure_2_blocks: Vec<_> = peer + .mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() > 3 { + return vec![]; + } + info!("Producing block #{} in Tenure #2", blocks_so_far.len()); + let account = get_account(chainstate, sortdb, &addr); + let tx_0 = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![tx_0] + }) + .into_iter() + .map(|(block, ..)| block.header.block_id()) + .collect(); + + let (tenure_2_last_block_id, tenure_2_last_block_ht) = get_tip_info(&mut peer); + + assert_eq!(&tenure_2_last_block_id, tenure_2_blocks.last().unwrap()); + + let c3_tenure1_from_tenure2 = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_1_block_ht, + &tenure_2_blocks[0], + ) + .unwrap(); + let c1_tenure1_from_tenure2 = get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_1_block_ht, + &tenure_2_blocks[0], + ) + .unwrap(); + + // note, since tenure_1 only has one block in it, tenure_1_block_ht is *also* the tenure height, so this should return the + // same value regardless of the `primary_testnet` flag + assert_eq!(c1_tenure1_from_tenure2, c3_tenure1_from_tenure2); + assert_eq!(c1_tenure1_from_tenure2, tenure_1_start_block_id); + + let tenure_2_start_block_ht = tenure_1_block_ht + 1; + let tenure_2_tenure_ht = tenure_1_block_ht + 1; + + // make sure we can't look up block info from the block we're evaluating at + if use_primary_testnet { + assert!(get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht, + &tenure_2_blocks[0] + ) + .is_none()); + } else { + assert!(get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_tenure_ht, + &tenure_2_blocks[0] + ) + .is_none()); + } + assert!(get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_2_start_block_ht, + &tenure_2_blocks[0] + ) + .is_none()); + + // but we can from the next block in the tenure + let c1_tenure_2_start_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht, + &tenure_2_blocks[1], + ) + .unwrap() + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_tenure_ht, + &tenure_2_blocks[1], + ) + .unwrap() + }; + let c3_tenure_2_start_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_2_start_block_ht, + &tenure_2_blocks[1], + ) + .unwrap(); + assert_eq!(c1_tenure_2_start_block, c3_tenure_2_start_block); + assert_eq!(&c1_tenure_2_start_block, &tenure_2_blocks[0]); + + // try to query the middle block from the last block in the tenure + let c1_tenure_2_mid_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht + 1, + &tenure_2_blocks[2], + ) + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht + 1, + &tenure_2_blocks[2], + ) + }; + let c3_tenure_2_mid_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_2_start_block_ht + 1, + &tenure_2_blocks[2], + ) + .unwrap(); + assert_eq!(&c3_tenure_2_mid_block, &tenure_2_blocks[1]); + if use_primary_testnet { + assert_eq!(c1_tenure_2_mid_block.unwrap(), c3_tenure_2_mid_block); + } else { + // if interpreted as a tenure-height, this will return none, because there's no tenure at height +1 yet + assert!(c1_tenure_2_mid_block.is_none()); + + // query the tenure height again from 
the latest block for good measure + let start_block_result = get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_tenure_ht, + &tenure_2_blocks[2], + ) + .unwrap(); + assert_eq!(&start_block_result, &tenure_2_blocks[0]); + } + + let tenure_3_tenure_ht = tenure_2_tenure_ht + 1; + let tenure_3_start_block_ht = + tenure_2_start_block_ht + u64::try_from(tenure_2_blocks.len()).unwrap(); + + let tenure_3_blocks: Vec<_> = peer + .mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() > 3 { + return vec![]; + } + info!("Producing block #{} in Tenure #3", blocks_so_far.len()); + let account = get_account(chainstate, sortdb, &addr); + let tx_0 = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![tx_0] + }) + .into_iter() + .map(|(block, ..)| block.header.block_id()) + .collect(); + + let (tenure_3_last_block_id, tenure_3_last_block_ht) = get_tip_info(&mut peer); + + assert_eq!(&tenure_3_last_block_id, tenure_3_blocks.last().unwrap()); + assert_eq!(tenure_3_start_block_ht, tenure_2_last_block_ht + 1); + + // query the current tenure information from the middle block + let c1_tenure_3_start_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_start_block_ht, + &tenure_3_blocks[1], + ) + .unwrap() + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_tenure_ht, + &tenure_3_blocks[1], + ) + .unwrap() + }; + let c3_tenure_3_start_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_3_start_block_ht, + &tenure_3_blocks[1], + ) + .unwrap(); + assert_eq!(c1_tenure_3_start_block, c3_tenure_3_start_block); + assert_eq!(&c1_tenure_3_start_block, &tenure_3_blocks[0]); + + // try to query the middle block from the last block in the tenure + let c1_tenure_3_mid_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_start_block_ht + 1, + &tenure_3_blocks[2], + ) + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_start_block_ht + 1, + &tenure_3_blocks[2], + ) + }; + let c3_tenure_3_mid_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_3_start_block_ht + 1, + &tenure_3_blocks[2], + ) + .unwrap(); + assert_eq!(&c3_tenure_3_mid_block, &tenure_3_blocks[1]); + if use_primary_testnet { + assert_eq!(c1_tenure_3_mid_block.unwrap(), c3_tenure_3_mid_block); + } else { + // if interpreted as a tenure-height, this will return none, because there's no tenure at height +1 yet + assert!(c1_tenure_3_mid_block.is_none()); + + // query the tenure height again from the latest block for good measure + let start_block_result = get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_tenure_ht, + &tenure_3_blocks[2], + ) + .unwrap(); + assert_eq!(&start_block_result, &tenure_3_blocks[0]); + } +} + #[test] // Test PoX Reward and Punish treatment in nakamoto // - create a 12 address PoX reward set diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index e7d6fef03f9..6f929e0031a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -231,7 +231,7 @@ impl TestMiner { Some(vrf_proof), ), ); - tx_coinbase.chain_id = 0x80000000; + tx_coinbase.chain_id = self.chain_id; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; tx_coinbase.auth.set_origin_nonce(nonce); @@ -258,7 +258,7 @@ impl TestMiner { self.as_transaction_auth().unwrap(), 
TransactionPayload::TenureChange(tenure_change), ); - tx_tenure_change.chain_id = 0x80000000; + tx_tenure_change.chain_id = self.chain_id; tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; tx_tenure_change.auth.set_origin_nonce(nonce); diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 8562449dd30..fb360211525 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1998,6 +1998,44 @@ pub mod test { make_tx(key, nonce, 0, payload) } + pub fn make_pox_4_lockup_chain_id( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: &PoxAddress, + lock_period: u128, + signer_key: &StacksPublicKey, + burn_ht: u64, + signature_opt: Option>, + max_amount: u128, + auth_id: u128, + chain_id: u32, + ) -> StacksTransaction { + let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); + let signature = match signature_opt { + Some(sig) => Value::some(Value::buff_from(sig).unwrap()).unwrap(), + None => Value::none(), + }; + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + "pox-4", + "stack-stx", + vec![ + Value::UInt(amount), + addr_tuple, + Value::UInt(burn_ht as u128), + Value::UInt(lock_period), + signature, + Value::buff_from(signer_key.to_bytes_compressed()).unwrap(), + Value::UInt(max_amount), + Value::UInt(auth_id), + ], + ) + .unwrap(); + + make_tx_chain_id(key, nonce, 0, payload, chain_id) + } + pub fn make_pox_2_or_3_lockup( key: &StacksPrivateKey, nonce: u64, @@ -2450,11 +2488,21 @@ pub mod test { nonce: u64, tx_fee: u64, payload: TransactionPayload, + ) -> StacksTransaction { + make_tx_chain_id(key, nonce, tx_fee, payload, CHAIN_ID_TESTNET) + } + + fn make_tx_chain_id( + key: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + payload: TransactionPayload, + chain_id: u32, ) -> StacksTransaction { let auth = TransactionAuth::from_p2pkh(key).unwrap(); let addr = auth.origin().address_testnet(); let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); - tx.chain_id = 0x80000000; + tx.chain_id = chain_id; tx.auth.set_origin_nonce(nonce); tx.set_post_condition_mode(TransactionPostConditionMode::Allow); tx.set_tx_fee(tx_fee); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index cda74cb46d1..8b66c019f0d 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1050,7 +1050,7 @@ pub fn make_coinbase_with_nonce( None, ), ); - tx_coinbase.chain_id = 0x80000000; + tx_coinbase.chain_id = miner.chain_id; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; tx_coinbase.auth.set_origin_nonce(nonce); @@ -1147,7 +1147,7 @@ pub fn make_contract_call( .unwrap(), ); - tx_contract_call.chain_id = 0x80000000; + tx_contract_call.chain_id = miner.chain_id; tx_contract_call.auth.set_origin_nonce(miner.get_nonce()); if miner.test_with_tx_fees { @@ -1179,7 +1179,7 @@ pub fn make_token_transfer( TransactionPayload::TokenTransfer((*recipient).clone().into(), amount, (*memo).clone()), ); - tx_stx_transfer.chain_id = 0x80000000; + tx_stx_transfer.chain_id = miner.chain_id; tx_stx_transfer .auth .set_origin_nonce(nonce.unwrap_or(miner.get_nonce())); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index a33d1bc4663..f0c50037401 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2379,6 +2379,7 @@ pub mod test { ) -> TestPeer<'a> { let test_path = 
TestPeer::make_test_path(&config); let mut miner_factory = TestMinerFactory::new(); + miner_factory.chain_id = config.network_id; let mut miner = miner_factory.next_miner(&config.burnchain, 1, 1, AddressHashMode::SerializeP2PKH); // manually set fees diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 07227c930e4..6e61e7e6109 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -48,7 +48,8 @@ use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::{ - key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, + key_to_stacks_addr, make_pox_4_lockup, make_pox_4_lockup_chain_id, make_signer_key_signature, + with_sortdb, }; use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, @@ -66,6 +67,7 @@ use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::util_lib::boot::boot_code_id; +use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; /// One step of a simulated Nakamoto node's bootup procedure. #[derive(Debug, PartialEq, Clone)] @@ -91,6 +93,7 @@ pub struct NakamotoBootPlan { pub num_peers: usize, /// Whether to add an initial balance for `private_key`'s account pub add_default_balance: bool, + pub network_id: u32, } impl NakamotoBootPlan { @@ -106,6 +109,7 @@ impl NakamotoBootPlan { observer: Some(TestEventObserver::new()), num_peers: 0, add_default_balance: true, + network_id: TestPeerConfig::default().network_id, } } @@ -114,6 +118,11 @@ impl NakamotoBootPlan { self } + pub fn with_network_id(mut self, network_id: u32) -> Self { + self.network_id = network_id; + self + } + pub fn with_pox_constants(mut self, cycle_length: u32, prepare_length: u32) -> Self { let new_consts = PoxConstants::new( cycle_length, @@ -328,6 +337,7 @@ impl NakamotoBootPlan { observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); + peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -487,16 +497,19 @@ impl NakamotoBootPlan { .clone() .unwrap_or(default_pox_addr.clone()); let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - let signature = make_signer_key_signature( + let signature = make_pox_4_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, reward_cycle.into(), &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - 12_u128, + peer.config.network_id, + 12, max_amount, 1, - ); - make_pox_4_lockup( + ) + .unwrap() + .to_rsv(); + make_pox_4_lockup_chain_id( &test_stacker.stacker_private_key, 0, test_stacker.amount, @@ -507,6 +520,7 @@ impl NakamotoBootPlan { Some(signature), max_amount, 1, + peer.config.network_id, ) }) .collect(); From 708bfe51294e77c55be6071c586905b2ff2da4eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 11:03:36 -0700 Subject: [PATCH 838/910] Fix getinfo_compat test Signed-off-by: Jacinta Ferrant --- stackslib/src/net/api/tests/getinfo.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/tests/getinfo.rs 
b/stackslib/src/net/api/tests/getinfo.rs index 173918145ed..89054e3453c 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -63,9 +63,9 @@ fn test_try_parse_request() { #[test] fn test_getinfo_compat() { - let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false}"#; - let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; - let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux 
[x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false, "tenure_height": 42}"#; + let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d", "tenure_height": 42}"#; + let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf", "tenure_height": 0}"#; let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf", "tenure_height": 2423}"#; // they all parse From d901503496a4c91671f9700a0e4df972beb1aafd Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 17 Oct 2024 14:20:39 -0500 Subject: [PATCH 839/910] add docs to address 
PR feedback --- clarity/src/vm/database/clarity_db.rs | 8 ++++++++ stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/clarity_vm/database/mod.rs | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 2ee31a8b41e..50715fd98ff 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -927,6 +927,14 @@ impl<'a> ClarityDatabase<'a> { } } + /// Return the block height for a given tenure height + /// if the block information is queryable for the tenure height. + /// The block information for a given tenure height is queryable iff: + /// * `tenure_height` falls in 2.x, and `tenure_height` < `current_height` + /// * `tenure_height` falls in 3.x, and the first block of the tenure + /// at `tenure_height` has a stacks block height less than `current_height` + /// + /// If the block information isn't queryable, return `Ok(None)` pub fn get_block_height_for_tenure_height( &mut self, tenure_height: u32,
diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 23146bb943f..23bf3313e99 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -302,7 +302,7 @@ pub fn make_token_transfer( stx_transfer_signed } -/// Make a token-transfer from a private key +/// Make a contract-publish transaction pub fn make_contract( chainstate: &mut StacksChainState, name: &str,
diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index e26f9be6ba3..44eeaa2e075 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -47,6 +47,9 @@ pub trait GetTenureStartId { tip: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, DBError>; + /// Return the StacksBlockId of the tenure start block for the + /// tenure with coinbase height `coinbase_height` in the fork + /// referenced by `tip`. fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId,
From bb7e4eb779287b472ce082d5ddc395f9f1a21784 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 16:21:15 -0400 Subject: [PATCH 840/910] chore: add `backoff` to dispatcher warning logs This could be helpful to debug issues communicating with the API.
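For readers following the retry changes in this and the next few dispatcher patches (capping the backoff at 3x the timeout, logging the attempt, and adding jitter), the policy they converge on can be sketched standalone as follows. This is a minimal illustration, not code from the tree: the name `next_backoff` is invented here, and the `SystemTime`-based jitter is a std-only stand-in for the `rand::thread_rng()` call the node actually uses.

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Double the current backoff, add up to 100ms of jitter, and never
/// exceed `max_backoff` (three times the request timeout in the
/// dispatcher patches below).
fn next_backoff(current: Duration, max_backoff: Duration) -> Duration {
    // Cheap std-only jitter source for this sketch; the node itself
    // draws from rand::thread_rng().
    let jitter_ms = u64::from(
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.subsec_nanos())
            .unwrap_or(0),
    ) % 100;
    std::cmp::min(
        current.saturating_mul(2) + Duration::from_millis(jitter_ms),
        max_backoff,
    )
}

fn main() {
    let timeout = Duration::from_secs(1);
    let max_backoff = timeout.saturating_mul(3);
    let mut backoff = Duration::from_millis(100);
    for attempt in 0..8 {
        println!("attempt {attempt}: would sleep {backoff:?} before retrying");
        backoff = next_backoff(backoff, max_backoff);
    }
}
```

The cap keeps a persistently unreachable observer from stretching retries without bound, while the jitter prevents multiple dispatchers from retrying in lockstep.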
--- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 43714f3573d..a91b95cd1f0 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -455,7 +455,8 @@ impl EventObserver { Err(err) => { warn!( "Event dispatcher: connection or request failed to {}:{} - {:?}", - &host, &port, err + &host, &port, err; + "backoff" => backoff ); } } From 2207ac4974b65bd4f78f99c4fe8c34592f31ae0a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 16:40:43 -0400 Subject: [PATCH 841/910] feat: cap the backoff for sending events to timeout * 3 --- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index a91b95cd1f0..448d71eb3c1 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -429,6 +429,7 @@ impl EventObserver { .unwrap_or(PeerHost::DNS(host.to_string(), port)); let mut backoff = Duration::from_millis(100); + let max_backoff = timeout.saturating_mul(3); loop { let mut request = StacksHttpRequest::new_for_peer( peerhost.clone(), @@ -472,7 +473,7 @@ impl EventObserver { } sleep(backoff); - backoff *= 2; + backoff = std::cmp::min(backoff.saturating_mul(2), max_backoff); } } From 1c595adc0c7fb9b33c7fb06cf9b6acf75ab499d9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 16:57:29 -0400 Subject: [PATCH 842/910] feat: various improvements to retry logic in event dispatcher - Create the request outside the loop - Cap the backoff timeout at 3x timeout - Print the retry attempt and backoff time in the log - Add a jitter to the backoff time --- testnet/stacks-node/src/event_dispatcher.rs | 31 +++++++++++++-------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 448d71eb3c1..96eca2247a5 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,7 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; @@ -429,17 +430,19 @@ impl EventObserver { .unwrap_or(PeerHost::DNS(host.to_string(), port)); let mut backoff = Duration::from_millis(100); + let mut attempts: i32 = 0; + // Cap the backoff at 3x the timeout let max_backoff = timeout.saturating_mul(3); - loop { - let mut request = StacksHttpRequest::new_for_peer( - peerhost.clone(), - "POST".into(), - url.path().into(), - HttpRequestContents::new().payload_json(payload.clone()), - ) - .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); - request.add_header("Connection".into(), "close".into()); + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), 
"close".into()); + loop { match send_http_request(host, port, request, timeout) { Ok(response) => { if response.preamble().status_code == 200 { @@ -457,7 +460,8 @@ impl EventObserver { warn!( "Event dispatcher: connection or request failed to {}:{} - {:?}", &host, &port, err; - "backoff" => backoff + "backoff" => backoff, + "attempts" => attempts ); } } @@ -473,7 +477,12 @@ impl EventObserver { } sleep(backoff); - backoff = std::cmp::min(backoff.saturating_mul(2), max_backoff); + let jitter: u64 = rand::thread_rng().gen_range(0..100); + backoff = std::cmp::min( + backoff.saturating_mul(2) + Duration::from_millis(jitter), + max_backoff, + ); + attempts = attempts.saturating_add(1); } } From 7d995db34b6215d1d7e48569493a17f910209111 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 17:36:45 -0400 Subject: [PATCH 843/910] chore: fix log --- testnet/stacks-node/src/event_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 96eca2247a5..72f6929ac23 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -460,7 +460,7 @@ impl EventObserver { warn!( "Event dispatcher: connection or request failed to {}:{} - {:?}", &host, &port, err; - "backoff" => backoff, + "backoff" => ?backoff, "attempts" => attempts ); } From 334cf0f970b2a03903f0cef03c21884a9e204ddd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 17:53:25 -0400 Subject: [PATCH 844/910] chore: fix missing `clone` --- testnet/stacks-node/src/event_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 72f6929ac23..64c2b5ce904 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -443,7 +443,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); loop { - match send_http_request(host, port, request, timeout) { + match send_http_request(host, port, request.clone(), timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!( From 607e3f115f811fb5d012ad03f19f6980f9ca8bea Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 10:32:03 +0200 Subject: [PATCH 845/910] feat: add `tenure_height` to `/new_block` event payload --- stackslib/src/chainstate/coordinator/mod.rs | 1 + stackslib/src/chainstate/coordinator/tests.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 2 ++ stackslib/src/chainstate/stacks/db/blocks.rs | 11 ++++++++++- stackslib/src/chainstate/stacks/db/mod.rs | 1 + stackslib/src/cost_estimates/tests/common.rs | 1 + stackslib/src/net/mod.rs | 1 + testnet/stacks-node/src/event_dispatcher.rs | 10 ++++++++++ testnet/stacks-node/src/run_loop/mod.rs | 1 + 9 files changed, 28 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index c8415e3f69e..5b7c7e89b61 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -180,6 +180,7 @@ pub trait BlockEventDispatcher { reward_set_data: &Option, signer_bitvec: &Option>, block_timestamp: Option, + coinbase_height: u64, ); /// called whenever a burn block is about to be diff --git a/stackslib/src/chainstate/coordinator/tests.rs 
b/stackslib/src/chainstate/coordinator/tests.rs index be5f862839d..d566113fad5 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -431,6 +431,7 @@ impl BlockEventDispatcher for NullEventDispatcher { _reward_set_data: &Option, _signer_bitvec: &Option>, _block_timestamp: Option, + _coinbase_height: u64, ) { assert!( false,
diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8abbe058f53..b8d0441591a 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2118,6 +2118,7 @@ impl NakamotoChainState { &reward_set_data, &Some(signer_bitvec), Some(block_timestamp), + receipt.coinbase_height, ); } @@ -4382,6 +4383,7 @@ impl NakamotoChainState { evaluated_epoch, epoch_transition: applied_epoch_transition, signers_updated, + coinbase_height, }; Ok((epoch_receipt, clarity_commit, reward_set_data))
diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index a45a8d60cb4..115678ada81 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -191,6 +191,7 @@ impl BlockEventDispatcher for DummyEventDispatcher { _reward_set_data: &Option, _signer_bitvec: &Option>, _block_timestamp: Option, + _coinbase_height: u64, ) { assert!( false, @@ -5809,8 +5810,10 @@ impl StacksChainState { .map(|(_, _, _, info)| info.clone()); if do_not_advance { + let regtest_genesis_header = StacksHeaderInfo::regtest_genesis(); + let coinbase_height = regtest_genesis_header.stacks_block_height; let epoch_receipt = StacksEpochReceipt { - header: StacksHeaderInfo::regtest_genesis(), + header: regtest_genesis_header, tx_receipts, matured_rewards, matured_rewards_info, @@ -5822,6 +5825,7 @@ impl StacksChainState { evaluated_epoch, epoch_transition: applied_epoch_transition, signers_updated: false, + coinbase_height, }; return Ok((epoch_receipt, clarity_commit, None)); @@ -5898,6 +5902,9 @@ impl StacksChainState { ); set_last_execution_cost_observed(&block_execution_cost, &block_limit); + // The coinbase height is the same as the stacks block height in epoch 2.x + let coinbase_height = new_tip.stacks_block_height; + let epoch_receipt = StacksEpochReceipt { header: new_tip, tx_receipts, @@ -5911,6 +5918,7 @@ impl StacksChainState { evaluated_epoch, epoch_transition: applied_epoch_transition, signers_updated, + coinbase_height, }; Ok((epoch_receipt, clarity_commit, reward_set_data)) @@ -6411,6 +6419,7 @@ impl StacksChainState { &reward_set_data, &None, None, + next_staging_block.height, ); }
diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 857bfaead42..160e2dc60e5 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -230,6 +230,7 @@ pub struct StacksEpochReceipt { pub epoch_transition: bool, /// Was .signers updated during this block?
pub signers_updated: bool, + pub coinbase_height: u64, } /// Headers we serve over the network diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index fe6527ff53e..01f6c32ec75 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -52,5 +52,6 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE evaluated_epoch: StacksEpochId::Epoch20, epoch_transition: false, signers_updated: false, + coinbase_height: 1234, } } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index f0c50037401..2210160bee5 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2037,6 +2037,7 @@ pub mod test { reward_set_data: &Option, _signer_bitvec: &Option>, _block_timestamp: Option, + _coinbase_height: u64, ) { self.blocks.lock().unwrap().push(TestEventObserverBlock { block: block.clone(), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 43714f3573d..e9fb79db882 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -748,6 +748,7 @@ impl EventObserver { reward_set_data: &Option, signer_bitvec_opt: &Option>, block_timestamp: Option, + coinbase_height: u64, ) -> serde_json::Value { // Serialize events to JSON let serialized_events: Vec = filtered_events @@ -809,6 +810,7 @@ impl EventObserver { "signer_bitvec": signer_bitvec_value, "reward_set": reward_set_value, "cycle_number": cycle_number_value, + "tenure_height": coinbase_height, }); let as_object_mut = payload.as_object_mut().unwrap(); @@ -1008,6 +1010,7 @@ impl BlockEventDispatcher for EventDispatcher { reward_set_data: &Option, signer_bitvec: &Option>, block_timestamp: Option, + coinbase_height: u64, ) { self.process_chain_tip( block, @@ -1026,6 +1029,7 @@ impl BlockEventDispatcher for EventDispatcher { reward_set_data, signer_bitvec, block_timestamp, + coinbase_height, ); } @@ -1209,6 +1213,7 @@ impl EventDispatcher { reward_set_data: &Option, signer_bitvec: &Option>, block_timestamp: Option, + coinbase_height: u64, ) { let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); @@ -1261,6 +1266,7 @@ impl EventDispatcher { reward_set_data, signer_bitvec, block_timestamp, + coinbase_height, ); // Send payload @@ -1669,6 +1675,7 @@ mod test { let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); + let coinbase_height = 1234; let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1687,6 +1694,7 @@ mod test { &None, &Some(signer_bitvec.clone()), block_timestamp, + coinbase_height, ); assert_eq!( payload @@ -1737,6 +1745,7 @@ mod test { let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); + let coinbase_height = 1234; let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1755,6 +1764,7 @@ mod test { &None, &Some(signer_bitvec.clone()), block_timestamp, + coinbase_height, ); let event_signer_signature = payload diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 2be02659cd0..b2b9aa3f752 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -198,5 +198,6 @@ 
pub fn announce_boot_receipts( &None, &None, None, + 0, ); }
From 2b36268ef97ce1308c8a796f788efc266a927cae Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 18 Oct 2024 09:31:51 -0400 Subject: [PATCH 846/910] chore: revert change moving request creation out of loop This avoids a clone in the happy case (where no retry is needed). --- testnet/stacks-node/src/event_dispatcher.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 64c2b5ce904..8142bc21665 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -434,16 +434,16 @@ impl EventObserver { // Cap the backoff at 3x the timeout let max_backoff = timeout.saturating_mul(3); - let mut request = StacksHttpRequest::new_for_peer( - peerhost.clone(), - "POST".into(), - url.path().into(), - HttpRequestContents::new().payload_json(payload.clone()), - ) - .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); - request.add_header("Connection".into(), "close".into()); loop { - match send_http_request(host, port, request.clone(), timeout) { + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + match send_http_request(host, port, request, timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!(
From cbb9456072ddbe2e8df441d741c8d2d056922e40 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 16:33:28 +0200 Subject: [PATCH 847/910] test: verify tenure_heights in new block events --- .../src/tests/nakamoto_integrations.rs | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+)
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e13a2ae27d1..bc03b26784f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4866,6 +4866,45 @@ fn burn_ops_integration_test() { "Stack-stx tx without a signer_key shouldn't have been submitted" ); assert!(transfer_stx_found, "Expected transfer STX op"); + + let mut last_tenure_height = 0; + for block in blocks.iter() { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + let mut block_has_tenure_change = false; + for tx in transactions.iter().rev() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx != "0x00" { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { + block_has_tenure_change = true; + } + } + } + // if `signer_bitvec` is set on a block, then it's a nakamoto block + let is_nakamoto_block = block.get("signer_bitvec").is_some(); + + let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); + let block_height = block.get("block_height").unwrap().as_u64().unwrap(); + + if is_nakamoto_block { + if block_has_tenure_change { + // tenure change block should have tenure height 1 more than the last tenure height + assert_eq!(last_tenure_height + 1, tenure_height); + last_tenure_height = tenure_height; + } else { + // tenure extend block should have the same tenure height as the last tenure height + assert_eq!(last_tenure_height, tenure_height); + } + last_tenure_height = block.get("block_height").unwrap().as_u64().unwrap(); + } else { + // epoch2.x block tenure height is the same as the block height + assert_eq!(tenure_height, block_height); + last_tenure_height = block_height; + } + } + assert!(delegate_stx_found, "Expected delegate STX op"); let sortdb = btc_regtest_controller.sortdb_mut(); let sortdb_conn = sortdb.conn();
From 23057d9aa876256f6c351be4c404e93cb33a5abb Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 18:17:12 +0200 Subject: [PATCH 848/910] chore: fix test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index bc03b26784f..dfbedc24781 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4879,6 +4879,7 @@ fn burn_ops_integration_test() { StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { block_has_tenure_change = true; + continue; } } } @@ -4897,7 +4898,6 @@ fn burn_ops_integration_test() { // tenure extend block should have the same tenure height as the last tenure height assert_eq!(last_tenure_height, tenure_height); } - last_tenure_height = block.get("block_height").unwrap().as_u64().unwrap(); } else { // epoch2.x block tenure height is the same as the block height assert_eq!(tenure_height, block_height);
From e27254033af4e613a0f15ba21be1d1b28ef8aa33 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 19:04:34 +0200 Subject: [PATCH 849/910] chore: move test --- .../src/tests/nakamoto_integrations.rs | 77 ++++++++++--------- 1 file changed, 39 insertions(+), 38 deletions(-)
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index dfbedc24781..e2d6e0199a7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4867,44 +4867,6 @@ fn burn_ops_integration_test() { ); assert!(transfer_stx_found, "Expected transfer STX op"); - let mut last_tenure_height = 0; - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - let mut block_has_tenure_change = false; - for tx in transactions.iter().rev() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx != "0x00" { - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); - if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { - block_has_tenure_change = true; - continue; - } - } - } - // if `signer_bitvec` is set on a block, then it's a nakamoto block - let is_nakamoto_block = block.get("signer_bitvec").is_some(); - - let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); - let block_height = block.get("block_height").unwrap().as_u64().unwrap(); - - if is_nakamoto_block { - if block_has_tenure_change { - // tenure change block should have tenure height 1 more than the last tenure height - assert_eq!(last_tenure_height + 1, tenure_height); - last_tenure_height = tenure_height; - } else { - // tenure extend block should have the same tenure height as the last tenure height - assert_eq!(last_tenure_height, tenure_height); - } - } else { - // epoch2.x block tenure height is the same as the block height - assert_eq!(tenure_height, block_height); - last_tenure_height = block_height; - } - } - assert!(delegate_stx_found, "Expected delegate STX op"); let sortdb = btc_regtest_controller.sortdb_mut(); let sortdb_conn = sortdb.conn(); @@ -8366,6 +8328,45 @@ fn check_block_info() { "Contract 3 should be able to fetch the StacksBlockId of the tip" ); + let blocks = test_observer::get_blocks(); + let mut last_tenure_height = 0; + for block in blocks.iter() { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + let mut block_has_tenure_change = false; + for tx in transactions.iter().rev() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx != "0x00" { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { + block_has_tenure_change = true; + continue; + } + } + } + // if `signer_bitvec` is set on a block, then it's a nakamoto block + let is_nakamoto_block = block.get("signer_bitvec").is_some(); + + let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); + let block_height = block.get("block_height").unwrap().as_u64().unwrap(); + + if is_nakamoto_block { + if block_has_tenure_change { + // tenure change block should have tenure height 1 more than the last tenure height + assert_eq!(last_tenure_height + 1, tenure_height); + last_tenure_height = tenure_height; + } else { + // tenure extend block should have the same tenure height as the last tenure height + assert_eq!(last_tenure_height, tenure_height); + } + } else { + // epoch2.x block tenure height is the same as the block height + assert_eq!(tenure_height, block_height); + last_tenure_height = block_height; + } + } + coord_channel .lock() .expect("Mutex poisoned")
From 858856851258241218d237c1ba68b017057ec77a Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 22:22:08 +0200 Subject: [PATCH 850/910] chore: fix test --- .../stacks-node/src/tests/nakamoto_integrations.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e2d6e0199a7..90334cce9b4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8328,7 +8328,9 @@ fn check_block_info() { "Contract 3 should be able to fetch the StacksBlockId of the tip" ); - let blocks = test_observer::get_blocks(); + let mut blocks = test_observer::get_blocks(); + blocks.sort_by_key(|block| block["block_height"].as_u64().unwrap()); + let mut last_tenure_height = 0; for block in blocks.iter() { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -8346,11 +8348,15 @@ fn check_block_info() { } } // if `signer_bitvec` is set on a block, then it's a nakamoto block - let is_nakamoto_block = block.get("signer_bitvec").is_some(); - + let is_nakamoto_block = block.get("signer_bitvec").map_or(false, |v| !v.is_null()); let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); let block_height = block.get("block_height").unwrap().as_u64().unwrap(); + if block_height == 0 { + // genesis block + continue; + } + if is_nakamoto_block { if block_has_tenure_change { // tenure change block should have tenure height 1 more than the last tenure height assert_eq!(last_tenure_height + 1, tenure_height); last_tenure_height = tenure_height;
From 540fcd4481d365952ae3904b8d5aa78b76536992 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 18 Oct 2024 16:13:33 -0700 Subject: [PATCH 851/910] chore: convert BlockAccepted to a struct --- libsigner/src/v0/messages.rs | 107 +++++++++++++++--- stacks-signer/src/v0/signer.rs | 32 +++--- .../src/nakamoto_node/sign_coordinator.rs | 13 ++- testnet/stacks-node/src/tests/signer/mod.rs | 11 +- testnet/stacks-node/src/tests/signer/v0.rs | 41 +++++--- 5 files changed, 142 insertions(+), 62 deletions(-)
diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 47d317992d6..102da15a1de 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -603,7 +603,7 @@ impl From<&BlockResponse> for BlockResponseTypePrefix { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BlockResponse { /// The Nakamoto block was accepted and therefore signed - Accepted((Sha512Trunc256Sum, MessageSignature)), /// The Nakamoto block was rejected and therefore not signed Rejected(BlockRejection), } @@ -616,7 +616,7 @@ impl std::fmt::Display for BlockResponse { write!( f, "BlockAccepted: signer_sighash = {}, signature = {}", - a.0, a.1 + a.signer_signature_hash, a.signature ) } BlockResponse::Rejected(r) => { @@ -633,7 +633,10 @@ impl BlockResponse { impl BlockResponse { /// Create a new accepted BlockResponse for the provided block signer signature hash and signature pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self { - Self::Accepted((hash, sig)) + Self::Accepted(BlockAccepted { + signer_signature_hash: hash, + signature: sig, + }) } /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code and sign it with the provided private key @@ -651,9 +654,8 @@ impl StacksMessageCodec for BlockResponse { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; match self { - BlockResponse::Accepted((hash, sig)) => { - write_next(fd, hash)?; - write_next(fd, sig)?; + BlockResponse::Accepted(accepted) => { + write_next(fd, accepted)?; } BlockResponse::Rejected(rejection) => { write_next(fd, rejection)?; @@ -667,9 +669,8 @@ impl StacksMessageCodec for BlockResponse { let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; let response = match type_prefix { BlockResponseTypePrefix::Accepted => { - let hash = read_next::(fd)?; - let sig = read_next::(fd)?; - BlockResponse::Accepted((hash, sig)) + let accepted = read_next::(fd)?; + BlockResponse::Accepted(accepted) } BlockResponseTypePrefix::Rejected => { let rejection = read_next::(fd)?; @@ -680,6 +681,32 @@ } } } +/// An acceptance response from a signer for a proposed block +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockAccepted { + /// The signer signature hash of the block that was accepted + pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the acceptance + pub signature: MessageSignature, +} + +impl StacksMessageCodec for BlockAccepted { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.signature)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let
signer_signature_hash = read_next::(fd)?; + let signature = read_next::(fd)?; + Ok(Self { + signer_signature_hash, + signature, + }) + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockRejection { @@ -894,7 +921,7 @@ mod test { use clarity::consts::CHAIN_ID_MAINNET; use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; use clarity::types::PrivateKey; - use clarity::util::hash::MerkleTree; + use clarity::util::hash::{hex_bytes, MerkleTree}; use clarity::util::secp256k1::MessageSignature; use rand::rngs::mock; use rand::{thread_rng, Rng, RngCore}; @@ -958,8 +985,11 @@ mod test { #[test] fn serde_block_response() { - let response = - BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), MessageSignature::empty())); + let accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), + signature: MessageSignature::empty(), + }; + let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) .expect("Failed to deserialize BlockResponse"); @@ -979,10 +1009,11 @@ mod test { #[test] fn serde_signer_message() { - let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(( - Sha512Trunc256Sum([2u8; 32]), - MessageSignature::empty(), - ))); + let accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), + signature: MessageSignature::empty(), + }; + let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); let deserialized_signer_message = read_next::(&mut &serialized_signer_message[..]) @@ -1122,4 +1153,48 @@ mod test { .expect("Failed to deserialize MockSignData"); assert_eq!(mock_block, deserialized_data); } + + #[test] + fn test_backwards_compatibility() { + let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3"; + let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8"; + let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_rejected = read_next::(&mut &block_rejected_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + let block_accepted = read_next::(&mut &block_accepted_bytes[..]) + .expect("Failed to deserialize BlockAccepted"); + + assert_eq!( + block_rejected, + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), + reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), + signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + chain_id: CHAIN_ID_TESTNET, + signature: MessageSignature([ + 0, 111, 179, 73, 33, 46, 26, 26,
241, 163, 199, 18, 135, 141, 81, 89, 181, 236, + 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, + 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, + 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, + ]), + })) + ); + + assert_eq!( + block_accepted, + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([ + 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, + 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 + ]), + signature: MessageSignature([ + 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, + 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, + 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, + 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 + ]), + })) + ); + } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 5d068fad0fe..fe626cb11f1 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -25,14 +25,13 @@ use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, - SignerMessage, + BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, + RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error, info, warn}; @@ -494,8 +493,8 @@ impl Signer { block_response: &BlockResponse, ) { match block_response { - BlockResponse::Accepted((block_hash, signature)) => { - self.handle_block_signature(stacks_client, block_hash, signature); + BlockResponse::Accepted(accepted) => { + self.handle_block_signature(stacks_client, accepted); } BlockResponse::Rejected(block_rejection) => { self.handle_block_rejection(block_rejection); @@ -547,13 +546,13 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + let accepted = BlockAccepted { + signer_signature_hash: block_info.signer_signature_hash(), + signature, + }; // have to save the signature _after_ the block info - self.handle_block_signature( - stacks_client, - &block_info.signer_signature_hash(), - &signature, - ); - Some(BlockResponse::accepted(signer_signature_hash, signature)) + self.handle_block_signature(stacks_client, &accepted); + Some(BlockResponse::Accepted(accepted)) } /// Handle the block validate reject response. Returns our block response if we have one @@ -739,12 +738,11 @@ impl Signer { } /// Handle an observed signature from another signer - fn handle_block_signature( - &mut self, - stacks_client: &StacksClient, - block_hash: &Sha512Trunc256Sum, - signature: &MessageSignature, - ) { + fn handle_block_signature(&mut self, stacks_client: &StacksClient, accepted: &BlockAccepted) { + let BlockAccepted { + signer_signature_hash: block_hash, + signature, + } = accepted; debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); // Have we already processed this block? 
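An aside on why this refactor is safe on the wire (this note and sketch are not part of the patch): `BlockAccepted` consensus-serializes its fields in the same order the old `(Sha512Trunc256Sum, MessageSignature)` tuple did, so old and new encodings are byte-identical, which is exactly what the `test_backwards_compatibility` vectors above check. A minimal sketch of that property, using fixed-size byte arrays as stand-ins for the real 32-byte hash and 65-byte signature types; the type and function names here are illustrative only:

```rust
use std::io::{Cursor, Read, Write};

// Stand-ins for the real types: Sha512Trunc256Sum is 32 bytes and
// MessageSignature is 65 bytes on the wire.
#[derive(Debug, PartialEq)]
struct Accepted {
    signer_signature_hash: [u8; 32],
    signature: [u8; 65],
}

impl Accepted {
    // Fields are written in the same order the old (hash, sig) tuple was,
    // so the encoding is unchanged by the tuple-to-struct refactor.
    fn serialize<W: Write>(&self, fd: &mut W) -> std::io::Result<()> {
        fd.write_all(&self.signer_signature_hash)?;
        fd.write_all(&self.signature)
    }

    fn deserialize<R: Read>(fd: &mut R) -> std::io::Result<Self> {
        let mut signer_signature_hash = [0u8; 32];
        fd.read_exact(&mut signer_signature_hash)?;
        let mut signature = [0u8; 65];
        fd.read_exact(&mut signature)?;
        Ok(Self {
            signer_signature_hash,
            signature,
        })
    }
}

fn main() -> std::io::Result<()> {
    let msg = Accepted {
        signer_signature_hash: [7u8; 32],
        signature: [9u8; 65],
    };
    let mut buf = Vec::new();
    msg.serialize(&mut buf)?;
    // The struct round-trips through the same bytes the tuple produced.
    let decoded = Accepted::deserialize(&mut Cursor::new(buf))?;
    assert_eq!(msg, decoded);
    Ok(())
}
```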
diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 2694d1d9ca8..1954f6eb125 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -20,7 +20,9 @@ use std::sync::Arc; use std::time::Duration; use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::v0::messages::{ + BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, +}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -450,10 +452,11 @@ impl SignCoordinator { } match message { - SignerMessageV0::BlockResponse(BlockResponse::Accepted(( - response_hash, - signature, - ))) => { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: response_hash, + signature, + } = accepted; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { warn!(
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 2e67234285a..5fb318e234c 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,7 +36,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockResponse, SignerMessage}; +use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -578,10 +578,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: hash, + signature, + } = accepted; if hash == *signer_signature_hash && expected_signers.iter().any(|pk| { pk.verify(hash.bits(), &signature)
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 753883bead3..6fc4c7078a3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3411,7 +3411,7 @@ fn duplicate_signers() { }) .filter_map(|message| match message { SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {message:?}"); + info!("Message(accepted): {:?}", &m); Some(m) } _ => { @@ -3425,20 +3425,23 @@ fn duplicate_signers() { info!("------------------------- Assert there are {unique_signers} unique signatures and recovered pubkeys -------------------------"); // Pick a message hash - let (selected_sighash, _) = signer_accepted_responses + let accepted = signer_accepted_responses .iter() - .min_by_key(|(sighash, _)| *sighash) - .copied() + .min_by_key(|accepted| accepted.signer_signature_hash) .expect("No `BlockResponse::Accepted` messages received"); + let selected_sighash = accepted.signer_signature_hash; // Filter only responses for selected block and collect unique pubkeys and signatures let (pubkeys, signatures): (HashSet<_>, HashSet<_>) = signer_accepted_responses .into_iter() - .filter(|(hash, _)| *hash == selected_sighash) - .map(|(msg, sig)| { - let pubkey =
Secp256k1PublicKey::recover_to_pubkey(msg.bits(), &sig) - .expect("Failed to recover pubkey"); - (pubkey, sig) + .filter(|accepted| accepted.signer_signature_hash == selected_sighash) + .map(|accepted| { + let pubkey = Secp256k1PublicKey::recover_to_pubkey( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to recover pubkey"); + (pubkey, accepted.signature) }) .unzip(); @@ -4652,10 +4655,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - ignoring_signers - .iter() - .find(|key| key.verify(hash.bits(), &signature).is_ok()) + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) } _ => None, } @@ -4896,12 +4900,11 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(( - hash, - signature, - ))) => { - if block.header.signer_signature_hash() == hash { - Some(signature) + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + if block.header.signer_signature_hash() + == accepted.signer_signature_hash + { + Some(accepted.signature) } else { None } From 5f4c42fc5a28bb8f19a2a212f6587d2f1c64b9bb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 18 Oct 2024 16:54:27 -0700 Subject: [PATCH 852/910] feat: add signer message metadata to block responses --- libsigner/src/libsigner.rs | 10 ++ libsigner/src/v0/messages.rs | 148 +++++++++++++++++- stacks-signer/src/cli.rs | 3 +- stacks-signer/src/client/stackerdb.rs | 5 +- stacks-signer/src/lib.rs | 12 +- stacks-signer/src/main.rs | 3 +- stacks-signer/src/monitoring/server.rs | 2 +- stacks-signer/src/v0/signer.rs | 11 +- .../src/nakamoto_node/sign_coordinator.rs | 7 +- testnet/stacks-node/src/tests/signer/mod.rs | 17 +- testnet/stacks-node/src/tests/signer/v0.rs | 4 +- 11 files changed, 183 insertions(+), 39 deletions(-) diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 878d428bfc3..b1b760af6dc 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -49,8 +49,10 @@ use std::cmp::Eq; use std::fmt::Debug; use std::hash::Hash; +use blockstack_lib::version_string; use clarity::codec::StacksMessageCodec; use clarity::vm::types::QualifiedContractIdentifier; +use lazy_static::lazy_static; pub use crate::error::{EventError, RPCError}; pub use crate::events::{ @@ -74,3 +76,11 @@ pub trait SignerMessage: StacksMessageCodec { /// The contract identifier for the message slot in stacker db fn msg_id(&self) -> Option; } + +lazy_static! 
{ + /// The version string for the signer + pub static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + version_string("stacks-signer", pkg_version) + }; +}
diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 102da15a1de..264a4cc1074 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -70,7 +70,7 @@ use crate::http::{decode_http_body, decode_http_request}; use crate::stacks_common::types::PublicKey; use crate::{ BlockProposal, EventError, MessageSlotID as MessageSlotIDTrait, - SignerMessage as SignerMessageTrait, + SignerMessage as SignerMessageTrait, VERSION_STRING, }; define_u8_enum!( @@ -615,15 +615,15 @@ impl std::fmt::Display for BlockResponse { BlockResponse::Accepted(a) => { write!( f, - "BlockAccepted: signer_sighash = {}, signature = {}", - a.signer_signature_hash, a.signature + "BlockAccepted: signer_sighash = {}, signature = {}, version = {}", + a.signer_signature_hash, a.signature, a.metadata.server_version ) } BlockResponse::Rejected(r) => { write!( f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}", - r.reason_code, r.reason, r.signer_signature_hash, r.signature + "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}, version = {}", + r.reason_code, r.reason, r.signer_signature_hash, r.signature, r.metadata.server_version ) } } @@ -636,6 +636,7 @@ impl BlockResponse { Self::Accepted(BlockAccepted { signer_signature_hash: hash, signature: sig, + metadata: SignerMessageMetadata::default(), }) } @@ -681,6 +682,57 @@ impl StacksMessageCodec for BlockResponse { } } +/// Metadata for signer messages +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SignerMessageMetadata { + /// The signer's server version + pub server_version: String, +} + +/// To ensure backwards compatibility, when deserializing, +/// if no bytes are found, return empty metadata +impl StacksMessageCodec for SignerMessageMetadata { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.server_version.as_bytes().to_vec())?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + match read_next::, _>(fd) { + Ok(server_version) => { + let server_version = String::from_utf8(server_version).map_err(|e| { + CodecError::DeserializeError(format!( + "Failed to decode server version: {:?}", + &e + )) + })?; + Ok(Self { server_version }) + } + Err(_) => { + // For backwards compatibility, return empty metadata + Ok(Self::empty()) + } + } + } +} + +impl Default for SignerMessageMetadata { + fn default() -> Self { + Self { + server_version: VERSION_STRING.to_string(), + } + } +} + +impl SignerMessageMetadata { + /// Empty metadata + pub fn empty() -> Self { + Self { + server_version: String::new(), + } + } +} + /// An acceptance response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockAccepted { @@ -688,25 +740,41 @@ pub struct BlockAccepted { pub signer_signature_hash: Sha512Trunc256Sum, /// The signer's signature across the acceptance pub signature: MessageSignature, + /// Signer message metadata + pub metadata: SignerMessageMetadata, } impl StacksMessageCodec for BlockAccepted { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; Ok(()) } fn consensus_deserialize(fd: &mut R) ->
Result { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; Ok(Self { signer_signature_hash, signature, + metadata, }) } } +impl BlockAccepted { + /// Create a new BlockAccepted for the provided block signer signature hash and signature + pub fn new(signer_signature_hash: Sha512Trunc256Sum, signature: MessageSignature) -> Self { + Self { + signer_signature_hash, + signature, + metadata: SignerMessageMetadata::default(), + } + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockRejection { @@ -720,6 +788,8 @@ pub struct BlockRejection { pub signature: MessageSignature, /// The chain id pub chain_id: u32, + /// Signer message metadata + pub metadata: SignerMessageMetadata, } impl BlockRejection { @@ -741,6 +811,7 @@ impl BlockRejection { signer_signature_hash, signature: MessageSignature::empty(), chain_id, + metadata: SignerMessageMetadata::default(), }; rejection .sign(private_key) @@ -765,6 +836,7 @@ impl BlockRejection { signer_signature_hash: reject.signer_signature_hash, chain_id, signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::default(), }; rejection .sign(private_key) @@ -814,6 +886,7 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; Ok(()) } @@ -826,12 +899,14 @@ impl StacksMessageCodec for BlockRejection { let signer_signature_hash = read_next::(fd)?; let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; Ok(Self { reason, reason_code, signer_signature_hash, chain_id, signature, + metadata, }) } } @@ -988,6 +1063,7 @@ mod test { let accepted = BlockAccepted { signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::default(), }; let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); @@ -1012,6 +1088,7 @@ mod test { let accepted = BlockAccepted { signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::default(), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); @@ -1178,6 +1255,55 @@ mod test { 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, ]), + metadata: SignerMessageMetadata::empty(), + })) + ); + + assert_eq!( + block_accepted, + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([ + 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, + 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 + ]), + metadata: SignerMessageMetadata::empty(), + signature: MessageSignature([ + 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, + 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, + 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, + 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 + ]), + })) + ); + } + + #[test] + fn test_block_response_metadata() { + let block_rejected_hex = 
"010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c64"; + let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c64"; + let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_rejected = read_next::(&mut &block_rejected_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + let block_accepted = read_next::(&mut &block_accepted_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + + assert_eq!( + block_rejected, + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), + reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), + signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + chain_id: CHAIN_ID_TESTNET, + signature: MessageSignature([ + 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, + 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, + 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, + 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, + ]), + metadata: SignerMessageMetadata { + server_version: "Hello world".to_string(), + }, })) ); @@ -1188,6 +1314,9 @@ mod test { 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 ]), + metadata: SignerMessageMetadata { + server_version: "Hello world".to_string(), + }, signature: MessageSignature([ 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, @@ -1197,4 +1326,13 @@ mod test { })) ); } + + #[test] + fn test_empty_metadata() { + let serialized_metadata = [0u8; 0]; + let deserialized_metadata = + read_next::(&mut &serialized_metadata[..]) + .expect("Failed to deserialize SignerMessageMetadata"); + assert_eq!(deserialized_metadata, SignerMessageMetadata::empty()); + } } diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 97829b69777..4e9067498d0 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -29,6 +29,7 @@ use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; +use libsigner::VERSION_STRING; use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, @@ -38,8 +39,6 @@ use stacks_common::address::{ use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::VERSION_STRING; - extern crate alloc; #[derive(Parser, Debug)] diff --git a/stacks-signer/src/client/stackerdb.rs 
b/stacks-signer/src/client/stackerdb.rs index 0fc43350db9..117dd4814fc 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -235,7 +235,9 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; - use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; + use libsigner::v0::messages::{ + BlockRejection, BlockResponse, RejectCode, SignerMessage, SignerMessageMetadata, + }; use rand::{thread_rng, RngCore}; use super::*; @@ -283,6 +285,7 @@ mod tests { signer_signature_hash: block.header.signer_signature_hash(), chain_id: thread_rng().next_u32(), signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::empty(), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 3555435eaa7..246015bfb7a 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -48,11 +48,9 @@ mod tests; use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; -use blockstack_lib::version_string; use chainstate::SortitionsView; use config::GlobalConfig; -use lazy_static::lazy_static; -use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; +use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait, VERSION_STRING}; use runloop::SignerResult; use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; @@ -61,14 +59,6 @@ use crate::client::StacksClient; use crate::config::SignerConfig; use crate::runloop::RunLoop; -lazy_static! { - /// The version string for the signer - pub static ref VERSION_STRING: String = { - let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); - version_string("stacks-signer", pkg_version) - }; -} - /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { /// Create a new `Signer` instance diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 56f322b1853..a23918f6f80 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -32,7 +32,7 @@ use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_ke use clap::Parser; use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; -use libsigner::SignerSession; +use libsigner::{SignerSession, VERSION_STRING}; use libstackerdb::StackerDBChunkData; use slog::{slog_debug, slog_error}; use stacks_common::util::hash::to_hex; @@ -47,7 +47,6 @@ use stacks_signer::config::GlobalConfig; use stacks_signer::monitor_signers::SignerMonitor; use stacks_signer::utils::stackerdb_session; use stacks_signer::v0::SpawnedSigner; -use stacks_signer::VERSION_STRING; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index f5e3cceef15..15267c44ee7 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -19,6 +19,7 @@ use std::time::Instant; use clarity::util::hash::to_hex; use clarity::util::secp256k1::Secp256k1PublicKey; +use libsigner::VERSION_STRING; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; use tiny_http::{Response as HttpResponse, Server as HttpServer}; 
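Taken together, these import changes finish relocating the signer's version constant into `libsigner`, so the CLI, monitoring server, and test code all report the same string. The `lazy_static` block deleted from `stacks-signer/src/lib.rs` above shows its shape; the following is a self-contained sketch of the same pattern, with `version_string` reduced to plain formatting since the real helper lives in `blockstack_lib`:

```rust
use lazy_static::lazy_static;

// Stand-in for blockstack_lib::version_string; assumed here to simply
// join the package name and version for illustration.
fn version_string(pkg_name: &str, pkg_version: &str) -> String {
    format!("{pkg_name} {pkg_version}")
}

lazy_static! {
    /// The version string advertised in startup logs and StackerDB messages.
    /// A build-time STACKS_NODE_VERSION override takes precedence over the
    /// crate version declared in Cargo.toml.
    pub static ref VERSION_STRING: String = {
        let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION"));
        version_string("stacks-signer", pkg_version)
    };
}
```

Consumers dereference it as `*VERSION_STRING` or call `VERSION_STRING.to_string()`; the value is built once, on first use.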
@@ -28,7 +29,6 @@ use crate::client::{ClientError, StacksClient}; use crate::config::{GlobalConfig, Network}; use crate::monitoring::prometheus::gather_metrics_string; use crate::monitoring::{update_signer_nonce, update_stacks_tip_height}; -use crate::VERSION_STRING; #[derive(thiserror::Error, Debug)] /// Monitoring server errors diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fe626cb11f1..2cb10a98178 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -546,10 +546,7 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - let accepted = BlockAccepted { - signer_signature_hash: block_info.signer_signature_hash(), - signature, - }; + let accepted = BlockAccepted::new(block_info.signer_signature_hash(), signature); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); Some(BlockResponse::Accepted(accepted)) @@ -742,8 +739,12 @@ impl Signer { let BlockAccepted { signer_signature_hash: block_hash, signature, + metadata, } = accepted; - debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); + debug!( + "{self}: Received a block-accept signature: ({block_hash}, {signature}, {})", + metadata.server_version + ); // Have we already processed this block? match self diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 1954f6eb125..697dddeb034 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -456,6 +456,7 @@ impl SignCoordinator { let BlockAccepted { signer_signature_hash: response_hash, signature, + metadata, } = accepted; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { @@ -466,7 +467,8 @@ impl SignCoordinator { "response_hash" => %response_hash, "slot_id" => slot_id, "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash + "response_hash" => %response_hash, + "server_version" => %metadata.server_version ); continue; } @@ -514,7 +516,8 @@ impl SignCoordinator { "signer_weight" => signer_entry.weight, "total_weight_signed" => total_weight_signed, "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() + "stacks_block_id" => %block.header.block_id(), + "server_version" => metadata.server_version, ); gathered_signatures.insert(slot_id, signature); responded_signers.insert(signer_pubkey); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5fb318e234c..42b894398df 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,7 +36,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage}; +use libsigner::v0::messages::{BlockResponse, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -579,17 +579,16 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - let BlockAccepted { - signer_signature_hash: hash, - signature, - } = accepted; - if hash == *signer_signature_hash + if accepted.signer_signature_hash == 
*signer_signature_hash && expected_signers.iter().any(|pk| { - pk.verify(hash.bits(), &signature) - .expect("Failed to verify signature") + pk.verify( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to verify signature") }) { - Some(signature) + Some(accepted.signature) } else { None } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6fc4c7078a3..e02d5b62ca6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -25,7 +25,7 @@ use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; -use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession, VERSION_STRING}; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -2567,10 +2567,12 @@ fn empty_sortition() { }; if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code, + metadata, .. })) = latest_msg { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + assert_eq!(metadata.server_version, VERSION_STRING.to_string()); found_rejections.push(*slot_id); } else { info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); From a1208c89e1e8101944af7f1d39f5dd92f302662a Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 18 Oct 2024 16:56:27 -0700 Subject: [PATCH 853/910] feat: add metadata to mock signatures --- libsigner/src/v0/messages.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 264a4cc1074..2436421fa88 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -439,6 +441,8 @@ pub struct MockSignature { signature: MessageSignature, /// The mock block proposal that was signed across pub mock_proposal: MockProposal, + /// The signature metadata + pub metadata: SignerMessageMetadata, } impl MockSignature { @@ -447,6 +449,7 @@ impl MockSignature { let mut sig = Self { signature: MessageSignature::empty(), mock_proposal, + metadata: SignerMessageMetadata::default(), }; sig.sign(stacks_private_key) .expect("Failed to sign MockSignature"); @@ -476,15 +479,18 @@ impl StacksMessageCodec for MockSignature { fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signature)?; self.mock_proposal.consensus_serialize(fd)?; + self.metadata.consensus_serialize(fd)?; Ok(()) } fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> { let signature = read_next::<MessageSignature, _>(fd)?; let mock_proposal = MockProposal::consensus_deserialize(fd)?; + let metadata = SignerMessageMetadata::consensus_deserialize(fd)?; Ok(Self { signature, mock_proposal, + metadata, }) } } @@ -1206,6 +1212,7 @@ mod test { let mut mock_signature = MockSignature { signature: MessageSignature::empty(), mock_proposal: random_mock_proposal(), + metadata: SignerMessageMetadata::default(), }; mock_signature .sign(&StacksPrivateKey::new()) From 899ff22c2b8205eb2f12af4833aa1b32eb59a321 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 18 Oct 2024 19:34:00 -0700 Subject: [PATCH 854/910] Do not assume every tenure has inter_blocks_per_tenure Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 37 ++++++++++++++++------ 1 file changed, 27 insertions(+), 10 
deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 753883bead3..44ff3854b4f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3867,7 +3867,14 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + let mut miner_1_blocks = 0; + let mut miner_2_blocks = 0; + // Make sure that both miner 1 and 2 mine at least 1 block each + while miner_1_tenures < min_miner_1_tenures + || miner_2_tenures < min_miner_2_tenures + || miner_1_blocks == 0 + || miner_2_blocks == 0 + { if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3959,6 +3966,7 @@ fn partial_tenure_fork() { min_miner_1_tenures = miner_1_tenures + 1; } + let mut blocks = inter_blocks_per_tenure; // mine (or attempt to mine) the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); @@ -4030,6 +4038,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); + blocks = interim_block_ix; break; } else { panic!("Failed to submit tx: {}", e); @@ -4044,21 +4053,24 @@ fn partial_tenure_fork() { if miner == 1 { miner_1_tenures += 1; + miner_1_blocks += blocks; } else { miner_2_tenures += 1; + miner_2_blocks += blocks; } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}, Miner 1 before: {}, Miner 2 before: {}", - miner_1_tenures, miner_2_tenures, mined_before_1, mined_before_2, - ); let mined_1 = blocks_mined1.load(Ordering::SeqCst); let mined_2 = blocks_mined2.load(Ordering::SeqCst); + + info!( + "Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}, Miner 1 before: {mined_before_1}, Miner 2 before: {mined_before_2}, Miner 1 blocks: {mined_1}, Miner 2 blocks: {mined_2}", + ); + if miner == 1 { - assert_eq!(mined_1, mined_before_1 + inter_blocks_per_tenure + 1); + assert_eq!(mined_1, mined_before_1 + blocks + 1); } else { if miner_2_tenures < min_miner_2_tenures { - assert_eq!(mined_2, mined_before_2 + inter_blocks_per_tenure + 1); + assert_eq!(mined_2, mined_before_2 + blocks + 1); } else { // Miner 2 should have mined 0 blocks after the fork assert_eq!(mined_2, mined_before_2); @@ -4078,11 +4090,16 @@ fn partial_tenure_fork() { assert_eq!(peer_2_height, ignore_block - 1); // The height may be higher than expected due to extra transactions waiting // to be mined during the forking miner's tenure. 
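Before the additions that follow, it helps to restate the per-tenure bookkeeping this patch introduces: `blocks` starts at the expected interim count and is cut down to the number actually mined when the node rejects a transfer mid-tenure. A sketch under the assumption that transfer submission is abstracted behind a fallible closure (the `submit` parameter is illustrative, not part of the test):

```rust
/// How many interim blocks a tenure actually produced: the expected count,
/// unless the node cut the tenure short by rejecting a transfer with a
/// "TooMuchChaining" error partway through.
fn interim_blocks_mined<E: std::fmt::Display>(
    inter_blocks_per_tenure: u64,
    mut submit: impl FnMut() -> Result<(), E>,
) -> u64 {
    for interim_block_ix in 0..inter_blocks_per_tenure {
        if let Err(e) = submit() {
            if e.to_string().contains("TooMuchChaining") {
                // Only the blocks before this index landed in this tenure.
                return interim_block_ix;
            }
            panic!("Failed to submit tx: {e}");
        }
    }
    inter_blocks_per_tenure
}
```

The per-miner totals (`miner_1_blocks`, `miner_2_blocks`) then accumulate this value rather than assuming every tenure reaches `inter_blocks_per_tenure`.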
+ // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure + let min_num_miner_2_blocks = std::cmp::min( + miner_2_blocks, + min_miner_2_tenures * (inter_blocks_per_tenure + 1), + ); assert!( - peer_1_height - >= pre_nakamoto_peer_1_height - + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) + miner_2_tenures >= min_miner_2_tenures, + "Miner 2 failed to win its minimum number of tenures" ); + assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_num_miner_2_blocks,); assert_eq!( btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() From 9f9ad40c8ae57cae5a9dab9e7fd5bf8528ac51e2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 20 Oct 2024 12:12:50 -0700 Subject: [PATCH 855/910] fix: use hex instead of u8 bytes in fixture --- libsigner/src/v0/messages.rs | 48 +++++++++++------------------------- 1 file changed, 14 insertions(+), 34 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 2436421fa88..618aa20937d 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -1254,14 +1254,9 @@ mod test { SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), - signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + signer_signature_hash: Sha512Trunc256Sum::from_hex("91f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e").unwrap(), chain_id: CHAIN_ID_TESTNET, - signature: MessageSignature([ - 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, - 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, - 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, - 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, - ]), + signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), })) ); @@ -1269,17 +1264,12 @@ mod test { assert_eq!( block_accepted, SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { - signer_signature_hash: Sha512Trunc256Sum([ - 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, - 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 - ]), + signer_signature_hash: Sha512Trunc256Sum::from_hex( + "11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19" + ) + .unwrap(), metadata: SignerMessageMetadata::empty(), - signature: MessageSignature([ - 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, - 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, - 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, - 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 - ]), + signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), })) ); } @@ -1300,14 +1290,9 @@ mod test { SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code: 
RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), - signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + signer_signature_hash: Sha512Trunc256Sum::from_hex("91f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e").unwrap(), chain_id: CHAIN_ID_TESTNET, - signature: MessageSignature([ - 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, - 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, - 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, - 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, - ]), + signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, @@ -1317,19 +1302,14 @@ mod test { assert_eq!( block_accepted, SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { - signer_signature_hash: Sha512Trunc256Sum([ - 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, - 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 - ]), + signer_signature_hash: Sha512Trunc256Sum::from_hex( + "11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19" + ) + .unwrap(), metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, - signature: MessageSignature([ - 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, - 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, - 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, - 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 - ]), + signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), })) ); } From 706806f1f104e2be0bd15e873d171b793b9df9d8 Mon Sep 17 00:00:00 2001 From: Parikalp Bhardwaj Date: Mon, 21 Oct 2024 16:19:42 +0400 Subject: [PATCH 856/910] Fix: Conditional logging for 'Proceeding to mine blocks' message --- testnet/stacks-node/src/run_loop/nakamoto.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 997327287df..065441b91b9 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -701,10 +701,17 @@ impl RunLoop { // at tip, and not downloading. proceed to mine. if last_tenure_sortition_height != sortition_db_height { - info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height - ); + if is_miner { + info!( + "Runloop: Synchronized full burnchain up to height {}. 
Proceeding to mine blocks", + sortition_db_height + ); + } else { + info!( + "Runloop: Synchronized full burnchain up to height {}.", + sortition_db_height + ); + } last_tenure_sortition_height = sortition_db_height; } } From 5ae2906816b222ffa4f0546715474e8bfc1e7071 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 21 Oct 2024 11:01:43 -0500 Subject: [PATCH 857/910] chore: signers should be more permissive about a slow miner wakeup --- stacks-signer/src/config.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 3392906682a..c0514274e10 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,6 +35,7 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; +const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -239,8 +240,11 @@ impl TryFrom for GlobalConfig { StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); - let first_proposal_burn_block_timing = - Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); + let first_proposal_burn_block_timing = Duration::from_secs( + raw_data + .first_proposal_burn_block_timing_secs + .unwrap_or(DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS), + ); let db_path = raw_data.db_path.into(); let metrics_endpoint = match raw_data.metrics_endpoint { From 067fe1916f6ba56fdb2a8080297e9ca7867eec24 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 21 Oct 2024 11:49:02 -0700 Subject: [PATCH 858/910] Turn common stale chunk error into a debug with its own message to not spam logs Signed-off-by: Jacinta Ferrant --- stackslib/src/net/relay.rs | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 575e96138ea..7d4895e131c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2499,14 +2499,27 @@ impl Relayer { for chunk in sync_result.chunks_to_store.into_iter() { let md = chunk.get_slot_metadata(); if let Err(e) = tx.try_replace_chunk(&sc, &md, &chunk.data) { - warn!( - "Failed to store chunk for StackerDB"; - "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), - "slot_id" => md.slot_id, - "slot_version" => md.slot_version, - "num_bytes" => chunk.data.len(), - "error" => %e - ); + if matches!(e, Error::StaleChunk { .. }) { + // This is a common and expected error, so log it as a debug and with a sep message + // to distinguish it from other error types. 
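The branch added above leans on `matches!`, which tests a value against a pattern without moving or unpacking it; the `{ .. }` rest pattern ignores the variant's fields. A minimal standalone sketch of the idiom, using a simplified stand-in for the net `Error` type (the variant fields here are assumptions for illustration):

```rust
// Simplified stand-in for the net error enum; only the variant shapes
// matter for the `matches!` test below.
enum Error {
    StaleChunk { supplied_version: u32, latest_version: u32 },
    NoSuchSlot,
}

/// Stale chunks are routine during StackerDB replication, so they are
/// classified apart from genuine storage failures.
fn is_expected_replication_noise(e: &Error) -> bool {
    matches!(e, Error::StaleChunk { .. })
}

fn main() {
    let stale = Error::StaleChunk { supplied_version: 1, latest_version: 2 };
    assert!(is_expected_replication_noise(&stale));
    assert!(!is_expected_replication_noise(&Error::NoSuchSlot));
}
```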
+ debug!( + "Dropping stale StackerDB chunk"; + "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), + "slot_id" => md.slot_id, + "slot_version" => md.slot_version, + "num_bytes" => chunk.data.len(), + "error" => %e + ); + } else { + warn!( + "Failed to store chunk for StackerDB"; + "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), + "slot_id" => md.slot_id, + "slot_version" => md.slot_version, + "num_bytes" => chunk.data.len(), + "error" => %e + ); + } continue; } else { debug!("Stored chunk"; "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), "slot_id" => md.slot_id, "slot_version" => md.slot_version); From cc8bddc3a58ceb4de48ef40e61f402d112da5fb4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 21 Oct 2024 14:06:41 -0500 Subject: [PATCH 859/910] chore: set 3.0 mainnet activation height --- stackslib/src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index ade8a825899..491ba21ca0a 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -97,7 +97,7 @@ pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; /// This is Epoch-2.5, activation height proposed in SIP-021 pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 840_360; /// This is Epoch-3.0, activation height proposed in SIP-021 -pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 2_000_000; +pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 867_867; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; From c184f5469cfadb71803410014e9ef3c555d5f219 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 21 Oct 2024 18:08:43 -0400 Subject: [PATCH 860/910] Fix default affirmation map settings for 3.0 --- testnet/stacks-node/src/config.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f3c10b72d3b..06588622461 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -853,12 +853,6 @@ impl Config { "Attempted to run mainnet node with `use_test_genesis_chainstate`" )); } - } else if node.require_affirmed_anchor_blocks { - // testnet requires that we use the 2.05 rules for anchor block affirmations, - // because reward cycle 360 (and possibly future ones) has a different anchor - // block choice in 2.05 rules than in 2.1 rules. 
- debug!("Set `require_affirmed_anchor_blocks` to `false` for non-mainnet config"); - node.require_affirmed_anchor_blocks = false; } if node.stacker || node.miner { @@ -1968,7 +1962,7 @@ impl Default for NodeConfig { marf_defer_hashing: true, pox_sync_sample_secs: 30, use_test_genesis_chainstate: None, - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, From c4a88f6298302b06dc86bbd0853a223e99cd5381 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 08:10:07 -0700 Subject: [PATCH 861/910] Only check if we advanced pre fork miner 2 blocks forward Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 307a3e85187..1744a3b4a85 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3874,12 +3874,9 @@ fn partial_tenure_fork() { let mut miner_1_blocks = 0; let mut miner_2_blocks = 0; - // Make sure that both miner 1 and 2 mine at least 1 block each - while miner_1_tenures < min_miner_1_tenures - || miner_2_tenures < min_miner_2_tenures - || miner_1_blocks == 0 - || miner_2_blocks == 0 - { + let mut min_miner_2_blocks = 0; + + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3963,6 +3960,7 @@ fn partial_tenure_fork() { // Ensure that miner 2 runs at least one more tenure min_miner_2_tenures = miner_2_tenures + 1; fork_initiated = true; + min_miner_2_blocks = miner_2_blocks; } if miner == 2 && miner_2_tenures == min_miner_2_tenures { // This is the forking tenure. Ensure that miner 1 runs one more @@ -4096,15 +4094,9 @@ fn partial_tenure_fork() { // The height may be higher than expected due to extra transactions waiting // to be mined during the forking miner's tenure. 
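With the removals that follow, the height check collapses to a single lower bound. Restated as a standalone helper using the test's counter names, where `min_miner_2_blocks` is miner 2's block count captured at the moment the fork was initiated:

```rust
/// Minimum expected chain height after the forking scenario: the pre-Nakamoto
/// baseline plus all of miner 1's blocks plus only the blocks miner 2 mined
/// before the fork. Miner 2's post-fork blocks are expected to be orphaned
/// and are deliberately excluded from the bound.
fn min_expected_peer_height(
    pre_nakamoto_peer_1_height: u64,
    miner_1_blocks: u64,
    min_miner_2_blocks: u64,
) -> u64 {
    pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks
}
```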
// We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure - let min_num_miner_2_blocks = std::cmp::min( - miner_2_blocks, - min_miner_2_tenures * (inter_blocks_per_tenure + 1), - ); - assert!( - miner_2_tenures >= min_miner_2_tenures, - "Miner 2 failed to win its minimum number of tenures" - ); - assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_num_miner_2_blocks,); + // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 + // before the fork was initiated + assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); assert_eq!( btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() From c9d3e3631f829e50eb7addcab72954018d861103 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 08:18:07 -0700 Subject: [PATCH 862/910] CRC: update comment to be more descriptive Signed-off-by: Jacinta Ferrant --- stackslib/src/net/relay.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7d4895e131c..7e4ecbb4081 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2500,8 +2500,8 @@ impl Relayer { let md = chunk.get_slot_metadata(); if let Err(e) = tx.try_replace_chunk(&sc, &md, &chunk.data) { if matches!(e, Error::StaleChunk { .. }) { - // This is a common and expected error, so log it as a debug and with a sep message - // to distinguish it from other error types. + // This is a common and expected message, so log it as a debug and with a sep message + // to distinguish it from other message types. debug!( "Dropping stale StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), From f6d50c51409321179ebe3d0201cb4bc92aaf7d21 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 21 Oct 2024 11:30:19 -0700 Subject: [PATCH 863/910] Have get nakamoto headers return only the unique block ids and increase the reward length Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 307a3e85187..6891a6201b1 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1481,6 +1481,7 @@ fn multiple_miners() { config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(30); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1664,7 +1665,7 @@ fn multiple_miners() { /// Read processed nakamoto block IDs from the test observer, and use `config` to open /// a chainstate DB and returns their corresponding StacksHeaderInfos fn get_nakamoto_headers(config: &Config) -> Vec { - let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + let nakamoto_block_ids: HashSet<_> = test_observer::get_blocks() .into_iter() .filter_map(|block_json| { if block_json From 2fb170b5b1125f17b66c430c5a394fa775c35f25 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 08:23:22 -0700 Subject: [PATCH 864/910] Increase the max number of nakamoto tenures to match the pox reward length Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6891a6201b1..7a234d3bb73 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1460,6 +1460,8 @@ fn multiple_miners() { let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 @@ -1481,7 +1483,7 @@ fn multiple_miners() { config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); config.node.pox_sync_sample_secs = 30; - config.burnchain.pox_reward_length = Some(30); + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1561,8 +1563,6 @@ fn multiple_miners() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - let max_nakamoto_tenures = 20; - // due to the random nature of mining sortitions, the way this test is structured // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the From 7ead5fc7bb3551cdfcb60ed6094a71711a3f38c6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 08:33:19 -0700 Subject: [PATCH 865/910] Cargo fmt Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7a234d3bb73..8c344f680ec 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1460,7 +1460,6 @@ fn multiple_miners() { let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); - let max_nakamoto_tenures = 30; // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 From ed33c4f4d205cde3fc23b16a19c7636f5a61733e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 09:19:35 -0700 Subject: [PATCH 866/910] Fix build error due to type change Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8c344f680ec..260358ad5a8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1645,11 +1645,11 @@ fn multiple_miners() { assert_eq!(peer_1_height, peer_2_height); assert_eq!( peer_1_height, - pre_nakamoto_peer_1_height + btc_blocks_mined - 1 + pre_nakamoto_peer_1_height + btc_blocks_mined as u64 - 1 ); assert_eq!( btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + u32::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); rl2_coord_channels From 8a36a278704bf4e656d61bdd236d7867a806cf43 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 22 Oct 2024 10:29:03 -0700 Subject: [PATCH 867/910] feat: allow pretty print logging in tests --- stacks-common/src/util/log.rs | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/stacks-common/src/util/log.rs 
b/stacks-common/src/util/log.rs index 9d52f0dbbf4..e86ed7f44b5 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -215,14 +215,16 @@ fn make_json_logger() -> Logger { panic!("Tried to construct JSON logger, but stacks-blockchain built without slog_json feature enabled.") } -#[cfg(not(any(test, feature = "testing")))] fn make_logger() -> Logger { if env::var("STACKS_LOG_JSON") == Ok("1".into()) { make_json_logger() } else { let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); let pretty_print = env::var("STACKS_LOG_PP") == Ok("1".into()); + #[cfg(not(any(test, feature = "testing")))] let decorator = slog_term::PlainSyncDecorator::new(std::io::stderr()); + #[cfg(any(test, feature = "testing"))] + let decorator = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); let logger = Logger::root(drain.ignore_res(), o!()); @@ -230,20 +232,6 @@ fn make_logger() -> Logger { } } -#[cfg(any(test, feature = "testing"))] -fn make_logger() -> Logger { - if env::var("STACKS_LOG_JSON") == Ok("1".into()) { - make_json_logger() - } else { - let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); - let plain = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); - let isatty = isatty(Stream::Stdout); - let drain = TermFormat::new(plain, false, debug, isatty); - let logger = Logger::root(drain.ignore_res(), o!()); - logger - } -} - fn inner_get_loglevel() -> slog::Level { if env::var("STACKS_LOG_TRACE") == Ok("1".into()) { slog::Level::Trace From a7432bbe8965d43e23af3d7e9e7b1b28031213b0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 22 Oct 2024 13:00:45 -0500 Subject: [PATCH 868/910] feat: update tx estimator when tx yields too big tx error --- stackslib/src/chainstate/nakamoto/miner.rs | 11 +++- stackslib/src/chainstate/stacks/miner.rs | 68 ++++++++++++++++++++-- stackslib/src/chainstate/stacks/mod.rs | 10 ++-- 3 files changed, 79 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 4d99b538210..04401a0d9bb 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -674,10 +674,19 @@ impl BlockBuilder for NakamotoBlockBuilder { tx.txid(), 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget + ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" ); + None + }; return TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), ); } else { warn!( diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index d3298855da5..7fb08335a28 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1040,9 +1040,18 @@ impl<'a> StacksMicroblockBuilder<'a> { 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); + let mut measured_cost = cost_after.clone(); + let measured_cost = if measured_cost.sub(cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" + ); + None + }; return Ok(TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), )); } else { warn!( @@ -1323,7 +1332,22 @@ impl<'a> 
StacksMicroblockBuilder<'a> { return Ok(None); } } - Error::TransactionTooBigError => { + Error::TransactionTooBigError(measured_cost) => { + if update_estimator { + if let Some(measured_cost) = measured_cost { + if let Err(e) = estimator.notify_event( + &mempool_tx.tx.payload, + &measured_cost, + &block_limit, + &stacks_epoch_id, + ) { + warn!("Error updating estimator"; + "txid" => %mempool_tx.metadata.txid, + "error" => ?e); + } + } + } + invalidated_txs.push(mempool_tx.metadata.txid); } _ => {} @@ -2405,7 +2429,22 @@ impl StacksBlockBuilder { return Ok(None); } } - Error::TransactionTooBigError => { + Error::TransactionTooBigError(measured_cost) => { + if update_estimator { + if let Some(measured_cost) = measured_cost { + if let Err(e) = estimator.notify_event( + &txinfo.tx.payload, + &measured_cost, + &block_limit, + &stacks_epoch_id, + ) { + warn!("Error updating estimator"; + "txid" => %txinfo.metadata.txid, + "error" => ?e); + } + } + } + invalidated_txs.push(txinfo.metadata.txid); } Error::InvalidStacksTransaction(_, true) => { @@ -2714,9 +2753,18 @@ impl BlockBuilder for StacksBlockBuilder { 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" + ); + None + }; return TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), ); } else { warn!( @@ -2795,9 +2843,19 @@ impl BlockBuilder for StacksBlockBuilder { 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" + ); + None + }; + return TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), ); } else { warn!( diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 2ce250d991a..8af9cf6ec72 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -104,7 +104,7 @@ pub enum Error { NotInSameFork, InvalidChainstateDB, BlockTooBigError, - TransactionTooBigError, + TransactionTooBigError(Option), BlockCostExceeded, NoTransactionsToMine, MicroblockStreamTooLongError, @@ -168,7 +168,9 @@ impl fmt::Display for Error { Error::NoSuchBlockError => write!(f, "No such Stacks block"), Error::InvalidChainstateDB => write!(f, "Invalid chainstate database"), Error::BlockTooBigError => write!(f, "Too much data in block"), - Error::TransactionTooBigError => write!(f, "Too much data in transaction"), + Error::TransactionTooBigError(ref c) => { + write!(f, "Too much data in transaction: measured_cost={c:?}") + } Error::BlockCostExceeded => write!(f, "Block execution budget exceeded"), Error::MicroblockStreamTooLongError => write!(f, "Too many microblocks in stream"), Error::IncompatibleSpendingConditionError => { @@ -246,7 +248,7 @@ impl error::Error for Error { Error::NoSuchBlockError => None, Error::InvalidChainstateDB => None, Error::BlockTooBigError => None, - Error::TransactionTooBigError => None, + Error::TransactionTooBigError(..) 
=> None, Error::BlockCostExceeded => None, Error::MicroblockStreamTooLongError => None, Error::IncompatibleSpendingConditionError => None, @@ -291,7 +293,7 @@ impl Error { Error::NoSuchBlockError => "NoSuchBlockError", Error::InvalidChainstateDB => "InvalidChainstateDB", Error::BlockTooBigError => "BlockTooBigError", - Error::TransactionTooBigError => "TransactionTooBigError", + Error::TransactionTooBigError(..) => "TransactionTooBigError", Error::BlockCostExceeded => "BlockCostExceeded", Error::MicroblockStreamTooLongError => "MicroblockStreamTooLongError", Error::IncompatibleSpendingConditionError => "IncompatibleSpendingConditionError", From 5262c16873fe13c1a53c843e3a9c0215ddcd8411 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 15:14:01 -0400 Subject: [PATCH 869/910] fix: retry `insert_payload` on failure --- testnet/stacks-node/src/event_dispatcher.rs | 39 +++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index b63f9d462ed..bb05cd6128a 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -355,6 +355,42 @@ impl EventObserver { Ok(()) } + /// Insert a payload into the database, retrying on failure. + fn insert_payload_with_retry( + conn: &Connection, + url: &str, + payload: &serde_json::Value, + timeout: Duration, + ) { + let mut attempts = 0i64; + let mut backoff = Duration::from_millis(100); // Initial backoff duration + let max_backoff = Duration::from_secs(5); // Cap the backoff duration + + loop { + match Self::insert_payload(conn, url, payload, timeout) { + Ok(_) => { + // Successful insert, break the loop + return; + } + Err(err) => { + // Log the error, then retry after a delay + warn!("Failed to insert payload into event observer database: {:?}", err; + "backoff" => ?backoff, + "attempts" => attempts + ); + + // Wait for the backoff duration + sleep(backoff); + + // Increase the backoff duration (with exponential backoff) + backoff = std::cmp::min(backoff.saturating_mul(2), max_backoff); + + attempts = attempts.saturating_add(1); + } + } + } + } + fn get_pending_payloads( conn: &Connection, ) -> Result, db_error> { @@ -524,8 +560,7 @@ impl EventObserver { Connection::open(db_path).expect("Failed to open database for event observer"); // Insert the new payload into the database - Self::insert_payload(&conn, &full_url, payload, self.timeout) - .expect("Failed to insert payload into event observer database"); + Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout); // Process all pending payloads Self::process_pending_payloads(&conn); From fb9f046603c9afd5705a4fdd02f42f7a694427f9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 15:58:00 -0400 Subject: [PATCH 870/910] chore: update changelog for 3.0.0.0.0 --- CHANGELOG.md | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4025a66c3ac..0304c8fbe5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
-## [Unreleased] +## [3.0.0.0.0] -- Added support for Clarity 3 +### Added + +- Nakamoto consensus rules, activating in epoch 3.0 at block 867,867 +- Clarity 3, activating with epoch 3.0 - Keywords / variable - `tenure-height` added - `stacks-block-height` added @@ -16,10 +19,28 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-stacks-block-info?` added - `get-tenure-info?` added - `get-block-info?` removed -- Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint -- Added `tenure_height` to `/v2/info` endpoint -- Added optional `timeout_ms` to `events_observer` configuration -- Added support for re-sending events to event observers across restarts +- New RPC endpoints + - `/v3/blocks/:block_id` + - `/v3/blocks/upload/` + - `/v3/signer/:signer_pubkey/:cycle_num` + - `/v3/sortitions` + - `/v3/stacker_set/:cycle_num` + - `/v3/tenures/:block_id` + - `/v3/tenures/fork_info/:start/:stop` + - `/v3/tenures/info` + - `/v3/tenures/tip/:consensus_hash` +- Re-send events to event observers across restarts +- Support custom chain-ids for testing +- Add `replay-block` command to CLI + +### Changed + +- Strict config file validation (unknown fields will cause the node to fail to start) +- Add optional `timeout_ms` to `events_observer` configuration +- Modified RPC endpoints + - Include `tenure_height` in `/v2/info` endpoint + - Include `block_time` and `tenure_height` in `/new/block` event payload +- Various improvements to logging, reducing log spam and improving log messages ## [2.5.0.0.7] From 98930ed1c54f5feec77217c348e5190795c2b98b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 16:01:20 -0400 Subject: [PATCH 871/910] chore: add `[Unreleased]` and bold Nakamoto bullet --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0304c8fbe5e..7548d55545c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [Unreleased] + ## [3.0.0.0.0] ### Added -- Nakamoto consensus rules, activating in epoch 3.0 at block 867,867 +- **Nakamoto consensus rules, activating in epoch 3.0 at block 867,867** - Clarity 3, activating with epoch 3.0 - Keywords / variable - `tenure-height` added From 0cf78d269e019a21d10f4287f89d3bf3b2905adb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 16:02:13 -0400 Subject: [PATCH 872/910] chore: various improvements and bugfixes --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7548d55545c..dfb9d8b8093 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Include `tenure_height` in `/v2/info` endpoint - Include `block_time` and `tenure_height` in `/new/block` event payload - Various improvements to logging, reducing log spam and improving log messages +- Various improvements and bugfixes ## [2.5.0.0.7] From 1ba674fdd9eb58187df91f93e545b60be26763ba Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 22 Oct 2024 13:07:25 -0700 Subject: [PATCH 873/910] feat: update signer 3.0 changelog --- stacks-signer/CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index aa2b87deb7e..489fd39cf7a 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,24 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [3.0.0.0.0] + +### Added + +- Improved StackerDB message structures +- Improved mock signing during epoch 2.5 +- Include the `stacks-signer` binary version in startup logging and StackerDB messages +- Added a `monitor-signers` CLI command for better visibility into other signers on the network +- Support custom Chain ID in signer configuration +- Refresh the signer's sortition view when it sees a block proposal for a new tenure +- Fixed a race condition where a signer would try to update before StackerDB configuration was set + +### Changed + +- Migrate to new Stacks Node RPC endpoint `/v3/tenures/fork_info/:start/:stop` +- Improved chainstate storage for handling of forks and other state +- Updated prometheus metric labels to reduce high cardinality + ## [2.5.0.0.5.3] ### Added From 51879b990cce1fac8843632f2886d9e85759d190 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 16:10:49 -0400 Subject: [PATCH 874/910] chore: add SIP link to changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfb9d8b8093..fe5e200d17f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- **Nakamoto consensus rules, activating in epoch 3.0 at block 867,867** +- **Nakamoto consensus rules, activating in epoch 3.0 at block 867,867** (see [SIP-021](https://github.com/stacksgov/sips/blob/main/sips/sip-021/sip-021-nakamoto.md) for details) - Clarity 3, activating with epoch 3.0 - Keywords / variable - `tenure-height` added From 434317177bd294997bce13ba658870db06b96930 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:13:59 -0700 Subject: [PATCH 875/910] PR comments - remove unused fields --- docs/mining.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index a2a914c998f..4b3160d43bb 100644 --- a/docs/mining.md 
+++ b/docs/mining.md @@ -13,14 +13,9 @@ seed = "YOUR PRIVATE KEY" #mock_mining = True [miner] -# Smallest allowed tx fee, in microSTX -min_tx_fee = 100 # Time to spend on the first attempt to make a block, in milliseconds. # This can be small, so your node gets a block-commit into the Bitcoin mempool early. first_attempt_time_ms = 1000 -# Time to spend on subsequent attempts to make a block, in milliseconds. -# This can be bigger -- new block-commits will be RBF'ed. -subsequent_attempt_time_ms = 60000 # Time to spend mining a Nakamoto block, in milliseconds. nakamoto_attempt_time_ms = 20000 From beb3e63f19e77b5db435e98fa1a6d7aac3b84cca Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:15:37 -0700 Subject: [PATCH 876/910] remove unwanted field --- docs/mining.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index 4b3160d43bb..34a299cd1c7 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -13,9 +13,6 @@ seed = "YOUR PRIVATE KEY" #mock_mining = True [miner] -# Time to spend on the first attempt to make a block, in milliseconds. -# This can be small, so your node gets a block-commit into the Bitcoin mempool early. -first_attempt_time_ms = 1000 # Time to spend mining a Nakamoto block, in milliseconds. nakamoto_attempt_time_ms = 20000 From 5574a17530d1029c0037f0981caac62ae064c8a9 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:17:21 -0700 Subject: [PATCH 877/910] uncomment signer event-observer for sample config --- testnet/stacks-node/conf/mainnet-signer.toml | 12 +++++------- testnet/stacks-node/conf/testnet-signer.toml | 12 +++++------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/testnet/stacks-node/conf/mainnet-signer.toml index 226fcae806c..8683f076f24 100644 --- a/testnet/stacks-node/conf/mainnet-signer.toml +++ b/testnet/stacks-node/conf/mainnet-signer.toml @@ -14,11 +14,9 @@ peer_host = "127.0.0.1" # events_keys = ["*"] # timeout_ms = 60_000 -# Used if running a local stacks-signer service -# [[events_observer]] -# endpoint = "127.0.0.1:30000" -# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] +[[events_observer]] +endpoint = "127.0.0.1:30000" +events_keys = ["stackerdb", "block_proposal", "burn_blocks"] -# Used if running a local stacks-signer service -# [connection_options] -# auth_token = "" # fill with a unique password +[connection_options] +auth_token = "" # fill with a unique password diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/testnet/stacks-node/conf/testnet-signer.toml index 80226c5b89b..f4a9bc3b71b 100644 --- a/testnet/stacks-node/conf/testnet-signer.toml +++ b/testnet/stacks-node/conf/testnet-signer.toml @@ -18,14 +18,12 @@ pox_reward_length = 900 # events_keys = ["*"] # timeout_ms = 60_000 -# Used if running a local stacks-signer service -# [[events_observer]] -# endpoint = "127.0.0.1:30000" -# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] +[[events_observer]] +endpoint = "127.0.0.1:30000" +events_keys = ["stackerdb", "block_proposal", "burn_blocks"] -# Used if running a local stacks-signer service -# [connection_options] -# auth_token = "" # fill with a unique password +[connection_options] +auth_token = "" # fill with a unique password [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" From 5b773d41e66be1b60a1802c03eebb6f5bcd9f24d Mon Sep 17 00:00:00 2001 From: wileyj 
<2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:20:36 -0700 Subject: [PATCH 878/910] adding commented event_observer for mainnet --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 941b3490349..226fcae806c 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -13,3 +13,12 @@ peer_host = "127.0.0.1" # endpoint = "localhost:3700" # events_keys = ["*"] # timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password From c1790367135fdc7b31f02603afd97418886339df Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 16:55:37 -0700 Subject: [PATCH 879/910] Fix conf file to have valid port values in test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/conf/testnet-miner-conf.toml | 4 ++-- testnet/stacks-node/src/config.rs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 39af98b0919..93455dcee51 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -10,8 +10,8 @@ mode = "krypton" peer_host = "127.0.0.1" username = "" password = "" -rpc_port = -peer_port = +rpc_port = 12345 # Bitcoin RPC port +peer_port = 6789 # Bitcoin P2P port pox_prepare_length = 100 pox_reward_length = 900 # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 06588622461..0beed9471d2 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -3033,8 +3033,9 @@ mod tests { if path.is_file() { let file_name = path.file_name().unwrap().to_str().unwrap(); if file_name.ends_with(".toml") { + debug!("Parsing config file: {file_name}"); let _config = ConfigFile::from_path(path.to_str().unwrap()).unwrap(); - debug!("Parsed config file: {}", file_name); + debug!("Parsed config file: {file_name}"); } } } From 9daba453850a73070cb9b127f2b2df7d3aa283ee Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 23 Oct 2024 10:11:08 +0200 Subject: [PATCH 880/910] Fix link --- stacks-signer/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 246015bfb7a..d69d73742de 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -121,7 +121,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner as this could potentially expose sensitive data or functionalities to security risks \ if additional proper security checks are not integrated in place. \ For more information, check the documentation at \ - https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like." 
+ https://docs.stacks.co/guides-and-tutorials/running-a-signer#preflight-setup" ); let (res_send, res_recv) = channel(); let ev = SignerEventReceiver::new(config.network.is_mainnet()); From 18ea94700085ae1a4f0ab43919baba80ffed191c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 23 Oct 2024 10:05:26 -0700 Subject: [PATCH 881/910] signing_in_0th_tenure_of_reward_cycle should only check signers who signed the block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1744a3b4a85..98a087dd68e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5316,7 +5316,19 @@ fn signing_in_0th_tenure_of_reward_cycle() { }) .unwrap(); - for signer in &signer_public_keys { + let block_mined = test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .clone(); + // Must ensure that the signers that signed the block have their blocks_signed updated appropriately + for signature in &block_mined.signer_signature { + let signer = signer_public_keys + .iter() + .find(|pk| { + pk.verify(block_mined.signer_signature_hash.as_bytes(), signature) + .unwrap() + }) + .expect("Unknown signer signature"); let blocks_signed = get_v3_signer(&signer, next_reward_cycle); assert_eq!(blocks_signed, 1); } From 44862d5d69102785b7c3a918a53fc3814629d2f4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 23 Oct 2024 13:13:08 -0700 Subject: [PATCH 882/910] Do not use the burn block timestamp when comparing the min gap between blocks Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 134 +++++++++++---------- 1 file changed, 73 insertions(+), 61 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9422c6bd661..21fed3ca28f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3242,7 +3242,8 @@ fn signer_set_rollover() { #[test] #[ignore] -/// This test checks that the signers will broadcast a block once they receive enough signatures. +/// This test checks that the miners and signers will not produce Nakamoto blocks +/// until the minimum time has passed between blocks. 
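The assertion fix in [PATCH 881/910] above hinges on mapping each signature carried by the mined block back to the signer key that produced it, and only crediting those signers. A self-contained sketch of that mapping follows; the closure stands in for `StacksPublicKey::verify`, and plain integers stand in for keys and signatures (all names here are illustrative, not the node's API):

```rust
// Map each signature on a block back to the signer key that produced it;
// panic on a signature no known signer could have made, as the test does.
fn signers_credited<'a, K, S>(
    sighash: &[u8],
    signatures: &[S],
    signer_public_keys: &'a [K],
    verify: impl Fn(&K, &[u8], &S) -> bool,
) -> Vec<&'a K> {
    signatures
        .iter()
        .map(|sig| {
            signer_public_keys
                .iter()
                .find(|&pk| verify(pk, sighash, sig))
                .expect("Unknown signer signature")
        })
        .collect()
}

fn main() {
    // Toy scheme: a "signature" is valid iff it equals the key.
    let keys = [1u8, 2, 3];
    let sigs = [2u8];
    let signed = signers_credited(b"sighash", &sigs, &keys, |pk, _msg, sig| pk == sig);
    // Only signer 2 signed, so only signer 2 should have blocks_signed checked.
    assert_eq!(signed, vec![&2u8]);
}
```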
fn min_gap_between_blocks() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -3259,11 +3260,14 @@ fn min_gap_between_blocks() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + + let mut sender_nonce = 0; + let interim_blocks = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let time_between_blocks_ms = 10_000; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr.clone(), (send_amt + send_fee) * interim_blocks)], |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -3276,73 +3280,81 @@ fn min_gap_between_blocks() { signer_test.boot_to_epoch_3(); - info!("Ensure that the first Nakamoto block is mined after the gap is exceeded"); + info!("Ensure that the first Nakamoto block was mined"); let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); assert_eq!(blocks.len(), 1); - let first_block = blocks.last().unwrap(); - let blocks = test_observer::get_blocks(); - let parent = blocks - .iter() - .find(|b| b.get("block_height").unwrap() == first_block.stacks_block_height - 1) - .unwrap(); - let first_block_time = first_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - let parent_block_time = parent.get("burn_block_time").unwrap().as_u64().unwrap(); - assert!( - Duration::from_secs(first_block_time - parent_block_time) - >= Duration::from_millis(time_between_blocks_ms), - "First block proposed before gap was exceeded: {}s - {}s > {}ms", - first_block_time, - parent_block_time, - time_between_blocks_ms - ); + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..interim_blocks { + let blocks_processed_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); - // Submit a tx so that the miner will mine a block - let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + info!("Submitted transfer tx and waiting for block to be processed"); + wait_for(60, || { + let blocks_processed = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!("Mined interim block:{}", interim_block_ix); + } - info!("Submitted transfer tx and waiting for block to be processed. 
Ensure it does not arrive before the gap is exceeded"); wait_for(60, || { - let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - Ok(blocks.len() >= 2) + let new_blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + Ok(new_blocks.len() == blocks.len() + interim_blocks as usize) }) .unwrap(); - // Verify that the second Nakamoto block is mined after the gap is exceeded - let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - let last_block = blocks.last().unwrap(); - let last_block_time = last_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - assert!(blocks.len() >= 2, "Expected at least 2 mined blocks"); - let penultimate_block = blocks.get(blocks.len() - 2).unwrap(); - let penultimate_block_time = penultimate_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - assert!( - Duration::from_secs(last_block_time - penultimate_block_time) - >= Duration::from_millis(time_between_blocks_ms), - "Block proposed before gap was exceeded: {}s - {}s > {}ms", - last_block_time, - penultimate_block_time, - time_between_blocks_ms - ); - + // Verify that every Nakamoto block is mined after the gap is exceeded between each + let mut blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + blocks.sort_by(|a, b| a.stacks_block_height.cmp(&b.stacks_block_height)); + for i in 1..blocks.len() { + let block = &blocks[i]; + let parent_block = &blocks[i - 1]; + assert_eq!( + block.stacks_block_height, + parent_block.stacks_block_height + 1 + ); + info!( + "Checking that the time between blocks {} and {} is respected", + parent_block.stacks_block_height, block.stacks_block_height + ); + let block_time = block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let parent_block_time = parent_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + assert!( + block_time > parent_block_time, + "Block time is BEFORE parent block time" + ); + assert!( + Duration::from_secs(block_time - parent_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "Block mined before gap was exceeded: {block_time}s - {parent_block_time}s > {time_between_blocks_ms}ms", + ); + } + debug!("Shutting down min_gap_between_blocks test"); signer_test.shutdown(); } From 03831e07e54b2510d42ce459b8ace7ac37190a04 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 23 Oct 2024 13:48:33 -0700 Subject: [PATCH 883/910] Increase the reward cycle length to increase the likelihood of the test case hitting before reward cycle end Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9422c6bd661..a141de70db7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1746,6 +1746,8 @@ fn miner_forking() { let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); + let max_sortitions = 30; + // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 @@ -1776,6 +1778,7 @@ fn miner_forking() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_sortitions as u32);
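The invariant exercised by the revised `min_gap_between_blocks` test above can be stated in isolation: a child block's header timestamp must be strictly later than its parent's, and the gap must be at least `min_time_between_blocks_ms`. A minimal sketch under those assumptions (`respects_min_gap` and the literal timestamps are illustrative, not part of the node):

```rust
use std::time::Duration;

// Toy check mirroring the test's assertions: the child block's timestamp
// (seconds) must be strictly later than the parent's, and the gap must be
// at least the configured minimum (milliseconds).
fn respects_min_gap(parent_ts_secs: u64, block_ts_secs: u64, min_gap_ms: u64) -> bool {
    block_ts_secs > parent_ts_secs
        && Duration::from_secs(block_ts_secs - parent_ts_secs)
            >= Duration::from_millis(min_gap_ms)
}

fn main() {
    // A block mined in the same second as its parent fails the check.
    assert!(!respects_min_gap(1_700_000_000, 1_700_000_000, 1_000));
    // With the test's 10_000 ms setting, a 10 s gap passes.
    assert!(respects_min_gap(1_700_000_000, 1_700_000_010, 10_000));
}
```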
config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -1797,11 +1800,10 @@ fn miner_forking() { ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); - let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = node_2_rpc_bind; + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -1931,7 +1933,6 @@ fn miner_forking() { // (a) its the first nakamoto tenure // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) let mut expects_miner_2_to_be_valid = true; - let max_sortitions = 20; // due to the random nature of mining sortitions, the way this test is structured // is that keeps track of two scenarios that we want to cover, and once enough sortitions // have been produced to cover those scenarios, it stops and checks the results at the end. From 37390579197c484620fe685b6811573383926e7c Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:32:36 +0200 Subject: [PATCH 884/910] docs: explicitly specify burn or stacks block height parameter in clarity get info functions --- clarity/src/vm/docs/mod.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index d718ff5366a..9075c55e713 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1775,17 +1775,17 @@ this value is less than or equal to the value for `miner-spend-total` at the sam const GET_BURN_BLOCK_INFO_API: SpecialAPI = SpecialAPI { input_type: "BurnBlockInfoPropertyName, uint", output_type: "(optional buff) | (optional (tuple (addrs (list 2 (tuple (hashbytes (buff 32)) (version (buff 1))))) (payout uint)))", - snippet: "get-burn-block-info? ${1:prop} ${2:block-height}", - signature: "(get-burn-block-info? prop-name block-height)", + snippet: "get-burn-block-info? ${1:prop} ${2:burn-block-height}", + signature: "(get-burn-block-info? prop-name burn-block-height)", description: "The `get-burn-block-info?` function fetches data for a block of the given *burnchain* block height. The -value and type returned are determined by the specified `BlockInfoPropertyName`. Valid values for `block-height` only +value and type returned are determined by the specified `BlockInfoPropertyName`. Valid values for `burn-block-height` only include heights between the burnchain height at the time the Stacks chain was launched, and the last-processed burnchain -block. If the `block-height` argument falls outside of this range, then `none` shall be returned. +block. If the `burn-block-height` argument falls outside of this range, then `none` shall be returned. 
The following `BlockInfoPropertyName` values are defined: * The `header-hash` property returns a 32-byte buffer representing the header hash of the burnchain block at -burnchain height `block-height`. +burnchain height `burn-block-height`. * The `pox-addrs` property returns a tuple with two items: a list of up to two PoX addresses that received a PoX payout at that block height, and the amount of burnchain tokens paid to each address (note that per the blockchain consensus rules, each PoX payout will be the same for each address in the block-commit transaction). @@ -1811,11 +1811,11 @@ The `addrs` list contains the same PoX address values passed into the PoX smart const GET_STACKS_BLOCK_INFO_API: SpecialAPI = SpecialAPI { input_type: "StacksBlockInfoPropertyName, uint", - snippet: "get-stacks-block-info? ${1:prop} ${2:block-height}", + snippet: "get-stacks-block-info? ${1:prop} ${2:stacks-block-height}", output_type: "(optional buff) | (optional uint)", - signature: "(get-stacks-block-info? prop-name block-height)", + signature: "(get-stacks-block-info? prop-name stacks-block-height)", description: "The `get-stacks-block-info?` function fetches data for a block of the given *Stacks* block height. The -value and type returned are determined by the specified `StacksBlockInfoPropertyName`. If the provided `block-height` does +value and type returned are determined by the specified `StacksBlockInfoPropertyName`. If the provided `stacks-block-height` does not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names are as follows: @@ -1840,11 +1840,11 @@ the mining of this block started, but is not guaranteed to be accurate. This tim const GET_TENURE_INFO_API: SpecialAPI = SpecialAPI { input_type: "TenureInfoPropertyName, uint", - snippet: "get-tenure-info? ${1:prop} ${2:block-height}", + snippet: "get-tenure-info? ${1:prop} ${2:stacks-block-height}", output_type: "(optional buff) | (optional uint)", - signature: "(get-tenure-info? prop-name block-height)", + signature: "(get-tenure-info? prop-name stacks-block-height)", description: "The `get-tenure-info?` function fetches data for the tenure at the given block height. The -value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `block-height` does +value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `stacks-block-height` does not correspond to an existing block prior to the current block, the function returns `none`. 
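The renames in [PATCH 884/910] all make the same point: a burnchain height and a Stacks height are different quantities, and calling both `block-height` invites passing one where the other is meant. A toy Rust analogue of that distinction (illustrative newtypes, not the node's actual types):

```rust
// Distinct height types cannot be swapped by accident, which is the
// confusion the renamed doc parameters guard against.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BurnBlockHeight(u64);

#[derive(Debug, Clone, Copy, PartialEq)]
struct StacksBlockHeight(u64);

// Models a lookup like get-burn-block-info?: out-of-range heights yield None.
fn get_burn_block_info(height: BurnBlockHeight) -> Option<[u8; 32]> {
    let _ = height;
    None
}

fn main() {
    let stacks_height = StacksBlockHeight(5);
    // get_burn_block_info(stacks_height) would not compile; only a
    // BurnBlockHeight is accepted, mirroring `burn-block-height` in the docs.
    let _ = stacks_height;
    assert_eq!(get_burn_block_info(BurnBlockHeight(5)), None);
}
```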
The currently available property names are as follows: From f5911fbf181d0470efedaaabe2ec05720dfae26a Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 24 Oct 2024 07:30:16 -0700 Subject: [PATCH 885/910] fix: make get_decorator its own function --- stacks-common/src/util/log.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index e86ed7f44b5..534f3f99691 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -221,10 +221,7 @@ fn make_logger() -> Logger { } else { let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); let pretty_print = env::var("STACKS_LOG_PP") == Ok("1".into()); - #[cfg(not(any(test, feature = "testing")))] - let decorator = slog_term::PlainSyncDecorator::new(std::io::stderr()); - #[cfg(any(test, feature = "testing"))] - let decorator = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); + let decorator = get_decorator(); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); let logger = Logger::root(drain.ignore_res(), o!()); @@ -232,6 +229,16 @@ fn make_logger() -> Logger { } } +#[cfg(any(test, feature = "testing"))] +fn get_decorator() -> slog_term::PlainSyncDecorator { + slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter) +} + +#[cfg(not(any(test, feature = "testing")))] +fn get_decorator() -> slog_term::PlainSyncDecorator { + slog_term::PlainSyncDecorator::new(std::io::stderr()) +} + fn inner_get_loglevel() -> slog::Level { if env::var("STACKS_LOG_TRACE") == Ok("1".into()) { slog::Level::Trace From 08f293e1b11cd2810ce85ca3bb913bb86fadf5ab Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 24 Oct 2024 11:31:47 -0700 Subject: [PATCH 886/910] Ensure we have a minimum expected gap in flash blocks test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 90334cce9b4..b5140a06eed 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1895,11 +1895,10 @@ fn flash_blocks_on_epoch_3() { } } - // Verify that there's a gap of exactly 3 blocks - assert_eq!( - gap_end - gap_start + 1, - 3, - "Expected a gap of exactly 3 burn blocks due to flash blocks, found gap from {} to {}", + // Verify that there's a gap of AT LEAST 3 blocks + assert!( + gap_end - gap_start + 1 >= 3, + "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {} to {}", gap_start, gap_end ); From 6fa2b6d20e59208fd6399d86ddd2b77325a7ab03 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 24 Oct 2024 12:02:22 -0700 Subject: [PATCH 887/910] Do not expect an exact number of signatures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++------------ 1 file changed, 63 insertions(+), 93 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a8e64decc7f..d0f3dfff83b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4804,18 +4804,10 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", 
&signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - let start_time = Instant::now(); // wait until we get a sortition. // we might miss a block-commit at the start of epoch 3 @@ -4828,6 +4820,12 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { }) .expect("Timed out waiting for sortition"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer( @@ -4842,13 +4840,10 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Submitted tx {tx} in to mine block N"); // a tenure has begun, so wait until we mine a block - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + }) + .expect("Timed out waiting for block to be mined and processed"); sender_nonce += 1; let info_after = signer_test @@ -4892,61 +4887,51 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let start_time = Instant::now(); let mut block = None; - loop { - if block.is_none() { - block = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .find_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockProposal(proposal) => { - if proposal.block.header.consensus_hash - == info_before.stacks_tip_consensus_hash - { - Some(proposal.block) - } else { - None - } + wait_for(30, || { + block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None } - _ => None, } - }); - } - if let Some(block) = &block { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - if block.header.signer_signature_hash() - == accepted.signer_signature_hash - { - Some(accepted.signature) - } else { - None 
- } + _ => None, + } + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + if block.header.signer_signature_hash() == accepted.signer_signature_hash { + Some(accepted.signature) + } else { + None } - _ => None, } - }) - .collect::>(); - if signatures.len() == num_signers { - break; - } - } - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for signers signatures for first block proposal", - ); - sleep_ms(1000); - } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("Test timed out while waiting for signers signatures for first block proposal"); let block = block.unwrap(); let blocks_after = mined_blocks.load(Ordering::SeqCst); @@ -4979,9 +4964,8 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" ); // Wait for the miner to propose a new invalid block N+1' - let start_time = Instant::now(); let mut rejected_block = None; - while rejected_block.is_none() { + wait_for(30, || { rejected_block = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -5002,11 +4986,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { _ => None, } }); - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for N+1' block proposal", - ); - } + Ok(rejected_block.is_some()) + }) + .expect("Timed out waiting for block proposal of N+1' block proposal"); info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); @@ -5015,7 +4997,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); - loop { + wait_for(30, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events .into_iter() @@ -5037,14 +5019,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::>(); - if block_rejections.len() == num_signers { - break; - } - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + Ok(block_rejections.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal rejections"); // Induce block N+2 to get mined let transfer_tx = make_stacks_transfer( @@ -5060,7 +5037,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Submitted tx {tx} in to attempt to mine block N+2"); info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); - loop { + wait_for(30, || { // N.B. 
have to use /v2/info because mined_blocks only increments if the miner's signing // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) let info = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - if info_before.stacks_tip_height + 2 <= info.stacks_tip_height { - break; - } - - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + Ok(info_before.stacks_tip_height + 2 <= info.stacks_tip_height) + }) + .expect("Timed out waiting for blocks to be mined"); let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); @@ -5096,7 +5066,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Not a Nakamoto block") .signer_signature .len(); - assert_eq!(nmb_signatures, num_signers); + assert!(nmb_signatures >= num_signers * 7 / 10); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); From 68ea4da1c105a894406891caf7adc62f05652fdd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 25 Oct 2024 12:43:45 -0700 Subject: [PATCH 888/910] Increment block responses sent all the time Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2cb10a98178..2d6bfa003d4 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -339,11 +339,17 @@ impl Signer { }; // Submit a proposal response to the .signers contract for miners debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - if let Err(e) = self + let accepted = matches!(block_response, BlockResponse::Accepted(..)); + match self .stackerdb .send_message_with_retry::(block_response.into()) { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + Ok(_) => { + crate::monitoring::increment_block_responses_sent(accepted); + } + Err(e) => { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } } return; } @@ -612,12 +618,12 @@ impl Signer { info!( "{self}: Broadcasting a block response to stacks node: {response:?}"; ); + let accepted = matches!(response, BlockResponse::Accepted(..)); match self .stackerdb - .send_message_with_retry::(response.clone().into()) + .send_message_with_retry::(response.into()) { Ok(_) => { - let accepted = matches!(response, BlockResponse::Accepted(..)); crate::monitoring::increment_block_responses_sent(accepted); } Err(e) => { From c3dbc5d2678ad342ec4ebe9a26628373adacb22b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 25 Oct 2024 17:25:06 -0700 Subject: [PATCH 889/910] Store the rejected block in the database in testing directive case Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 64 ++++++++++++++-------- testnet/stacks-node/src/tests/signer/v0.rs | 2 + 2 files changed, 42 insertions(+), 24 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2cb10a98178..df78c5cc7d3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -428,30 +428,8 @@ impl Signer { }; #[cfg(any(test, feature = "testing"))] - let block_response = match &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() { - Some(public_keys) => { - if
public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - )) - } else { - None - } - } - None => block_response, - }; + let block_response = + self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation @@ -935,6 +913,44 @@ impl Signer { false } + #[cfg(any(test, feature = "testing"))] + fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { + return block_response; + }; + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. 
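The comment above captures why [PATCH 889/910] persists the rejection: a repeat proposal of the same block must keep being rejected even once the testing directive is lifted. A minimal sketch of that bookkeeping, with a `HashSet` standing in for `SignerDb` (all names are illustrative, not the signer's API):

```rust
use std::collections::HashSet;

struct TestSigner {
    key: u8,
    // Stand-in for SignerDb: sighashes this signer has rejected.
    rejected: HashSet<u64>,
}

impl TestSigner {
    // Returns true if the proposal identified by `sighash` must be rejected.
    fn should_reject(&mut self, reject_keys: Option<&HashSet<u8>>, sighash: u64) -> bool {
        if self.rejected.contains(&sighash) {
            // Rejected in a prior round: stay consistent on the repeat proposal.
            return true;
        }
        let Some(keys) = reject_keys else {
            return false;
        };
        if keys.contains(&self.key) {
            // Persist the rejection, as the patch does via signer_db.insert_block.
            self.rejected.insert(sighash);
            return true;
        }
        false
    }
}

fn main() {
    let mut signer = TestSigner { key: 7, rejected: HashSet::new() };
    let directive = Some(HashSet::from([7u8]));
    assert!(signer.should_reject(directive.as_ref(), 42));
    // Even after the directive is cleared, the same block stays rejected.
    assert!(signer.should_reject(None, 42));
}
```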
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + )) + } else { + None + } + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d0f3dfff83b..976ebc2cd06 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4246,6 +4246,8 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); + // Make a new stacks transaction to create a different block signature, but make sure to propose it + // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, From 938e1213f6708da86947ea1fd13df707ce67aa60 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 28 Oct 2024 08:55:51 -0700 Subject: [PATCH 890/910] Update block proposal timeout default to 10 mins Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index c0514274e10..6fc7c7b2dda 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -34,7 +34,7 @@ use stacks_common::util::hash::Hash160; use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; -const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; +const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; #[derive(thiserror::Error, Debug)] From c8d00ab7fcfde6507ab9778227258db6b310fd62 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 28 Oct 2024 14:01:46 -0700 Subject: [PATCH 891/910] Wait for the tip to update before proceeding Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d0f3dfff83b..2958ea13823 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4841,7 +4841,13 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // a tenure has begun, so wait until we mine a block wait_for(30, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + let new_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && new_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for block to be mined and processed"); From 41c036918a0fcb7b6a7187f6747c8f1ba01c9c62 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 28 Oct 2024 22:43:13 -0700 Subject: [PATCH 892/910] Do not attempt to process a block validation response for an already globally processed block Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 
2cb10a98178..65d764baa8d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -563,7 +563,16 @@ impl Signer { .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { - Ok(Some(block_info)) => block_info, + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } else { + block_info + } + } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); From 3d67fcbca55e658c845ab5b363912c2c56937f26 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 29 Oct 2024 08:02:59 -0700 Subject: [PATCH 893/910] Remove unnecessary elses in handle_block_validate_* Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 65d764baa8d..8dc73fcef78 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -520,9 +520,8 @@ impl Signer { { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); return None; - } else { - block_info } + block_info } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? @@ -569,9 +568,8 @@ impl Signer { { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); return None; - } else { - block_info } + block_info } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? From d734345c75cc33a4d809b34f1a245914a11d9074 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 30 Oct 2024 12:25:38 -0400 Subject: [PATCH 894/910] feat: add pause after block rejections Fixes: #5405 --- testnet/stacks-node/src/config.rs | 14 +++++++++++++- testnet/stacks-node/src/nakamoto_node/miner.rs | 14 +++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0beed9471d2..b6517eadbe0 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,9 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -2183,6 +2185,10 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, + /// Time in milliseconds to pause after receiving the first rejection, before proposing a new block. + pub first_rejection_pause_ms: u64, + /// Time in milliseconds to pause after receiving the subsequent rejections, before proposing a new block.
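The two fields added by [PATCH 894/910] implement a simple two-level backoff. A sketch of the policy in isolation; the constants mirror the patch defaults (5_000 ms, then 10_000 ms), while the surrounding mining loop is elided:

```rust
use std::time::Duration;

// Defaults introduced by the patch.
const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000;
const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000;

// Pause briefly after the first threshold rejection of a proposed block,
// and longer after any further rejection, before proposing again.
fn rejection_pause(last_block_rejected: bool) -> Duration {
    if last_block_rejected {
        Duration::from_millis(DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS)
    } else {
        Duration::from_millis(DEFAULT_FIRST_REJECTION_PAUSE_MS)
    }
}

fn main() {
    assert_eq!(rejection_pause(false), Duration::from_secs(5));
    assert_eq!(rejection_pause(true), Duration::from_secs(10));
}
```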
+ pub subsequent_rejection_pause_ms: u64, } impl Default for MinerConfig { @@ -2213,6 +2219,8 @@ impl Default for MinerConfig { max_reorg_depth: 3, pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, + first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, + subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, } } } @@ -2575,6 +2583,8 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub pre_nakamoto_mock_signing: Option, pub min_time_between_blocks_ms: Option, + pub first_rejection_pause_ms: Option, + pub subsequent_rejection_pause_ms: Option, } impl MinerConfigFile { @@ -2688,6 +2698,8 @@ impl MinerConfigFile { } else { ms }).unwrap_or(miner_default_config.min_time_between_blocks_ms), + first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), + subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a08c0ab353d..0caf0a7088d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -283,6 +283,7 @@ impl BlockMinerThread { } let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; + let mut last_block_rejected = false; // now, actually run this tenure loop { @@ -386,15 +387,26 @@ impl BlockMinerThread { return Err(e); } _ => { - error!("Error while gathering signatures: {e:?}. Will try mining again."; + // Sleep for a bit to allow signers to catch up + let pause_ms = if last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + thread::sleep(Duration::from_millis(pause_ms)); + last_block_rejected = true; + + error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; "signer_sighash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); + continue; } }, }; + last_block_rejected = false; new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { From 6e0eca60969f0029f67dc259790dbbb30391ad22 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 30 Oct 2024 12:34:27 -0400 Subject: [PATCH 895/910] chore: move log before sleep --- testnet/stacks-node/src/nakamoto_node/miner.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 0caf0a7088d..150762e9654 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -393,15 +393,14 @@ impl BlockMinerThread { } else { self.config.miner.first_rejection_pause_ms }; - thread::sleep(Duration::from_millis(pause_ms)); - last_block_rejected = true; error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; "signer_sighash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - + thread::sleep(Duration::from_millis(pause_ms)); + last_block_rejected = true; continue; } }, From 62470519f5d1ba99f0034030214e4089e04694d2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 30 Oct 2024 12:36:03 -0400 Subject: [PATCH 896/910] chore: improve comments --- testnet/stacks-node/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index b6517eadbe0..5df5de28f2b 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2185,9 +2185,9 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, - /// Time in milliseconds to pause after receiving the first rejection, before proposing a new block. + /// Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block. pub first_rejection_pause_ms: u64, - /// Time in milliseconds to pause after receiving the subsequent rejections, before proposing a new block. + /// Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block. pub subsequent_rejection_pause_ms: u64, } From eeab742c3228a544570de0f49500afa98eefd4d8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 30 Oct 2024 13:27:06 -0700 Subject: [PATCH 897/910] Change block rejection message to generic block response Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e60428be6eb..36f49923c3e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -348,7 +348,7 @@ impl Signer { crate::monitoring::increment_block_responses_sent(accepted); } Err(e) => { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + warn!("{self}: Failed to send block response to stacker-db: {e:?}",); } } return; From 18662ccd70897660b5070f234eb56d1deeb0132b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 31 Oct 2024 10:00:04 -0700 Subject: [PATCH 898/910] Clippy fix signer and stackslib cli.rs Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 2 +- stacks-signer/src/main.rs | 6 +----- stacks-signer/src/monitoring/mod.rs | 3 +-- stacks-signer/src/v0/signer.rs | 2 +- stackslib/src/cli.rs | 4 +--- 5 files changed, 5 insertions(+), 12 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 6fc7c7b2dda..7dd9cc4fdf9 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -335,7 +335,7 @@ Metrics endpoint: {metrics_endpoint} /// Get the chain ID for the network pub fn to_chain_id(&self) -> u32 { - self.chain_id.unwrap_or_else(|| match self.network { + self.chain_id.unwrap_or(match self.network { Network::Mainnet => CHAIN_ID_MAINNET, Network::Testnet | Network::Mocknet => CHAIN_ID_TESTNET, }) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index a23918f6f80..eac60cc53f3 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -157,11 +157,7 @@ fn 
handle_generate_stacking_signature( fn handle_check_config(args: RunSignerArgs) { let config = GlobalConfig::try_from(&args.config).unwrap(); - println!( - "Signer version: {}\nConfig: \n{}", - VERSION_STRING.to_string(), - config - ); + println!("Signer version: {}\nConfig: \n{}", *VERSION_STRING, config); } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 621886b9c0a..4f6956051cb 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -97,8 +97,7 @@ pub fn update_signer_nonce(nonce: u64) { #[allow(dead_code)] /// Remove the origin from the full path to avoid duplicate metrics for different origins fn remove_origin_from_path(full_path: &str, origin: &str) -> String { - let path = full_path.replace(origin, ""); - path + full_path.replace(origin, "") } /// Start a new RPC call timer. diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fcda30a2706..bcdcd2f7a0e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -191,7 +191,7 @@ impl SignerTrait for Signer { "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); - stacks_client.post_block_until_ok(self, &b); + stacks_client.post_block_until_ok(self, b); } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 9ff6e556441..587daee7879 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -528,8 +528,6 @@ fn replay_block( fn replay_block_nakamoto( sort_db: &mut SortitionDB, stacks_chain_state: &mut StacksChainState, - mut chainstate_tx: ChainstateTx, - clarity_instance: &mut ClarityInstance, block: &NakamotoBlock, block_size: u64, ) -> Result<(), ChainstateError> { @@ -785,7 +783,7 @@ fn replay_block_nakamoto( return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (receipt, _clarity_commit, _reward_set_data) = ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), From 118cc19c5ee97f4d6bb0165198e16f5623ffe51d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 31 Oct 2024 15:54:57 -0700 Subject: [PATCH 899/910] Use thiserror throughout testnet/stacks-node Signed-off-by: Jacinta Ferrant --- Cargo.lock | 9 ++--- Cargo.toml | 1 + libsigner/Cargo.toml | 2 +- stacks-signer/Cargo.toml | 2 +- testnet/stacks-node/Cargo.toml | 1 + testnet/stacks-node/src/burnchains/mod.rs | 36 ++++++------------- .../stacks-node/src/tests/bitcoin_regtest.rs | 13 ++----- 7 files changed, 22 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 227cd9d7684..8a3769b6a8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3328,6 +3328,7 @@ dependencies = [ "stackslib", "stx-genesis", "tempfile", + "thiserror", "tikv-jemallocator", "tiny_http", "tokio", @@ -3592,18 +3593,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 10dc427e2e5..c00c223c472 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } +thiserror = { version = "1.0.65" } # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 63241d32565..7c472365a12 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -30,7 +30,7 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = "0.12" [dev-dependencies] diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index da94cc10deb..139c34fba85 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -38,7 +38,7 @@ slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 958820b491a..0c68d22ee7b 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ rusqlite = { workspace = true } async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } http-types = { version = "2.12", optional = true } +thiserror = { workspace = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 0c9446304d4..0509993dd08 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -1,7 +1,6 @@ pub mod bitcoin_regtest_controller; pub mod mocknet_controller; -use std::fmt; use std::time::Instant; use stacks::burnchains; @@ -16,41 +15,26 @@ pub use self::bitcoin_regtest_controller::{make_bitcoin_indexer, BitcoinRegtestC pub use self::mocknet_controller::MocknetController; use super::operations::BurnchainOpSigner; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum Error { + #[error("ChainsCoordinator closed")] CoordinatorClosed, - IndexerError(burnchains::Error), + #[error("Indexer error: {0}")] + IndexerError(#[from] burnchains::Error), + #[error("Burnchain error")] BurnchainError, + #[error("Max fee rate exceeded")] MaxFeeRateExceeded, + #[error("Identical operation, not submitting")] IdenticalOperation, + #[error("No UTXOs available")] NoUTXOs, + #[error("Transaction submission failed: {0}")] TransactionSubmissionFailed(String), + #[error("Serializer error: {0}")] SerializerError(CodecError), } -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::CoordinatorClosed => write!(f, "ChainsCoordinator closed"), - Error::IndexerError(ref e) => write!(f, "Indexer error: {:?}", e), - Error::BurnchainError => write!(f, "Burnchain 
error"), - Error::MaxFeeRateExceeded => write!(f, "Max fee rate exceeded"), - Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), - Error::NoUTXOs => write!(f, "No UTXOs available"), - Error::TransactionSubmissionFailed(e) => { - write!(f, "Transaction submission failed: {e}") - } - Error::SerializerError(e) => write!(f, "Serializer error: {e}"), - } - } -} - -impl From for Error { - fn from(e: burnchains::Error) -> Self { - Error::IndexerError(e) - } -} - pub trait BurnchainController { fn start(&mut self, target_block_height_opt: Option) -> Result<(BurnchainTip, u64), Error>; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 621f92aa476..90b13101832 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -17,21 +17,14 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum BitcoinCoreError { + #[error("bitcoind spawn failed: {0}")] SpawnFailed(String), + #[error("bitcoind stop failed: {0}")] StopFailed(String), } -impl std::fmt::Display for BitcoinCoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), - Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), - } - } -} - type BitcoinResult = Result; pub struct BitcoinCoreController { From 28c723b64de43c5bd5e8ce9f840d9de9ce65ff56 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 1 Nov 2024 13:34:34 -0500 Subject: [PATCH 900/910] feat: add index for stacks block id in nakamoto_block_headers --- stackslib/src/chainstate/stacks/db/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 160e2dc60e5..9996af199ae 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -903,6 +903,7 @@ const CHAINSTATE_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight ON block_headers(block_height,affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", + "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; From 7d2c13c4fe8e94aff9db807932ea10913ac80a4e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 1 Nov 2024 14:31:52 -0500 Subject: [PATCH 901/910] bump chainstate schema version --- stackslib/src/chainstate/nakamoto/mod.rs | 6 +++++ stackslib/src/chainstate/stacks/db/mod.rs | 29 +++++++++++++++-------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b8d0441591a..0b25fb45046 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -289,6 +289,12 @@ lazy_static! 
{ ); "#, ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 1] = [ + r#" + UPDATE db_config SET version = "8"; + "# + ]; } #[cfg(test)] diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 9996af199ae..530b2ca6d12 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -55,7 +55,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, - NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, + NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, NAKAMOTO_CHAINSTATE_SCHEMA_5, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -299,14 +299,14 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 7, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 7, - StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 8, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 8, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, } } } @@ -680,7 +680,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "7"; +pub const CHAINSTATE_VERSION: &'static str = "8"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1133,6 +1133,15 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "7" => { + // migrate to nakamoto 3 + info!( + "Migrating chainstate schema from version 7 to 8: just bump the schema (added indexes)" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_5.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", From 20f2f4d0caaa0d0fad6457d81637340d276b63bf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 2 Nov 2024 09:33:41 -0500 Subject: [PATCH 902/910] use schema_5 for index creation rather than indices list --- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++++-- stackslib/src/chainstate/stacks/db/mod.rs | 1 - 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0b25fb45046..d88082ae41b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -290,10 +290,12 @@ lazy_static! 
{ "#, ]; - pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 1] = [ + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 2] = [ r#" UPDATE db_config SET version = "8"; - "# + "#, + // Add an index for index block hash in nakamoto block headers + "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", ]; } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 530b2ca6d12..e899be993e4 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -903,7 +903,6 @@ const CHAINSTATE_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight ON block_headers(block_height,affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", - "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; From da557e3f63e1ebd532f43978ea33de19e5544682 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 2 Nov 2024 10:00:51 -0500 Subject: [PATCH 903/910] changelog entry --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe5e200d17f..ff5fdd588b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) + ## [3.0.0.0.0] ### Added From 722d01b64a51bbbbd2d12cb3a0ac82098b30a84f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 2 Nov 2024 14:53:53 -0500 Subject: [PATCH 904/910] chore: clean out migration comments --- stackslib/src/chainstate/stacks/db/mod.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index e899be993e4..6b6f523f88f 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1087,28 +1087,24 @@ impl StacksChainState { while db_config.version != CHAINSTATE_VERSION { match db_config.version.as_str() { "1" => { - // migrate to 2 info!("Migrating chainstate schema from version 1 to 2"); for cmd in CHAINSTATE_SCHEMA_2.iter() { tx.execute_batch(cmd)?; } } "2" => { - // migrate to 3 info!("Migrating chainstate schema from version 2 to 3"); for cmd in CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "3" => { - // migrate to nakamoto 1 info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { tx.execute_batch(cmd)?; } } "4" => { - // migrate to nakamoto 2 info!( "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" ); @@ -1117,14 +1113,12 @@ impl StacksChainState { } } "5" => { - // migrate to nakamoto 3 info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "6" => { - // migrate to nakamoto 3 info!( "Migrating chainstate schema from version 6 to 7: adds signer_stats table" ); @@ -1133,9 +1127,8 @@ impl StacksChainState { } } "7" => { - // migrate to nakamoto 3 info!( - "Migrating chainstate schema from version 7 to 8: just 
bump the schema (added indexes)" + "Migrating chainstate schema from version 7 to 8: add index for nakamoto block headers" ); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_5.iter() { tx.execute_batch(cmd)?; From 72d45f57e07a6d64b0eba88106676951ab02513c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 2 Nov 2024 14:34:13 -0700 Subject: [PATCH 905/910] Fix clippy in stacks node Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/tests/mod.rs | 4 +- .../burnchains/bitcoin_regtest_controller.rs | 187 ++- .../src/burnchains/mocknet_controller.rs | 6 +- testnet/stacks-node/src/chain_data.rs | 53 +- testnet/stacks-node/src/config.rs | 144 +- testnet/stacks-node/src/event_dispatcher.rs | 72 +- testnet/stacks-node/src/globals.rs | 11 +- testnet/stacks-node/src/keychain.rs | 24 +- testnet/stacks-node/src/main.rs | 19 +- testnet/stacks-node/src/nakamoto_node.rs | 8 +- .../stacks-node/src/nakamoto_node/miner.rs | 59 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 6 +- .../stacks-node/src/nakamoto_node/relayer.rs | 51 +- .../src/nakamoto_node/sign_coordinator.rs | 16 +- testnet/stacks-node/src/neon_node.rs | 467 ++++--- testnet/stacks-node/src/node.rs | 68 +- testnet/stacks-node/src/operations.rs | 3 +- .../stacks-node/src/run_loop/boot_nakamoto.rs | 6 +- testnet/stacks-node/src/run_loop/helium.rs | 24 +- testnet/stacks-node/src/run_loop/mod.rs | 11 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 17 +- testnet/stacks-node/src/run_loop/neon.rs | 44 +- testnet/stacks-node/src/stacks_events.rs | 2 +- testnet/stacks-node/src/syncctl.rs | 20 +- testnet/stacks-node/src/tenure.rs | 9 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 36 +- testnet/stacks-node/src/tests/epoch_205.rs | 68 +- testnet/stacks-node/src/tests/epoch_21.rs | 686 ++++------ testnet/stacks-node/src/tests/epoch_22.rs | 275 ++-- testnet/stacks-node/src/tests/epoch_23.rs | 46 +- testnet/stacks-node/src/tests/epoch_24.rs | 206 ++- testnet/stacks-node/src/tests/epoch_25.rs | 8 +- testnet/stacks-node/src/tests/integrations.rs | 397 +++--- testnet/stacks-node/src/tests/mempool.rs | 179 +-- testnet/stacks-node/src/tests/mod.rs | 203 +-- .../src/tests/nakamoto_integrations.rs | 648 ++++----- .../src/tests/neon_integrations.rs | 1195 +++++++---------- testnet/stacks-node/src/tests/signer/mod.rs | 32 +- testnet/stacks-node/src/tests/signer/v0.rs | 271 ++-- testnet/stacks-node/src/tests/stackerdb.rs | 40 +- 40 files changed, 2392 insertions(+), 3229 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 8b66c019f0d..6e6fdfd8f72 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -475,14 +475,14 @@ impl TestStacksNode { }; if StacksChainState::has_stored_block( - &self.chainstate.db(), + self.chainstate.db(), &self.chainstate.blocks_path, &consensus_hash, &bc.block_header_hash, ) .unwrap() && !StacksChainState::is_block_orphaned( - &self.chainstate.db(), + self.chainstate.db(), &consensus_hash, &bc.block_header_hash, ) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 82282926d3e..06cc4799ffa 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -186,12 +186,11 @@ pub fn make_bitcoin_indexer( let (_, network_type) = config.burnchain.get_bitcoin_network(); let indexer_runtime = 
BitcoinIndexerRuntime::new(network_type); - let burnchain_indexer = BitcoinIndexer { + BitcoinIndexer { config: indexer_config, runtime: indexer_runtime, - should_keep_running: should_keep_running, - }; - burnchain_indexer + should_keep_running, + } } pub fn get_satoshis_per_byte(config: &Config) -> u64 { @@ -215,7 +214,7 @@ impl LeaderBlockCommitFees { let mut fees = LeaderBlockCommitFees::estimated_fees_from_payload(payload, config); fees.spent_in_attempts = cmp::max(1, self.spent_in_attempts); fees.final_size = self.final_size; - fees.fee_rate = self.fee_rate + get_rbf_fee_increment(&config); + fees.fee_rate = self.fee_rate + get_rbf_fee_increment(config); fees.is_rbf_enabled = true; fees } @@ -306,8 +305,7 @@ impl BitcoinRegtestController { burnchain: Option, should_keep_running: Option>, ) -> Self { - std::fs::create_dir_all(&config.get_burnchain_path_str()) - .expect("Unable to create workdir"); + std::fs::create_dir_all(config.get_burnchain_path_str()).expect("Unable to create workdir"); let (_, network_id) = config.burnchain.get_bitcoin_network(); let res = SpvClient::new( @@ -434,11 +432,10 @@ impl BitcoinRegtestController { /// Get the default Burnchain instance from our config fn default_burnchain(&self) -> Burnchain { - let burnchain = match &self.burnchain_config { + match &self.burnchain_config { Some(burnchain) => burnchain.clone(), None => self.config.get_burnchain(), - }; - burnchain + } } /// Get the PoX constants in use @@ -491,7 +488,7 @@ impl BitcoinRegtestController { (None, Some(chain_tip)) => chain_tip.clone(), (Some(state_transition), _) => { let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::from(state_transition), received_at: Instant::now(), }; @@ -501,7 +498,7 @@ impl BitcoinRegtestController { (None, None) => { // can happen at genesis let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::noop(), received_at: Instant::now(), }; @@ -602,8 +599,8 @@ impl BitcoinRegtestController { }; let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, - state_transition: state_transition, + block_snapshot, + state_transition, received_at: Instant::now(), }; @@ -641,11 +638,11 @@ impl BitcoinRegtestController { let filter_addresses = vec![addr2str(&address)]; let pubk = if self.config.miner.segwit { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; test_debug!("Import public key '{}'", &pubk.to_hex()); @@ -753,11 +750,11 @@ impl BitcoinRegtestController { } let pubk = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; // Configure UTXO filter @@ -1013,7 +1010,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1026,10 +1023,8 @@ impl BitcoinRegtestController { }; tx.output = vec![consensus_output]; - tx.output.push( - PoxAddress::Standard(payload.recipient.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), - ); + tx.output + .push(PoxAddress::Standard(payload.recipient, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT)); self.finalize_tx( epoch_id, @@ 
-1099,7 +1094,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1113,8 +1108,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output.push( - PoxAddress::Standard(payload.delegate_to.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), + PoxAddress::Standard(payload.delegate_to, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT), ); self.finalize_tx( @@ -1180,7 +1174,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1271,7 +1265,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output - .push(PoxAddress::Standard(payload.output.clone(), None).to_bitcoin_tx_out(output_amt)); + .push(PoxAddress::Standard(payload.output, None).to_bitcoin_tx_out(output_amt)); self.finalize_tx( epoch_id, @@ -1347,7 +1341,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1388,10 +1382,9 @@ impl BitcoinRegtestController { fn magic_bytes(&self) -> Vec { #[cfg(test)] { - if let Some(set_bytes) = TEST_MAGIC_BYTES + if let Some(set_bytes) = *TEST_MAGIC_BYTES .lock() .expect("FATAL: test magic bytes mutex poisoned") - .clone() { return set_bytes.to_vec(); } @@ -1399,6 +1392,7 @@ impl BitcoinRegtestController { self.config.burnchain.magic_bytes.as_bytes().to_vec() } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation( &mut self, epoch_id: StacksEpochId, @@ -1407,7 +1401,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, previous_fees: Option, - previous_txids: &Vec, + previous_txids: &[Txid], ) -> Result { let _ = self.sortdb_mut(); let burn_chain_tip = self @@ -1433,6 +1427,7 @@ impl BitcoinRegtestController { ) } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation_at_burnchain_height( &mut self, epoch_id: StacksEpochId, @@ -1441,7 +1436,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, mut estimated_fees: LeaderBlockCommitFees, - previous_txids: &Vec, + previous_txids: &[Txid], burnchain_block_height: u64, ) -> Result { let public_key = signer.get_public_key(); @@ -1502,8 +1497,8 @@ impl BitcoinRegtestController { debug!("Transaction relying on UTXOs: {:?}", utxos); let txid = Txid::from_bytes(&txid[..]).unwrap(); - let mut txids = previous_txids.clone(); - txids.push(txid.clone()); + let mut txids = previous_txids.to_vec(); + txids.push(txid); let ongoing_block_commit = OngoingBlockCommit { payload, utxos, @@ -1537,15 +1532,8 @@ impl BitcoinRegtestController { // Are we currently tracking an operation? if self.ongoing_block_commit.is_none() || !self.allow_rbf { // Good to go, let's build the transaction and send it. 
- let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1574,7 +1562,7 @@ impl BitcoinRegtestController { None, None, None, - &vec![], + &[], ); return res; } else { @@ -1589,13 +1577,13 @@ impl BitcoinRegtestController { .map_err(|_| BurnchainControllerError::BurnchainError)?; let mut found_last_mined_at = false; while traversal_depth < UTXO_CACHE_STALENESS_LIMIT { - if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh { + if burn_chain_tip.block_hash == ongoing_op.utxos.bhh { found_last_mined_at = true; break; } let parent = BurnchainDB::get_burnchain_block( - &burnchain_db.conn(), + burnchain_db.conn(), &burn_chain_tip.parent_block_hash, ) .map_err(|_| BurnchainControllerError::BurnchainError)?; @@ -1609,15 +1597,8 @@ impl BitcoinRegtestController { "Possible presence of fork or stale UTXO cache, invalidating cached set of UTXOs."; "cached_burn_block_hash" => %ongoing_op.utxos.bhh, ); - let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1659,7 +1640,7 @@ impl BitcoinRegtestController { None, Some(ongoing_op.utxos.clone()), None, - &vec![], + &[], ) } else { // Case 2) ii): Attempt to RBF @@ -1724,9 +1705,9 @@ impl BitcoinRegtestController { } else { // Fetch some UTXOs let addr = self.get_miner_address(epoch_id, public_key); - let utxos = match self.get_utxos( + match self.get_utxos( epoch_id, - &public_key, + public_key, total_required, utxos_to_exclude, block_height, @@ -1741,8 +1722,7 @@ impl BitcoinRegtestController { ); return Err(BurnchainControllerError::NoUTXOs); } - }; - utxos + } }; // Prepare a backbone for the tx @@ -1756,6 +1736,7 @@ impl BitcoinRegtestController { Ok((transaction, utxos)) } + #[allow(clippy::too_many_arguments)] fn finalize_tx( &mut self, epoch_id: StacksEpochId, @@ -1884,7 +1865,7 @@ impl BitcoinRegtestController { debug!("Not enough change to clear dust limit. Not adding change address."); } - for (_i, utxo) in utxos_set.utxos.iter().enumerate() { + for utxo in utxos_set.utxos.iter() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -2118,7 +2099,7 @@ impl BitcoinRegtestController { } }; - transaction.map(|tx| SerializedTx::new(tx)) + transaction.map(SerializedTx::new) } #[cfg(test)] @@ -2139,7 +2120,7 @@ impl BitcoinRegtestController { for pk in pks { debug!("Import public key '{}'", &pk.to_hex()); - if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, pk) { warn!("Error when importing pubkey: {e:?}"); } } @@ -2165,7 +2146,7 @@ impl BitcoinRegtestController { // otherwise, round robin generate blocks for i in 0..num_blocks { - let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let pk = &pks[i % pks.len()]; let address = self.get_miner_address(StacksEpochId::Epoch21, pk); if i < pks.len() { debug!( @@ -2249,10 +2230,7 @@ impl BurnchainController for BitcoinRegtestController { target_block_height_opt: Option, ) -> Result<(BurnchainTip, u64), BurnchainControllerError> { // if no target block height is given, just fetch the first burnchain block. 
- self.receive_blocks( - false, - target_block_height_opt.map_or_else(|| Some(1), |x| Some(x)), - ) + self.receive_blocks(false, target_block_height_opt.map_or_else(|| Some(1), Some)) } fn sync( @@ -2351,13 +2329,13 @@ impl SerializedTx { } pub fn txid(&self) -> Txid { - self.txid.clone() + self.txid } pub fn to_hex(&self) -> String { let formatted_bytes: Vec = self.bytes.iter().map(|b| format!("{:02x}", b)).collect(); - format!("{}", formatted_bytes.join("")) + formatted_bytes.join("").to_string() } } @@ -2419,7 +2397,7 @@ impl ParsedUTXO { } (lhs, rhs) => { warn!("Error while converting BTC to sat {:?} - {:?}", lhs, rhs); - return None; + None } } } @@ -2516,13 +2494,12 @@ impl BitcoinRPCRequest { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match (&config.burnchain.username, &config.burnchain.password) { - (Some(username), Some(password)) => { - let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - request.add_header("Authorization".into(), auth_token); - } - (_, _) => {} - }; + if let (Some(username), Some(password)) = + (&config.burnchain.username, &config.burnchain.password) + { + let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); + request.add_header("Authorization".into(), auth_token); + } request } @@ -2535,7 +2512,7 @@ impl BitcoinRPCRequest { id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; debug!("Got raw transaction {}: {:?}", txid, &res); Ok(res.get("result").unwrap().as_str().unwrap().to_string()) } @@ -2548,7 +2525,7 @@ impl BitcoinRPCRequest { id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; let confirmations = res .get("result") .ok_or_else(|| RPCError::Parsing("No 'result' field in bitcoind RPC response".into()))? 
@@ -2575,7 +2552,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; debug!( "Generated {} blocks to {}: {:?}", num_blocks, &address, &res @@ -2598,21 +2575,17 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; - let bhh = match res.as_object_mut() { - Some(res) => { - let res = res - .get("result") - .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh: String = serde_json::from_value(res.to_owned()) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh = BurnchainHeaderHash::from_hex(&bhh) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - bhh - } - _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), + let mut res = BitcoinRPCRequest::send(config, payload)?; + let Some(res) = res.as_object_mut() else { + return Err(RPCError::Parsing("Failed to get UTXOs".to_string())); }; - + let res = res + .get("result") + .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh_string: String = serde_json::from_value(res.to_owned()) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh = BurnchainHeaderHash::from_hex(&bhh_string) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); @@ -2630,7 +2603,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let txids_to_filter = if let Some(utxos_to_exclude) = utxos_to_exclude { utxos_to_exclude .utxos @@ -2710,7 +2683,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let json_resp = BitcoinRPCRequest::send(&config, payload)?; + let json_resp = BitcoinRPCRequest::send(config, payload)?; if let Some(e) = json_resp.get("error") { if !e.is_null() { @@ -2756,9 +2729,9 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let result = BitcoinRPCRequest::send(&config, payload)?; + let result = BitcoinRPCRequest::send(config, payload)?; let checksum = result - .get(&"result".to_string()) + .get("result") .and_then(|res| res.as_object()) .and_then(|obj| obj.get("checksum")) .and_then(|checksum_val| checksum_val.as_str()) @@ -2776,7 +2749,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; } Ok(()) } @@ -2790,7 +2763,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let mut wallets = Vec::new(); match res.as_object_mut() { Some(ref mut object) => match object.get_mut("result") { @@ -2828,12 +2801,12 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; Ok(()) } pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let request = BitcoinRPCRequest::build_rpc_request(config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); let host = request.preamble().host.hostname(); @@ -2841,9 
+2814,9 @@ impl BitcoinRPCRequest { let response = send_http_request(&host, port, request, timeout)?; if let HttpResponsePayload::JSON(js) = response.destruct().1 { - return Ok(js); + Ok(js) } else { - return Err(RPCError::Parsing("Did not get a JSON response".into())); + Err(RPCError::Parsing("Did not get a JSON response".into())) } } } @@ -3025,7 +2998,7 @@ mod tests { Some(utxo_set), None, leader_fees, - &vec![], + &[], 2212, ) .unwrap(); diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index d518f5bdea9..a626cfb4438 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -44,8 +44,8 @@ impl MocknetController { let burnchain = config.get_burnchain(); Self { - config: config, - burnchain: burnchain, + config, + burnchain, db: None, queued_operations: VecDeque::new(), chain_tip: None, @@ -54,7 +54,7 @@ impl MocknetController { fn build_next_block_header(current_block: &BlockSnapshot) -> BurnchainBlockHeader { let curr_hash = ¤t_block.burn_header_hash.to_bytes()[..]; - let next_hash = Sha256Sum::from_data(&curr_hash); + let next_hash = Sha256Sum::from_data(curr_hash); let block = BurnchainBlock::Bitcoin(BitcoinBlock::new( current_block.block_height + 1, diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index b1e32c15ea2..c7fdaf6cee1 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -81,7 +81,7 @@ impl MinerStats { { commits_at_sortition.push(missed); } else { - missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]); + missed_commits_map.insert(missed.intended_sortition, vec![missed]); } } @@ -106,8 +106,7 @@ impl MinerStats { &sortition_id, )?; if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) { - missed_commits_at_height - .extend(missed_commit_in_block.into_iter().map(|x| x.clone())); + missed_commits_at_height.extend(missed_commit_in_block.into_iter().cloned()); } windowed_missed_commits.push(missed_commits_at_height); @@ -223,7 +222,7 @@ impl MinerStats { all_miners: &[&str], ) -> Result, String> { let (exit_code, stdout, _stderr) = - Self::run_subprocess(&self.unconfirmed_commits_helper, &all_miners)?; + Self::run_subprocess(&self.unconfirmed_commits_helper, all_miners)?; if exit_code != 0 { return Err(format!( "Failed to run `{}`: exit code {}", @@ -255,7 +254,7 @@ impl MinerStats { }; let mut decoded_pox_addrs = vec![]; for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { - let Ok(pox_addr_bytes) = hex_bytes(&pox_addr_hex) else { + let Ok(pox_addr_bytes) = hex_bytes(pox_addr_hex) else { return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); }; let Some(bitcoin_addr) = @@ -279,8 +278,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 1, parent_vtxindex: 1, key_block_ptr: 1, @@ -295,7 +294,7 @@ impl MinerStats { block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; unconfirmed_spends.push(mocked_commit); @@ -306,7 +305,7 @@ impl MinerStats { /// Convert a 
list of burn sample points into a probability distribution by candidate's /// apparent sender (e.g. miner address). pub fn burn_dist_to_prob_dist(burn_dist: &[BurnSamplePoint]) -> HashMap { - if burn_dist.len() == 0 { + if burn_dist.is_empty() { return HashMap::new(); } if burn_dist.len() == 1 { @@ -343,13 +342,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -391,7 +388,7 @@ impl MinerStats { let (dist, total_spend) = Self::get_spend_distribution( active_miners_and_commits, unconfirmed_block_commits, - &expected_pox_addrs, + expected_pox_addrs, ); let mut probs = HashMap::new(); @@ -444,8 +441,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 2, parent_vtxindex: 2, key_block_ptr: 2, @@ -455,13 +452,13 @@ impl MinerStats { burn_fee: last_commit.burn_fee, input: (last_commit.txid, expected_input_index), apparent_sender: last_commit.apparent_sender.clone(), - txid: Txid(DEADBEEF.clone()), + txid: Txid(DEADBEEF), vtxindex: 1, block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; commit_table.insert(miner.to_string(), mocked_commit); } @@ -473,13 +470,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -520,9 +515,7 @@ impl MinerStats { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id)?; for commit in commits.into_iter() { let miner = commit.apparent_sender.to_string(); - if miners.get(&miner).is_none() { - miners.insert(miner, commit); - } + miners.entry(miner).or_insert(commit); } tip = SortitionDB::get_block_snapshot(sortdb.conn(), &tip.parent_sortition_id)? 
.ok_or(DBError::NotFoundError)?; @@ -750,11 +743,11 @@ echo < Result { let mut config: ConfigFile = toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; @@ -367,7 +368,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, false) else { return self.miner.clone(); }; - return config.miner; + config.miner } pub fn get_node_config(&self, resolve_bootstrap_nodes: bool) -> NodeConfig { @@ -380,7 +381,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, resolve_bootstrap_nodes) else { return self.node.clone(); }; - return config.node; + config.node } /// Apply any test settings to this burnchain config struct @@ -411,7 +412,7 @@ impl Config { "Override first_burn_block_hash from {} to {}", burnchain.first_block_hash, first_burn_block_hash ); - burnchain.first_block_hash = BurnchainHeaderHash::from_hex(&first_burn_block_hash) + burnchain.first_block_hash = BurnchainHeaderHash::from_hex(first_burn_block_hash) .expect("Invalid first_burn_block_hash"); } @@ -525,7 +526,7 @@ impl Config { } // check if the Epoch 3.0 burnchain settings as configured are going to be valid. - self.check_nakamoto_config(&burnchain); + self.check_nakamoto_config(burnchain); } fn check_nakamoto_config(&self, burnchain: &Burnchain) { @@ -612,7 +613,7 @@ impl Config { let _ = StacksEpoch::validate_epochs(epochs); // sanity check: v1_unlock_height must happen after pox-2 instantiation - let epoch21_index = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch21) + let epoch21_index = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch21) .expect("FATAL: no epoch 2.1 defined"); let epoch21 = &epochs[epoch21_index]; @@ -810,7 +811,7 @@ impl Config { } if burnchain.mode == "helium" && burnchain.local_mining_public_key.is_none() { - return Err(format!("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)")); + return Err("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)".into()); } let is_mainnet = burnchain.mode == "mainnet"; @@ -834,27 +835,17 @@ impl Config { burnchain.peer_version, ); } - } else { - if is_mainnet && resolve_bootstrap_nodes { - let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); - node.set_bootstrap_nodes( - bootstrap_node, - burnchain.chain_id, - burnchain.peer_version, - ); - } + } else if is_mainnet && resolve_bootstrap_nodes { + let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); + node.set_bootstrap_nodes(bootstrap_node, burnchain.chain_id, burnchain.peer_version); } if let Some(deny_nodes) = deny_nodes { node.set_deny_nodes(deny_nodes, burnchain.chain_id, burnchain.peer_version); } // Validate the node config - if is_mainnet { - if node.use_test_genesis_chainstate == Some(true) { - return Err(format!( - "Attempted to run mainnet node with `use_test_genesis_chainstate`" - )); - } + if is_mainnet && node.use_test_genesis_chainstate == Some(true) { + return Err("Attempted to run mainnet node with `use_test_genesis_chainstate`".into()); } if node.stacker || node.miner { @@ -869,10 +860,10 @@ impl Config { let initial_balances: Vec = match config_file.ustx_balance { Some(balances) => { - if is_mainnet && balances.len() > 0 { - return Err(format!( - "Attempted to run mainnet node with specified `initial_balances`" - )); + if is_mainnet && !balances.is_empty() { + return Err( + "Attempted to run mainnet node with specified `initial_balances`".into(), + ); } balances .iter() @@ -913,16 +904,12 @@ impl Config { 
}; // check for observer config in env vars - match std::env::var("STACKS_EVENT_OBSERVER") { - Ok(val) => { - events_observers.insert(EventObserverConfig { - endpoint: val, - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1_000, - }); - () - } - _ => (), + if let Ok(val) = std::env::var("STACKS_EVENT_OBSERVER") { + events_observers.insert(EventObserverConfig { + endpoint: val, + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1_000, + }); }; let connection_options = match config_file.connection_options { @@ -1070,14 +1057,11 @@ impl Config { } pub fn is_mainnet(&self) -> bool { - match self.burnchain.mode.as_str() { - "mainnet" => true, - _ => false, - } + matches!(self.burnchain.mode.as_str(), "mainnet") } pub fn is_node_event_driven(&self) -> bool { - self.events_observers.len() > 0 + !self.events_observers.is_empty() } pub fn make_nakamoto_block_builder_settings( @@ -1157,12 +1141,11 @@ impl Config { /// part dependent on the state machine getting block data back to the miner quickly, and thus /// the poll time is dependent on the first attempt time. pub fn get_poll_time(&self) -> u64 { - let poll_timeout = if self.node.miner { + if self.node.miner { cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { 1000 - }; - poll_timeout + } } } @@ -1253,7 +1236,7 @@ impl BurnchainConfig { username: None, password: None, timeout: 60, - magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), + magic_bytes: BLOCKSTACK_MAGIC_MAINNET, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: 10, // TODO: this is a testnet specific value. @@ -1298,8 +1281,7 @@ impl BurnchainConfig { let mut addrs_iter = format!("{}:{}", self.peer_host, self.rpc_port) .to_socket_addrs() .unwrap(); - let sock_addr = addrs_iter.next().unwrap(); - sock_addr + addrs_iter.next().unwrap() } pub fn get_bitcoin_network(&self) -> (String, BitcoinNetworkType) { @@ -1320,15 +1302,15 @@ pub struct StacksEpochConfigFile { start_height: i64, } -pub const EPOCH_CONFIG_1_0_0: &'static str = "1.0"; -pub const EPOCH_CONFIG_2_0_0: &'static str = "2.0"; -pub const EPOCH_CONFIG_2_0_5: &'static str = "2.05"; -pub const EPOCH_CONFIG_2_1_0: &'static str = "2.1"; -pub const EPOCH_CONFIG_2_2_0: &'static str = "2.2"; -pub const EPOCH_CONFIG_2_3_0: &'static str = "2.3"; -pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; -pub const EPOCH_CONFIG_2_5_0: &'static str = "2.5"; -pub const EPOCH_CONFIG_3_0_0: &'static str = "3.0"; +pub const EPOCH_CONFIG_1_0_0: &str = "1.0"; +pub const EPOCH_CONFIG_2_0_0: &str = "2.0"; +pub const EPOCH_CONFIG_2_0_5: &str = "2.05"; +pub const EPOCH_CONFIG_2_1_0: &str = "2.1"; +pub const EPOCH_CONFIG_2_2_0: &str = "2.2"; +pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; +pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; +pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; +pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { @@ -1978,9 +1960,8 @@ impl NodeConfig { /// Get a SocketAddr for this node's RPC endpoint which uses the loopback address pub fn get_rpc_loopback(&self) -> Option { let rpc_port = SocketAddr::from_str(&self.rpc_bind) - .or_else(|e| { + .map_err(|e| { error!("Could not parse node.rpc_bind configuration setting as SocketAddr: {e}"); - Err(()) }) .ok()? 
.port(); @@ -2090,8 +2071,8 @@ impl NodeConfig { peer_version: u32, ) { for part in bootstrap_nodes.split(',') { - if part.len() > 0 { - self.add_bootstrap_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_bootstrap_node(part, chain_id, peer_version); } } } @@ -2109,8 +2090,8 @@ impl NodeConfig { pub fn set_deny_nodes(&mut self, deny_nodes: String, chain_id: u32, peer_version: u32) { for part in deny_nodes.split(',') { - if part.len() > 0 { - self.add_deny_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_deny_node(part, chain_id, peer_version); } } } @@ -2124,10 +2105,7 @@ impl NodeConfig { MARFOpenOpts::new( hash_mode, - &self - .marf_cache_strategy - .as_ref() - .unwrap_or(&"noop".to_string()), + self.marf_cache_strategy.as_deref().unwrap_or("noop"), false, ) } @@ -2288,21 +2266,21 @@ impl ConnectionOptionsFile { let mut read_only_call_limit = HELIUM_DEFAULT_CONNECTION_OPTIONS .read_only_call_limit .clone(); - self.read_only_call_limit_write_length.map(|x| { + if let Some(x) = self.read_only_call_limit_write_length { read_only_call_limit.write_length = x; - }); - self.read_only_call_limit_write_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_write_count { read_only_call_limit.write_count = x; - }); - self.read_only_call_limit_read_length.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_length { read_only_call_limit.read_length = x; - }); - self.read_only_call_limit_read_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_count { read_only_call_limit.read_count = x; - }); - self.read_only_call_limit_runtime.map(|x| { + } + if let Some(x) = self.read_only_call_limit_runtime { read_only_call_limit.runtime = x; - }); + }; let default = ConnectionOptions::default(); Ok(ConnectionOptions { read_only_call_limit, @@ -2353,7 +2331,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.soft_max_clients_per_host), walk_interval: self .walk_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval), walk_seed_probability: self .walk_seed_probability .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_seed_probability), @@ -2375,7 +2353,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.maximum_call_argument_size), download_interval: self .download_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval), inv_sync_interval: self .inv_sync_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.inv_sync_interval), @@ -2396,7 +2374,7 @@ impl ConnectionOptionsFile { force_disconnect_interval: self.force_disconnect_interval, max_http_clients: self .max_http_clients - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients), connect_timeout: self.connect_timeout.unwrap_or(10), handshake_timeout: self.handshake_timeout.unwrap_or(5), max_sockets: self.max_sockets.unwrap_or(800) as usize, @@ -2457,7 +2435,7 @@ impl NodeConfigFile { name: self.name.unwrap_or(default_node_config.name), seed: match self.seed { Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.seed should be a hex encoded string"))?, + .map_err(|_e| "node.seed should be a hex encoded string".to_string())?, None => default_node_config.seed, }, working_dir: 
std::env::var("STACKS_WORKING_DIR") @@ -2471,8 +2449,9 @@ impl NodeConfigFile { .data_url .unwrap_or_else(|| format!("http://{rpc_bind}")), local_peer_seed: match self.local_peer_seed { - Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.local_peer_seed should be a hex encoded string"))?, + Some(seed) => hex_bytes(&seed).map_err(|_e| { + "node.local_peer_seed should be a hex encoded string".to_string() + })?, None => default_node_config.local_peer_seed, }, miner, @@ -2527,7 +2506,7 @@ impl NodeConfigFile { .unwrap_or(default_node_config.chain_liveness_poll_time_secs), stacker_dbs: self .stacker_dbs - .unwrap_or(vec![]) + .unwrap_or_default() .iter() .filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok()) .collect(), @@ -2714,6 +2693,7 @@ pub struct AtlasConfigFile { impl AtlasConfigFile { // Can't inplement `Into` trait because this takes a parameter + #[allow(clippy::wrong_self_convention)] fn into_config(&self, mainnet: bool) -> AtlasConfig { let mut conf = AtlasConfig::new(mainnet); if let Some(val) = self.attachments_max_size { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index bb05cd6128a..dd587077a65 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -181,6 +181,12 @@ impl InnerStackerDBChannel { } } +impl Default for StackerDBChannel { + fn default() -> Self { + Self::new() + } +} + impl StackerDBChannel { pub const fn new() -> Self { Self { @@ -256,7 +262,7 @@ where serializer.serialize_str(&value.to_string()) } -fn serialize_pox_addresses(value: &Vec, serializer: S) -> Result +fn serialize_pox_addresses(value: &[PoxAddress], serializer: S) -> Result where S: serde::Serializer, { @@ -402,8 +408,8 @@ impl EventObserver { let id: i64 = row.get(0)?; let url: String = row.get(1)?; let payload_text: String = row.get(2)?; - let payload: serde_json::Value = serde_json::from_str(&payload_text) - .map_err(|e| db_error::SerializationError(e))?; + let payload: serde_json::Value = + serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; let timeout_ms: u64 = row.get(3)?; Ok((id, url, payload, timeout_ms)) }, @@ -642,7 +648,7 @@ impl EventObserver { TransactionOrigin::Burn(op) => ( op.txid().to_string(), "00".to_string(), - BlockstackOperationType::blockstack_op_to_json(&op), + BlockstackOperationType::blockstack_op_to_json(op), ), TransactionOrigin::Stacks(ref tx) => { let txid = tx.txid().to_string(); @@ -776,6 +782,7 @@ impl EventObserver { self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT); } + #[allow(clippy::too_many_arguments)] fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, @@ -806,12 +813,15 @@ impl EventObserver { }) .collect(); - let mut tx_index: u32 = 0; let mut serialized_txs = vec![]; - for receipt in receipts.iter() { - let payload = EventObserver::make_new_block_txs_payload(receipt, tx_index); + for (tx_index, receipt) in receipts.iter().enumerate() { + let payload = EventObserver::make_new_block_txs_payload( + receipt, + tx_index + .try_into() + .expect("BUG: more receipts than U32::MAX"), + ); serialized_txs.push(payload); - tx_index += 1; } let signer_bitvec_value = signer_bitvec_opt @@ -821,7 +831,7 @@ impl EventObserver { let (reward_set_value, cycle_number_value) = match &reward_set_data { Some(data) => ( - serde_json::to_value(&RewardSetEventPayload::from_reward_set(&data.reward_set)) + 
serde_json::to_value(RewardSetEventPayload::from_reward_set(&data.reward_set)) .unwrap_or_default(), serde_json::to_value(data.cycle_number).unwrap_or_default(), ), @@ -1097,6 +1107,12 @@ impl BlockEventDispatcher for EventDispatcher { } } +impl Default for EventDispatcher { + fn default() -> Self { + EventDispatcher::new() + } +} + impl EventDispatcher { pub fn new() -> EventDispatcher { EventDispatcher { @@ -1125,7 +1141,7 @@ impl EventDispatcher { ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.burn_block_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1149,6 +1165,7 @@ impl EventDispatcher { /// - dispatch_matrix: a vector where each index corresponds to the hashset of event indexes /// that each respective event observer is subscribed to /// - events: a vector of all events from all the tx receipts + #[allow(clippy::type_complexity)] fn create_dispatch_matrix_and_event_vector<'a>( &self, receipts: &'a Vec, @@ -1241,6 +1258,7 @@ impl EventDispatcher { (dispatch_matrix, events) } + #[allow(clippy::too_many_arguments)] pub fn process_chain_tip( &self, block: &StacksBlockEventData, @@ -1264,7 +1282,7 @@ impl EventDispatcher { let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); - if dispatch_matrix.len() > 0 { + if !dispatch_matrix.is_empty() { let mature_rewards_vec = if let Some(rewards_info) = mature_rewards_info { mature_rewards .iter() @@ -1297,7 +1315,7 @@ impl EventDispatcher { let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - &block, + block, metadata, receipts, parent_index_hash, @@ -1342,7 +1360,7 @@ impl EventDispatcher { ) }) .collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } let flattened_receipts = processed_unconfirmed_state @@ -1390,12 +1408,12 @@ impl EventDispatcher { .enumerate() .filter_map(|(obs_id, observer)| { let lookup_ix = u16::try_from(obs_id).expect("FATAL: more than 2^16 observers"); - if lookup.contains(&lookup_ix) { - return Some(observer); - } else if include_any && self.any_event_observers_lookup.contains(&lookup_ix) { - return Some(observer); + if lookup.contains(&lookup_ix) + || (include_any && self.any_event_observers_lookup.contains(&lookup_ix)) + { + Some(observer) } else { - return None; + None } }) .collect() @@ -1405,7 +1423,7 @@ impl EventDispatcher { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1427,7 +1445,7 @@ impl EventDispatcher { ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1456,7 +1474,7 @@ impl EventDispatcher { ) { let interested_observers = self.filter_observers(&self.mined_microblocks_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1483,7 +1501,7 @@ impl EventDispatcher { tx_events: Vec, ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1502,7 +1520,7 @@ impl EventDispatcher { block_size: block_size_bytes, 
cost: consumed.clone(), tx_events, - miner_signature: block.header.miner_signature.clone(), + miner_signature: block.header.miner_signature, signer_signature_hash: block.header.signer_signature_hash(), signer_signature: block.header.signer_signature.clone(), signer_bitvec, @@ -1558,7 +1576,7 @@ impl EventDispatcher { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1577,9 +1595,9 @@ impl EventDispatcher { } } - pub fn process_new_attachments(&self, attachments: &Vec<(AttachmentInstance, Attachment)>) { + pub fn process_new_attachments(&self, attachments: &[(AttachmentInstance, Attachment)]) { let interested_observers: Vec<_> = self.registered_observers.iter().enumerate().collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1598,7 +1616,7 @@ impl EventDispatcher { &self, asset_identifier: &AssetIdentifier, event_index: usize, - dispatch_matrix: &mut Vec>, + dispatch_matrix: &mut [HashSet], ) { if let Some(observer_indexes) = self.assets_observers_lookup.get(asset_identifier) { for o_i in observer_indexes { diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b1ddf2e82b1..3e527e76e47 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -23,6 +23,7 @@ use crate::TipCandidate; pub type NeonGlobals = Globals; /// Command types for the relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -99,6 +100,7 @@ impl Clone for Globals { } impl Globals { + #[allow(clippy::too_many_arguments)] pub fn new( coord_comms: CoordinatorChannels, miner_status: Arc>, @@ -289,8 +291,8 @@ impl Globals { let active_key = RegisteredKey { target_block_height, vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, + block_height: op.block_height, + op_vtxindex: op.vtxindex, memo: op.memo, }; @@ -450,10 +452,7 @@ impl Globals { /// Clear the initiative flag and return its value pub fn take_initiative(&self) -> Option { match self.initiative.lock() { - Ok(mut initiative) => { - let ret = (*initiative).take(); - ret - } + Ok(mut initiative) => (*initiative).take(), Err(_e) => { error!("FATAL: failed to lock initiative"); panic!(); diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index b6df8549c41..9402ebbad51 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -123,10 +123,7 @@ impl Keychain { let proof = VRF::prove(&sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(&pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); proof } @@ -178,7 +175,7 @@ impl Keychain { } /// Sign a transaction as if we were the origin - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let sk = self.get_secret_key(); tx_signer .sign_origin(&sk) @@ -333,7 +330,7 @@ mod tests { } }; sk.set_compress_public(true); - self.microblocks_secret_keys.push(sk.clone()); + 
self.microblocks_secret_keys.push(sk); debug!("Microblock keypair rotated"; "burn_block_height" => %burn_block_height, @@ -346,7 +343,7 @@ mod tests { self.microblocks_secret_keys.last().cloned() } - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let num_keys = if self.secret_keys.len() < self.threshold as usize { self.secret_keys.len() } else { @@ -370,12 +367,9 @@ mod tests { }; // Generate the proof - let proof = VRF::prove(&vrf_sk, bytes.as_ref()); + let proof = VRF::prove(vrf_sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(vrf_pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(vrf_pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); Some(proof) } @@ -385,7 +379,7 @@ mod tests { let public_keys = self .secret_keys .iter() - .map(|ref pk| StacksPublicKey::from_private(pk)) + .map(StacksPublicKey::from_private) .collect(); let version = if is_mainnet { self.hash_mode.to_version_mainnet() @@ -518,7 +512,7 @@ mod tests { TransactionVersion::Testnet, k1.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), @@ -527,7 +521,7 @@ mod tests { TransactionVersion::Testnet, k2.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index fcdc9f58474..e795101c948 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -93,18 +93,18 @@ fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCan at_stacks_height, ); - let best_tip = BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap(); - best_tip + BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap() } /// Implementation of `get_miner_spend` CLI option +#[allow(clippy::incompatible_msrv)] fn cli_get_miner_spend( config_path: &str, mine_start: Option, at_burnchain_height: Option, ) -> u64 { info!("Loading config at path {}", config_path); - let config = match ConfigFile::from_path(&config_path) { + let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { warn!("Invalid config file: {}", e); @@ -155,7 +155,7 @@ fn cli_get_miner_spend( &config, &keychain, &burnchain, - &mut sortdb, + &sortdb, &commit_outs, mine_start.unwrap_or(tip.block_height), at_burnchain_height, @@ -171,7 +171,7 @@ fn cli_get_miner_spend( else { return 0.0; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return 0.0; } @@ -207,12 +207,11 @@ fn cli_get_miner_spend( ); let win_probs = if config.miner.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, &commit_outs, - ); - win_probs + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -229,8 +228,7 @@ fn cli_get_miner_spend( return 0.0; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + 
MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; info!("Unconfirmed spend distribution: {:?}", &spend_dist); @@ -428,7 +426,6 @@ fn main() { let mut run_loop = helium::RunLoop::new(conf); if let Err(e) = run_loop.start(num_round) { warn!("Helium runloop exited: {}", e); - return; } } else if conf.burnchain.mode == "neon" || conf.burnchain.mode == "nakamoto-neon" diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7cda49e10d9..ecf37ae0ecf 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -148,7 +148,7 @@ impl StacksNode { let burnchain = runloop.get_burnchain(); let atlas_config = config.atlas.clone(); let mut keychain = Keychain::default(config.node.seed.clone()); - if let Some(mining_key) = config.miner.mining_key.clone() { + if let Some(mining_key) = config.miner.mining_key { keychain.set_nakamoto_sk(mining_key); } @@ -195,7 +195,7 @@ impl StacksNode { match &data_from_neon.leader_key_registration_state { LeaderKeyRegistrationState::Active(registered_key) => { let pubkey_hash = keychain.get_nakamoto_pkh(); - if pubkey_hash.as_ref() == ®istered_key.memo { + if pubkey_hash.as_ref() == registered_key.memo { data_from_neon.leader_key_registration_state } else { LeaderKeyRegistrationState::Inactive @@ -366,7 +366,7 @@ pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) return; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { warn!("Failed to create {}: {:?}", &path, &e); @@ -374,7 +374,7 @@ pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + if let Err(e) = f.write_all(key_json.as_bytes()) { warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); return; } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 150762e9654..042df70be15 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -67,6 +67,7 @@ pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync:: /// miner thread sleep before trying again? 
const ABORT_TRY_AGAIN_MS: u64 = 200; +#[allow(clippy::large_enum_variant)] pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure BeginTenure { @@ -424,7 +425,7 @@ impl BlockMinerThread { // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); - if !self.last_block_mined.is_none() { + if self.last_block_mined.is_some() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } @@ -573,12 +574,12 @@ impl BlockMinerThread { &self.burnchain, &sort_db, &mut chain_state, - &stackerdbs, + stackerdbs, &self.globals.counters, &self.burn_election_block.consensus_hash, )?; - return Ok((reward_set, signature)); + Ok((reward_set, signature)) } /// Fault injection -- possibly fail to broadcast @@ -590,13 +591,12 @@ impl BlockMinerThread { .fault_injection_block_push_fail_probability .unwrap_or(0) .min(100); - let will_drop = if drop_prob > 0 { + if drop_prob > 0 { let throw: u8 = thread_rng().gen_range(0..100); throw < drop_prob } else { false - }; - will_drop + } } /// Store a block to the chainstate, and if successful (it should be since we mined it), @@ -621,7 +621,7 @@ impl BlockMinerThread { let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &chainstate_config, - &block, + block, &mut sortition_handle, &staging_tx, headers_conn, @@ -704,7 +704,7 @@ impl BlockMinerThread { miner_privkey, &sort_db, &self.burn_block, - &stackerdbs, + stackerdbs, SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, @@ -869,24 +869,21 @@ impl BlockMinerThread { "Stacks block parent ID may be an epoch2x block: {}", &self.parent_tenure_id ); - let epoch2_header = - NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!( - "No header info for epoch2x tenure block ID {}", - &self.parent_tenure_id - ); - NakamotoNodeError::ParentNotFound - })?; - - epoch2_header + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })? 
} }; @@ -1147,9 +1144,9 @@ impl BlockMinerThread { let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { - tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), + tenure_consensus_hash: self.burn_election_block.consensus_hash, prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_election_block.consensus_hash.clone(), + burn_view_consensus_hash: self.burn_election_block.consensus_hash, previous_tenure_end: parent_block_id, previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"), @@ -1252,7 +1249,7 @@ impl ParentStacksBlockInfo { } let Ok(Some(parent_tenure_header)) = - NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + NakamotoChainState::get_block_header(chain_state.db(), parent_tenure_id) else { warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); return Err(NakamotoNodeError::ParentNotFound); @@ -1293,7 +1290,7 @@ impl ParentStacksBlockInfo { } else { 1 }; - let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash; Some(ParentTenureInfo { parent_tenure_blocks, parent_tenure_consensus_hash, @@ -1321,7 +1318,7 @@ impl ParentStacksBlockInfo { let account = chain_state .with_read_only_clarity_tx( &burn_db - .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .index_handle_at_block(chain_state, &stacks_tip_header.index_block_hash()) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 004023ea263..78deb69b9f9 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -227,6 +227,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub(crate) fn run_one_pass( &mut self, indexer: &B, @@ -238,7 +239,7 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = !self.results_with_data.is_empty(); let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( @@ -258,7 +259,7 @@ impl PeerThread { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. 
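A note on the `.clone()` removals in the `RPCHandlerArgs` hunk below: `Option<u64>` is `Copy`, so reading the field already copies the value and the explicit clone was a no-op (clippy's `clone_on_copy`). A minimal sketch, with a hypothetical struct standing in for `BurnchainConfig`:

```rust
/// Hypothetical stand-in for `BurnchainConfig`; only the type of the
/// `process_exit_at_block_height` field matters for this sketch.
struct BurnConfig {
    process_exit_at_block_height: Option<u64>,
}

fn main() {
    let config = BurnConfig {
        process_exit_at_block_height: Some(850_000),
    };

    // `Option<u64>` implements `Copy`, so a plain field read copies the value;
    // appending `.clone()` does the same work and trips `clippy::clone_on_copy`.
    let exit_at: Option<u64> = config.process_exit_at_block_height;
    assert_eq!(exit_at, Some(850_000));

    // The field is still intact afterwards -- the read was a copy, not a move.
    assert!(config.process_exit_at_block_height.is_some());
}
```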
let handler_args = RPCHandlerArgs { - exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), + exit_at_block_height: self.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -266,7 +267,6 @@ impl PeerThread { cost_metric: Some(cost_metric.as_ref()), fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), coord_comms: Some(&self.globals.coord_comms), - ..RPCHandlerArgs::default() }; self.net.run( indexer, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b0..441d7ecd2c9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -67,6 +67,7 @@ use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; /// Command types for the Nakamoto relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -142,7 +143,7 @@ impl LastCommit { /// What's the parent tenure's tenure-start block hash? pub fn parent_tenure_id(&self) -> StacksBlockId { - StacksBlockId(self.block_commit.block_header_hash.clone().0) + StacksBlockId(self.block_commit.block_header_hash.0) } /// What's the stacks tip at the time of commit? @@ -167,7 +168,7 @@ impl LastCommit { /// Set our txid pub fn set_txid(&mut self, txid: &Txid) { - self.txid = Some(txid.clone()); + self.txid = Some(*txid); } } @@ -302,6 +303,8 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
+ #[allow(clippy::nonminimal_bool)] + #[allow(clippy::eq_op)] fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place (self.min_network_download_passes <= self.last_network_download_passes @@ -497,7 +500,7 @@ impl RelayerThread { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: miner_pkh.as_bytes().to_vec(), - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -564,7 +567,7 @@ impl RelayerThread { let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), &stacks_tip, - &tip_block_ch, + tip_block_ch, ) .map_err(|e| { error!( @@ -730,9 +733,7 @@ impl RelayerThread { /// * last_burn_block corresponds to the canonical sortition DB's chain tip /// * the time of issuance is sufficiently recent /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) + /// * the relayer has already tried a download scan that included this sortition (which, if a block was found, would have placed it into the staging DB and marked it as unprocessed) /// * a miner thread is not running already fn create_block_miner( &mut self, @@ -750,11 +751,11 @@ impl RelayerThread { return Err(NakamotoNodeError::FaultInjection); } - let burn_header_hash = burn_tip.burn_header_hash.clone(); + let burn_header_hash = burn_tip.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( @@ -1067,7 +1068,7 @@ impl RelayerThread { // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let res = self.bitcoin_controller.submit_operation( - last_committed.get_epoch_id().clone(), + *last_committed.get_epoch_id(), BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), &mut op_signer, 1, @@ -1299,7 +1300,7 @@ impl RelayerThread { let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { saved_key_opt = - Self::load_saved_vrf_key(&path, &self.keychain.get_nakamoto_pkh()); + Self::load_saved_vrf_key(path, &self.keychain.get_nakamoto_pkh()); } if let Some(saved_key) = saved_key_opt { debug!("Relayer: resuming VRF key"); @@ -1371,9 +1372,9 @@ pub mod test { let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/does_not_exist.json"; - _ = std::fs::remove_file(&path); + _ = std::fs::remove_file(path); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); } @@ -1384,13 +1385,13 @@ pub mod test { let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/empty.json"; - File::create(&path).expect("Failed to create test file"); - assert!(Path::new(&path).exists()); + File::create(path).expect("Failed to create test file"); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - 
std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1403,15 +1404,15 @@ pub mod test { let json_content = r#"{ "hello": "world" }"#; // Write the JSON content to the file - let mut file = File::create(&path).expect("Failed to create test file"); + let mut file = File::create(path).expect("Failed to create test file"); file.write_all(json_content.as_bytes()) .expect("Failed to write to test file"); - assert!(Path::new(&path).exists()); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1432,10 +1433,10 @@ pub mod test { let path = "/tmp/vrf_key.json"; save_activated_vrf_key(path, &key); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_some()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1460,9 +1461,9 @@ pub mod test { let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); let pubkey_hash = Hash160::from_node_public_key(&pk); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 697dddeb034..b2f892e1f1c 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -188,6 +188,7 @@ impl SignCoordinator { } /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] pub fn send_miners_message( miner_sk: &StacksPrivateKey, sortdb: &SortitionDB, @@ -199,7 +200,7 @@ impl SignCoordinator { miners_session: &mut StackerDBSession, election_sortition: &ConsensusHash, ) -> Result<(), String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); @@ -222,7 +223,7 @@ impl SignCoordinator { .saturating_add(1); let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); chunk - .sign(&miner_sk) + .sign(miner_sk) .map_err(|_| "Failed to sign StackerDB chunk")?; match miners_session.put_chunk(&chunk) { @@ -270,13 +271,14 @@ impl SignCoordinator { /// to the signers, and then waits for the signers to respond /// with their signatures. It does so in two ways, concurrently: /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. + /// found, then the block can be broadcast. /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. 
This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. // Mutants skip here: this function is covered via integration tests, // which the mutation testing does not see. #[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] pub fn run_sign_v0( &mut self, block: &NakamotoBlock, @@ -306,7 +308,7 @@ impl SignCoordinator { &self.message_key, sortdb, burn_tip, - &stackerdbs, + stackerdbs, block_proposal_message, MinerSlotID::BlockProposal, self.is_mainnet, @@ -367,7 +369,7 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if Self::check_burn_tip_changed(&sortdb, &burn_tip) { + if Self::check_burn_tip_changed(sortdb, burn_tip) { debug!("SignCoordinator: Exiting due to new burnchain tip"); return Err(NakamotoNodeError::BurnchainTipChanged); } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b3..efc64bf8e74 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -62,79 +62,85 @@ /// [11] Notifies about new transaction attachment events /// [12] Signals VRF key registration /// -/// When the node is running, there are 4-5 active threads at once. They are: +/// When the node is running, there are 4-5 active threads at once. They are: /// -/// * **RunLoop Thread**: This is the main thread, whose code body lives in src/run_loop/neon.rs. -/// This thread is responsible for: -/// * Bootup -/// * Running the burnchain indexer -/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process +/// * **RunLoop Thread**: +/// This is the main thread, whose code body lives in `src/run_loop/neon.rs`. +/// This thread is responsible for: +/// * Bootup +/// * Running the burnchain indexer +/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process /// -/// * **Relayer Thread**: This is the thread that stores and relays blocks and microblocks. Both -/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to -/// ensure that neither one attempts to acquire a write-lock in the underlying databases. -/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks -/// blocks, and it directs the miner thread (if running) to stop when either it or the -/// ChainsCoordinator thread needs to acquire the write-lock. -/// This thread is responsible for: -/// * Receiving new blocks and microblocks from the P2P thread via a shared channel -/// * (Sychronously) requesting the CoordinatorThread to process newly-stored Stacks blocks and -/// microblocks -/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P -/// thread so it can answer queries about the unconfirmed microblock chain -/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast -/// * Registering the VRF public key for the miner -/// * Spawning the block and microblock miner threads, and stopping them if their continued -/// execution would inhibit block or microblock storage or processing. -/// * Submitting the burnchain operation to commit to a freshly-mined block +/// * **Relayer Thread**: +/// This is the thread that stores and relays blocks and microblocks. 
Both +/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to +/// ensure that neither one attempts to acquire a write-lock in the underlying databases. +/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks +/// blocks, and it directs the miner thread (if running) to stop when either it or the +/// ChainsCoordinator thread needs to acquire the write-lock. +/// This thread is responsible for: +/// * Receiving new blocks and microblocks from the P2P thread via a shared channel +/// * (Synchronously) requesting the CoordinatorThread to process newly-stored Stacks blocks +/// and microblocks +/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P +/// thread so it can answer queries about the unconfirmed microblock chain +/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast +/// * Registering the VRF public key for the miner +/// * Spawning the block and microblock miner threads, and stopping them if their continued +/// execution would inhibit block or microblock storage or processing. +/// * Submitting the burnchain operation to commit to a freshly-mined block /// -/// * **Miner thread**: This is the thread that actually produces new blocks and microblocks. It -/// is spawned only by the Relayer thread to carry out mining activity when the underlying -/// chainstate is not needed by either the Relayer or ChainsCoordinator threeads. -/// This thread does the following: -/// * Walk the mempool DB to build a new block or microblock -/// * Return the block or microblock to the Relayer thread +/// * **Miner Thread**: +/// This is the thread that actually produces new blocks and microblocks. It +/// is spawned only by the Relayer thread to carry out mining activity when the underlying +/// chainstate is not needed by either the Relayer or ChainsCoordinator threads. +/// This thread does the following: +/// * Walk the mempool DB to build a new block or microblock +/// * Return the block or microblock to the Relayer thread /// -/// * **P2P Thread**: This is the thread that communicates with the rest of the p2p network, and -/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock -/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards -/// data it receives from the p2p thread to the Relayer thread for I/O-bound processing. At the -/// time of this writing, it still requires holding a write-lock to handle some RPC request, but -/// future work will remove this so that this thread's execution will not interfere with the -/// others. This is the only thread that does socket I/O. 
-/// This thread runs the PeerNetwork state machines, which include the following: -/// * Learning the node's public IP address -/// * Discovering neighbor nodes -/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread to -/// other neighbors -/// * Synchronizing block and microblock inventory state with other neighbors -/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and processing -/// * Downloading transaction attachments as their hashes are discovered during block processing -/// * Synchronizing the local mempool database with other neighbors -/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) -/// * Handling HTTP requests +/// * **P2P Thread**: +/// This is the thread that communicates with the rest of the P2P network, and +/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock +/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards +/// data it receives from the P2P thread to the Relayer thread for I/O-bound processing. At the +/// time of this writing, it still requires holding a write-lock to handle some RPC requests, but +/// future work will remove this so that this thread's execution will not interfere with the +/// others. This is the only thread that does socket I/O. +/// This thread runs the PeerNetwork state machines, which include the following: +/// * Learning the node's public IP address +/// * Discovering neighbor nodes +/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread +/// to other neighbors +/// * Synchronizing block and microblock inventory state with other neighbors +/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and +/// processing +/// * Downloading transaction attachments as their hashes are discovered during block processing +/// * Synchronizing the local mempool database with other neighbors +/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) +/// * Handling HTTP requests /// -/// * **ChainsCoordinator Thread**: This thread process sortitions and Stacks blocks and -/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). It, -/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the -/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in -/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former -/// drives Stacks blocks processing, the latter sortitions). -/// This thread is responsible for: -/// * Responding to requests from other threads to process sortitions -/// * Responding to requests from other threads to process Stacks blocks and microblocks -/// * Processing PoX chain reorgs, should they ever happen -/// * Detecting attachment creation events, and informing the P2P thread of them so it can go -/// and download them +/// * **ChainsCoordinator Thread**: +/// This thread processes sortitions and Stacks blocks and +/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). It, +/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the +/// chainstate DBs while it works. 
Its actions are controlled by a CoordinatorComms structure in +/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former +/// drives Stacks blocks processing, the latter sortitions). +/// This thread is responsible for: +/// * Responding to requests from other threads to process sortitions +/// * Responding to requests from other threads to process Stacks blocks and microblocks +/// * Processing PoX chain reorgs, should they ever happen +/// * Detecting attachment creation events, and informing the P2P thread of them so it can go +/// and download them /// /// In addition to the mempool and chainstate databases, these threads share access to a Globals -/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant -/// to store inter-thread shared singleton communication media all in one convenient struct. Each -/// thread has a handle to the struct's shared state handles. Global state includes: -/// * The global flag as to whether or not the miner thread can be running -/// * The global shutdown flag that, when set, causes all threads to terminate -/// * Sender channel endpoints that can be shared between threads -/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) +/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant +/// to store inter-thread shared singleton communication media all in one convenient struct. Each +/// thread has a handle to the struct's shared state handles. Global state includes: +/// * The global flag as to whether or not the miner thread can be running +/// * The global shutdown flag that, when set, causes all threads to terminate +/// * Sender channel endpoints that can be shared between threads +/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) /// /// This file may be refactored in the future into a full-fledged module. use std::cmp; @@ -230,6 +236,7 @@ pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB type MinedBlocks = HashMap; /// Result of running the miner thread. It could produce a Stacks block or a microblock. 
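`MinerThreadResult` just below is the third enum in this patch to receive `#[allow(clippy::large_enum_variant)]`, after `MinerDirective` and `RelayerDirective`. The lint warns when one variant dwarfs the rest, because every value of the enum occupies the size of its largest variant. A sketch of the trade-off against the usual `Box` fix, using illustrative types rather than the node's own:

```rust
use std::mem::size_of;

// clippy::large_enum_variant fires when one variant is much bigger than the
// others: every value of the enum is as large as its largest variant.
#[allow(dead_code, clippy::large_enum_variant)]
enum Directive {
    Stop,
    Process([u8; 4096]), // this 4 KiB payload sets the size of *every* value
}

// The lint's suggested fix is to box the big payload...
#[allow(dead_code)]
enum BoxedDirective {
    Stop,
    Process(Box<[u8; 4096]>), // pointer-sized variant
}

fn main() {
    assert!(size_of::<Directive>() >= 4096);
    assert!(size_of::<BoxedDirective>() <= 16);
    // ...but boxing costs a heap allocation per message. For directives built
    // rarely and sent once through a channel, `allow` is a reasonable call,
    // which is presumably the reasoning behind the annotations in this patch.
}
```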
+#[allow(clippy::large_enum_variant)] pub(crate) enum MinerThreadResult { Block( AssembledAnchorBlock, @@ -646,8 +653,8 @@ impl MicroblockMinerThread { sortdb: Some(sortdb), mempool: Some(mempool), event_dispatcher: relayer_thread.event_dispatcher.clone(), - parent_consensus_hash: ch.clone(), - parent_block_hash: bhh.clone(), + parent_consensus_hash: ch, + parent_block_hash: bhh, miner_key, frequency, last_mined: 0, @@ -743,7 +750,7 @@ impl MicroblockMinerThread { let mint_result = { let ic = sortdb.index_handle_at_block( - &chainstate, + chainstate, &block_snapshot.get_canonical_stacks_block_id(), )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( @@ -810,7 +817,7 @@ impl MicroblockMinerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this microblock somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); } @@ -827,7 +834,7 @@ impl MicroblockMinerThread { r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash ); - file.write_all(&mblock_json.as_bytes()).unwrap_or_else(|_| { + file.write_all(mblock_json.as_bytes()).unwrap_or_else(|_| { panic!("FATAL: failed to write microblock bits to '{:?}'", &path) }); info!( @@ -873,7 +880,7 @@ impl MicroblockMinerThread { // update unconfirmed state cost self.cost_so_far = new_cost; self.quantity += 1; - return Ok(mined_microblock); + Ok(mined_microblock) } /// Can this microblock miner mine off of this given tip? @@ -1086,6 +1093,7 @@ impl BlockMinerThread { } /// Constructs and returns a LeaderBlockCommitOp out of the provided params. + #[allow(clippy::too_many_arguments)] fn inner_generate_block_commit_op( &self, block_header_hash: BlockHeaderHash, @@ -1202,7 +1210,7 @@ impl BlockMinerThread { .expect("FATAL: could not query chain tips") }; - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1213,7 +1221,7 @@ impl BlockMinerThread { .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1269,7 +1277,7 @@ impl BlockMinerThread { pub(crate) fn sort_and_populate_candidates( mut candidates: Vec, ) -> Vec { - if candidates.len() == 0 { + if candidates.is_empty() { return candidates; } candidates.sort_by(|tip1, tip2| { @@ -1373,7 +1381,7 @@ impl BlockMinerThread { // identify leaf tips -- i.e. 
blocks with no children let parent_consensus_hashes: HashSet<_> = stacks_tips .iter() - .map(|x| x.parent_consensus_hash.clone()) + .map(|x| x.parent_consensus_hash) .collect(); let mut leaf_tips: Vec<_> = stacks_tips @@ -1381,7 +1389,7 @@ impl BlockMinerThread { .filter(|x| !parent_consensus_hashes.contains(&x.consensus_hash)) .collect(); - if leaf_tips.len() == 0 { + if leaf_tips.is_empty() { return None; } @@ -1502,7 +1510,7 @@ impl BlockMinerThread { } } - if scores.len() == 0 { + if scores.is_empty() { // revert to prior tie-breaking scheme return None; } @@ -1576,14 +1584,14 @@ impl BlockMinerThread { let chain_tip = ChainTip::genesis( &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), + burnchain_params.first_block_height, burnchain_params.first_block_timestamp.into(), ); ( Some(ParentStacksBlockInfo { stacks_parent_header: chain_tip.metadata, - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH, parent_block_burn_height: 0, parent_block_total_burn: 0, parent_winning_vtxindex: 0, @@ -1671,7 +1679,7 @@ impl BlockMinerThread { { if (prev_block.anchored_block.header.parent_microblock == BlockHeaderHash([0u8; 32]) - && stream.len() == 0) + && stream.is_empty()) || (prev_block.anchored_block.header.parent_microblock != BlockHeaderHash([0u8; 32]) && stream.len() @@ -1699,30 +1707,26 @@ impl BlockMinerThread { best_attempt = cmp::max(best_attempt, prev_block.attempt); } - } else { - if !force { - // no microblock stream to confirm, and the stacks tip hasn't changed - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); + } else if !force { + // no microblock stream to confirm, and the stacks tip hasn't changed + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); - return None; - } + return None; } - } else { - if self.burn_block.burn_header_hash == prev_block.burn_hash { - // only try and re-mine if there was no sortition since the last chain tip - info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", + } else if self.burn_block.burn_header_hash == prev_block.burn_hash { + // only try and re-mine if there was no sortition since the last chain tip + info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); - best_attempt = cmp::max(best_attempt, prev_block.attempt); - // Since the chain tip has changed, we should try to mine a new block, even - // if it has less transactions than the previous block we mined, since that - 
// previous block would now be a reorg. - max_txs = 0; - } else { - info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); - } + best_attempt = cmp::max(best_attempt, prev_block.attempt); + // Since the chain tip has changed, we should try to mine a new block, even + // if it has less transactions than the previous block we mined, since that + // previous block would now be a reorg. + max_txs = 0; + } else { + info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } (best_attempt + 1, max_txs) @@ -1838,7 +1842,7 @@ impl BlockMinerThread { }; if let Some((ref microblocks, ref poison_opt)) = µblock_info_opt { - if let Some(ref tail) = microblocks.last() { + if let Some(tail) = microblocks.last() { debug!( "Confirm microblock stream tailed at {} (seq {})", &tail.block_hash(), @@ -1848,8 +1852,7 @@ impl BlockMinerThread { // try and confirm as many microblocks as we can (but note that the stream itself may // be too long; we'll try again if that happens). - stacks_parent_header.microblock_tail = - microblocks.last().clone().map(|blk| blk.header.clone()); + stacks_parent_header.microblock_tail = microblocks.last().map(|blk| blk.header.clone()); if let Some(poison_payload) = poison_opt { debug!("Detected poisoned microblock fork: {:?}", &poison_payload); @@ -1868,7 +1871,7 @@ impl BlockMinerThread { if let Err(e) = mem_pool.miner_submit( chain_state, sortdb, - &parent_consensus_hash, + parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, Some(&self.event_dispatcher), @@ -1920,6 +1923,7 @@ impl BlockMinerThread { } /// Obtain the target burn fee cap, when considering how well this miner is performing. + #[allow(clippy::too_many_arguments)] pub fn get_mining_spend_amount( config: &Config, keychain: &Keychain, @@ -1974,7 +1978,7 @@ impl BlockMinerThread { else { return config_file_burn_fee_cap; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return config_file_burn_fee_cap; } @@ -2009,16 +2013,15 @@ impl BlockMinerThread { let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, + recipients, ); let win_probs = if miner_config.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, - ); - win_probs + recipients, + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -2038,8 +2041,7 @@ impl BlockMinerThread { return config_file_burn_fee_cap; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; info!("Unconfirmed spend distribution: {:?}", &spend_dist); @@ -2102,6 +2104,7 @@ impl BlockMinerThread { /// Produce the block-commit for this anchored block, if we can. /// Returns the op on success /// Returns None if we fail somehow. 
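`get_mining_spend_amount` above and `make_block_commit` just below both pick up `#[allow(clippy::too_many_arguments)]`. The lint (default threshold: seven parameters) nudges toward a parameter struct, which trades churn at every call site for named, transposition-proof arguments; this patch opts out instead, presumably because the signatures are entrenched. A sketch of the suggested refactor, with invented parameters:

```rust
// clippy::too_many_arguments fires once a signature passes seven parameters.
// Positional calls like this one are easy to transpose silently:
fn submit_positional(burn_fee: u64, block_height: u64, vtxindex: u32, attempt: u64) -> String {
    format!("fee={burn_fee} height={block_height} vtx={vtxindex} attempt={attempt}")
}

// The refactor the lint suggests: bundle related values into one struct, so
// every call site names its fields. (Fields here are invented for the sketch.)
struct CommitArgs {
    burn_fee: u64,
    block_height: u64,
    vtxindex: u32,
    attempt: u64,
}

fn submit(args: &CommitArgs) -> String {
    format!(
        "fee={} height={} vtx={} attempt={}",
        args.burn_fee, args.block_height, args.vtxindex, args.attempt
    )
}

fn main() {
    let args = CommitArgs { burn_fee: 20_000, block_height: 850_000, vtxindex: 2, attempt: 1 };
    assert_eq!(submit(&args), submit_positional(20_000, 850_000, 2, 1));
}
```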
+ #[allow(clippy::too_many_arguments)] pub fn make_block_commit( &self, burn_db: &mut SortitionDB, @@ -2227,12 +2230,10 @@ impl BlockMinerThread { if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { if stacks_tip.anchored_header.height() + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= highest_unprocessed.height + > highest_unprocessed.height && highest_unprocessed_block_sn.block_height + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= sort_tip.block_height + > sort_tip.block_height { // we're close enough to the chain tip that it's a bad idea for us to mine // -- we'll likely create an orphan @@ -2243,7 +2244,7 @@ impl BlockMinerThread { } } // we can mine - return false; + false } /// Only used in mock signing to generate a peer info view @@ -2301,16 +2302,14 @@ impl BlockMinerThread { // Just wait a min amount of time for the mock signatures to come in while mock_signatures.len() < slot_ids.len() && mock_poll_start.elapsed() < timeout { let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; - for chunk in chunks { - if let Some(chunk) = chunk { - if let Ok(SignerMessage::MockSignature(mock_signature)) = - SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + for chunk in chunks.into_iter().flatten() { + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.mock_proposal == *mock_proposal + && !mock_signatures.contains(&mock_signature) { - if mock_signature.mock_proposal == *mock_proposal - && !mock_signatures.contains(&mock_signature) - { - mock_signatures.push(mock_signature); - } + mock_signatures.push(mock_signature); } } } @@ -2325,19 +2324,17 @@ impl BlockMinerThread { StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); let miner_slot_ids: Vec<_> = (0..MINER_SLOT_COUNT * 2).collect(); if let Ok(messages) = miners_stackerdb.get_latest_chunks(&miner_slot_ids) { - for message in messages { - if let Some(message) = message { - if message.is_empty() { - continue; - } - let Ok(SignerMessage::MockBlock(mock_block)) = - SignerMessage::consensus_deserialize(&mut message.as_slice()) - else { - continue; - }; - if mock_block.mock_proposal.peer_info == *peer_info { - return true; - } + for message in messages.into_iter().flatten() { + if message.is_empty() { + continue; + } + let Ok(SignerMessage::MockBlock(mock_block)) = + SignerMessage::consensus_deserialize(&mut message.as_slice()) + else { + continue; + }; + if mock_block.mock_proposal.peer_info == *peer_info { + return true; } } } @@ -2939,6 +2936,8 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
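The `prepare_length` hunk above rewrites `tip_height + prepare_length - 1 >= target` as `tip_height + prepare_length > target`. For unsigned integers the two forms agree whenever the left side is at least 1 (`n - 1 >= m ⟺ n >= m + 1 ⟺ n > m`), and the new form cannot underflow when the sum is 0. A quick exhaustive check over small values:

```rust
fn main() {
    // For unsigned integers, `n - 1 >= m` and `n > m` agree whenever n >= 1:
    // n - 1 >= m  <=>  n >= m + 1  <=>  n > m.
    for n in 1u64..100 {
        for m in 0u64..100 {
            assert_eq!(n - 1 >= m, n > m);
        }
    }

    // The old form, though, underflows at n == 0 (a panic in debug builds),
    // e.g. a height of 0 with a prepare_length of 0. The new form is total:
    let (height, prepare_length, tip) = (0u64, 0u64, 0u64);
    assert!(!(height + prepare_length > tip)); // just `false`, no panic
    // `height + prepare_length - 1 >= tip` would underflow right here.
}
```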
+ #[allow(clippy::nonminimal_bool)] + #[allow(clippy::eq_op)] pub fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place (self.min_network_download_passes <= self.last_network_download_passes @@ -3022,7 +3021,7 @@ impl RelayerThread { net_receipts.processed_unconfirmed_state.receipts.len(); if num_unconfirmed_microblock_tx_receipts > 0 { if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + let canonical_tip = unconfirmed_state.confirmed_chain_tip; self.event_dispatcher.process_new_microblocks( canonical_tip, net_receipts.processed_unconfirmed_state, @@ -3094,7 +3093,7 @@ impl RelayerThread { if !Relayer::static_check_problematic_relayed_block( self.chainstate_ref().mainnet, epoch_id, - &anchored_block, + anchored_block, ASTRules::PrecheckSize, ) { // nope! @@ -3107,7 +3106,7 @@ impl RelayerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this block somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); } @@ -3123,7 +3122,7 @@ impl RelayerThread { r#"{{"block":"{}","consensus":"{}"}}"#, &block_bits_hex, &consensus_hash ); - file.write_all(&block_json.as_bytes()).unwrap_or_else(|_| { + file.write_all(block_json.as_bytes()).unwrap_or_else(|_| { panic!("FATAL: failed to write block bits to '{:?}'", &path) }); info!( @@ -3154,8 +3153,8 @@ impl RelayerThread { chainstate.preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, - &parent_consensus_hash, + anchored_block, + parent_consensus_hash, 0, ) })?; @@ -3283,15 +3282,13 @@ impl RelayerThread { }; // advertize _and_ push blocks for now - let blocks_available = Relayer::load_blocks_available_data( - self.sortdb_ref(), - vec![consensus_hash.clone()], - ) - .expect("Failed to obtain block information for a block we mined."); + let blocks_available = + Relayer::load_blocks_available_data(self.sortdb_ref(), vec![consensus_hash]) + .expect("Failed to obtain block information for a block we mined."); let block_data = { let mut bd = HashMap::new(); - bd.insert(consensus_hash.clone(), mined_block.clone()); + bd.insert(consensus_hash, mined_block.clone()); bd }; @@ -3314,7 +3311,7 @@ impl RelayerThread { ); miner_tip = Self::pick_higher_tip(miner_tip, None); } else { - let ch = snapshot.consensus_hash.clone(); + let ch = snapshot.consensus_hash; let bh = mined_block.block_hash(); let height = mined_block.header.total_work.work; @@ -3391,7 +3388,7 @@ impl RelayerThread { let tenures = if let Some(last_ch) = self.last_tenure_consensus_hash.as_ref() { let mut tenures = vec![]; let last_sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &last_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), last_ch) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown prior consensus hash"); @@ -3470,8 +3467,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB") .expect("FATAL: no snapshot for consensus hash"); - let old_last_mined_blocks = - mem::replace(&mut self.last_mined_blocks, MinedBlocks::new()); + let old_last_mined_blocks = mem::take(&mut self.last_mined_blocks); self.last_mined_blocks = Self::clear_stale_mined_blocks(this_burn_tip.block_height, old_last_mined_blocks); @@ -3553,7 +3549,7 @@ impl RelayerThread { /// cost since we won't be mining it anymore. 
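Both `mem::replace(&mut self.last_mined_blocks, MinedBlocks::new())` above and `mem::replace(&mut self.miner_tip, None)` in the hunk that follows become `mem::take`, which swaps in `Default::default()` and returns the old value; behavior is identical for any `Default` type. A small demonstration:

```rust
use std::mem;

fn main() {
    // Old form: spell out the replacement value and keep the previous one.
    let mut miner_tip: Option<&str> = Some("tip-at-height-100");
    let old = mem::replace(&mut miner_tip, None);
    assert_eq!(old, Some("tip-at-height-100"));
    assert_eq!(miner_tip, None);

    // New form: `mem::take` swaps in `Default::default()` -- `None` for any
    // `Option<T>`, an empty map for a `HashMap` -- and returns the old value.
    let mut last_mined: std::collections::HashMap<u64, &str> =
        [(100u64, "block-a")].into_iter().collect();
    let drained = mem::take(&mut last_mined);
    assert_eq!(drained.len(), 1);
    assert!(last_mined.is_empty());
}
```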
fn setup_microblock_mining_state(&mut self, new_miner_tip: Option) { // update state - let my_miner_tip = std::mem::replace(&mut self.miner_tip, None); + let my_miner_tip = std::mem::take(&mut self.miner_tip); let best_tip = Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone()); if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed @@ -3597,7 +3593,7 @@ impl RelayerThread { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo, - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -3676,14 +3672,14 @@ impl RelayerThread { /// Create the block miner thread state. /// Only proceeds if all of the following are true: - /// * the miner is not blocked - /// * last_burn_block corresponds to the canonical sortition DB's chain tip - /// * the time of issuance is sufficiently recent - /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) - /// * a miner thread is not running already + /// * The miner is not blocked + /// * `last_burn_block` corresponds to the canonical sortition DB's chain tip + /// * The time of issuance is sufficiently recent + /// * There are no unprocessed stacks blocks in the staging DB + /// * The relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * A miner thread is not running already fn create_block_miner( &mut self, registered_key: RegisteredKey, @@ -3724,11 +3720,11 @@ impl RelayerThread { } } - let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_header_hash = last_burn_block.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( @@ -3797,6 +3793,7 @@ impl RelayerThread { /// Try to start up a block miner thread with this given VRF key and current burnchain tip. /// Returns true if the thread was started; false if it was not (for any reason) + #[allow(clippy::incompatible_msrv)] pub fn block_miner_thread_try_start( &mut self, registered_key: RegisteredKey, @@ -3898,11 +3895,13 @@ impl RelayerThread { true } - /// Start up a microblock miner thread if we can: - /// * no miner thread must be running already - /// * the miner must not be blocked - /// * we must have won the sortition on the stacks chain tip - /// Returns true if the thread was started; false if not. + /// Start up a microblock miner thread if possible: + /// * No miner thread must be running already + /// * The miner must not be blocked + /// * We must have won the sortition on the Stacks chain tip + /// + /// Returns `true` if the thread was started; `false` if not. 
+ #[allow(clippy::incompatible_msrv)] pub fn microblock_miner_thread_try_start(&mut self) -> bool { let miner_tip = match self.miner_tip.as_ref() { Some(tip) => tip.clone(), @@ -4003,8 +4002,7 @@ impl RelayerThread { last_mined_block.burn_block_height, &self.last_mined_blocks, ) - .len() - == 0 + .is_empty() { // first time we've mined a block in this burnchain block debug!( @@ -4019,8 +4017,8 @@ impl RelayerThread { &last_mined_block.anchored_block.block_hash() ); - let bhh = last_mined_block.burn_hash.clone(); - let orig_bhh = last_mined_block.orig_burn_hash.clone(); + let bhh = last_mined_block.burn_hash; + let orig_bhh = last_mined_block.orig_burn_hash; let tenure_begin = last_mined_block.tenure_begin; self.last_mined_blocks.insert( @@ -4058,7 +4056,7 @@ impl RelayerThread { let num_mblocks = chainstate .unconfirmed_state .as_ref() - .map(|ref unconfirmed| unconfirmed.num_microblocks()) + .map(|unconfirmed| unconfirmed.num_microblocks()) .unwrap_or(0); (processed_unconfirmed_state, num_mblocks) @@ -4134,14 +4132,16 @@ impl RelayerThread { None } - /// Try to join with the miner thread. If we succeed, join the thread and return true. - /// Otherwise, if the thread is still running, return false; + /// Try to join with the miner thread. If successful, join the thread and return `true`. + /// Otherwise, if the thread is still running, return `false`. + /// /// Updates internal state gleaned from the miner, such as: - /// * new stacks block data - /// * new keychain state - /// * new metrics - /// * new unconfirmed state - /// Returns true if joined; false if not. + /// * New Stacks block data + /// * New keychain state + /// * New metrics + /// * New unconfirmed state + /// + /// Returns `true` if joined; `false` if not. pub fn miner_thread_try_join(&mut self) -> bool { if let Some(thread_handle) = self.miner_thread.take() { let new_thread_handle = self.inner_miner_thread_try_join(thread_handle); @@ -4193,7 +4193,7 @@ impl RelayerThread { RelayerDirective::RegisterKey(last_burn_block) => { let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { - saved_key_opt = Self::load_saved_vrf_key(&path); + saved_key_opt = Self::load_saved_vrf_key(path); } if let Some(saved_key) = saved_key_opt { self.globals.resume_leader_key(saved_key); @@ -4266,8 +4266,8 @@ impl ParentStacksBlockInfo { ) -> Result { let stacks_tip_header = StacksChainState::get_anchored_block_header_info( chain_state.db(), - &mine_tip_ch, - &mine_tip_bh, + mine_tip_ch, + mine_tip_bh, ) .unwrap() .ok_or_else(|| { @@ -4358,9 +4358,9 @@ impl ParentStacksBlockInfo { Ok(ParentStacksBlockInfo { stacks_parent_header: stacks_tip_header, - parent_consensus_hash: mine_tip_ch.clone(), + parent_consensus_hash: *mine_tip_ch, parent_block_burn_height: parent_block_height, - parent_block_total_burn: parent_block_total_burn, + parent_block_total_burn, parent_winning_vtxindex, coinbase_nonce, }) @@ -4412,16 +4412,14 @@ impl PeerThread { .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let mempool = MemPoolDB::open( + MemPoolDB::open( config.is_mainnet(), config.burnchain.chain_id, &config.get_chainstate_path_str(), cost_estimator, metric, ) - .expect("Database failure opening mempool"); - - mempool + .expect("Database failure opening mempool") } /// Instantiate the p2p thread. 
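The `MemPoolDB::open` rewrite above (and its twin in the next hunk) is clippy's `let_and_return` fix: a binding created only to be returned on the next line adds a name but no meaning, so the expression becomes the function's tail. In miniature:

```rust
// Before (the pattern clippy::let_and_return flags): bind, then return the binding.
fn parse_verbose(s: &str) -> u64 {
    let value = s.parse::<u64>().expect("not a number");
    value
}

// After: the expression itself is the function's tail.
fn parse_tail(s: &str) -> u64 {
    s.parse::<u64>().expect("not a number")
}

fn main() {
    assert_eq!(parse_verbose("42"), parse_tail("42"));
}
```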
@@ -4531,6 +4529,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub fn run_one_pass( &mut self, indexer: &B, @@ -4542,7 +4541,7 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = !self.results_with_data.is_empty(); let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( @@ -4566,11 +4565,7 @@ impl PeerThread { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. let handler_args = RPCHandlerArgs { - exit_at_block_height: p2p_thread - .config - .burnchain - .process_exit_at_block_height - .clone(), + exit_at_block_height: p2p_thread.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -4726,32 +4721,32 @@ impl StacksNode { .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let mempool = MemPoolDB::open( + MemPoolDB::open( config.is_mainnet(), config.burnchain.chain_id, &config.get_chainstate_path_str(), cost_estimator, metric, ) - .expect("BUG: failed to instantiate mempool"); - - mempool + .expect("BUG: failed to instantiate mempool") } - /// Set up the Peer DB and update any soft state from the config file. This includes: - /// * blacklisted/whitelisted nodes - /// * node keys - /// * bootstrap nodes - /// Returns the instantiated PeerDB + /// Set up the Peer DB and update any soft state from the config file. This includes: + /// * Blacklisted/whitelisted nodes + /// * Node keys + /// * Bootstrap nodes + /// + /// Returns the instantiated `PeerDB`. + /// /// Panics on failure. 
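`run_one_pass` in this hunk (like its counterpart in `nakamoto_node/peer.rs` earlier in the patch) keeps its `&Box<_>` parameters under `#[allow(clippy::borrowed_box)]`. The lint's point is that `&Box<T>` adds an indirection a plain `&T` already covers, and `&T` accepts unboxed callers too; a sketch with an invented trait:

```rust
// Why clippy flags `&Box<T>` parameters: the extra indirection adds nothing
// a plain `&T` wouldn't provide, and `&T` accepts non-boxed values as well.
trait CostEstimator {
    fn estimate(&self) -> u64;
}

struct UnitEstimator;
impl CostEstimator for UnitEstimator {
    fn estimate(&self) -> u64 { 1 }
}

// The shape clippy::borrowed_box warns about (kept in this patch where
// changing the signature would ripple through callers):
#[allow(clippy::borrowed_box)]
fn run_with_boxed(est: &Box<dyn CostEstimator>) -> u64 {
    est.estimate()
}

// The suggested shape: borrow the trait object directly.
fn run_with_ref(est: &dyn CostEstimator) -> u64 {
    est.estimate()
}

fn main() {
    let boxed: Box<dyn CostEstimator> = Box::new(UnitEstimator);
    assert_eq!(run_with_boxed(&boxed), 1);
    // `&*boxed` reborrows the contents, so non-boxed callers work too.
    assert_eq!(run_with_ref(&*boxed), 1);
    assert_eq!(run_with_ref(&UnitEstimator), 1);
}
```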
fn setup_peer_db( config: &Config, burnchain: &Burnchain, stackerdb_contract_ids: &[QualifiedContractIdentifier], ) -> PeerDB { - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(config.node.data_url.to_string()).unwrap(); let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { + if !initial_neighbors.is_empty() { info!( "Will bootstrap from peers {}", VecDisplay(&initial_neighbors) @@ -4778,7 +4773,7 @@ impl StacksNode { config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - config.connection_options.private_key_lifetime.clone(), + config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -4798,12 +4793,12 @@ impl StacksNode { // allow all bootstrap nodes { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for initial_neighbor in initial_neighbors.iter() { // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::update_peer(&tx, initial_neighbor).unwrap(); PeerDB::set_allow_peer( - &mut tx, + &tx, initial_neighbor.addr.network_id, &initial_neighbor.addr.addrbytes, initial_neighbor.addr.port, @@ -4820,10 +4815,10 @@ impl StacksNode { // deny all config-denied peers { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -4836,9 +4831,9 @@ impl StacksNode { // update services to indicate we can support mempool sync and stackerdb { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); PeerDB::set_local_services( - &mut tx, + &tx, (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16) | (ServiceFlags::STACKERDB as u16), @@ -4867,7 +4862,7 @@ impl StacksNode { .expect("Error while loading stacks epochs"); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() @@ -4914,7 +4909,7 @@ impl StacksNode { _ => panic!("Unable to retrieve local peer"), }; - let p2p_net = PeerNetwork::new( + PeerNetwork::new( peerdb, atlasdb, stackerdbs, @@ -4925,9 +4920,7 @@ impl StacksNode { config.connection_options.clone(), stackerdb_machines, epochs, - ); - - p2p_net + ) } /// Main loop of the relayer. 
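The `setup_peer_db` hunks above drop `mut` from the transaction bindings because the `PeerDB` helpers now take the transaction by shared reference. Assuming the usual rusqlite types underneath, that works because `Transaction` derefs to `Connection` and `execute` takes `&self`; exclusive access is only needed to open the transaction. A generic sketch with an illustrative table and helper, not the actual `PeerDB` schema:

```rust
// Shared-reference transaction pattern, shown with rusqlite directly.
use rusqlite::{Connection, Result};

fn set_allow_peer(tx: &rusqlite::Transaction, peer: &str) -> Result<()> {
    // `Transaction` derefs to `Connection`, and `execute` takes `&self`,
    // so a shared `&Transaction` is enough to run statements.
    tx.execute("INSERT INTO allowed_peers (addr) VALUES (?1)", [peer])?;
    Ok(())
}

fn main() -> Result<()> {
    let mut conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE allowed_peers (addr TEXT)", ())?;

    // `transaction()` needs `&mut Connection`, but the resulting `tx` can be
    // handed around as `&tx` -- all the updated helpers ask for.
    let tx = conn.transaction()?;
    set_allow_peer(&tx, "10.0.0.1:20444")?;
    tx.commit()?;
    Ok(())
}
```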
@@ -5223,9 +5216,9 @@ impl StacksNode { .globals .relay_send .send(RelayerDirective::ProcessTenure( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, )) .is_ok(); } @@ -5270,13 +5263,11 @@ impl StacksNode { block_height, op.apparent_sender, &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); - } else { - if self.is_miner { - info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash - ); - } + } else if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); } } @@ -5313,7 +5304,7 @@ impl StacksNode { return ret; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { warn!("Failed to create {}: {:?}", &path, &e); @@ -5321,13 +5312,13 @@ impl StacksNode { } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + if let Err(e) = f.write_all(key_json.as_bytes()) { warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); return ret; } info!("Saved activated VRF key to {}", &path); - return ret; + ret } /// Join all inner threads diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 1895912ba52..8aebd4814a9 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -151,6 +151,7 @@ pub fn get_names(use_test_chainstate_data: bool) -> Box bool { impl Node { /// Instantiate and initialize a new node, given a config - pub fn new(config: Config, boot_block_exec: Box ()>) -> Self { + pub fn new(config: Config, boot_block_exec: Box) -> Self { let use_test_genesis_data = if config.burnchain.mode == "mocknet" { use_test_genesis_chainstate(&config) } else { @@ -407,14 +408,14 @@ impl Node { Config::assert_valid_epoch_settings(&burnchain, &epochs); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() }; // create a new peerdb - let data_url = UrlString::try_from(format!("{}", self.config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(self.config.node.data_url.to_string()).unwrap(); let initial_neighbors = self.config.node.bootstrap_node.clone(); @@ -452,7 +453,7 @@ impl Node { self.config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - self.config.connection_options.private_key_lifetime.clone(), + self.config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -464,10 +465,10 @@ impl Node { println!("DENY NEIGHBORS {:?}", &self.config.node.deny_nodes); { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in self.config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -488,7 +489,7 @@ impl Node { }; let event_dispatcher = self.event_dispatcher.clone(); - let exit_at_block_height = self.config.burnchain.process_exit_at_block_height.clone(); + let exit_at_block_height = 
self.config.burnchain.process_exit_at_block_height; let p2p_net = PeerNetwork::new( peerdb, @@ -577,9 +578,9 @@ impl Node { // Registered key has been mined new_key = Some(RegisteredKey { vrf_public_key: op.public_key.clone(), - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, - target_block_height: (op.block_height as u64) - 1, + block_height: op.block_height, + op_vtxindex: op.vtxindex, + target_block_height: op.block_height - 1, memo: op.memo.clone(), }); } @@ -649,7 +650,7 @@ impl Node { burnchain.pox_constants, ) .expect("Error while opening sortition db"); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query canonical burn chain tip"); // Generates a proof out of the sortition hash provided in the params. @@ -734,7 +735,7 @@ impl Node { anchored_block_from_ongoing_tenure.header.block_hash(), burn_fee, ®istered_key, - &burnchain_tip, + burnchain_tip, VRFSeed::from_proof(&vrf_proof), ); @@ -802,7 +803,7 @@ impl Node { .preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, + anchored_block, &parent_consensus_hash, 0, ) @@ -813,7 +814,7 @@ impl Node { let res = self .chain_state .preprocess_streamed_microblock( - &consensus_hash, + consensus_hash, &anchored_block.block_hash(), microblock, ) @@ -849,31 +850,28 @@ impl Node { match process_blocks_at_tip { Err(e) => panic!("Error while processing block - {:?}", e), Ok(ref mut blocks) => { - if blocks.len() == 0 { + if blocks.is_empty() { break; } else { for block in blocks.iter() { - match block { - (Some(epoch_receipt), _) => { - let attachments_instances = - self.get_attachment_instances(epoch_receipt, &atlas_config); - if !attachments_instances.is_empty() { - for new_attachment in attachments_instances.into_iter() { - if let Err(e) = - atlas_db.queue_attachment_instance(&new_attachment) - { - warn!( - "Atlas: Error writing attachment instance to DB"; - "err" => ?e, - "index_block_hash" => %new_attachment.index_block_hash, - "contract_id" => %new_attachment.contract_id, - "attachment_index" => %new_attachment.attachment_index, - ); - } + if let (Some(epoch_receipt), _) = block { + let attachments_instances = + self.get_attachment_instances(epoch_receipt, &atlas_config); + if !attachments_instances.is_empty() { + for new_attachment in attachments_instances.into_iter() { + if let Err(e) = + atlas_db.queue_attachment_instance(&new_attachment) + { + warn!( + "Atlas: Error writing attachment instance to DB"; + "err" => ?e, + "index_block_hash" => %new_attachment.index_block_hash, + "contract_id" => %new_attachment.contract_id, + "attachment_index" => %new_attachment.attachment_index, + ); } } } - _ => {} } } @@ -990,7 +988,7 @@ impl Node { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: vec![], - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 1, txid, block_height: 0, diff --git a/testnet/stacks-node/src/operations.rs b/testnet/stacks-node/src/operations.rs index 4680098d2b4..0109077a5fd 100644 --- a/testnet/stacks-node/src/operations.rs +++ b/testnet/stacks-node/src/operations.rs @@ -31,8 +31,7 @@ impl BurnchainOpSigner { } pub fn get_public_key(&mut self) -> Secp256k1PublicKey { - let public_key = Secp256k1PublicKey::from_private(&self.secret_key); - public_key + Secp256k1PublicKey::from_private(&self.secret_key) } pub fn sign_message(&mut self, hash: &[u8]) -> Option { diff --git 
a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 85ace37fa43..23331673340 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -137,8 +137,8 @@ impl BootRunLoop { /// node depending on the current burnchain height. pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { match self.active_loop { - InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start), - InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start), + InnerLoops::Epoch2(_) => self.start_from_neon(burnchain_opt, mine_start), + InnerLoops::Epoch3(_) => self.start_from_naka(burnchain_opt, mine_start), } } @@ -227,7 +227,7 @@ impl BootRunLoop { // if loop exited, do the transition info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop"); neon_term_switch.store(false, Ordering::SeqCst); - return true + true }) } diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 2922ce584ac..c61581553cc 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -21,10 +21,7 @@ impl RunLoop { } /// Sets up a runloop and node, given a config. - pub fn new_with_boot_exec( - config: Config, - boot_exec: Box ()>, - ) -> Self { + pub fn new_with_boot_exec(config: Config, boot_exec: Box) -> Self { // Build node based on config let node = Node::new(config.clone(), boot_exec); @@ -174,17 +171,14 @@ impl RunLoop { None => None, }; - match artifacts_from_tenure { - Some(ref artifacts) => { - // Have each node receive artifacts from the current tenure - self.node.commit_artifacts( - &artifacts.anchored_block, - &artifacts.parent_block, - &mut burnchain, - artifacts.burn_fee, - ); - } - None => {} + if let Some(artifacts) = &artifacts_from_tenure { + // Have each node receive artifacts from the current tenure + self.node.commit_artifacts( + &artifacts.anchored_block, + &artifacts.parent_block, + &mut burnchain, + artifacts.burn_fee, + ); } let (new_burnchain_tip, _) = burnchain.sync(None)?; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index b2b9aa3f752..ce4c06a16c3 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -37,6 +37,7 @@ macro_rules! 
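boot_nakamoto.rs drops its `return` keywords in favor of tail expressions: the dispatch `match` in `start` and the closure's final `true` are both values now. A short sketch of the idiom (the enum here is a cut-down stand-in, not the real run-loop type):

```rust
enum InnerLoops {
    Epoch2(u64),
    Epoch3(u64),
}

// The `match` is the function's tail expression; `return` adds nothing.
fn start(active_loop: &InnerLoops) -> &'static str {
    match active_loop {
        InnerLoops::Epoch2(_) => "neon",
        InnerLoops::Epoch3(_) => "nakamoto",
    }
}

fn main() {
    assert_eq!(start(&InnerLoops::Epoch2(0)), "neon");
    assert_eq!(start(&InnerLoops::Epoch3(0)), "nakamoto");
}
```

The helium.rs hunk just below applies the companion idiom: a `match` on `artifacts_from_tenure` whose `None` arm was empty becomes `if let Some(artifacts) = &artifacts_from_tenure { ... }`, which states the one interesting case directly.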
info_green { }) } +#[allow(clippy::type_complexity)] pub struct RunLoopCallbacks { on_burn_chain_initialized: Option)>, on_new_burn_chain_state: Option, @@ -45,6 +46,12 @@ pub struct RunLoopCallbacks { on_new_tenure: Option, } +impl Default for RunLoopCallbacks { + fn default() -> Self { + Self::new() + } +} + impl RunLoopCallbacks { pub fn new() -> RunLoopCallbacks { RunLoopCallbacks { @@ -167,7 +174,7 @@ pub fn announce_boot_receipts( event_dispatcher: &mut EventDispatcher, chainstate: &StacksChainState, pox_constants: &PoxConstants, - boot_receipts: &Vec, + boot_receipts: &[StacksTransactionReceipt], ) { let block_header_0 = StacksChainState::get_genesis_header_info(chainstate.db()) .expect("FATAL: genesis block header not stored"); @@ -189,7 +196,7 @@ pub fn announce_boot_receipts( Txid([0x00; 32]), &[], None, - block_header_0.burn_header_hash.clone(), + block_header_0.burn_header_hash, block_header_0.burn_header_height, block_header_0.burn_header_timestamp, &ExecutionCost::zero(), diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 3d94b1c3515..de836568d27 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -100,7 +100,7 @@ impl RunLoop { config, globals: None, coordinator_channels: Some(channels), - counters: counters.unwrap_or_else(|| Counters::new()), + counters: counters.unwrap_or_default(), should_keep_running, event_dispatcher, pox_watchdog: None, @@ -167,9 +167,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {:?}", e); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -285,7 +284,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -296,7 +294,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -325,13 +323,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + &coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -382,7 +379,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -477,7 +474,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let 
(rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a18a61988ba..7be8939d9eb 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -369,9 +369,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {:?}", e); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -490,14 +489,11 @@ impl RunLoop { burnchain_controller .start(Some(target_burnchain_block_height)) .map_err(|e| { - match e { - Error::CoordinatorClosed => { - if !should_keep_running.load(Ordering::SeqCst) { - info!("Shutdown initiated during burnchain initialization: {}", e); - return burnchain_error::ShutdownInitiated; - } - } - _ => {} + if matches!(e, Error::CoordinatorClosed) + && !should_keep_running.load(Ordering::SeqCst) + { + info!("Shutdown initiated during burnchain initialization: {}", e); + return burnchain_error::ShutdownInitiated; } error!("Burnchain controller stopped: {}", e); panic!(); @@ -581,7 +577,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -592,7 +587,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -621,13 +616,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + &coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -685,7 +679,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -737,7 +731,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -885,7 +879,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -899,11 
+893,11 @@ impl RunLoop { }; let canonical_affirmation_map = match static_get_canonical_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, - &chain_state_db, + chain_state_db, &sn.sortition_id, ) { Ok(am) => am, @@ -1018,15 +1012,13 @@ impl RunLoop { ) .unwrap(); - let liveness_thread_handle = thread::Builder::new() + thread::Builder::new() .name(format!("chain-liveness-{}", config.node.rpc_bind)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { Self::drive_chain_liveness(globals, config, burnchain, sortdb, chain_state_db) }) - .expect("FATAL: failed to spawn chain liveness thread"); - - liveness_thread_handle + .expect("FATAL: failed to spawn chain liveness thread") } /// Starts the node runloop. @@ -1109,7 +1101,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. @@ -1137,7 +1129,7 @@ impl RunLoop { .tx_begin() .expect("FATAL: failed to begin burnchain DB tx"); for (reward_cycle, affirmation) in self.config.burnchain.affirmation_overrides.iter() { - tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).expect(&format!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); + tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).unwrap_or_else(|_| panic!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); } tx.commit() .expect("FATAL: failed to commit burnchain DB tx"); diff --git a/testnet/stacks-node/src/stacks_events.rs b/testnet/stacks-node/src/stacks_events.rs index f63b17a6abb..2f96bbfe660 100644 --- a/testnet/stacks-node/src/stacks_events.rs +++ b/testnet/stacks-node/src/stacks_events.rs @@ -92,7 +92,7 @@ fn handle_connection(mut stream: TcpStream) { contents ); - stream.write(response.as_bytes()).unwrap(); + let _nmb_bytes = stream.write(response.as_bytes()).unwrap(); stream.flush().unwrap(); } } diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index ff68126a831..d4c05ec7fe3 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -69,7 +69,7 @@ impl PoxSyncWatchdogComms { self.interruptable_sleep(1)?; std::hint::spin_loop(); } - return Ok(true); + Ok(true) } fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { @@ -95,7 +95,7 @@ impl PoxSyncWatchdogComms { self.interruptable_sleep(1)?; std::hint::spin_loop(); } - return Ok(true); + Ok(true) } pub fn should_keep_running(&self) -> bool { @@ -192,7 +192,7 @@ impl PoxSyncWatchdog { new_processed_blocks: VecDeque::new(), last_attachable_query: 0, last_processed_query: 0, - max_samples: max_samples, + max_samples, max_staging: 10, watch_start_ts: 0, last_block_processed_ts: 0, @@ -200,7 +200,7 @@ impl PoxSyncWatchdog { estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, steady_state_resync_ts: 0, - chainstate: chainstate, + chainstate, relayer_comms: watchdog_comms, }) } @@ -213,7 +213,7 @@ impl PoxSyncWatchdog { fn count_attachable_stacks_blocks(&mut self) -> Result { // number of staging blocks that have arrived since the last sortition let cnt = 
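The neon.rs changes in this stretch share one theme: say the failure case once. `unwrap_or_default()` replaces `unwrap_or_else(|| Counters::new())`, a `match` whose only live arm is `Err` becomes `if let Err(e)`, a nested `match`/`if` pair becomes a single `matches!(..) && ..` condition, and `expect(&format!(..))` becomes `unwrap_or_else(|_| panic!(..))` so the message is only formatted on failure. A runnable sketch; `Counters`, `Error`, and the fallible calls are all hypothetical stand-ins:

```rust
#[derive(Default)]
struct Counters {
    blocks: u64,
}

#[derive(Debug)]
enum Error {
    CoordinatorClosed,
    #[allow(dead_code)]
    Other,
}

fn create_wallet_if_dne() -> Result<(), String> {
    Err("wallet exists".to_string())
}

fn main() {
    // `unwrap_or_default()` instead of `unwrap_or_else(|| Counters::new())`.
    let counters: Counters = None.unwrap_or_default();
    assert_eq!(counters.blocks, 0);

    // `if let Err(e)` instead of a `match` whose other arm was `_ => {}`.
    if let Err(e) = create_wallet_if_dne() {
        eprintln!("Error when creating wallet: {:?}", e);
    }

    // `matches!` folds a single-variant match into a boolean condition.
    let e = Error::CoordinatorClosed;
    let keep_running = false;
    if matches!(e, Error::CoordinatorClosed) && !keep_running {
        println!("shutdown initiated during burnchain initialization: {e:?}");
    }

    // `unwrap_or_else(|_| panic!(...))` formats the message lazily;
    // `expect(&format!(...))` would allocate it even on success.
    let reward_cycle = 7;
    let res: Result<(), String> = Ok(());
    res.unwrap_or_else(|_| panic!("FATAL: failed to set override for cycle {reward_cycle}"));
}
```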
StacksChainState::count_attachable_staging_blocks( - &self.chainstate.db(), + self.chainstate.db(), self.max_staging, self.last_attachable_query, ) @@ -229,7 +229,7 @@ impl PoxSyncWatchdog { fn count_processed_stacks_blocks(&mut self) -> Result { // number of staging blocks that have arrived since the last sortition let cnt = StacksChainState::count_processed_staging_blocks( - &self.chainstate.db(), + self.chainstate.db(), self.max_staging, self.last_processed_query, ) @@ -281,7 +281,7 @@ impl PoxSyncWatchdog { /// Is a derivative approximately flat, with a maximum absolute deviation from 0? /// Return whether or not the sample is mostly flat, and how many points were over the given /// error bar in either direction. - fn is_mostly_flat(deriv: &Vec, error: i64) -> (bool, usize) { + fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { let mut total_deviates = 0; let mut ret = true; for d in deriv.iter() { @@ -294,7 +294,7 @@ impl PoxSyncWatchdog { } /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &Vec) -> f64 { + fn hilo_filter_avg(samples: &[i64]) -> f64 { // take average with low and high pass let mut min = i64::MAX; let mut max = i64::MIN; @@ -358,7 +358,7 @@ impl PoxSyncWatchdog { } let block_wait_times = - StacksChainState::measure_block_wait_time(&chainstate.db(), start_height, end_height) + StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) .expect("BUG: failed to query chainstate block-processing times"); PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) @@ -386,7 +386,7 @@ impl PoxSyncWatchdog { } let block_download_times = StacksChainState::measure_block_download_time( - &chainstate.db(), + chainstate.db(), start_height, end_height, ) diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 5dd67cddabb..73221338897 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -41,7 +41,8 @@ pub struct Tenure { parent_block_total_burn: u64, } -impl<'a> Tenure { +impl Tenure { + #[allow(clippy::too_many_arguments)] pub fn new( parent_block: ChainTip, coinbase_tx: StacksTransaction, @@ -82,7 +83,7 @@ impl<'a> Tenure { elapsed = Instant::now().duration_since(self.burnchain_tip.received_at); } - let (mut chain_state, _) = StacksChainState::open( + let (chain_state, _) = StacksChainState::open( self.config.is_mainnet(), self.config.burnchain.chain_id, &self.config.get_chainstate_path_str(), @@ -91,13 +92,13 @@ impl<'a> Tenure { .unwrap(); let (anchored_block, _, _) = StacksBlockBuilder::build_anchored_block( - &mut chain_state, + &chain_state, burn_dbconn, &mut self.mem_pool, &self.parent_block.metadata, self.parent_block_total_burn, self.vrf_proof.clone(), - self.microblock_pubkeyhash.clone(), + self.microblock_pubkeyhash, &self.coinbase_tx, BlockBuilderSettings::limited(), None, diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 90b13101832..702f6d59535 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -43,21 +43,18 @@ impl BitcoinCoreController { fn add_rpc_cli_args(&self, command: &mut Command) { command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); - match ( + if let (Some(username), Some(password)) = ( &self.config.burnchain.username, &self.config.burnchain.password, ) { - (Some(username), Some(password)) => { - command - 
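syncctl.rs picks up two more idioms: struct-literal field shorthand where the local variable and the field share a name (clippy's `redundant_field_names`), and `&[i64]` slice parameters in place of `&Vec<i64>`. A sketch follows; `Watchdog` is a cut-down stand-in for the real watchdog, and this `hilo_filter_avg` is a simplified variant that drops every occurrence of the extremes, not the real implementation:

```rust
struct Watchdog {
    max_samples: usize,
    max_staging: u64,
}

impl Watchdog {
    fn new(max_samples: usize) -> Watchdog {
        Watchdog {
            max_samples, // shorthand for `max_samples: max_samples`
            max_staging: 10,
        }
    }
}

// Low/high pass filter average over any slice, not just a &Vec<i64>.
fn hilo_filter_avg(samples: &[i64]) -> f64 {
    let (min, max) = match (samples.iter().min(), samples.iter().max()) {
        (Some(&lo), Some(&hi)) => (lo, hi),
        _ => return 0.0,
    };
    let kept: Vec<i64> = samples
        .iter()
        .copied()
        .filter(|&s| s != min && s != max)
        .collect();
    if kept.is_empty() {
        0.0
    } else {
        kept.iter().sum::<i64>() as f64 / kept.len() as f64
    }
}

fn main() {
    let w = Watchdog::new(32);
    assert_eq!((w.max_samples, w.max_staging), (32, 10));
    assert_eq!(hilo_filter_avg(&[1, 5, 5, 9]), 5.0);
}
```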
.arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); } } pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { - std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); + std::fs::create_dir_all(self.config.get_burnchain_path_str()).unwrap(); let mut command = Command::new("bitcoind"); command @@ -104,7 +101,7 @@ impl BitcoinCoreController { } pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { - if let Some(_) = self.bitcoind_process.take() { + if self.bitcoind_process.take().is_some() { let payload = BitcoinRPCRequest { method: "stop".to_string(), params: vec![], @@ -217,11 +214,11 @@ fn bitcoind_integration(segwit_flag: bool) { .callbacks .on_new_burn_chain_state(|round, burnchain_tip, chain_tip| { let block = &burnchain_tip.block_snapshot; - let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round as u64 + 1); + let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round + 1); assert_eq!(block.total_burn, expected_total_burn); - assert_eq!(block.sortition, true); - assert_eq!(block.num_sortitions, round as u64 + 1); - assert_eq!(block.block_height, round as u64 + 2003); + assert!(block.sortition); + assert_eq!(block.num_sortitions, round + 1); + assert_eq!(block.block_height, round + 2003); let leader_key = "f888e0cab5c16de8edf72b544a189ece5c0b95cd9178606c970789ac71d17bb4"; match round { @@ -246,7 +243,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert!(op.parent_vtxindex == 0); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } } @@ -270,7 +267,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2003); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -299,7 +296,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2004); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -328,7 +325,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2005); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -357,7 +354,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2006); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -386,7 +383,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2007); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -464,7 +461,6 @@ fn bitcoind_integration(segwit_flag: bool) { }, _ => {} }; - return }); // Use block's hook for asserting expectations diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 076a5f61f36..6fe0018ced0 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -19,9 +19,7 @@ use stacks::core::{ PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, -}; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use 
stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; @@ -50,7 +48,7 @@ fn test_exact_block_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let epoch_205_transition_height = 210; let transactions_to_broadcast = 25; @@ -256,10 +254,8 @@ fn test_exact_block_costs() { if dbget_txs.len() >= 2 { processed_txs_before_205 = true; } - } else { - if dbget_txs.len() >= 2 { - processed_txs_after_205 = true; - } + } else if dbget_txs.len() >= 2 { + processed_txs_after_205 = true; } assert_eq!(mined_anchor_cost, anchor_cost as u64); @@ -287,7 +283,7 @@ fn test_dynamic_db_method_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let contract_name = "test-contract"; let epoch_205_transition_height = 210; @@ -455,8 +451,7 @@ fn test_dynamic_db_method_costs() { .as_i64() .unwrap(); eprintln!( - "Burn height = {}, runtime_cost = {}, function_name = {}", - burn_height, runtime_cost, function_name + "Burn height = {burn_height}, runtime_cost = {runtime_cost}, function_name = {function_name}" ); if function_name == "db-get1" { @@ -569,21 +564,20 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 { @@ -831,7 +825,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::SmartContract(contract, ..) 
=> { - contract.name == ContractName::try_from("increment-contract").unwrap() + contract.name == ContractName::from("increment-contract") } _ => false, }, @@ -847,7 +841,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -863,7 +857,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -882,7 +876,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -897,7 +891,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -916,10 +910,7 @@ fn bigger_microblock_streams_in_2_05() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -993,7 +984,7 @@ fn bigger_microblock_streams_in_2_05() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -1035,9 +1026,8 @@ fn bigger_microblock_streams_in_2_05() { ) ) (begin - (crash-me \"{}\")) - ", - &format!("large-contract-{}", &ix) + (crash-me \"large-contract-{ix}\")) + " ) ) }) @@ -1176,9 +1166,9 @@ fn bigger_microblock_streams_in_2_05() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
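Several epoch-2.05 test hunks switch to inlined format arguments (`format!("large-{ix}")` instead of `format!("large-{}", ix)`); the hunk that follows replaces `find(..).is_some()` with `contains`, and the `assert!(false)` arms earlier in the file became `panic!("Unexpected operation")` so a failing test says what went wrong. A small sketch of all three:

```rust
fn main() {
    let ix = 3;
    // Inline capture (Rust 2021 / 1.58+): the variable is named in the string.
    let name = format!("large-{ix}");
    assert_eq!(name, "large-3");

    // `contains` states the intent of `find(...).is_some()` directly.
    assert!(name.contains("large"));

    // A message-bearing panic beats `assert!(false)` in a match arm:
    // the test output explains the failure.
    let op = "block_commit";
    match op {
        "block_commit" => println!("ok"),
        _ => panic!("Unexpected operation"),
    }
}
```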
= parsed.payload { - if tsc.name.to_string().find("costs-2").is_some() { + if tsc.name.to_string().contains("costs-2") { in_205 = true; - } else if tsc.name.to_string().find("large").is_some() { + } else if tsc.name.to_string().contains("large") { num_big_microblock_txs += 1; if in_205 { total_big_txs_per_microblock_205 += 1; @@ -1209,7 +1199,7 @@ fn bigger_microblock_streams_in_2_05() { max_big_txs_per_microblock_20 = num_big_microblock_txs; } - eprintln!("Epoch size: {:?}", &total_execution_cost); + eprintln!("Epoch size: {total_execution_cost:?}"); if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { epoch_20_stream_cost = total_execution_cost; @@ -1232,21 +1222,13 @@ fn bigger_microblock_streams_in_2_05() { } eprintln!( - "max_big_txs_per_microblock_20: {}, total_big_txs_per_microblock_20: {}", - max_big_txs_per_microblock_20, total_big_txs_per_microblock_20 - ); - eprintln!( - "max_big_txs_per_microblock_205: {}, total_big_txs_per_microblock_205: {}", - max_big_txs_per_microblock_205, total_big_txs_per_microblock_205 - ); - eprintln!( - "confirmed stream execution in 2.0: {:?}", - &epoch_20_stream_cost + "max_big_txs_per_microblock_20: {max_big_txs_per_microblock_20}, total_big_txs_per_microblock_20: {total_big_txs_per_microblock_20}" ); eprintln!( - "confirmed stream execution in 2.05: {:?}", - &epoch_205_stream_cost + "max_big_txs_per_microblock_205: {max_big_txs_per_microblock_205}, total_big_txs_per_microblock_205: {total_big_txs_per_microblock_205}" ); + eprintln!("confirmed stream execution in 2.0: {epoch_20_stream_cost:?}"); + eprintln!("confirmed stream execution in 2.05: {epoch_205_stream_cost:?}"); // stuff happened assert!(epoch_20_stream_cost.runtime > 0); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 8f6c4663187..ebe14bae16f 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1,6 +1,7 @@ use std::collections::{HashMap, HashSet}; use std::{env, thread}; +use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::ClarityVersion; use stacks::burnchains::bitcoin::address::{ @@ -46,7 +47,7 @@ use crate::tests::neon_integrations::*; use crate::tests::*; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; -const MINER_BURN_PUBLIC_KEY: &'static str = +const MINER_BURN_PUBLIC_KEY: &str = "03dc62fe0b8964d01fc9ca9a5eec0e22e557a12cc656919e648f04e0b26fea5faa"; fn advance_to_2_1( @@ -210,7 +211,7 @@ fn advance_to_2_1( ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) @@ -264,13 +265,13 @@ fn advance_to_2_1( assert_eq!(account.nonce, 9); eprintln!("Begin Stacks 2.1"); - return ( + ( conf, btcd_controller, btc_regtest_controller, blocks_processed, channel, - ); + ) } #[test] @@ -285,7 +286,7 @@ fn transition_adds_burn_block_height() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1( @@ -411,9 +412,8 @@ fn transition_adds_burn_block_height() { // strip leading `0x` eprintln!("{:#?}", &cev); let clarity_serialized_value = hex_bytes( - &String::from_utf8( - 
cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] - .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -544,7 +544,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -554,7 +554,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let epoch_2_05 = 210; let epoch_2_1 = 215; @@ -655,7 +655,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op for a transfer-stx op that will get mined before the 2.1 epoch let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -687,8 +687,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -698,7 +698,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -728,7 +728,7 @@ fn transition_fixes_bitcoin_rigidity() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) @@ -812,7 +812,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -840,8 +840,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -851,7 +851,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -885,7 +885,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -914,8 +914,8 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off our transfer op. 
let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -925,7 +925,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -952,7 +952,7 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off another transfer op that will fall outside the window let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -980,8 +980,8 @@ fn transition_fixes_bitcoin_rigidity() { }; let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 123, memo: vec![], // to be filled in @@ -991,7 +991,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -1070,11 +1070,7 @@ fn transition_adds_get_pox_addr_recipients() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1(initial_balances, None, Some(pox_constants.clone()), false); @@ -1094,7 +1090,7 @@ fn transition_adds_get_pox_addr_recipients() { .iter() .enumerate() { - let spender_sk = spender_sks[i].clone(); + let spender_sk = spender_sks[i]; let pox_addr_tuple = execute( &format!( "{{ hashbytes: 0x{}, version: 0x{:02x} }}", @@ -1126,9 +1122,8 @@ fn transition_adds_get_pox_addr_recipients() { } // stack some STX to segwit addressses - for i in 4..7 { - let spender_sk = spender_sks[i].clone(); - let pubk = Secp256k1PublicKey::from_private(&spender_sk); + for (i, spender_sk) in spender_sks.iter().enumerate().take(7).skip(4) { + let pubk = Secp256k1PublicKey::from_private(spender_sk); let version = i as u8; let bytes = match i { 4 => { @@ -1147,7 +1142,7 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap() .unwrap(); let tx = make_contract_call( - &spender_sk, + spender_sk, 0, 300, conf.burnchain.chain_id, @@ -1183,7 +1178,7 @@ fn transition_adds_get_pox_addr_recipients() { ) "; - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sks[0])); + let spender_addr_c32 = to_addr(&spender_sks[0]); let contract_tx = make_contract_publish( &spender_sks[0], 1, @@ -1202,9 +1197,7 @@ fn transition_adds_get_pox_addr_recipients() { // mine through two reward cycles // now let's mine until the next reward cycle starts ... 
- while sort_height - < (stack_sort_height as u64) + (((2 * pox_constants.reward_cycle_length) + 1) as u64) - { + while sort_height < stack_sort_height + (((2 * pox_constants.reward_cycle_length) + 1) as u64) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = coord_channel.get_sortitions_processed(); eprintln!("Sort height: {}", sort_height); @@ -1244,13 +1237,12 @@ fn transition_adds_get_pox_addr_recipients() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if parsed.txid() == cc_txid { // check events for this block - for (_i, event) in events.iter().enumerate() { + for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] - .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -1313,9 +1305,10 @@ fn transition_adds_get_pox_addr_recipients() { for pox_addr_value in pox_addr_tuples.into_iter() { let pox_addr = - PoxAddress::try_from_pox_tuple(false, &pox_addr_value).expect( - &format!("FATAL: invalid PoX tuple {:?}", &pox_addr_value), - ); + PoxAddress::try_from_pox_tuple(false, &pox_addr_value) + .unwrap_or_else(|| { + panic!("FATAL: invalid PoX tuple {pox_addr_value:?}") + }); eprintln!("at {}: {:?}", burn_block_height, &pox_addr); if !pox_addr.is_burn() { found_pox_addrs.insert(pox_addr); @@ -1388,7 +1381,7 @@ fn transition_adds_mining_from_segwit() { let utxos = btc_regtest_controller .get_all_utxos(&Secp256k1PublicKey::from_hex(MINER_BURN_PUBLIC_KEY).unwrap()); - assert!(utxos.len() > 0); + assert!(!utxos.is_empty()); // all UTXOs should be segwit for utxo in utxos.iter() { @@ -1428,7 +1421,7 @@ fn transition_adds_mining_from_segwit() { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id).unwrap(); assert_eq!(commits.len(), 1); - let txid = commits[0].txid.clone(); + let txid = commits[0].txid; let tx = btc_regtest_controller.get_raw_transaction(&txid); eprintln!("tx = {:?}", &tx); @@ -1462,11 +1455,7 @@ fn transition_removes_pox_sunset() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -1518,8 +1507,8 @@ fn transition_removes_pox_sunset() { 4 * prepare_phase_len / 5, 5, 15, - (sunset_start_rc * reward_cycle_len - 1).into(), - (sunset_end_rc * reward_cycle_len).into(), + sunset_start_rc * reward_cycle_len - 1, + sunset_end_rc * reward_cycle_len, (epoch_21 as u32) + 1, u32::MAX, u32::MAX, @@ -1573,11 +1562,8 @@ fn transition_removes_pox_sunset() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); let tx = make_contract_call( @@ -1617,11 +1603,8 @@ fn transition_removes_pox_sunset() { // pox must activate let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!("pox_info in pox-1 = {:?}", &pox_info); - 
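The segwit-stacking loop above is rewritten from `for i in 4..7 { let spender_sk = spender_sks[i]; ... }` to iterator adapters, clippy's preferred fix for `needless_range_loop`: no manual indexing, and the bounds are visible in `take`/`skip`. A sketch:

```rust
fn main() {
    let keys = ["k0", "k1", "k2", "k3", "k4", "k5", "k6"];

    // Equivalent to `for i in 4..7`, without indexing into the slice.
    for (i, key) in keys.iter().enumerate().take(7).skip(4) {
        println!("slot {i} uses {key}");
    }
}
```

Note that the order matters: `take(7)` caps the enumerated index below 7 before `skip(4)` discards the first four, yielding exactly indices 4 through 6.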
assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); // advance to 2.1 while sort_height <= epoch_21 + 1 { @@ -1636,11 +1619,8 @@ fn transition_removes_pox_sunset() { // though the v1 block height has passed, the pox-2 contract won't be managing reward sets // until the next reward cycle eprintln!("pox_info in pox-2 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); // re-stack let tx = make_contract_call( @@ -1677,7 +1657,7 @@ fn transition_removes_pox_sunset() { ); let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); // get pox back online while sort_height <= epoch_21 + reward_cycle_len { @@ -1688,13 +1668,10 @@ fn transition_removes_pox_sunset() { let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!("pox_info = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); // first full reward cycle with pox-2 - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); let burn_blocks = test_observer::get_burn_blocks(); let mut pox_out_opt = None; @@ -1719,9 +1696,9 @@ fn transition_removes_pox_sunset() { if (i as u64) < (sunset_start_rc * reward_cycle_len) { // before sunset - if recipients.len() >= 1 { + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { - pox_out_opt = if let Some(pox_out) = pox_out_opt.clone() { + pox_out_opt = if let Some(pox_out) = pox_out_opt { Some(std::cmp::max(amt, pox_out)) } else { Some(amt) @@ -1730,16 +1707,16 @@ fn transition_removes_pox_sunset() { } } else if (i as u64) >= (sunset_start_rc * reward_cycle_len) && (i as u64) + 1 < epoch_21 { // some sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { assert!(amt < pox_out); } } } else if (i as u64) + 1 >= epoch_21 { // no sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { // NOTE: odd number of reward cycles if !burnchain_config.is_in_prepare_phase((i + 2) as u64) { @@ -1875,7 +1852,7 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) @@ -1987,7 +1964,7 @@ pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: let mut stacks_tip_bhh = None; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.stacks_tip_height < max_stacks_tip { @@ -2057,15 +2034,9 @@ fn test_pox_reorgs_three_flaps() { epochs[3].start_height = 151; conf_template.burnchain.epochs = 
Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2112,7 +2083,7 @@ fn test_pox_reorgs_three_flaps() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2134,12 +2105,11 @@ fn test_pox_reorgs_three_flaps() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -2151,8 +2121,8 @@ fn test_pox_reorgs_three_flaps() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2187,10 +2157,10 @@ fn test_pox_reorgs_three_flaps() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2215,8 +2185,8 @@ fn test_pox_reorgs_three_flaps() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
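The multi-miner test setup replaces index loops such as `for i in 1..num_miners { confs[i]... }` with `confs.iter_mut().skip(1)`. Reading `confs[0]` inside such a loop would conflict with the outstanding mutable borrow, which is why the seed node's `chain_id`, `peer_version`, and `p2p_bind` are hoisted out first. A sketch of the shape (the `Conf` struct is a stand-in):

```rust
#[derive(Debug)]
struct Conf {
    chain_id: u32,
    bootstrap: Option<String>,
}

fn main() {
    let mut confs = vec![
        Conf { chain_id: 1, bootstrap: None },
        Conf { chain_id: 1, bootstrap: None },
        Conf { chain_id: 1, bootstrap: None },
    ];

    // Hoist loop-invariant reads of confs[0] before borrowing mutably.
    let chain_id = confs[0].chain_id;

    // `iter_mut().skip(1)`: every config after the seed node, no indexing.
    for conf in confs.iter_mut().skip(1) {
        conf.bootstrap = Some(format!("seed@{chain_id}"));
    }

    assert!(confs[0].bootstrap.is_none());
    assert!(confs[1..].iter().all(|c| c.bootstrap.is_some()));
    println!("{confs:?}");
}
```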
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2238,10 +2208,10 @@ fn test_pox_reorgs_three_flaps() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -2250,11 +2220,7 @@ fn test_pox_reorgs_three_flaps() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2264,19 +2230,14 @@ fn test_pox_reorgs_three_flaps() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2311,11 +2272,9 @@ fn test_pox_reorgs_three_flaps() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2325,7 +2284,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -2344,7 +2303,7 @@ fn test_pox_reorgs_three_flaps() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -2353,7 +2312,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); @@ -2374,7 +2333,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2386,7 +2345,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -2399,7 +2358,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); 
info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2411,7 +2370,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -2428,7 +2387,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2440,7 +2399,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history continues to overtake miner 0's. @@ -2457,7 +2416,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2469,7 +2428,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 0 may have won here, but its affirmation map isn't yet the heaviest. @@ -2484,7 +2443,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2496,7 +2455,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 0's affirmation map now becomes the heaviest. @@ -2511,7 +2470,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2524,7 +2483,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 0's affirmation map is now the heaviest, and there's no longer a tie. 
@@ -2538,7 +2497,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2554,7 +2513,7 @@ fn test_pox_reorgs_three_flaps() { // nodes now agree on affirmation maps for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -2599,15 +2558,9 @@ fn test_pox_reorg_one_flap() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2654,7 +2607,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2674,12 +2627,11 @@ fn test_pox_reorg_one_flap() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -2691,8 +2643,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2727,10 +2679,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2755,8 +2707,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2778,10 +2730,10 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); if tip_info.stacks_tip_height > 0 { @@ -2790,11 +2742,7 @@ fn test_pox_reorg_one_flap() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2804,19 +2752,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2851,11 +2794,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2865,7 +2806,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -2884,7 +2825,7 @@ fn test_pox_reorg_one_flap() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -2893,7 +2834,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -2912,7 +2853,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2924,7 +2865,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -2937,7 +2878,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2950,7 
+2891,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -2966,7 +2907,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2982,7 +2923,7 @@ fn test_pox_reorg_one_flap() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -3025,15 +2966,9 @@ fn test_pox_reorg_flap_duel() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3080,7 +3015,7 @@ fn test_pox_reorg_flap_duel() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3102,12 +3037,12 @@ fn test_pox_reorg_flap_duel() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); - confs[i].node.set_bootstrap_nodes( + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -3119,8 +3054,8 @@ fn test_pox_reorg_flap_duel() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3155,10 +3090,10 @@ fn test_pox_reorg_flap_duel() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3183,8 +3118,8 @@ fn test_pox_reorg_flap_duel() { let http_origin = 
format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3206,10 +3141,10 @@ fn test_pox_reorg_flap_duel() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -3218,11 +3153,7 @@ fn test_pox_reorg_flap_duel() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3232,19 +3163,14 @@ fn test_pox_reorg_flap_duel() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3279,11 +3205,9 @@ fn test_pox_reorg_flap_duel() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3293,7 +3217,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -3312,7 +3236,7 @@ fn test_pox_reorg_flap_duel() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -3321,7 +3245,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); @@ -3349,7 +3273,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3362,7 +3286,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -3375,7 +3299,7 @@ fn test_pox_reorg_flap_duel() { 
sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -3387,7 +3311,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -3404,7 +3328,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -3423,7 +3347,7 @@ fn test_pox_reorg_flap_duel() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -3465,15 +3389,9 @@ fn test_pox_reorg_flap_reward_cycles() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3520,7 +3438,7 @@ fn test_pox_reorg_flap_reward_cycles() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3540,12 +3458,11 @@ fn test_pox_reorg_flap_reward_cycles() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -3557,8 +3474,8 @@ fn test_pox_reorg_flap_reward_cycles() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in confs.iter() { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3593,10 +3510,10 @@ fn test_pox_reorg_flap_reward_cycles() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + 
.set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3621,8 +3538,8 @@ fn test_pox_reorg_flap_reward_cycles() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3644,10 +3561,10 @@ fn test_pox_reorg_flap_reward_cycles() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -3656,11 +3573,7 @@ fn test_pox_reorg_flap_reward_cycles() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3670,19 +3583,14 @@ fn test_pox_reorg_flap_reward_cycles() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3717,11 +3625,9 @@ fn test_pox_reorg_flap_reward_cycles() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3731,7 +3637,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -3750,7 +3656,7 @@ fn test_pox_reorg_flap_reward_cycles() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -3759,7 +3665,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -3785,7 +3691,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3794,7 +3700,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle 
##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -3808,7 +3714,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } } @@ -3816,7 +3722,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -3833,7 +3739,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -3852,7 +3758,7 @@ fn test_pox_reorg_flap_reward_cycles() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -3897,15 +3803,9 @@ fn test_pox_missing_five_anchor_blocks() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3952,7 +3852,7 @@ fn test_pox_missing_five_anchor_blocks() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3972,12 +3872,11 @@ fn test_pox_missing_five_anchor_blocks() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -3989,8 +3888,8 @@ fn test_pox_missing_five_anchor_blocks() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4025,10 +3924,10 @@ fn 
test_pox_missing_five_anchor_blocks() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4053,8 +3952,8 @@ fn test_pox_missing_five_anchor_blocks() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4076,10 +3975,10 @@ fn test_pox_missing_five_anchor_blocks() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -4088,11 +3987,7 @@ fn test_pox_missing_five_anchor_blocks() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4102,19 +3997,14 @@ fn test_pox_missing_five_anchor_blocks() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4149,11 +4039,9 @@ fn test_pox_missing_five_anchor_blocks() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4163,7 +4051,7 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -4182,7 +4070,7 @@ fn test_pox_missing_five_anchor_blocks() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -4191,7 +4079,7 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -4212,7 +4100,7 @@ fn test_pox_missing_five_anchor_blocks() 
{ sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4223,7 +4111,7 @@ fn test_pox_missing_five_anchor_blocks() { signal_mining_ready(miner_status[1].clone()); info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -4236,7 +4124,7 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4253,7 +4141,7 @@ fn test_pox_missing_five_anchor_blocks() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -4297,15 +4185,9 @@ fn test_sortition_divergence_pre_21() { epochs[3].start_height = 241; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -4352,7 +4234,7 @@ fn test_sortition_divergence_pre_21() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -4376,12 +4258,11 @@ fn test_sortition_divergence_pre_21() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -4393,8 +4274,8 @@ fn test_sortition_divergence_pre_21() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4429,10 +4310,10 @@ fn test_sortition_divergence_pre_21() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - 
.set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4457,8 +4338,8 @@ fn test_sortition_divergence_pre_21() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4480,10 +4361,10 @@ fn test_sortition_divergence_pre_21() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -4492,11 +4373,7 @@ fn test_sortition_divergence_pre_21() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4506,19 +4383,14 @@ fn test_sortition_divergence_pre_21() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4553,11 +4425,9 @@ fn test_sortition_divergence_pre_21() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4567,7 +4437,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -4586,7 +4456,7 @@ fn test_sortition_divergence_pre_21() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -4595,7 +4465,7 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -4616,7 +4486,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4645,7 +4515,7 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle 
##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -4657,14 +4527,14 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } } info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -4678,7 +4548,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } } @@ -4690,7 +4560,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4706,7 +4576,7 @@ fn test_sortition_divergence_pre_21() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -4722,7 +4592,7 @@ fn trait_invocation_cross_epoch() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let trait_contract = "(define-trait simple-method ((foo (uint) (response uint uint)) ))"; let impl_contract = @@ -4907,7 +4777,7 @@ fn trait_invocation_cross_epoch() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } - let interesting_txids = vec![ + let interesting_txids = [ invoke_txid.clone(), invoke_1_txid.clone(), invoke_2_txid.clone(), @@ -4988,21 +4858,13 @@ fn test_v1_unlock_height_with_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5200,12 +5062,10 @@ fn test_v1_unlock_height_with_current_stackers() { assert_eq!(addr_tuple, pox_addr_tuple_1); } } - } else { - if !burnchain_config.is_in_prepare_phase(height) { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - assert_eq!(addr_tuple, pox_addr_tuple_2); - } + } else if !burnchain_config.is_in_prepare_phase(height) { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + assert_eq!(addr_tuple, pox_addr_tuple_2); } } } @@ -5251,21 +5111,13 @@ fn 
test_v1_unlock_height_with_delay_and_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5475,7 +5327,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { if !burnchain_config.is_in_prepare_phase(height) { let mut have_expected_payout = false; if height < epoch_2_1 + (reward_cycle_len as u64) { - if pox_addrs.len() > 0 { + if !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); for addr_tuple in pox_addrs { // can either pay to pox tuple 1, or burn @@ -5485,15 +5337,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { } } } - } else { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - // can either pay to pox tuple 2, or burn - assert_ne!(addr_tuple, pox_addr_tuple_1); - if addr_tuple == pox_addr_tuple_2 { - have_expected_payout = true; - } + } else if !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + // can either pay to pox tuple 2, or burn + assert_ne!(addr_tuple, pox_addr_tuple_1); + if addr_tuple == pox_addr_tuple_2 { + have_expected_payout = true; } } } diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 9bffca7c8a6..774a83f712e 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -58,7 +58,7 @@ fn disable_pox() { let epoch_2_2 = 255; // two blocks before next prepare phase. 
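The `} else if !pox_addrs.is_empty() {` rewrites just above collapse an `else { if ... }` stair into a single `else if` and swap `pox_addrs.len() > 0` for `!pox_addrs.is_empty()`; both changes are behavior-preserving. A small sketch of the flattened control flow, with stand-in types for the Clarity tuples the tests actually compare:

```rust
// `pox_addrs` stands in for the list of PoX address tuples returned by
// `get-burn-block-info?`; `expected` is the payout address being counted.
fn expected_payouts(in_prepare_phase: bool, pox_addrs: &[u32], expected: u32) -> usize {
    if in_prepare_phase {
        // Prepare-phase blocks pay no PoX rewards in these tests.
        0
    } else if !pox_addrs.is_empty() {
        // `!is_empty()` replaces `len() > 0`; `else if` replaces the
        // nested `else { if ... }` from the pre-patch code.
        pox_addrs.iter().filter(|&&addr| addr == expected).count()
    } else {
        0
    }
}

fn main() {
    assert_eq!(expected_payouts(false, &[7, 7], 7), 2);
    assert_eq!(expected_payouts(false, &[], 7), 0);
    assert_eq!(expected_payouts(true, &[7, 7], 7), 0);
}
```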
let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -92,31 +92,19 @@ fn disable_pox() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -397,9 +385,9 @@ fn disable_pox() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -420,37 +408,35 @@ fn disable_pox() { .unwrap(); debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -522,7 +508,7 @@ fn disable_pox() { for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - 
expected_slots[&reward_cycle][&pox_addr], + expected_slots[&reward_cycle][pox_addr], "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", &pox_addr, reward_cycle, @@ -544,8 +530,7 @@ fn disable_pox() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr - && parsed.auth.get_origin_nonce() == aborted_increase_nonce + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == aborted_increase_nonce { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, @@ -626,31 +611,19 @@ fn pox_2_unlock_all() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -892,7 +865,7 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2 - 1, so `nonce_of_2_1_unlock_ht_call` // will be included in that bitcoin block. // this will build the last block before 2.2 activates - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let tx = make_contract_call( &spender_sk, @@ -913,19 +886,19 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2, so `nonce_of_2_2_unlock_ht_call` // will be included in that bitcoin block. 
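Two smaller fixes ride along above: the `increase_by` literal is regrouped from `1_000_0000` to `10_000_000` (the underscores are cosmetic, so the value is unchanged), and the reward-cycle map bookkeeping replaces a `contains_key` check followed by `insert` with a single `entry(...).or_insert_with(HashMap::new)` call, one hash lookup instead of two. A minimal sketch of the entry-API pattern, with simple stand-in key and value types for the reward cycles and per-address slot counts:

```rust
use std::collections::HashMap;

fn main() {
    // reward cycle -> (PoX address -> slot count); types are illustrative.
    let mut reward_cycle_pox_addrs: HashMap<u64, HashMap<String, u64>> = HashMap::new();

    for (reward_cycle, pox_addr) in [(1u64, "addr-a"), (1, "addr-a"), (2, "addr-b")] {
        // Before: if !map.contains_key(&k) { map.insert(k, HashMap::new()); }
        // After: one lookup through the entry API, as in the patch.
        let cycle_counts = reward_cycle_pox_addrs
            .entry(reward_cycle)
            .or_insert_with(HashMap::new);

        // The inner `if let Some(count) ... else insert(..., 1)` kept by the
        // patch could be collapsed the same way, shown here for contrast:
        *cycle_counts.entry(pox_addr.to_string()).or_insert(0) += 1;
    }

    assert_eq!(reward_cycle_pox_addrs[&1]["addr-a"], 2);
    assert_eq!(reward_cycle_pox_addrs[&2]["addr-b"], 1);
}
```

The patch also drops symmetric reference comparisons such as `&tx_sender == &spender_addr` in favor of `tx_sender == spender_addr`, which clippy flags as `op_ref`; the two are equivalent because `PartialEq` is implemented on the owned types.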
// this block activates 2.2 - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this *burn block* is when the unlock occurs - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // and this will mine the first block whose parent is the unlock block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance as u64, @@ -943,7 +916,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance as u64, - spender_2_initial_balance - stacked - (1 * tx_fee), + spender_2_initial_balance - stacked - tx_fee, "Spender 2 should still be locked" ); assert_eq!( @@ -957,13 +930,13 @@ fn pox_2_unlock_all() { // and this will mice the bitcoin block containing the first block whose parent has >= unlock burn block // (which is the criterion for the unlock) - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance, @@ -978,7 +951,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -1001,16 +974,16 @@ fn pox_2_unlock_all() { submit_tx(&http_origin, &tx); // this wakes up the node to mine the transaction - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this block selects the previously mined block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); let spender_3_account = get_account(&http_origin, &spender_3_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_3_account.balance, 1_000_000, @@ -1038,7 +1011,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -1080,9 +1053,9 @@ fn 
pox_2_unlock_all() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1103,37 +1076,35 @@ fn pox_2_unlock_all() { .unwrap(); debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1191,7 +1162,7 @@ fn pox_2_unlock_all() { for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], + expected_slots[&reward_cycle][pox_addr], "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", &pox_addr, reward_cycle, @@ -1215,7 +1186,7 @@ fn pox_2_unlock_all() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_2_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1231,7 +1202,7 @@ fn pox_2_unlock_all() { assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); unlock_ht_22_tested = true; } - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_1_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1303,15 +1274,9 @@ fn test_pox_reorg_one_flap() { epochs.truncate(5); conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -1358,7 +1323,7 
@@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -1379,12 +1344,11 @@ fn test_pox_reorg_one_flap() { let node_privkey_1 = StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -1396,8 +1360,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -1432,10 +1396,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -1460,8 +1424,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -1483,10 +1447,10 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); if tip_info.stacks_tip_height > 0 { @@ -1495,11 +1459,7 @@ fn test_pox_reorg_one_flap() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -1509,19 +1469,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -1533,7 +1488,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1556,11 +1511,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -1570,7 +1523,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -1589,7 +1542,7 @@ fn test_pox_reorg_one_flap() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -1598,7 +1551,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -1617,7 +1570,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -1629,7 +1582,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, 
&tip_info); } info!("####################### end of cycle ##############################"); @@ -1642,7 +1595,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -1655,7 +1608,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -1671,7 +1624,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -1687,7 +1640,7 @@ fn test_pox_reorg_one_flap() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 2355f7521d8..a0cbbfe876c 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -52,7 +52,7 @@ fn trait_invocation_behavior() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let impl_contract_id = - QualifiedContractIdentifier::new(contract_addr.clone().into(), "impl-simple".into()); + QualifiedContractIdentifier::new(contract_addr.into(), "impl-simple".into()); let mut spender_nonce = 0; let fee_amount = 10_000; @@ -526,7 +526,7 @@ fn trait_invocation_behavior() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr { + if tx_sender == spender_addr { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, // only interested in contract calls @@ -583,29 +583,27 @@ fn trait_invocation_behavior() { assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); } - for tx_nonce in [expected_good_23_3_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-1" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_3_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - for tx_nonce in [expected_good_23_4_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-2" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_4_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + 
assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); for tx_nonce in [expected_bad_22_1_nonce, expected_bad_22_3_nonce] { assert_eq!( diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 26ad007ca79..e39255678d0 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -55,9 +55,9 @@ pub fn get_reward_set_entries_at_block( ) -> Result, Error> { state .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) - .and_then(|mut addrs| { + .map(|mut addrs| { addrs.sort_by_key(|k| k.reward_address.bytes()); - Ok(addrs) + addrs }) } @@ -86,7 +86,7 @@ fn fix_to_pox_contract() { let pox_3_activation_height = epoch_2_4; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -110,31 +110,19 @@ fn fix_to_pox_contract() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -341,13 +329,13 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.2 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -367,7 +355,7 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.3 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.3 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to 2 blocks before epoch 2.4 @@ -411,7 +399,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -431,7 +419,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -458,7 +446,7 @@ fn fix_to_pox_contract() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 
stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -492,9 +480,9 @@ fn fix_to_pox_contract() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -502,7 +490,7 @@ fn fix_to_pox_contract() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -514,38 +502,36 @@ fn fix_to_pox_contract() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -624,14 +610,12 @@ fn fix_to_pox_contract() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -651,7 +635,7 @@ fn fix_to_pox_contract() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 || parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) { @@ -738,21 +722,13 @@ fn verify_auto_unlock_behavior() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_2_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -765,11 +741,7 @@ fn verify_auto_unlock_behavior() { "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let pox_pubkey_3_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -879,14 +851,14 @@ fn verify_auto_unlock_behavior() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -908,7 +880,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -937,7 +909,7 @@ fn verify_auto_unlock_behavior() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -958,7 +930,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -979,7 +951,7 @@ fn verify_auto_unlock_behavior() { // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1044,7 +1016,7 @@ fn verify_auto_unlock_behavior() { ], ); 
- info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -1064,7 +1036,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -1113,7 +1085,7 @@ fn verify_auto_unlock_behavior() { .unwrap(); assert_eq!(reward_set_entries.len(), 2); - info!("reward set entries: {:?}", reward_set_entries); + info!("reward set entries: {reward_set_entries:?}"); assert_eq!( reward_set_entries[0].reward_address.bytes(), pox_pubkey_2_stx_addr.bytes.0.to_vec() @@ -1141,7 +1113,7 @@ fn verify_auto_unlock_behavior() { &[Value::UInt(first_stacked_incr.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -1213,9 +1185,9 @@ fn verify_auto_unlock_behavior() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1223,7 +1195,7 @@ fn verify_auto_unlock_behavior() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1235,37 +1207,35 @@ fn verify_auto_unlock_behavior() { .expect_list() .unwrap(); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1340,14 +1310,12 @@ fn verify_auto_unlock_behavior() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX 
addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 6af1bee626d..4a3e2a4095a 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -79,8 +79,8 @@ fn microblocks_disabled() { conf.node.wait_time_for_blocks = 2_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); test_observer::register_any(&mut conf); @@ -111,8 +111,8 @@ fn microblocks_disabled() { 4 * prepare_phase_len / 5, 5, 15, - u64::max_value() - 2, - u64::max_value() - 1, + u64::MAX - 2, + u64::MAX - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 236d76b0002..5a8de4d3bd1 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -1,3 +1,4 @@ +use std::cmp::Ordering; use std::collections::HashMap; use std::fmt::Write; use std::sync::Mutex; @@ -43,7 +44,7 @@ use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; -const OTHER_CONTRACT: &'static str = " +const OTHER_CONTRACT: &str = " (define-data-var x uint u0) (define-public (f1) (ok (var-get x))) @@ -51,14 +52,14 @@ const OTHER_CONTRACT: &'static str = " (ok (var-set x val))) "; -const CALL_READ_CONTRACT: &'static str = " +const CALL_READ_CONTRACT: &str = " (define-public (public-no-write) (ok (contract-call? .other f1))) (define-public (public-write) (ok (contract-call? 
.other f2 u5))) "; -const GET_INFO_CONTRACT: &'static str = " +const GET_INFO_CONTRACT: &str = " (define-map block-data { height: uint } { stacks-hash: (buff 32), @@ -143,7 +144,7 @@ const GET_INFO_CONTRACT: &'static str = " (fn-2 (uint) (response uint uint)))) "; -const IMPL_TRAIT_CONTRACT: &'static str = " +const IMPL_TRAIT_CONTRACT: &str = " ;; explicit trait compliance for trait 1 (impl-trait .get-info.trait-1) (define-private (test-height) burn-block-height) @@ -193,7 +194,7 @@ fn integration_test_get_info() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } run_loop @@ -279,10 +280,10 @@ fn integration_test_get_info() { let old_tip = StacksBlockId::new(&consensus_hash, &header_hash); use std::fs; use std::io::Write; - if fs::metadata(&tmppath).is_ok() { - fs::remove_file(&tmppath).unwrap(); + if fs::metadata(tmppath).is_ok() { + fs::remove_file(tmppath).unwrap(); } - let mut f = fs::File::create(&tmppath).unwrap(); + let mut f = fs::File::create(tmppath).unwrap(); f.write_all(&old_tip.serialize_to_vec()).unwrap(); } else if round == 2 { // block-height = 3 @@ -311,7 +312,7 @@ fn integration_test_get_info() { // block-height > 3 let tx = make_contract_call( &principal_sk, - (round - 3).into(), + round - 3, 10, CHAIN_ID_TESTNET, &to_addr(&contract_sk), @@ -337,7 +338,7 @@ fn integration_test_get_info() { if round >= 1 { let tx_xfer = make_stacks_transfer( &spender_sk, - (round - 1).into(), + round - 1, 10, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -356,16 +357,14 @@ fn integration_test_get_info() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state(|round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_addr = to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()); let contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "get-info")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.get-info")).unwrap(); let impl_trait_contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "impl-trait-contract")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.impl-trait-contract")).unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() @@ -374,7 +373,7 @@ fn integration_test_get_info() { match round { 1 => { // - Chain length should be 2. 
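
Editor's note: the `&'static str` to `&str` changes on `OTHER_CONTRACT`, `CALL_READ_CONTRACT`, `GET_INFO_CONTRACT`, and `IMPL_TRAIT_CONTRACT` rely on the fact that the elided lifetime in a `const` (or `static`) item is already `'static`, so spelling it out is pure noise; clippy reports the explicit form as `redundant_static_lifetimes`. A minimal demonstration that the two spellings have the same type:

```
// These two items have exactly the same type; the explicit lifetime
// is what clippy::redundant_static_lifetimes flags.
const EXPLICIT: &'static str = "(define-public (foo) (ok 1))";
const ELIDED: &str = "(define-public (foo) (ok 1))";

fn takes_static(s: &'static str) -> usize {
    s.len()
}

fn main() {
    // The elided form still satisfies a 'static bound.
    assert_eq!(takes_static(ELIDED), takes_static(EXPLICIT));
}
```
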
- let blocks = StacksChainState::list_blocks(&chain_state.db()).unwrap(); + let blocks = StacksChainState::list_blocks(chain_state.db()).unwrap(); assert!(chain_tip.metadata.stacks_block_height == 2); // Block #1 should have 5 txs @@ -382,14 +381,14 @@ fn integration_test_get_info() { let parent = chain_tip.block.header.parent_block; let bhh = &chain_tip.metadata.index_block_hash(); - eprintln!("Current Block: {} Parent Block: {}", bhh, parent); + eprintln!("Current Block: {bhh} Parent Block: {parent}"); let parent_val = Value::buff_from(parent.as_bytes().to_vec()).unwrap(); // find header metadata let mut headers = vec![]; for block in blocks.iter() { let header = StacksChainState::get_anchored_block_header_info(chain_state.db(), &block.0, &block.1).unwrap().unwrap(); - eprintln!("{}/{}: {:?}", &block.0, &block.1, &header); + eprintln!("{}/{}: {header:?}", &block.0, &block.1); headers.push(header); } @@ -500,13 +499,12 @@ fn integration_test_get_info() { burn_dbconn, bhh, &contract_identifier, "(exotic-data-checks u4)")); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/map_entry/{}/{}/{}", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -514,14 +512,14 @@ fn integration_test_get_info() { let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); assert_eq!(result_data, expected_data); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(100))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -532,19 +530,18 @@ fn integration_test_get_info() { let sender_addr = to_addr(&StacksPrivateKey::from_hex(SK_3).unwrap()); // now, let's use a query string to get data without a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=0", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=0"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::>().unwrap(); - assert!(res.get("proof").is_none()); + assert!(!res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -553,19 +550,18 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // now, let's use a query string to get data _with_ a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=1", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=1"); let key: Value = TupleData::from_data(vec![("height".into(), 
Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::>().unwrap(); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -574,9 +570,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // account with a nonce entry + a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &sender_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{sender_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 99860); assert_eq!(res.nonce, 4); @@ -584,9 +579,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a nonce entry but not a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 960); assert_eq!(res.nonce, 4); @@ -594,9 +588,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a balance entry but not a nonce entry - let path = format!("{}/v2/accounts/{}", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -604,27 +597,24 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with neither! 
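
Editor's note: the proof assertions above replace `res.get("proof").is_some()` with `res.contains_key("proof")`, and the negated form with `!res.contains_key(...)`. The lookup is identical, but `contains_key` states the intent directly instead of materializing an `Option` nobody inspects. A tiny sketch, assuming the same `HashMap<String, String>` shape the test deserializes the JSON response into:

```
use std::collections::HashMap;

fn main() {
    let mut res: HashMap<String, String> = HashMap::new();
    res.insert("data".into(), "0x0c...".into());
    res.insert("proof".into(), "0x12...".into());

    // Before/after of the patch's assertions: same lookup,
    // but contains_key reads as the question being asked.
    assert!(res.get("proof").is_some());
    assert!(res.contains_key("proof"));

    res.remove("proof");
    assert!(!res.contains_key("proof"));
}
```
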
- let path = format!("{}/v2/accounts/{}.get-info", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}.get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 0); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_some()); assert!(res.balance_proof.is_some()); - let path = format!("{}/v2/accounts/{}?proof=0", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_none()); assert!(res.balance_proof.is_none()); - let path = format!("{}/v2/accounts/{}?proof=1", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=1"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -632,15 +622,15 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // let's try getting the transfer cost - let path = format!("{}/v2/fees/transfer", &http_origin); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/fees/transfer"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert!(res > 0); // let's get a contract ABI - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); let contract_analysis = mem_type_check(GET_INFO_CONTRACT, ClarityVersion::Clarity2, StacksEpochId::Epoch21).unwrap().1; @@ -652,14 +642,14 @@ fn integration_test_get_info() { // a missing one? - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // let's get a contract SRC - let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -667,8 +657,8 @@ fn integration_test_get_info() { assert!(res.marf_proof.is_some()); - let path = format!("{}/v2/contracts/source/{}/{}?proof=0", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -677,14 +667,14 @@ fn integration_test_get_info() { // a missing one? 
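
Editor's note: most of the churn in this file is one mechanical rewrite, `format!("{}/v2/accounts/{}", &http_origin, &sender_addr)` becoming `format!("{http_origin}/v2/accounts/{sender_addr}")`. Captured identifiers in format strings landed in Rust 1.58, and clippy's `uninlined_format_args` lint pushes code toward them; only bare variable names can be captured, so expressions still need positional arguments. A short sketch:

```
fn main() {
    let http_origin = "http://127.0.0.1:20443";
    let sender_addr = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96";

    // Positional and captured-identifier forms produce identical strings.
    let a = format!("{}/v2/accounts/{}", http_origin, sender_addr);
    let b = format!("{http_origin}/v2/accounts/{sender_addr}");
    assert_eq!(a, b);

    // Format specs still follow the captured name ({x:?}, {x:>8}, ...).
    println!("Test: GET {b:?}");

    // Only bare identifiers are captured; field accesses and method
    // calls remain positional.
    let pair = ("proof", true);
    println!("{} = {}", pair.0, pair.1);
}
```
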
- let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // how about a read-only function call! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -705,8 +695,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does not modify anything - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-no-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-no-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -732,8 +722,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does modify something and should fail - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -750,9 +740,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("NotReadOnly")); // let's try a call with a url-encoded string. - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", - "get-exotic-data-info%3F"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info%3F"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -774,8 +763,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // let's have a runtime error! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -793,8 +782,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("UnwrapFailure")); // let's have a runtime error! 
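
Editor's note: back in the two `epoch_24.rs` tests, the reward-cycle bookkeeping replaced the `if !map.contains_key(&k) { map.insert(k, HashMap::new()); }` dance with `map.entry(k).or_insert_with(HashMap::new)`, which does a single hash lookup and inserts only on a vacant slot (clippy's `map_entry` lint suggests exactly this form). The per-address slot counting that follows could collapse the same way with a nested entry; a sketch with hypothetical cycle/address data:

```
use std::collections::HashMap;

fn main() {
    let mut cycle_counts: HashMap<u64, HashMap<&str, u64>> = HashMap::new();

    let payouts = [(100, "addr-a"), (100, "addr-b"), (101, "addr-a"), (100, "addr-a")];

    for (cycle, addr) in payouts {
        // Before: contains_key + insert, i.e. two lookups on the miss path.
        // entry() looks up once and fills the slot only if it is vacant.
        let per_addr = cycle_counts.entry(cycle).or_insert_with(HashMap::new);

        // The patch keeps an explicit get_mut/insert branch for the inner
        // counter; the same logic is one line with a nested entry:
        *per_addr.entry(addr).or_insert(0) += 1;
    }

    assert_eq!(cycle_counts[&100]["addr-a"], 2);
    assert_eq!(cycle_counts[&101]["addr-a"], 1);
}
```
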
- let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "update-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/update-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -814,13 +803,13 @@ fn integration_test_get_info() { // let's submit a valid transaction! let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (valid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (valid)"); // tx_xfer is 180 bytes long let tx_xfer = make_stacks_transfer( &spender_sk, - round.into(), + round, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -846,17 +835,17 @@ fn integration_test_get_info() { .send() .unwrap().json::().unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // let's submit an invalid transaction! - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (invalid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (invalid)"); // tx_xfer_invalid is 180 bytes long // bad nonce - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, CHAIN_ID_TESTNET, + let tx_xfer_invalid = make_stacks_transfer(&spender_sk, round + 30, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -869,39 +858,39 @@ fn integration_test_get_info() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("txid").unwrap().as_str().unwrap(), format!("{}", tx_xfer_invalid_tx.txid())); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // testing /v2/trait// // trait does not exist - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "get-info", &contract_addr, "get-info", "dummy-trait"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/get-info/{contract_addr}/get-info/dummy-trait"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // explicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1"); let res = client.get(&path).send().unwrap().json::().unwrap(); eprintln!("Test: GET {}", path); assert!(res.is_implemented); // No trait found - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-4"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-4"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // implicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", 
&contract_addr, "get-info", "trait-2"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-2"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // invalid trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-3"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-3"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(!res.is_implemented); // test query parameters for v2/trait endpoint @@ -911,33 +900,33 @@ fn integration_test_get_info() { let tmppath = "/tmp/integration_test_get_info-old-tip"; use std::fs; use std::io::Read; - let mut f = fs::File::open(&tmppath).unwrap(); + let mut f = fs::File::open(tmppath).unwrap(); let mut buf = vec![]; f.read_to_end(&mut buf).unwrap(); let old_tip = StacksBlockId::consensus_deserialize(&mut &buf[..]).unwrap(); - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip={}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1", &old_tip); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip={old_tip}"); let res = client.get(&path).send().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert_eq!(res.text().unwrap(), "No contract analysis found or trait definition not found"); // evaluate check for explicit compliance where tip is the chain tip of the first block (contract DNE at that block), but tip is "latest" - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=latest", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip=latest"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // perform some tests of the fee rate interface - let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = - TransactionPayload::TokenTransfer(contract_addr.clone().into(), 10_000_000, TokenTransferMemo([0; 34])); + TransactionPayload::TokenTransfer(contract_addr.into(), 10_000_000, TokenTransferMemo([0; 34])); let payload_data = tx_payload.serialize_to_vec(); let payload_hex = format!("0x{}", to_hex(&payload_data)); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -948,7 +937,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be 0 -- their cost is just in their length @@ -975,11 +964,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array"); + 
assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -988,7 +977,7 @@ fn integration_test_get_info() { let payload_data = tx_payload.serialize_to_vec(); let payload_hex = to_hex(&payload_data); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -999,7 +988,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1026,11 +1015,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -1041,7 +1030,7 @@ fn integration_test_get_info() { let estimated_len = 1550; let body = json!({ "transaction_payload": payload_hex.clone(), "estimated_len": estimated_len }); - info!("POST body\n {}", body); + info!("POST body\n {body}"); let res = client.post(&path) .json(&body) @@ -1050,7 +1039,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - info!("{}", res); + info!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1094,7 +1083,7 @@ fn integration_test_get_info() { run_loop.start(num_rounds).unwrap(); } -const FAUCET_CONTRACT: &'static str = " +const FAUCET_CONTRACT: &str = " (define-public (spout) (let ((recipient tx-sender)) (print (as-contract (stx-transfer? 
u1 .faucet recipient))))) @@ -1111,7 +1100,7 @@ fn contract_stx_transfer() { conf.burnchain.commit_anchor_block_within = 5000; conf.add_initial_balance(addr_3.to_string(), 100000); conf.add_initial_balance( - to_addr(&StacksPrivateKey::from_hex(&SK_2).unwrap()).to_string(), + to_addr(&StacksPrivateKey::from_hex(SK_2).unwrap()).to_string(), 1000, ); conf.add_initial_balance(to_addr(&contract_sk).to_string(), 1000); @@ -1133,9 +1122,8 @@ fn contract_stx_transfer() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1226,7 +1214,7 @@ fn contract_stx_transfer() { .submit_raw( &mut chainstate_copy, &sortdb, - &consensus_hash, + consensus_hash, &header_hash, tx, &ExecutionCost::max_value(), @@ -1287,30 +1275,27 @@ fn contract_stx_transfer() { .unwrap_err() { MemPoolRejection::ConflictingNonceInMempool => (), - e => panic!("{:?}", e), + e => panic!("{e:?}"), }; } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); match round { 1 => { - assert!(chain_tip.metadata.stacks_block_height == 2); + assert_eq!(chain_tip.metadata.stacks_block_height, 2); // Block #1 should have 2 txs -- coinbase + transfer assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1353,19 +1338,19 @@ fn contract_stx_transfer() { ); } 2 => { - assert!(chain_tip.metadata.stacks_block_height == 3); + assert_eq!(chain_tip.metadata.stacks_block_height, 3); // Block #2 should have 2 txs -- coinbase + publish assert_eq!(chain_tip.block.txs.len(), 2); } 3 => { - assert!(chain_tip.metadata.stacks_block_height == 4); + assert_eq!(chain_tip.metadata.stacks_block_height, 4); // Block #3 should have 2 txs -- coinbase + contract-call, // the second publish _should have been rejected_ assert_eq!(chain_tip.block.txs.len(), 2); // check that 1 stx was transfered to SK_2 via the contract-call let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1408,7 +1393,7 @@ fn contract_stx_transfer() { ); } 4 => { - assert!(chain_tip.metadata.stacks_block_height == 5); + assert_eq!(chain_tip.metadata.stacks_block_height, 5); assert_eq!( chain_tip.block.txs.len() as u64, MAXIMUM_MEMPOOL_TX_CHAINING + 1, @@ -1416,7 +1401,7 @@ fn contract_stx_transfer() { ); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1491,9 +1476,8 @@ fn mine_transactions_out_of_order() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1578,16 +1562,13 @@ fn 
mine_transactions_out_of_order() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1610,7 +1591,7 @@ fn mine_transactions_out_of_order() { // check that 1000 stx _was_ transfered to the contract principal let curr_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); assert_eq!( @@ -1698,15 +1679,14 @@ fn mine_contract_twice() { run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); if round == 2 { let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that the contract published! @@ -1761,9 +1741,8 @@ fn bad_contract_tx_rollback() { let addr_2 = to_addr(&sk_2); let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1872,16 +1851,13 @@ fn bad_contract_tx_rollback() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1892,7 +1868,7 @@ fn bad_contract_tx_rollback() { assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1967,10 +1943,8 @@ fn make_expensive_contract(inner_loop: &str, other_decl: &str) -> String { for i in 0..10 { contract.push('\n'); contract.push_str(&format!( - "(define-constant list-{} (concat list-{} list-{}))", + "(define-constant list-{} (concat list-{i} list-{i}))", i + 1, - i, - i )); } @@ -2083,7 +2057,7 @@ fn block_limit_runtime_test() { let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; let spender_sks = make_keys(seed, 500); for sk in spender_sks.iter() { - conf.add_initial_balance(to_addr(&sk).to_string(), 1000); + conf.add_initial_balance(to_addr(sk).to_string(), 1000); } let num_rounds = 6; @@ -2097,9 +2071,8 @@ fn block_limit_runtime_test() { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let (consensus_hash, block_hash) = ( @@ -2107,45 +2080,15 @@ fn block_limit_runtime_test() { &tenure.parent_block.metadata.anchored_header.block_hash(), ); - if round == 1 { - let publish_tx = make_contract_publish( 
- &contract_sk, - 0, - 10, - CHAIN_ID_TESTNET, - "hello-contract", - EXPENSIVE_CONTRACT.as_str(), - ); - tenure - .mem_pool - .submit_raw( - &mut chainstate_copy, - &sortdb, - consensus_hash, - block_hash, - publish_tx, - &ExecutionCost::max_value(), - &StacksEpochId::Epoch21, - ) - .unwrap(); - } else if round > 1 { - eprintln!("Begin Round: {}", round); - let to_submit = 2 * (round - 1); - - let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; - let spender_sks = make_keys(seed, 500); - - for i in 0..to_submit { - let sk = &spender_sks[(i + round * round) as usize]; - let tx = make_contract_call( - sk, + match round.cmp(&1) { + Ordering::Equal => { + let publish_tx = make_contract_publish( + &contract_sk, 0, 10, CHAIN_ID_TESTNET, - &to_addr(&contract_sk), "hello-contract", - "do-it", - &[], + EXPENSIVE_CONTRACT.as_str(), ); tenure .mem_pool @@ -2154,24 +2097,55 @@ fn block_limit_runtime_test() { &sortdb, consensus_hash, block_hash, - tx, + publish_tx, &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ) .unwrap(); } - } - - return; + Ordering::Greater => { + eprintln!("Begin Round: {round}"); + let to_submit = 2 * (round - 1); + + let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; + let spender_sks = make_keys(seed, 500); + + for i in 0..to_submit { + let sk = &spender_sks[(i + round * round) as usize]; + let tx = make_contract_call( + sk, + 0, + 10, + CHAIN_ID_TESTNET, + &to_addr(&contract_sk), + "hello-contract", + "do-it", + &[], + ); + tenure + .mem_pool + .submit_raw( + &mut chainstate_copy, + &sortdb, + consensus_hash, + block_hash, + tx, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch21, + ) + .unwrap(); + } + } + Ordering::Less => {} + }; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); @@ -2180,7 +2154,7 @@ fn block_limit_runtime_test() { // Block #1 should have 3 txs -- coinbase + 2 contract calls... assert_eq!(block.block.txs.len(), 3); } - 3 | 4 | 5 => { + 3..=5 => { // Block >= 2 should have 4 txs -- coinbase + 3 contract calls // because the _subsequent_ transactions should never have been // included. @@ -2215,7 +2189,7 @@ fn mempool_errors() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } let mut run_loop = RunLoop::new(conf); @@ -2254,22 +2228,19 @@ fn mempool_errors() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, _block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() }; let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); let spender_addr = to_addr(&spender_sk); @@ -2277,7 +2248,7 @@ fn mempool_errors() { if round == 1 { // let's submit an invalid transaction! 
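
Editor's note: the `block_limit_runtime_test` rewrite above converts `if round == 1 { ... } else if round > 1 { ... }` into a `match` on `round.cmp(&1)`, which is why `use std::cmp::Ordering;` was added at the top of `integrations.rs`. Clippy's `comparison_chain` lint prefers this shape: one comparison instead of up to two, and the `Ordering::Less` case must be spelled out (even as a no-op) rather than silently falling through. The `3 | 4 | 5` to `3..=5` match-arm change in the same file is the same kind of tightening. A compact sketch:

```
use std::cmp::Ordering;

fn describe(round: u64) -> &'static str {
    // Before: if round == 1 { .. } else if round > 1 { .. }
    // clippy::comparison_chain prefers one cmp() call and an
    // exhaustive match, so the Less case is handled explicitly.
    match round.cmp(&1) {
        Ordering::Equal => "publish the contract",
        Ordering::Greater => "submit contract calls",
        Ordering::Less => "nothing to do yet",
    }
}

fn main() {
    assert_eq!(describe(0), "nothing to do yet");
    assert_eq!(describe(1), "publish the contract");
    assert_eq!(describe(5), "submit contract calls");
}
```
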
- eprintln!("Test: POST {} (invalid)", path); + eprintln!("Test: POST {path} (invalid)"); let tx_xfer_invalid = make_stacks_transfer( &spender_sk, 30, // bad nonce -- too much chaining @@ -2298,7 +2269,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2312,7 +2283,7 @@ fn mempool_errors() { "TooMuchChaining" ); let data = res.get("reason_data").unwrap(); - assert_eq!(data.get("is_origin").unwrap().as_bool().unwrap(), true); + assert!(data.get("is_origin").unwrap().as_bool().unwrap()); assert_eq!( data.get("principal").unwrap().as_str().unwrap(), &spender_addr.to_string() @@ -2340,7 +2311,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2374,7 +2345,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2419,7 +2390,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index b701e70a151..58a526ba307 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -31,13 +31,13 @@ use super::{ use crate::helium::RunLoop; use crate::Keychain; -const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) +const FOO_CONTRACT: &str = "(define-public (foo) (ok 1)) (define-public (bar (x uint)) (ok x))"; -const TRAIT_CONTRACT: &'static str = "(define-trait tr ((value () (response uint uint))))"; -const USE_TRAIT_CONTRACT: &'static str = "(use-trait tr-trait .trait-contract.tr) +const TRAIT_CONTRACT: &str = "(define-trait tr ((value () (response uint uint))))"; +const USE_TRAIT_CONTRACT: &str = "(use-trait tr-trait .trait-contract.tr) (define-public (baz (abc )) (ok (contract-of abc)))"; -const IMPLEMENT_TRAIT_CONTRACT: &'static str = "(define-public (value) (ok u1))"; -const BAD_TRAIT_CONTRACT: &'static str = "(define-public (foo-bar) (ok u1))"; +const IMPLEMENT_TRAIT_CONTRACT: &str = "(define-public (value) (ok u1))"; +const BAD_TRAIT_CONTRACT: &str = "(define-public (foo-bar) (ok u1))"; pub fn make_bad_stacks_transfer( sender: &StacksPrivateKey, @@ -318,17 +318,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!( - if let MemPoolRejection::FailedToValidate(ChainstateError::NetError( - NetError::VerifyingError(_), - )) = e - { - true - } else { - false - } - ); + eprintln!("Err: {e:?}"); + assert!(matches!( + e, + MemPoolRejection::FailedToValidate(ChainstateError::NetError( + NetError::VerifyingError(_) + )) + )); // mismatched network on contract-call! let bad_addr = StacksAddress::from_public_keys( @@ -337,8 +333,7 @@ fn mempool_setup_chainstate() { 1, &vec![StacksPublicKey::from_private(&other_sk)], ) - .unwrap() - .into(); + .unwrap(); let tx_bytes = make_contract_call( &contract_sk, @@ -362,11 +357,7 @@ fn mempool_setup_chainstate() { ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // mismatched network on transfer! 
let bad_addr = StacksAddress::from_public_keys( @@ -391,11 +382,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // bad fees let tx_bytes = @@ -411,12 +398,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::FeeTooLow(0, _) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _))); // bad nonce let tx_bytes = @@ -432,12 +415,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadNonces(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadNonces(_))); // not enough funds let tx_bytes = make_stacks_transfer( @@ -459,15 +438,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); // sender == recipient - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let tx_bytes = make_stacks_transfer( &contract_sk, 5, @@ -487,7 +462,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(if let MemPoolRejection::TransferRecipientIsSender(r) = e { r == contract_princ } else { @@ -517,15 +492,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // tx version must be testnet - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let payload = TransactionPayload::TokenTransfer( contract_princ.clone(), 1000, @@ -551,12 +522,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadTransactionVersion = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadTransactionVersion)); // send amount must be positive let tx_bytes = @@ -572,12 +539,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::TransferAmountMustBePositive = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::TransferAmountMustBePositive)); // not enough funds let tx_bytes = make_stacks_transfer( @@ -599,12 +562,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); let tx_bytes = make_stacks_transfer( &contract_sk, @@ -625,12 +584,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); 
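
Editor's note: the long run of rewrites through `mempool.rs` collapses `assert!(if let Pattern = e { true } else { false })` into `assert!(matches!(e, Pattern))`. The `matches!` macro expands to the same `match`, but reads as a single predicate and cannot drift out of sync with a hand-written `else { false }` arm; clippy suggests it via `match_like_matches_macro`. A sketch with a stand-in error enum (not the real `MemPoolRejection`):

```
// Stand-in with just enough shape to show the pattern; the real
// MemPoolRejection lives in the stacks mempool code.
#[allow(dead_code)]
#[derive(Debug)]
enum MemPoolRejection {
    FeeTooLow(u64, u64),
    NoSuchContract,
}

fn main() {
    let e = MemPoolRejection::FeeTooLow(0, 180);

    // Before: an if-let whose whole job is to produce a bool.
    assert!(if let MemPoolRejection::FeeTooLow(0, _) = e {
        true
    } else {
        false
    });

    // After: the same match, written as one predicate.
    assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _)));
    assert!(!matches!(e, MemPoolRejection::NoSuchContract));
}
```
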
- assert!(if let MemPoolRejection::NotEnoughFunds(100700, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(100700, 99500))); let tx_bytes = make_contract_call( &contract_sk, @@ -653,12 +608,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoSuchContract = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoSuchContract)); let tx_bytes = make_contract_call( &contract_sk, @@ -681,12 +632,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoSuchPublicFunction = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoSuchPublicFunction)); let tx_bytes = make_contract_call( &contract_sk, @@ -709,12 +656,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_))); let tx_bytes = make_contract_publish( &contract_sk, @@ -735,12 +678,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::ContractAlreadyExists(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::ContractAlreadyExists(_))); let microblock_1 = StacksMicroblockHeader { version: 0, @@ -777,13 +716,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let microblock_1 = StacksMicroblockHeader { version: 0, sequence: 0, - prev_block: block_hash.clone(), + prev_block: *block_hash, tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), signature: MessageSignature([0; 65]), }; @@ -791,7 +730,7 @@ fn mempool_setup_chainstate() { let microblock_2 = StacksMicroblockHeader { version: 0, sequence: 0, - prev_block: block_hash.clone(), + prev_block: *block_hash, tx_merkle_root: Sha512Trunc256Sum::from_data(&[1, 2, 3]), signature: MessageSignature([0; 65]), }; @@ -815,7 +754,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let mut microblock_1 = StacksMicroblockHeader { @@ -856,7 +795,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let tx_bytes = make_coinbase(&contract_sk, 5, 1000, CHAIN_ID_TESTNET); @@ -871,12 +810,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoCoinbaseViaMempool = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoCoinbaseViaMempool)); // find the correct priv-key let mut secret_key = None; @@ -936,12 +871,12 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let contract_id = QualifiedContractIdentifier::new( - 
StandardPrincipalData::from(contract_addr.clone()), - ContractName::try_from("implement-trait-contract").unwrap(), + StandardPrincipalData::from(contract_addr), + ContractName::from("implement-trait-contract"), ); let contract_principal = PrincipalData::Contract(contract_id.clone()); @@ -968,8 +903,8 @@ fn mempool_setup_chainstate() { .unwrap(); let contract_id = QualifiedContractIdentifier::new( - StandardPrincipalData::from(contract_addr.clone()), - ContractName::try_from("bad-trait-contract").unwrap(), + StandardPrincipalData::from(contract_addr), + ContractName::from("bad-trait-contract"), ); let contract_principal = PrincipalData::Contract(contract_id.clone()); @@ -994,11 +929,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_))); } }, ); diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 2c555e72321..6f02ecf1380 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -81,11 +81,11 @@ pub const STORE_CONTRACT: &str = r#"(define-map store { key: (string-ascii 32) } (ok true)))"#; // ./blockstack-cli --testnet publish 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 0 0 store /tmp/out.clar -pub const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; -pub const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; -pub const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; -pub const ADDR_4: &'static str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96"; +pub const ADDR_4: &str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96"; lazy_static! 
{ pub static ref PUBLISH_CONTRACT: Vec<u8> = make_contract_publish( @@ -133,6 +133,7 @@ pub fn insert_new_port(port: u16) -> bool { ports.insert(port) } +#[allow(clippy::too_many_arguments)] pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -215,6 +216,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( ) } +#[allow(clippy::too_many_arguments)] pub fn serialize_sign_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -401,10 +403,10 @@ pub fn set_random_binds(config: &mut Config) { let rpc_port = gen_random_port(); let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); - config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); - config.node.data_url = format!("http://{}:{}", localhost, rpc_port); - config.node.p2p_address = format!("{}:{}", localhost, p2p_port); + config.node.rpc_bind = format!("{localhost}:{rpc_port}"); + config.node.p2p_bind = format!("{localhost}:{p2p_port}"); + config.node.data_url = format!("http://{localhost}:{rpc_port}"); + config.node.p2p_address = format!("{localhost}:{p2p_port}"); } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { @@ -427,9 +429,10 @@ pub fn make_stacks_transfer( ) -> Vec<u8> { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_sponsored_stacks_transfer_on_testnet( sender: &StacksPrivateKey, payer: &StacksPrivateKey, @@ -443,7 +446,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); serialize_sign_sponsored_sig_tx_anchor_mode_version( - payload.into(), + payload, sender, payer, sender_nonce, @@ -466,7 +469,7 @@ pub fn make_stacks_transfer_mblock_only( let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); serialize_sign_standard_single_sig_tx_anchor_mode( - payload.into(), + payload, sender, nonce, tx_fee, @@ -484,14 +487,15 @@ pub fn make_poison( header_2: StacksMicroblockHeader, ) -> Vec<u8> { let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> { let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_contract_call( sender: &StacksPrivateKey, nonce: u64, @@ -506,15 +510,16 @@ pub fn make_contract_call( let function_name = ClarityName::from(function_name); let payload = TransactionContractCall { - address: contract_addr.clone(), + address: *contract_addr, contract_name, function_name, - function_args: function_args.iter().map(|x| x.clone()).collect(), + function_args: function_args.to_vec(), }; serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) }
+#[allow(clippy::too_many_arguments)] pub fn make_contract_call_mblock_only( sender: &StacksPrivateKey, nonce: u64, @@ -529,10 +534,10 @@ pub fn make_contract_call_mblock_only( let function_name = ClarityName::from(function_name); let payload = TransactionContractCall { - address: contract_addr.clone(), + address: *contract_addr, contract_name, function_name, - function_args: function_args.iter().map(|x| x.clone()).collect(), + function_args: function_args.to_vec(), }; serialize_sign_standard_single_sig_tx_anchor_mode( @@ -558,7 +563,7 @@ fn make_microblock( let mut microblock_builder = StacksMicroblockBuilder::new( block.block_hash(), - consensus_hash.clone(), + consensus_hash, chainstate, burn_dbconn, BlockBuilderSettings::max_value(), @@ -576,10 +581,9 @@ fn make_microblock( // NOTE: we intentionally do not check the block's microblock pubkey hash against the private // key, because we may need to test that microblocks get rejected due to bad signatures. - let microblock = microblock_builder + microblock_builder .mine_next_microblock_from_txs(mempool_txs, privk) - .unwrap(); - microblock + .unwrap() } /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that @@ -601,7 +605,7 @@ pub fn select_transactions_where( } } - return result; + result } /// This function will call `next_block_and_wait` until the burnchain height underlying `BitcoinRegtestController` @@ -614,20 +618,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", - get_epoch_time_secs(), - current_height + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", + get_epoch_time_secs() ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); current_height = tip_info.burn_block_height; } @@ -717,7 +720,6 @@ fn should_succeed_mining_valid_txs() { }, _ => {} }; - return }); // Use block's hook for asserting expectations @@ -743,18 +745,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) )); // Transaction #2 should be the smart contract published let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::SmartContract(..) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::SmartContract(..) + )); // 0 event should have been produced let events: Vec<StacksTransactionEvent> = chain_tip .receipts .iter() .flat_map(|a| a.events.clone()) .collect(); - assert!(events.len() == 0); + assert!(events.is_empty()); } 2 => { // Inspecting the chain at round 2.
@@ -775,18 +777,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the get-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 2 lockup events should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -807,18 +809,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the set-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 2 lockup events + 1 contract event should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -832,7 +834,7 @@ fn should_succeed_mining_valid_txs() { format!("{}", data.key.0) == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store" && data.key.1 == "print" - && format!("{}", data.value) == "\"Setting key foo\"".to_string() + && format!("{}", data.value) == "\"Setting key foo\"" } _ => false, }); @@ -848,18 +850,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the get-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 1 event should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -873,7 +875,7 @@ fn should_succeed_mining_valid_txs() { format!("{}", data.key.0) == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store" && data.key.1 == "print" - && format!("{}", data.value) == "\"Getting key foo\"".to_string() + && format!("{}", data.value) == "\"Getting key foo\"" } _ => false, }); @@ -889,19 +891,19 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..)
+ )); // Transaction #2 should be the STX transfer let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::TokenTransfer(_, _, _) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::TokenTransfer(_, _, _) + )); // 1 event should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -996,7 +998,6 @@ fn should_succeed_handling_malformed_and_valid_txs() { }, _ => {} }; - return }); // Use block's hook for asserting expectations @@ -1014,10 +1015,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 1 => { // Inspecting the chain at round 1. @@ -1030,18 +1031,18 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the smart contract published let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::SmartContract(..) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::SmartContract(..) + )); } 2 => { // Inspecting the chain at round 2. @@ -1054,10 +1055,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 3 => { // Inspecting the chain at round 3. @@ -1070,10 +1071,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 4 => { // Inspecting the chain at round 4. @@ -1086,18 +1087,18 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..)
+ )); // Transaction #2 should be the contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); } _ => {} } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b5140a06eed..3e9f2354241 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -200,9 +200,7 @@ impl TestSigningChannel { /// TODO: update to use signatures vec pub fn get_signature() -> Option<Vec<MessageSignature>> { let mut signer = TEST_SIGNING.lock().unwrap(); - let Some(sign_channels) = signer.as_mut() else { - return None; - }; + let sign_channels = signer.as_mut()?; let recv = sign_channels.recv.take().unwrap(); drop(signer); // drop signer so we don't hold the lock while receiving. let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap(); @@ -362,7 +360,7 @@ pub fn blind_signer_multinode( thread::sleep(Duration::from_secs(2)); info!("Checking for a block proposal to sign..."); last_count = cur_count; - let configs: Vec<&Config> = configs.iter().map(|x| x).collect(); + let configs: Vec<&Config> = configs.iter().collect(); match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { @@ -427,10 +425,12 @@ pub fn get_latest_block_proposal( .collect(); proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| { - if block_a.header.chain_length > block_b.header.chain_length { - return std::cmp::Ordering::Greater; - } else if block_a.header.chain_length < block_b.header.chain_length { - return std::cmp::Ordering::Less; + let res = block_a + .header + .chain_length + .cmp(&block_b.header.chain_length); + if res != std::cmp::Ordering::Equal { + return res; } // the heights are tied, tie break with the latest miner if *is_latest_a { @@ -439,7 +439,7 @@ pub fn get_latest_block_proposal( if *is_latest_b { return std::cmp::Ordering::Less; } - return std::cmp::Ordering::Equal; + std::cmp::Ordering::Equal }); for (b, _, is_latest) in proposed_blocks.iter() { @@ -542,7 +542,7 @@ pub fn read_and_sign_block_proposal( channel .send(proposed_block.header.signer_signature) .unwrap(); - return Ok(signer_sig_hash); + Ok(signer_sig_hash) } /// Return a working nakamoto-neon config and the miner's bitcoin address to fund @@ -585,12 +585,12 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress burnchain.peer_host = Some("127.0.0.1".to_string()); } - conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); + conf.burnchain.magic_bytes = MagicBytes::from([b'T', b'3'].as_ref()); conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; - conf.miner.first_attempt_time_ms = i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; // if there's just one node, then this must be true for tests to pass conf.miner.wait_for_block_download = false; @@ -709,7 +709,7 @@ pub fn next_block_and_wait_for_commits( coord_channels: &[&Arc<Mutex<CoordinatorChannels>>], commits_submitted: &[&Arc<AtomicU64>], ) -> Result<(), String> { - let commits_submitted: Vec<_> =
commits_submitted.iter().cloned().collect(); + let commits_submitted: Vec<_> = commits_submitted.to_vec(); let blocks_processed_before: Vec<_> = coord_channels .iter() .map(|x| { @@ -786,7 +786,7 @@ pub fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { let stacker_sk = Secp256k1PrivateKey::new(); let stacker_address = tests::to_addr(&stacker_sk); naka_conf.add_initial_balance( - PrincipalData::from(stacker_address.clone()).to_string(), + PrincipalData::from(stacker_address).to_string(), POX_4_DEFAULT_STACKER_BALANCE, ); stacker_sk @@ -813,17 +813,17 @@ pub fn boot_to_epoch_3( "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); let start_time = Instant::now(); loop { if start_time.elapsed() > Duration::from_secs(20) { panic!("Timed out waiting for the stacks height to increment") } - let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + let stacks_height = get_chain_info(naka_conf).stacks_tip_height; if stacks_height >= 1 { break; } @@ -840,13 +840,13 @@ pub fn boot_to_epoch_3( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -860,7 +860,7 @@ pub fn boot_to_epoch_3( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -900,9 +900,9 @@ pub fn boot_to_epoch_3( // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, reward_set_calculation, - &naka_conf, + naka_conf, ); // We need to vote on the aggregate public key if this test is self signing @@ -943,9 +943,9 @@ pub fn boot_to_epoch_3( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3.start_height - 1, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); @@ -975,17 +975,17 @@ pub fn boot_to_pre_epoch_3_boundary( "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); let start_time = Instant::now(); loop { if start_time.elapsed() > Duration::from_secs(20) { panic!("Timed out waiting for the stacks height to increment") } - let stacks_height 
= get_chain_info(&naka_conf).stacks_tip_height; + let stacks_height = get_chain_info(naka_conf).stacks_tip_height; if stacks_height >= 1 { break; } @@ -1002,13 +1002,13 @@ pub fn boot_to_pre_epoch_3_boundary( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -1022,7 +1022,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -1062,9 +1062,9 @@ pub fn boot_to_pre_epoch_3_boundary( // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, reward_set_calculation, - &naka_conf, + naka_conf, ); // We need to vote on the aggregate public key if this test is self signing @@ -1105,9 +1105,9 @@ pub fn boot_to_pre_epoch_3_boundary( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3.start_height - 2, - &naka_conf, + naka_conf, ); info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); @@ -1191,7 +1191,7 @@ pub fn is_key_set_for_cycle( is_mainnet: bool, http_origin: &str, ) -> Result { - let key = get_key_for_cycle(reward_cycle, is_mainnet, &http_origin)?; + let key = get_key_for_cycle(reward_cycle, is_mainnet, http_origin)?; Ok(key.is_some()) } @@ -1218,10 +1218,10 @@ pub fn setup_epoch_3_reward_set( let epoch_3_reward_cycle_boundary = epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // stack enough to activate pox-4 let block_height = btc_regtest_controller.get_headers_height(); @@ -1241,13 +1241,13 @@ pub fn setup_epoch_3_reward_set( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -1260,7 +1260,7 @@ pub fn setup_epoch_3_reward_set( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -1322,9 +1322,9 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, 
epoch_3_reward_set_calculation_boundary, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}."); @@ -1364,9 +1364,9 @@ pub fn boot_to_epoch_25( ); run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_25_start_height, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}."); } @@ -1391,7 +1391,7 @@ pub fn boot_to_epoch_3_reward_set( btc_regtest_controller, num_stacking_cycles, ); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); info!( "Bootstrapped to Epoch 3.0 reward set calculation height: {}", get_chain_info(naka_conf).burn_block_height @@ -1426,7 +1426,7 @@ fn simple_neon_integration() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); let sender_sk = Secp256k1PrivateKey::new(); @@ -1435,16 +1435,13 @@ fn simple_neon_integration() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1502,7 +1499,7 @@ fn simple_neon_integration() { #[cfg(feature = "monitoring_prom")] { wait_for(10, || { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1569,8 +1566,7 @@ fn simple_neon_integration() { .as_array() .unwrap() .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) }); Ok(transfer_tx_included) }) @@ -1598,17 +1594,13 @@ fn simple_neon_integration() { ); // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + }); assert!( transfer_tx_included, @@ -1626,7 +1618,7 @@ fn simple_neon_integration() { #[cfg(feature = "monitoring_prom")] { wait_for(10, || { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ 
-1676,7 +1668,7 @@ fn flash_blocks_on_epoch_3() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -1685,16 +1677,13 @@ fn flash_blocks_on_epoch_3() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1852,17 +1841,13 @@ fn flash_blocks_on_epoch_3() { ); // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + }); assert!( transfer_tx_included, @@ -1898,18 +1883,13 @@ fn flash_blocks_on_epoch_3() { // Verify that there's a gap of AT LEAST 3 blocks assert!( gap_end - gap_start + 1 >= 3, - "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {} to {}", - gap_start, - gap_end + "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {gap_start} to {gap_end}" ); // Verify that the gap includes the Epoch 3.0 activation height assert!( gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, - "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", - gap_start, - gap_end, - epoch_3_start_height + "Expected the gap ({gap_start}..={gap_end}) to include the Epoch 3.0 activation height ({epoch_3_start_height})" ); // Verify blocks before and after the gap @@ -1918,7 +1898,7 @@ fn flash_blocks_on_epoch_3() { check_nakamoto_empty_block_heuristics(); info!("Verified burn block ranges, including expected gap for flash blocks"); - info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {epoch_3_start_height}"); coord_channel .lock() @@ -1957,13 +1937,10 @@ fn mine_multiple_per_tenure_integration() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + 
naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1993,7 +1970,7 @@ fn mine_multiple_per_tenure_integration() { .spawn(move || run_loop.start(None, 0)) .unwrap(); wait_for_runloop(&blocks_processed); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -2028,7 +2005,7 @@ fn mine_multiple_per_tenure_integration() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("Mining tenure {}", tenure_ix); + debug!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -2145,22 +2122,19 @@ fn multiple_miners() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut conf_node_2 = naka_conf.clone(); let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = vec![2, 2, 2, 2]; conf_node_2.burnchain.local_mining_public_key = Some( Keychain::default(conf_node_2.node.seed.clone()) @@ -2175,7 +2149,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind), @@ -2243,7 +2217,7 @@ fn multiple_miners() { .unwrap(); wait_for_runloop(&blocks_processed); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -2286,7 +2260,7 @@ fn multiple_miners() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -2386,9 +2360,9 @@ fn correct_burn_outs() { { let epochs = naka_conf.burnchain.epochs.as_mut().unwrap(); - let epoch_24_ix = 
StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch24).unwrap(); - let epoch_25_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap(); - let epoch_30_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap(); + let epoch_24_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch24).unwrap(); + let epoch_25_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch25).unwrap(); + let epoch_30_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch30).unwrap(); epochs[epoch_24_ix].end_height = 208; epochs[epoch_25_ix].start_height = 208; epochs[epoch_25_ix].end_height = 225; @@ -2411,10 +2385,7 @@ fn correct_burn_outs() { let stacker_accounts = accounts[0..3].to_vec(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let signers = TestSigners::new(vec![sender_signer_sk]); @@ -2503,7 +2474,7 @@ fn correct_burn_outs() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&account.0).bytes, + tests::to_addr(account.0).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); @@ -2524,7 +2495,7 @@ fn correct_burn_outs() { .to_rsv(); let stacking_tx = tests::make_contract_call( - &account.0, + account.0, account.2.nonce, 1000, naka_conf.burnchain.chain_id, @@ -2586,7 +2557,7 @@ fn correct_burn_outs() { .block_height_to_reward_cycle(epoch_3.start_height) .unwrap(); - info!("first_epoch_3_cycle: {:?}", first_epoch_3_cycle); + info!("first_epoch_3_cycle: {first_epoch_3_cycle:?}"); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle).unwrap(); @@ -2732,10 +2703,7 @@ fn block_proposal_api_endpoint() { let stacker_sk = setup_stacker(&mut conf); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); // only subscribe to the block proposal events test_observer::spawn(); @@ -2760,7 +2728,7 @@ fn block_proposal_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -2814,7 +2782,7 @@ fn block_proposal_api_endpoint() { .unwrap() .unwrap(); - let privk = conf.miner.mining_key.unwrap().clone(); + let privk = conf.miner.mining_key.unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()) .expect("Failed to get sortition tip"); let db_handle = sortdb.index_handle(&sort_tip); @@ -2910,41 +2878,41 @@ fn block_proposal_api_endpoint() { ("Must wait", sign(&proposal), HTTP_TOO_MANY, None), ( "Non-canonical or absent tenure", - (|| { + { let mut sp = sign(&proposal); sp.block.header.consensus_hash.0[3] ^= 0x07; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::NonCanonicalTenure)), ), ( "Corrupted (bit flipped after signing)", - (|| { + { let mut sp = 
sign(&proposal); sp.block.header.timestamp ^= 0x07; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), ( "Invalid `chain_id`", - (|| { + { let mut p = proposal.clone(); p.chain_id ^= 0xFFFFFFFF; sign(&p) - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::InvalidBlock)), ), ( "Invalid `miner_signature`", - (|| { + { let mut sp = sign(&proposal); sp.block.header.miner_signature.0[1] ^= 0x80; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), @@ -3042,10 +3010,7 @@ fn block_proposal_api_endpoint() { .iter() .zip(proposal_responses.iter()) { - info!( - "Received response {:?}, expecting {:?}", - &response, &expected_response - ); + info!("Received response {response:?}, expecting {expected_response:?}"); match expected_response { Ok(_) => { assert!(matches!(response, BlockValidateResponse::Ok(_))); @@ -3093,19 +3058,16 @@ fn miner_writes_proposed_block_to_stackerdb() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); let stacker_sk = setup_stacker(&mut naka_conf); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); test_observer::spawn(); test_observer::register( @@ -3216,9 +3178,9 @@ fn vote_for_aggregate_key_burn_op() { let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); - naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + naka_conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -3277,7 +3239,7 @@ fn vote_for_aggregate_key_burn_op() { let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting pre-stx op"); let pre_stx_op = PreStxOp { - output: signer_addr.clone(), + output: signer_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3343,13 +3305,13 @@ fn vote_for_aggregate_key_burn_op() { let stacker_pk = StacksPublicKey::from_private(&stacker_sk); let signer_key: StacksPublicKeyBuffer = stacker_pk.to_bytes_compressed().as_slice().into(); - let aggregate_key = signer_key.clone(); + let aggregate_key = signer_key; let vote_for_aggregate_key_op = BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: signer_addr.clone(), + sender: signer_addr, round: 0, reward_cycle, aggregate_key, @@ -3360,7 +3322,7 @@ fn vote_for_aggregate_key_burn_op() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3393,10 +3355,10 @@ fn vote_for_aggregate_key_burn_op() { for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { - info!("Found 
a burn op: {:?}", tx); + info!("Found a burn op: {tx:?}"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if !burnchain_op.contains_key("vote_for_aggregate_key") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); + warn!("Got unexpected burnchain op: {burnchain_op:?}"); panic!("unexpected btc transaction type"); } let vote_obj = burnchain_op.get("vote_for_aggregate_key").unwrap(); @@ -3446,7 +3408,7 @@ fn follower_bootup() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -3455,13 +3417,10 @@ fn follower_bootup() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -3574,7 +3533,7 @@ fn follower_bootup() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + debug!("follower_bootup: Miner runs tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -3582,10 +3541,7 @@ fn follower_bootup() { let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_nonce = None; - debug!( - "follower_bootup: Miner mines interum blocks for tenure {}", - tenure_ix - ); + debug!("follower_bootup: Miner mines interum blocks for tenure {tenure_ix}"); // mine the interim blocks for _ in 0..inter_blocks_per_tenure { @@ -3621,8 +3577,8 @@ fn follower_bootup() { let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - debug!("follower_bootup: Miner account: {:?}", &account); - debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + debug!("follower_bootup: Miner account: {account:?}"); + debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid()); let now = get_epoch_time_secs(); while get_epoch_time_secs() < now + 10 { @@ -3773,7 +3729,7 @@ fn follower_bootup_across_multiple_cycles() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -3782,13 +3738,10 @@ fn follower_bootup_across_multiple_cycles() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - 
PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -3972,7 +3925,7 @@ fn follower_bootup_custom_chain_id() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -3981,13 +3934,10 @@ fn follower_bootup_custom_chain_id() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -4100,7 +4050,7 @@ fn follower_bootup_custom_chain_id() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + debug!("follower_bootup: Miner runs tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -4108,10 +4058,7 @@ fn follower_bootup_custom_chain_id() { let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_nonce = None; - debug!( - "follower_bootup: Miner mines interum blocks for tenure {}", - tenure_ix - ); + debug!("follower_bootup: Miner mines interum blocks for tenure {tenure_ix}"); // mine the interim blocks for _ in 0..inter_blocks_per_tenure { @@ -4147,8 +4094,8 @@ fn follower_bootup_custom_chain_id() { let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - debug!("follower_bootup: Miner account: {:?}", &account); - debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + debug!("follower_bootup: Miner account: {account:?}"); + debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid()); let now = get_epoch_time_secs(); while get_epoch_time_secs() < now + 10 { @@ -4326,23 +4273,14 @@ fn burn_ops_integration_test() { let sender_addr = tests::to_addr(&sender_sk); let mut sender_nonce = 0; - let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); + let mut signers = TestSigners::new(vec![signer_sk_1]); let stacker_sk = setup_stacker(&mut naka_conf); // Add the initial balances to the other accounts - naka_conf.add_initial_balance( - PrincipalData::from(stacker_addr_1.clone()).to_string(), - 1000000, - ); - naka_conf.add_initial_balance( - PrincipalData::from(stacker_addr_2.clone()).to_string(), - 1000000, - ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - 100_000_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_1).to_string(), 1000000); + naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_2).to_string(), 1000000); + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 100_000_000); 
test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -4395,7 +4333,7 @@ fn burn_ops_integration_test() { info!("Submitting first pre-stx op"); let pre_stx_op = PreStxOp { - output: signer_addr_1.clone(), + output: signer_addr_1, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -4426,7 +4364,7 @@ fn burn_ops_integration_test() { let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting second pre-stx op"); let pre_stx_op_2 = PreStxOp { - output: signer_addr_2.clone(), + output: signer_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -4448,7 +4386,7 @@ fn burn_ops_integration_test() { let mut miner_signer_3 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting third pre-stx op"); let pre_stx_op_3 = PreStxOp { - output: stacker_addr_1.clone(), + output: stacker_addr_1, txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, @@ -4469,7 +4407,7 @@ fn burn_ops_integration_test() { info!("Submitting fourth pre-stx op"); let mut miner_signer_4 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_4 = PreStxOp { - output: stacker_addr_2.clone(), + output: stacker_addr_2, txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, @@ -4566,10 +4504,10 @@ fn burn_ops_integration_test() { "reward_cycle" => reward_cycle, ); - let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); - let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); - let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1.clone(), false); - let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2.clone(), false); + let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); + let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); + let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1, false); + let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2, false); info!( "Before stack-stx op, signer 1 total: {}", @@ -4603,8 +4541,8 @@ fn burn_ops_integration_test() { info!("Submitting transfer STX op"); let transfer_stx_op = TransferStxOp { - sender: stacker_addr_1.clone(), - recipient: stacker_addr_2.clone(), + sender: stacker_addr_1, + recipient: stacker_addr_2, transfered_ustx: 10000, memo: vec![], txid: Txid([0u8; 32]), @@ -4626,8 +4564,8 @@ fn burn_ops_integration_test() { info!("Submitting delegate STX op"); let del_stx_op = DelegateStxOp { - sender: stacker_addr_2.clone(), - delegate_to: stacker_addr_1.clone(), + sender: stacker_addr_2, + delegate_to: stacker_addr_1, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -4654,7 +4592,7 @@ fn burn_ops_integration_test() { let min_stx = pox_info.next_cycle.min_threshold_ustx; let stack_stx_op_with_some_signer_key = StackStxOp { - sender: signer_addr_1.clone(), + sender: signer_addr_1, reward_addr: pox_addr, stacked_ustx: min_stx.into(), num_cycles: lock_period, @@ -4681,7 +4619,7 @@ fn burn_ops_integration_test() { ); let stack_stx_op_with_no_signer_key = StackStxOp { - sender: signer_addr_2.clone(), + sender: signer_addr_2, reward_addr: PoxAddress::Standard(signer_addr_2, None), stacked_ustx: 100000, num_cycles: 6, @@ -4766,7 +4704,7 @@ fn burn_ops_integration_test() { for tx in transactions.iter().rev() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); + info!("Found a burn op: {tx:?}"); 
assert!(block_has_tenure_change, "Block should have a tenure change"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if burnchain_op.contains_key("transfer_stx") { @@ -4784,15 +4722,14 @@ fn burn_ops_integration_test() { assert_eq!(recipient, stacker_addr_2.to_string()); assert_eq!(transfered_ustx, 10000); info!( - "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", - sender, recipient, transfered_ustx + "Transfer STX op: sender: {sender}, recipient: {recipient}, transfered_ustx: {transfered_ustx}" ); assert!(!transfer_stx_found, "Transfer STX op should be unique"); transfer_stx_found = true; continue; } if burnchain_op.contains_key("delegate_stx") { - info!("Got delegate STX op: {:?}", burnchain_op); + info!("Got delegate STX op: {burnchain_op:?}"); let delegate_stx_obj = burnchain_op.get("delegate_stx").unwrap(); let sender_obj = delegate_stx_obj.get("sender").unwrap(); let sender = sender_obj.get("address").unwrap().as_str().unwrap(); @@ -4811,7 +4748,7 @@ fn burn_ops_integration_test() { continue; } if !burnchain_op.contains_key("stack_stx") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); + warn!("Got unexpected burnchain op: {burnchain_op:?}"); panic!("unexpected btc transaction type"); } let stack_stx_obj = burnchain_op.get("stack_stx").unwrap(); @@ -4882,7 +4819,7 @@ fn burn_ops_integration_test() { for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -4937,17 +4874,14 @@ fn forked_tenure_is_ignored() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -5075,7 +5009,7 @@ fn forked_tenure_is_ignored() { .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -5304,13 +5238,10 @@ fn check_block_heights() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -5394,12 +5325,12 @@ fn 
check_block_heights() { vec![], ); let preheights = heights0_value.expect_tuple().unwrap(); - info!("Heights from pre-epoch 3.0: {}", preheights); + info!("Heights from pre-epoch 3.0: {preheights}"); wait_for_first_naka_block_commit(60, &commits_submitted); let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); // With the first Nakamoto block, the chain tip and the number of tenures // must be the same (before Nakamoto every block counts as a tenure) @@ -5417,7 +5348,7 @@ fn check_block_heights() { vec![], ); let heights0 = heights0_value.expect_tuple().unwrap(); - info!("Heights from epoch 3.0 start: {}", heights0); + info!("Heights from epoch 3.0 start: {heights0}"); assert_eq!( heights0.get("burn-block-height"), preheights.get("burn-block-height"), @@ -5466,7 +5397,7 @@ fn check_block_heights() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -5488,7 +5419,7 @@ fn check_block_heights() { vec![], ); let heights1 = heights1_value.expect_tuple().unwrap(); - info!("Heights from Clarity 1: {}", heights1); + info!("Heights from Clarity 1: {heights1}"); let heights3_value = call_read_only( &naka_conf, @@ -5498,7 +5429,7 @@ fn check_block_heights() { vec![], ); let heights3 = heights3_value.expect_tuple().unwrap(); - info!("Heights from Clarity 3: {}", heights3); + info!("Heights from Clarity 3: {heights3}"); let bbh1 = heights1 .get("burn-block-height") @@ -5598,7 +5529,7 @@ fn check_block_heights() { vec![], ); let heights1 = heights1_value.expect_tuple().unwrap(); - info!("Heights from Clarity 1: {}", heights1); + info!("Heights from Clarity 1: {heights1}"); let heights3_value = call_read_only( &naka_conf, @@ -5608,7 +5539,7 @@ fn check_block_heights() { vec![], ); let heights3 = heights3_value.expect_tuple().unwrap(); - info!("Heights from Clarity 3: {}", heights3); + info!("Heights from Clarity 3: {heights3}"); let bbh1 = heights1 .get("burn-block-height") @@ -5723,17 +5654,11 @@ fn nakamoto_attempt_time() { let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - 1_000_000_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1_000_000_000); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100_000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -5849,7 +5774,7 @@ fn nakamoto_attempt_time() { // mine the interim blocks for tenure_count in 0..inter_blocks_per_tenure { - debug!("nakamoto_attempt_time: begin tenure {}", tenure_count); + debug!("nakamoto_attempt_time: begin tenure {tenure_count}"); let blocks_processed_before = coord_channel .lock() @@ -5987,8 +5912,7 @@ fn nakamoto_attempt_time() { break 'submit_txs; } info!( - "nakamoto_times_ms: on account {}; sent {} txs so far (out of {})", - acct_idx, tx_count, tx_limit + "nakamoto_times_ms: on 
account {acct_idx}; sent {tx_count} txs so far (out of {tx_limit})" ); } acct_idx += 1; @@ -6056,13 +5980,10 @@ fn clarity_burn_state() { let tx_fee = 1000; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -6139,7 +6060,7 @@ fn clarity_burn_state() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); // Don't submit this tx on the first iteration, because the contract is not published yet. if tenure_ix > 0 { @@ -6200,7 +6121,7 @@ fn clarity_burn_state() { let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; - info!("Expecting burn block height to be {}", burn_block_height); + info!("Expecting burn block height to be {burn_block_height}"); // Assert that the contract call was successful test_observer::get_mined_nakamoto_blocks() @@ -6215,11 +6136,11 @@ fn clarity_burn_state() { return; } - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6241,7 +6162,7 @@ fn clarity_burn_state() { "foo", vec![&expected_height], ); - info!("Read-only result: {:?}", result); + info!("Read-only result: {result:?}"); result.expect_result_ok().expect("Read-only call failed"); // Submit a tx to trigger the next block @@ -6277,11 +6198,11 @@ fn clarity_burn_state() { .iter() .for_each(|event| match event { TransactionEvent::Success(TransactionSuccessEvent { result, .. 
}) => { - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6307,6 +6228,7 @@ fn clarity_burn_state() { #[test] #[ignore] +#[allow(clippy::drop_non_drop)] fn signer_chainstate() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -6314,7 +6236,7 @@ fn signer_chainstate() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -6324,15 +6246,12 @@ fn signer_chainstate() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -6388,7 +6307,7 @@ fn signer_chainstate() { .unwrap() .unwrap() .stacks_block_height; - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -6639,13 +6558,13 @@ fn signer_chainstate() { // Case: the block doesn't confirm the prior blocks that have been signed. 
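Nearly every logging hunk in these tests applies the same mechanical change: since Rust 2021, bare identifiers can be captured directly inside format strings (`clippy::uninlined_format_args`), including together with a format spec such as `:?`. Only plain identifiers qualify; field accesses and method calls still need positional arguments, which is why mixed forms like `format!("{http_origin}/v2/microblocks/{}", microblock.block_hash())` remain. A small illustration:

```rust
fn main() {
    let tenure_ix = 3;
    let event = ("status", 42);

    // Positional argument and inlined capture render identically.
    assert_eq!(
        format!("Mining tenure {}", tenure_ix),
        format!("Mining tenure {tenure_ix}")
    );

    // The capture composes with format specs such as Debug (`:?`).
    assert_eq!(
        format!("Unsuccessful event: {:?}", event),
        format!("Unsuccessful event: {event:?}")
    );
}
```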
let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); let last_tenure_header = &last_tenure.header; - let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_sk = naka_conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: last_tenure_header.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6693,8 +6612,8 @@ fn signer_chainstate() { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), - parent_block_id: last_tenure_header.parent_block_id.clone(), + consensus_hash: last_tenure_header.consensus_hash, + parent_block_id: last_tenure_header.parent_block_id, tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), timestamp: last_tenure_header.timestamp + 1, @@ -6751,7 +6670,7 @@ fn signer_chainstate() { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6782,9 +6701,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6812,12 +6731,12 @@ fn signer_chainstate() { // Case: the block contains a tenure change, but the parent tenure is a reorg let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); // make the sortition_view *think* that our block commit pointed at this old tenure - sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone(); + sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash; let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6848,9 +6767,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: 
reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6889,7 +6808,7 @@ fn signer_chainstate() { // every step of the return should be linked to the parent let mut prior: Option<&TenureForkingInfo> = None; for step in fork_info.iter().rev() { - if let Some(ref prior) = prior { + if let Some(prior) = prior { assert_eq!(prior.sortition_id, step.parent_sortition_id); } prior = Some(step); @@ -6928,7 +6847,7 @@ fn continue_tenure_extend() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let http_origin = naka_conf.node.data_url.clone(); @@ -6938,15 +6857,12 @@ fn continue_tenure_extend() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut transfer_nonce = 0; @@ -7005,7 +6921,7 @@ fn continue_tenure_extend() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7194,7 +7110,7 @@ fn continue_tenure_extend() { let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); - if raw_tx == &transfer_tx_hex { + if raw_tx == transfer_tx_hex { transfer_tx_included = true; continue; } @@ -7203,8 +7119,9 @@ fn continue_tenure_extend() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => match payload.cause { + + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { TenureChangeCause::Extended => { has_extend = true; tenure_extends.push(parsed); @@ -7215,9 +7132,8 @@ fn continue_tenure_extend() { } tenure_block_founds.push(parsed); } - }, - _ => {} - }; + }; + } } last_block_had_extend = has_extend; } @@ -7242,7 +7158,7 @@ fn continue_tenure_extend() { // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7280,8 +7196,8 @@ fn 
get_block_times( info!("Getting block times at block {block_height}, tenure {tenure_height}..."); let time0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7294,8 +7210,8 @@ fn get_block_times( .unwrap(); let time_now0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-last-time", vec![], @@ -7308,8 +7224,8 @@ fn get_block_times( .unwrap(); let time1_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7322,8 +7238,8 @@ fn get_block_times( .unwrap(); let time1_now_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-last-time", vec![], @@ -7336,8 +7252,8 @@ fn get_block_times( .unwrap(); let time3_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-tenure-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7350,8 +7266,8 @@ fn get_block_times( .unwrap(); let time3_block_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-block-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7364,8 +7280,8 @@ fn get_block_times( .unwrap(); let time3_now_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-last-tenure-time", vec![], @@ -7432,13 +7348,10 @@ fn check_block_times() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 12, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -7528,7 +7441,7 @@ fn check_block_times() { .unwrap() .expect_u128() .unwrap(); - info!("Time from pre-epoch 3.0: {}", time0); + info!("Time from pre-epoch 3.0: {time0}"); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; @@ -7833,13 +7746,10 @@ fn check_block_info() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let contract3_name = "test-contract-3"; @@ -7971,7 +7881,7 @@ fn check_block_info() { blind_signer(&naka_conf, &signers, proposals_submitted); let c0_block_ht_1_pre_3 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", c0_block_ht_1_pre_3); + info!("Info from pre-epoch 3.0: {c0_block_ht_1_pre_3:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8039,7 +7949,7 @@ fn check_block_info() { // one in the tenure) let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: 
{info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = @@ -8062,7 +7972,7 @@ fn check_block_info() { .unwrap(); let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let cur_stacks_block_height = info.stacks_tip_height as u128; let cur_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let cur_tenure_height: u128 = @@ -8381,7 +8291,7 @@ fn check_block_info() { run_loop_thread.join().unwrap(); } -fn get_expected_reward_for_height(blocks: &Vec, block_height: u128) -> u128 { +fn get_expected_reward_for_height(blocks: &[serde_json::Value], block_height: u128) -> u128 { // Find the target block let target_block = blocks .iter() @@ -8468,13 +8378,10 @@ fn check_block_info_rewards() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -8560,7 +8467,7 @@ fn check_block_info_rewards() { blind_signer(&naka_conf, &signers, proposals_submitted); let tuple0 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", tuple0); + info!("Info from pre-epoch 3.0: {tuple0:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8672,7 +8579,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let (chainstate, _) = StacksChainState::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, @@ -8705,7 +8612,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let blocks = test_observer::get_blocks(); @@ -8797,7 +8704,7 @@ fn mock_mining() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 3; let inter_blocks_per_tenure = 3; // setup sender + recipient for some test stx transfers @@ -8819,13 +8726,10 @@ fn mock_mining() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -9009,18 +8913,22 @@ fn mock_mining() { Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) > follower_naka_mined_blocks_before) 
}) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); wait_for(20, || { Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -9046,9 +8954,7 @@ fn mock_mining() { let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; assert!( blocks_mock_mined >= tenure_count, - "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {}. Expected = {}", - blocks_mock_mined, - tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {blocks_mock_mined}. Expected = {tenure_count}" ); // wait for follower to reach the chain tip @@ -9093,8 +8999,8 @@ fn utxo_check_on_startup_panic() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9169,8 +9075,8 @@ fn utxo_check_on_startup_recover() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9244,10 +9150,10 @@ fn v3_signer_api_endpoint() { let send_amt = 100; let send_fee = 180; conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); - conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); // only subscribe to the block proposal events @@ -9273,7 +9179,7 @@ fn v3_signer_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -9407,7 +9313,7 @@ fn skip_mining_long_tx() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.miner.nakamoto_attempt_time_ms = 5_000; @@ -9419,20 +9325,14 @@ fn skip_mining_long_tx() { let send_amt = 1000; let send_fee = 180; naka_conf.add_initial_balance( - 
PrincipalData::from(sender_1_addr.clone()).to_string(), + PrincipalData::from(sender_1_addr).to_string(), send_amt * 15 + send_fee * 15, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_2_addr.clone()).to_string(), - 10000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_2_addr).to_string(), 10000); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -9488,7 +9388,7 @@ fn skip_mining_long_tx() { wait_for_first_naka_block_commit(60, &commits_submitted); // submit a long running TX and the transfer TX - let input_list: Vec<_> = (1..100u64).into_iter().map(|x| x.to_string()).collect(); + let input_list: Vec<_> = (1..100u64).map(|x| x.to_string()).collect(); let input_list = input_list.join(" "); // Mine a few nakamoto tenures with some interim blocks in them diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d6373a3b444..1f7252ec5f8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -158,7 +158,7 @@ fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAdd .unwrap() .burnchain .magic_bytes; - assert_eq!(magic_bytes.as_bytes(), &['T' as u8, '2' as u8]); + assert_eq!(magic_bytes.as_bytes(), b"T2"); conf.burnchain.magic_bytes = magic_bytes; conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; @@ -391,7 +391,7 @@ pub mod test_observer { let new_rawtxs = txs .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let mut memtxs = MEMTXS.lock().unwrap(); for new_tx in new_rawtxs { @@ -408,7 +408,7 @@ pub mod test_observer { .unwrap() .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let reason = txs.get("reason").unwrap().as_str().unwrap().to_string(); @@ -622,8 +622,7 @@ pub mod test_observer { // Find indexes in range for which we don't have burn block in set let missing = (start..=end) - .into_iter() - .filter(|i| !burn_block_heights.contains(&i)) + .filter(|i| !burn_block_heights.contains(i)) .collect::<Vec<_>>(); if missing.is_empty() { @@ -725,20 +724,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", get_epoch_time_secs(), - current_height ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let Ok(tip_info) = get_chain_info_result(&conf) else { + let Ok(tip_info) =
get_chain_info_result(conf) else { sleep_ms(1000); continue; }; @@ -764,15 +762,12 @@ pub fn wait_for_runloop(blocks_processed: &Arc<AtomicU64>) { pub fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64) -> bool { let mut current = microblocks_processed.load(Ordering::SeqCst); let start = Instant::now(); - info!("Waiting for next microblock (current = {})", &current); + info!("Waiting for next microblock (current = {current})"); loop { let now = microblocks_processed.load(Ordering::SeqCst); if now == 0 && current != 0 { // wrapped around -- a new epoch started - info!( - "New microblock epoch started while waiting (originally {})", - current - ); + info!("New microblock epoch started while waiting (originally {current})"); current = 0; } @@ -781,24 +776,24 @@ pub fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64 } if start.elapsed() > Duration::from_secs(timeout) { - warn!("Timed out waiting for microblocks to process ({})", timeout); + warn!("Timed out waiting for microblocks to process ({timeout})"); return false; } thread::sleep(Duration::from_millis(100)); } info!("Next microblock acknowledged"); - return true; + true } /// returns Txid string upon success -pub fn submit_tx_fallible(http_origin: &str, tx: &Vec<u8>) -> Result<String, String> { +pub fn submit_tx_fallible(http_origin: &str, tx: &[u8]) -> Result<String, String> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(tx.clone()) + .body(tx.to_vec()) .send() .unwrap(); if res.status().is_success() { @@ -817,16 +812,16 @@ pub fn submit_tx_fallible(http_origin: &str, tx: &Vec<u8>) -> Result<String, Str -pub fn submit_tx(http_origin: &str, tx: &Vec<u8>) -> String { +pub fn submit_tx(http_origin: &str, tx: &[u8]) -> String { submit_tx_fallible(http_origin, tx).unwrap_or_else(|e| { - eprintln!("Submit tx error: {}", e); + eprintln!("Submit tx error: {e}"); panic!(""); }) } pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option<String> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions/unconfirmed/{}", http_origin, txid); + let path = format!("{http_origin}/v2/transactions/unconfirmed/{txid}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -840,14 +835,14 @@ pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option<String> { pub fn submit_block( http_origin: &str, consensus_hash: &ConsensusHash, - block: &Vec<u8>, + block: &[u8], ) -> StacksBlockAcceptedData { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/upload/{}", http_origin, consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{consensus_hash}"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(block.clone()) + .body(block.to_owned()) .send() .unwrap();
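The helper signature changes above (`submit_tx_fallible`, `submit_tx`, `submit_block`, and `submit_microblock` in the next hunk) are the standard `clippy::ptr_arg` fix: taking `&[u8]` instead of `&Vec<u8>` accepts strictly more callers, while existing `&Vec<u8>` call sites keep compiling via deref coercion. Because reqwest's blocking `body()` wants owned bytes, the slices are re-owned at the call site (`tx.to_vec()` / `block.to_owned()`). A sketch of the signature change in isolation:

```rust
// A slice parameter instead of `&Vec<u8>`.
fn byte_len(tx: &[u8]) -> usize {
    tx.len()
}

fn main() {
    let tx: Vec<u8> = vec![0x80, 0x00, 0x01];
    assert_eq!(byte_len(&tx), 3); // &Vec<u8> coerces to &[u8]
    assert_eq!(byte_len(&tx[..2]), 2); // sub-slices are accepted directly
    let owned = tx.to_vec(); // re-own the bytes when an API needs Vec<u8>
    assert_eq!(owned, tx);
}
```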
"application/octet-stream") - .body(mblock.clone()) + .body(mblock.to_owned()) .send() .unwrap(); @@ -888,7 +883,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> BlockHeaderHash .unwrap() .block_hash() ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -897,7 +892,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> BlockHeaderHash pub fn get_block(http_origin: &str, block_id: &StacksBlockId) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", http_origin, block_id); + let path = format!("{http_origin}/v2/blocks/{block_id}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -939,7 +934,7 @@ pub fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { // get the associated anchored block let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", &http_origin, &stacks_id_tip); + let path = format!("{http_origin}/v2/blocks/{stacks_id_tip}"); let block_bytes = client.get(&path).send().unwrap().bytes().unwrap(); let block = StacksBlock::consensus_deserialize(&mut block_bytes.as_ref()).unwrap(); @@ -972,10 +967,7 @@ pub fn call_read_only( info!("Call read only: {contract}.{function}({args:?})"); - let path = format!( - "{http_origin}/v2/contracts/call-read/{}/{}/{}", - principal, contract, function - ); + let path = format!("{http_origin}/v2/contracts/call-read/{principal}/{contract}/{function}"); let serialized_args = args .iter() @@ -1005,14 +997,13 @@ fn find_microblock_privkey( let mut keychain = Keychain::default(conf.node.seed.clone()); for ix in 0..max_tries { // the first rotation occurs at 203. - let privk = - keychain.make_microblock_secret_key(203 + ix, &((203 + ix) as u64).to_be_bytes()); + let privk = keychain.make_microblock_secret_key(203 + ix, &(203 + ix).to_be_bytes()); let pubkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(&privk)); if pubkh == *pubkey_hash { return Some(privk); } } - return None; + None } /// Returns true iff `b` is within `0.1%` of `a`. @@ -1088,7 +1079,7 @@ fn bitcoind_integration_test() { .filter(|block| block.get("burn_amount").unwrap().as_u64().unwrap() > 0) .collect(); assert!( - burn_blocks_with_burns.len() >= 1, + !burn_blocks_with_burns.is_empty(), "Burn block sortitions {} should be >= 1", burn_blocks_with_burns.len() ); @@ -1096,7 +1087,7 @@ fn bitcoind_integration_test() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1184,7 +1175,7 @@ fn confirm_unparsed_ongoing_ops() { bitcoin_regtest_controller::TEST_MAGIC_BYTES .lock() .unwrap() - .replace(['Z' as u8, 'Z' as u8]); + .replace([b'Z', b'Z']); // let's trigger another mining loop: this should create an invalid block commit. 
// let's trigger another mining loop: this should create an invalid block commit. // this bitcoin block will contain the valid commit created before (so, a second stacks block) @@ -1209,7 +1200,7 @@ fn confirm_unparsed_ongoing_ops() { // query the miner's account nonce - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1305,9 +1296,9 @@ fn most_recent_utxo_integration_test() { let smallest_utxo = smallest_utxo.unwrap(); let mut biggest_utxo = biggest_utxo.unwrap(); - eprintln!("Last-spent UTXO is {:?}", &last_utxo); - eprintln!("Smallest UTXO is {:?}", &smallest_utxo); - eprintln!("Biggest UTXO is {:?}", &biggest_utxo); + eprintln!("Last-spent UTXO is {last_utxo:?}"); + eprintln!("Smallest UTXO is {smallest_utxo:?}"); + eprintln!("Biggest UTXO is {biggest_utxo:?}"); assert_eq!(last_utxo, smallest_utxo); assert_ne!(biggest_utxo, last_utxo); @@ -1354,9 +1345,9 @@ pub fn get_account_result<F: std::fmt::Display>( account: &F, ) -> Result<Account, reqwest::Error> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/accounts/{}?proof=0", http_origin, account); + let path = format!("{http_origin}/v2/accounts/{account}?proof=0"); let res = client.get(&path).send()?.json::<AccountEntryResponse>()?; - info!("Account response: {:#?}", res); + info!("Account response: {res:#?}"); Ok(Account { balance: u128::from_str_radix(&res.balance[2..], 16).unwrap(), locked: u128::from_str_radix(&res.locked[2..], 16).unwrap(), @@ -1371,19 +1362,19 @@ pub fn get_account<F: std::fmt::Display>(http_origin: &str, account: &F) -> Acco pub fn get_neighbors(conf: &Config) -> Option<serde_json::Value> { let client = reqwest::blocking::Client::new(); let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{}/v2/neighbors", http_origin); + let path = format!("{http_origin}/v2/neighbors"); client.get(&path).send().ok()?.json().ok() } pub fn get_pox_info(http_origin: &str) -> Option<RPCPoxInfoData> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/pox", http_origin); + let path = format!("{http_origin}/v2/pox"); client.get(&path).send().ok()?.json::<RPCPoxInfoData>().ok() } fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/info", http_origin); + let path = format!("{http_origin}/v2/info"); let res = client .get(&path) .send() @@ -1404,7 +1395,7 @@ fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) { fn get_chain_tip_height(http_origin: &str) -> u64 { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/info", http_origin); + let path = format!("{http_origin}/v2/info"); let res = client .get(&path) .send() @@ -1427,10 +1418,8 @@ pub fn get_contract_src( } else { "".to_string() }; - let path = format!( - "{}/v2/contracts/source/{}/{}{}", - http_origin, contract_addr, contract_name, query_string - ); + let path = + format!("{http_origin}/v2/contracts/source/{contract_addr}/{contract_name}{query_string}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -1848,7 +1837,7 @@ fn lockup_integration() { } } } - assert_eq!(found, true); + assert!(found); // block #2 won't unlock STX next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1882,7 +1871,7 @@ fn stx_transfer_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let
_spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -1892,7 +1881,7 @@ fn stx_transfer_btc_integration_test() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -1948,7 +1937,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -1975,8 +1964,8 @@ fn stx_transfer_btc_integration_test() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -1986,7 +1975,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -2017,7 +2006,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2046,8 +2035,8 @@ fn stx_transfer_btc_integration_test() { // let's fire off our transfer op. let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -2057,7 +2046,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -2111,7 +2100,7 @@ fn stx_delegate_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); @@ -2119,11 +2108,7 @@ fn stx_delegate_btc_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2132,7 +2117,7 @@ fn stx_delegate_btc_integration_test() { amount: 100300, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: 300, }); @@ -2226,7 +2211,7 @@ fn stx_delegate_btc_integration_test() { // okay, let's send a pre-stx op. 
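The `pox_pubkey_hash` and `spender_addr` hunks above share a theme: dropping intermediate owned values that were only borrowed again. `Hash160::to_bytes()` already yields an array that can be borrowed as a slice, so the `.to_vec()` round-trip was a needless allocation, and a `Copy` address needn't be cloned before `.into()`. The shape of the first fix, with a local `to_hex` stand-in for the real `bytes_to_hex` helper:

```rust
// Hypothetical hex helper taking a slice; stands in for bytes_to_hex.
fn to_hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}

fn main() {
    let hash_bytes: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
    // Before: to_hex(&hash_bytes.to_vec()) allocated a Vec just to borrow it.
    // After: borrow the array directly as a slice -- no allocation.
    assert_eq!(to_hex(&hash_bytes), "deadbeef");
}
```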
let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2252,8 +2237,8 @@ fn stx_delegate_btc_integration_test() { // let's fire off our delegate op. let del_stx_op = DelegateStxOp { - sender: spender_stx_addr.clone(), - delegate_to: recipient_addr.clone(), + sender: spender_stx_addr, + delegate_to: recipient_addr, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -2264,7 +2249,7 @@ fn stx_delegate_btc_integration_test() { until_burn_height: None, }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -2298,7 +2283,7 @@ fn stx_delegate_btc_integration_test() { Value::Principal(spender_addr.clone()), Value::UInt(100_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -2372,7 +2357,7 @@ fn stack_stx_burn_op_test() { let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr_1: StacksAddress = to_addr(&spender_sk_1); - let spender_addr_1: PrincipalData = spender_stx_addr_1.clone().into(); + let spender_addr_1: PrincipalData = spender_stx_addr_1.into(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_stx_addr_2: StacksAddress = to_addr(&spender_sk_2); @@ -2390,7 +2375,7 @@ fn stack_stx_burn_op_test() { amount: first_bal, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: second_bal, }); @@ -2506,8 +2491,8 @@ fn stack_stx_burn_op_test() { info!("Bootstrapped to 2.5, submitting stack-stx and pre-stx op..."); - let signer_sk_1 = spender_sk_1.clone(); - let signer_sk_2 = spender_sk_2.clone(); + let signer_sk_1 = spender_sk_1; + let signer_sk_2 = spender_sk_2; let signer_pk_1 = StacksPublicKey::from_private(&signer_sk_1); let pox_addr = PoxAddress::Standard(spender_stx_addr_1, Some(AddressHashMode::SerializeP2PKH)); @@ -2540,7 +2525,7 @@ fn stack_stx_burn_op_test() { let mut miner_signer_2 = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_2 = PreStxOp { - output: spender_stx_addr_2.clone(), + output: spender_stx_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2619,13 +2604,13 @@ fn stack_stx_burn_op_test() { // `stacked_ustx` should be large enough to avoid ERR_STACKING_THRESHOLD_NOT_MET from Clarity let stack_stx_op_with_some_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_1.clone(), + sender: spender_stx_addr_1, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, signer_key: Some(signer_key), max_amount: Some(u128::MAX), - auth_id: Some(auth_id.into()), + auth_id: Some(auth_id), // to be filled in vtxindex: 0, txid: Txid([0u8; 32]), @@ -2633,7 +2618,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); + let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); assert!( btc_regtest_controller .submit_operation( @@ -2647,7 +2632,7 @@ fn stack_stx_burn_op_test() { ); let stack_stx_op_with_no_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_2.clone(), + sender: spender_stx_addr_2, reward_addr: 
pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, @@ -2661,7 +2646,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); + let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); assert!( btc_regtest_controller .submit_operation( @@ -2740,7 +2725,7 @@ fn stack_stx_burn_op_test() { for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -2775,17 +2760,13 @@ fn vote_for_aggregate_key_burn_op_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let _pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let _pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2912,7 +2893,7 @@ fn vote_for_aggregate_key_burn_op_test() { // setup stack-stx tx - let signer_sk = spender_sk.clone(); + let signer_sk = spender_sk; let signer_pk = StacksPublicKey::from_private(&signer_sk); let pox_addr = PoxAddress::Standard(spender_stx_addr, Some(AddressHashMode::SerializeP2PKH)); @@ -2959,7 +2940,7 @@ fn vote_for_aggregate_key_burn_op_test() { let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3012,7 +2993,7 @@ fn vote_for_aggregate_key_burn_op_test() { BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: spender_stx_addr.clone(), + sender: spender_stx_addr, round: 0, reward_cycle, aggregate_key, @@ -3023,7 +3004,7 @@ fn vote_for_aggregate_key_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3251,16 +3232,16 @@ fn bitcoind_forking_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); @@ -3355,17 +3336,17 @@ fn should_fix_2771() { next_block_and_wait(&mut btc_regtest_controller, 
&blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // okay, let's figure out the burn block we want to fork away. let reorg_height = 208; - warn!("Will trigger re-org at block {}", reorg_height); + warn!("Will trigger re-org at block {reorg_height}"); let burn_header_hash_to_fork = btc_regtest_controller.get_block_hash(reorg_height); btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork); btc_regtest_controller.build_next_block(1); @@ -3407,10 +3388,10 @@ fn make_signed_microblock( version: rng.gen(), sequence: seq, prev_block: parent_block, - tx_merkle_root: tx_merkle_root, + tx_merkle_root, signature: MessageSignature([0u8; 65]), }, - txs: txs, + txs, }; mblock.sign(block_privk).unwrap(); mblock @@ -3585,9 +3566,8 @@ fn microblock_fork_poison_integration_test() { make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); eprintln!( - "Created second conflicting microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second conflicting microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3598,7 +3578,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3616,7 +3596,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3737,7 +3717,7 @@ fn microblock_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -3854,9 +3834,8 @@ fn microblock_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); /* let second_microblock = @@ -3869,9 +3848,8 @@ fn microblock_integration_test() { 1, ); eprintln!( - "Created second microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3882,7 +3860,7 @@ fn microblock_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3894,7 +3872,7 @@ fn microblock_integration_test() { assert_eq!(res, format!("{}", &first_microblock.block_hash())); - eprintln!("\n\nBegin testing\nmicroblock: {:?}\n\n", 
&first_microblock); + eprintln!("\n\nBegin testing\nmicroblock: {first_microblock:?}\n\n"); let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 1); @@ -3906,7 +3884,7 @@ fn microblock_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -4090,13 +4068,11 @@ fn microblock_integration_test() { // we can query unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); - eprintln!("{:?}", &path); + eprintln!("{path:?}"); let mut iter_count = 0; let res = loop { match http_resp.json::<AccountEntryResponse>() { Ok(x) => break x, Err(e) => { - warn!("Failed to query {}; will try again. Err = {:?}", &path, e); + warn!("Failed to query {path}; will try again. Err = {e:?}"); iter_count += 1; assert!(iter_count < 10, "Retry limit reached querying account"); sleep_ms(1000); } }; }; - info!("Account Response = {:#?}", res); + info!("Account Response = {res:#?}"); assert_eq!(res.nonce, 2); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 96300); // limited by chaining for next_nonce in 2..5 { // verify that the microblock miner can automatically pick up transactions - debug!( - "Try to send unconfirmed tx from {} to {} nonce {}", - &spender_addr, &recipient, next_nonce - ); + debug!("Try to send unconfirmed tx from {spender_addr} to {recipient} nonce {next_nonce}"); let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( &spender_sk, next_nonce, 1000, @@ -4136,14 +4109,14 @@ fn microblock_integration_test() { 1000, ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(unconfirmed_tx_bytes.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( res, StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]) .unwrap() .txid() .to_string() ); - eprintln!("Sent {}", &res); + eprintln!("Sent {res}"); } else { eprintln!("{}", res.text().unwrap()); panic!(""); } @@ -4171,15 +4144,13 @@ fn microblock_integration_test() { // we can query _new_ unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); let res_text = client.get(&path).send().unwrap().text().unwrap(); - eprintln!("text of {}\n{}", &path, &res_text); + eprintln!("text of {path}\n{res_text}"); let res = client .get(&path) .send() .unwrap() .json::<AccountEntryResponse>() .unwrap(); - eprintln!("{:?}", &path); - eprintln!("{:#?}", res); + eprintln!("{path:?}"); + eprintln!("{res:#?}");
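The spender-key setups in the filter and size tests below also lose a redundant `.into_iter()`: ranges such as `(0..10)` already implement `Iterator`, so adapters like `.map()` apply directly (`clippy::useless_conversion`). For example:

```rust
fn main() {
    // A Range is itself an Iterator; `.into_iter()` on it is an identity call.
    let a: Vec<String> = (1..4).map(|x| x.to_string()).collect();
    let b: Vec<String> = (1..4).into_iter().map(|x| x.to_string()).collect();
    assert_eq!(a, b);
    assert_eq!(a, ["1", "2", "3"]);
}
```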
// advanced! assert_eq!(res.nonce, next_nonce + 1); @@ -4209,10 +4180,7 @@ fn filter_low_fee_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4232,7 +4200,7 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4242,7 +4210,7 @@ fn filter_low_fee_tx_integration_test() { } else { // high-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 2000 + (ix as u64), conf.burnchain.chain_id, @@ -4296,14 +4264,9 @@ fn filter_low_fee_tx_integration_test() { // First five accounts have a transaction. The miner will consider low fee transactions, // but rank by estimated fee rate. - for i in 0..5 { - let account = get_account(&http_origin, &spender_addrs[i]); - assert_eq!(account.nonce, 1); - } - - // last five accounts have transaction - for i in 5..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + // Last five accounts have transaction + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, spender_addr); assert_eq!(account.nonce, 1); } @@ -4317,10 +4280,7 @@ fn filter_long_runtime_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4341,7 +4301,7 @@ fn filter_long_runtime_tx_integration_test() { .map(|(ix, spender_sk)| { let recipient = StacksAddress::from_string(ADDR_4).unwrap(); make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4393,8 +4353,8 @@ fn filter_long_runtime_tx_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // no transactions mined - for i in 0..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 0); } @@ -4517,10 +4477,7 @@ fn size_check_integration_test() { giant_contract.push(' '); } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4652,13 +4609,10 @@ fn size_check_integration_test() { panic!("Spender address nonce incremented past 1"); } - debug!("Spender {},{}: {:?}", ix, &spender_addr, &res); + debug!("Spender {ix},{spender_addr}: {res:?}"); } - eprintln!( - "anchor_block_txs: {}, micro_block_txs: {}", - anchor_block_txs, micro_block_txs - ); + eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); if anchor_block_txs >= 2 && micro_block_txs >= 2 { break; @@ -4693,10 +4647,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..5).map(|_|
StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4723,7 +4674,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { i as u64, 1100000, conf.burnchain.chain_id, - &format!("small-{}", i), + &format!("small-{i}"), &small_contract, ); ret.push(tx); @@ -4849,10 +4800,10 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_per_block += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -4868,8 +4819,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}, total_big_txs_per_block: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, max_big_txs_per_block, total_big_txs_per_block, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}, total_big_txs_per_block: {total_big_txs_per_block}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert!(max_big_txs_per_block > 0); @@ -4902,10 +4852,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..20) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4930,15 +4877,14 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let txs: Vec<_> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 600000, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5049,7 +4995,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..)
= parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5061,8 +5007,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 5); @@ -5090,10 +5035,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..25) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5115,15 +5057,14 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let txs: Vec<Vec<u8>> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 1149230, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5222,7 +5163,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5234,8 +5175,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 3); @@ -5252,13 +5192,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { return; } - let spender_sks: Vec<_> = (0..4) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let spender_addrs_c32: Vec<StacksAddress> = - spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_addrs_c32: Vec<StacksAddress> = spender_sks.iter().map(to_addr).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5292,7 +5228,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5336,7 +5272,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { (begin (crash-me \"{}\")) ", - &format!("large-contract-{}-{}", &spender_addrs_c32[ix], &ix) + &format!("large-contract-{}-{ix}", &spender_addrs_c32[ix]) )] } else { @@ -5347,7 +5283,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { i as u64, 210000, conf.burnchain.chain_id, -
&format!("small-{}-{}", ix, i), + &format!("small-{ix}-{i}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5390,7 +5326,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) (begin (crash-me \"{}\")) - ", &format!("small-contract-{}-{}-{}", &spender_addrs_c32[ix], &ix, i)) + ", &format!("small-contract-{}-{ix}-{i}", &spender_addrs_c32[ix])) ); ret.push(tx); } @@ -5486,7 +5422,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { let mut total_big_txs_in_microblocks = 0; for block in blocks { - eprintln!("block {:?}", &block); + eprintln!("block {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); let mut num_big_anchored_txs = 0; @@ -5499,12 +5435,12 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - eprintln!("tx: {:?}", &parsed); + eprintln!("tx: {parsed:?}"); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_in_blocks += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_in_microblocks += 1; } @@ -5520,12 +5456,10 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } info!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}", - max_big_txs_per_microblock, max_big_txs_per_block + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}" ); info!( - "total_big_txs_in_microblocks: {}, total_big_txs_in_blocks: {}", - total_big_txs_in_microblocks, total_big_txs_in_blocks + "total_big_txs_in_microblocks: {total_big_txs_in_microblocks}, total_big_txs_in_blocks: {total_big_txs_in_blocks}" ); // at most one big tx per block and at most one big tx per stream, always. 
@@ -5605,7 +5539,7 @@ fn block_replay_integration_test() { // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -5638,7 +5572,7 @@ fn block_replay_integration_test() { tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); for i in 0..1024 { - let path = format!("{}/v2/blocks/upload/{}", &http_origin, &tip_consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); let res_text = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -5648,7 +5582,7 @@ fn block_replay_integration_test() { .text() .unwrap(); - eprintln!("{}: text of {}\n{}", i, &path, &res_text); + eprintln!("{i}: text of {path}\n{res_text}"); } test_observer::clear(); @@ -6022,11 +5956,11 @@ fn mining_events_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); conf.initial_balances.push(InitialBalance { - address: addr_2.clone().into(), + address: addr_2.into(), amount: 10000000, }); @@ -6121,7 +6055,7 @@ fn mining_events_integration_test() { // check mined microblock events let mined_microblock_events = test_observer::get_mined_microblocks(); - assert!(mined_microblock_events.len() >= 1); + assert!(!mined_microblock_events.is_empty()); // check tx events in the first microblock // 1 success: 1 contract publish, 2 error (on chain transactions) @@ -6136,15 +6070,12 @@ fn mining_events_integration_test() { execution_cost, .. }) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &620000); assert_eq!( execution_cost, @@ -6176,15 +6107,12 @@ fn mining_events_integration_test() { txid.to_string(), "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" ); - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); } _ => panic!("unexpected event type"), } @@ -6197,15 +6125,12 @@ fn mining_events_integration_test() { execution_cost, .. 
}) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &600000); assert_eq!( execution_cost, @@ -6304,7 +6229,7 @@ fn block_limit_hit_integration_test() { let (mut conf, _miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6432,8 +6357,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_third_block.len(), 3); let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_fourth_block = mined_block_events[4] .get("transactions") @@ -6443,8 +6368,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_fourth_block.len(), 3); let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6516,7 +6441,7 @@ fn microblock_limit_hit_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6656,10 +6581,7 @@ fn microblock_limit_hit_integration_test() { let txid_3 = submit_tx(&http_origin, &tx_3); let txid_4 = submit_tx(&http_origin, &tx_4); - eprintln!( - "transactions: {},{},{},{}", - &txid_1, &txid_2, &txid_3, &txid_4 - ); + eprintln!("transactions: {txid_1},{txid_2},{txid_3},{txid_4}"); sleep_ms(50_000); @@ -6702,8 +6624,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_first_mblock.len(), 2); let txid_1_exp = tx_first_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_first_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_second_mblock = mined_mblock_events[1] .get("transactions") @@ -6713,8 +6635,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_second_mblock.len(), 2); let txid_2_exp = tx_second_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_second_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6761,7 +6683,7 @@ fn block_large_tx_integration_test() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000, }); @@ -6837,10 +6759,7 @@ fn block_large_tx_integration_test() { let 
normal_txid = submit_tx(&http_origin, &tx); let huge_txid = submit_tx(&http_origin, &tx_2); - eprintln!( - "Try to mine a too-big tx. Normal = {}, TooBig = {}", - &normal_txid, &huge_txid - ); + eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); eprintln!("Finished trying to mine a too-big tx"); @@ -6848,7 +6767,7 @@ fn block_large_tx_integration_test() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6898,7 +6817,7 @@ fn microblock_large_tx_integration_test_FLAKY() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); @@ -6981,7 +6900,7 @@ fn microblock_large_tx_integration_test_FLAKY() { // Check that the microblock contains the first tx. let microblock_events = test_observer::get_microblocks(); - assert!(microblock_events.len() >= 1); + assert!(!microblock_events.is_empty()); let microblock = microblock_events[0].clone(); let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -6994,7 +6913,7 @@ fn microblock_large_tx_integration_test_FLAKY() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -7020,18 +6939,10 @@ fn pox_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::new()); - let pox_2_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_2_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); let pox_2_address = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Testnet, @@ -7145,15 +7056,12 @@ fn pox_integration_test() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 0); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); @@ -7191,7 +7099,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, 
version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7205,14 +7113,14 @@ fn pox_integration_test() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // now let's mine until the next reward cycle starts ... while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -7220,16 +7128,13 @@ fn pox_integration_test() { .block_height_to_reward_cycle(sort_height) .expect("Expected to be able to get reward cycle"); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7281,8 +7186,7 @@ fn pox_integration_test() { // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward // cycle length of 15 blocks, is a burnchain height of 300) assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {}) (unlock-burn-height u300)))", - &spender_addr)); + format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))")); tested = true; } } @@ -7307,7 +7211,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7331,7 +7235,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7348,20 +7252,17 @@ fn pox_integration_test() { while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + 
assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7403,19 +7304,16 @@ fn pox_integration_test() { while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240); assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235); assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); @@ -7472,11 +7370,11 @@ fn pox_integration_test() { assert_eq!(recipient_slots.len(), 2); assert_eq!( - recipient_slots.get(&format!("{}", &pox_2_address)).cloned(), + recipient_slots.get(&format!("{pox_2_address}")).cloned(), Some(7u64) ); assert_eq!( - recipient_slots.get(&format!("{}", &pox_1_address)).cloned(), + recipient_slots.get(&format!("{pox_1_address}")).cloned(), Some(7u64) ); @@ -7490,7 +7388,7 @@ fn pox_integration_test() { while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // get the canonical chain tip @@ -7513,7 +7411,7 @@ fn pox_integration_test() { while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); @@ -7661,7 +7559,7 @@ fn atlas_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -7677,14 +7575,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7759,14 +7657,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_2.clone()) .send() .unwrap(); - 
eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7810,14 +7708,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -7830,7 +7728,7 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Then check that the follower is correctly replicating the attachment @@ -7852,27 +7750,23 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Poll GET v2/attachments/ for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { + while !attachments_did_sync { let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -7944,20 +7838,16 @@ fn atlas_integration_test() { // Now wait for the node to sync the attachment let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { + while !attachments_did_sync { let zonefile_hex = "facade00"; let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { eprintln!("Success syncing attachment - {}", res.text().unwrap()); attachments_did_sync = true; @@ -7966,7 +7856,7 @@ fn atlas_integration_test() { if timeout == 0 { panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); } - eprintln!("Attachment {} not sync'd yet", zonefile_hex); + eprintln!("Attachment {zonefile_hex} not sync'd yet"); thread::sleep(Duration::from_millis(1000)); } } @@ -7980,7 +7870,7 @@ fn atlas_integration_test() { let namespace = "passport"; for i in 1..10 { let user = StacksPrivateKey::new(); - let zonefile_hex = format!("facade0{}", i); + let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); let name = format!("johndoe{}", i); let tx = make_contract_call( @@ -8007,14 +7897,14 @@ fn 
atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8040,20 +7930,16 @@ fn atlas_integration_test() { for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -8072,7 +7958,7 @@ fn atlas_integration_test() { // Ensure that we the attached sidecar was able to receive a total of 10 attachments // This last assertion is flacky for some reason, it does not worth bullying the CI or disabling this whole test // We're using an inequality as a best effort, to make sure that **some** attachments were received. - assert!(test_observer::get_attachments().len() > 0); + assert!(!test_observer::get_attachments().is_empty()); test_observer::clear(); channel.stop_chains_coordinator(); @@ -8122,8 +8008,8 @@ fn antientropy_integration_test() { // Prepare the config of the follower node let (mut conf_follower_node, _) = neon_integration_test_conf(); let bootstrap_node_url = format!( - "{}@{}", - bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind + "{bootstrap_node_public_key}@{}", + conf_bootstrap_node.node.p2p_bind ); conf_follower_node.connection_options.disable_block_download = true; conf_follower_node.node.set_bootstrap_nodes( @@ -8195,10 +8081,10 @@ fn antientropy_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); for i in 0..(target_height - 3) { - eprintln!("Mine block {}", i); + eprintln!("Mine block {i}"); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Let's setup the follower now. 
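Two details in the surrounding antientropy hunks deserve a note. Inline format-argument capture only accepts bare identifiers, which is why `bootstrap_node_public_key` moved into the string above while the field access `conf_bootstrap_node.node.p2p_bind` stays positional. And in the `wait_for_mined` hunk that follows, a hand-rolled `fold` becomes `Iterator::all`, and `a + 1 <= b` becomes the equivalent `a < b`, which cannot overflow on `a + 1`. A small sketch under those assumptions (the `Node` struct and literal values are invented for illustration):

```rust
struct Node {
    p2p_bind: String,
}

fn main() {
    let key = "02196f00"; // stand-in for a node public key
    let node = Node {
        p2p_bind: String::from("0.0.0.0:20444"),
    };

    // Bare identifiers can be captured inline; field accesses stay positional.
    let url = format!("{key}@{}", node.p2p_bind);
    assert_eq!(url, "02196f00@0.0.0.0:20444");

    // fold -> all: same result, but `all` short-circuits on the first false.
    let all_mined_vec = [true, true, true];
    let folded = all_mined_vec.iter().fold(true, |acc, e| acc && *e);
    assert_eq!(folded, all_mined_vec.iter().all(|e| *e));

    // `a + 1 <= b` is equivalent to `a < b` for unsigned integers, and the
    // right-hand form avoids any chance of overflow computing `a + 1`.
    let (before, after): (u64, u64) = (3, 4);
    assert_eq!(before + 1 <= after, before < after);
}
```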
@@ -8214,11 +8100,11 @@ fn antientropy_integration_test() { println!("Follower has finished"); } Ok(x) => { - println!("Follower gave a bad signal: {:?}", &x); + println!("Follower gave a bad signal: {x:?}"); panic!(); } Err(e) => { - println!("Failed to recv: {:?}", &e); + println!("Failed to recv: {e:?}"); panic!(); } }; @@ -8255,8 +8141,7 @@ fn antientropy_integration_test() { let mut sort_height = channel.get_sortitions_processed(); while sort_height < (target_height + 200) as u64 { eprintln!( - "Follower sortition is {}, target is {}", - sort_height, + "Follower sortition is {sort_height}, target is {}", target_height + 200 ); wait_for_runloop(&blocks_processed); @@ -8269,8 +8154,7 @@ fn antientropy_integration_test() { // wait for block height to reach target let mut tip_height = get_chain_tip_height(&http_origin); eprintln!( - "Follower Stacks tip height is {}, wait until {} >= {} - 3", - tip_height, tip_height, target_height + "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3" ); let btc_regtest_controller = BitcoinRegtestController::with_burnchain( @@ -8285,7 +8169,7 @@ fn antientropy_integration_test() { sleep_ms(1000); tip_height = get_chain_tip_height(&http_origin); - eprintln!("Follower Stacks tip height is {}", tip_height); + eprintln!("Follower Stacks tip height is {tip_height}"); if burnchain_deadline < get_epoch_time_secs() { burnchain_deadline = get_epoch_time_secs() + 60; @@ -8304,12 +8188,13 @@ fn antientropy_integration_test() { channel.stop_chains_coordinator(); } +#[allow(clippy::too_many_arguments)] fn wait_for_mined( btc_regtest_controller: &mut BitcoinRegtestController, blocks_processed: &Arc<AtomicU64>, http_origin: &str, users: &[StacksPrivateKey], - account_before_nonces: &Vec<u64>, + account_before_nonces: &[u64], batch_size: usize, batches: usize, index_block_hashes: &mut Vec<StacksBlockId>, ) { @@ -8318,7 +8203,7 @@ fn wait_for_mined( let mut account_after_nonces = vec![0; batches * batch_size]; let mut all_mined = false; for _k in 0..10 { - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); sleep_ms(10_000); let (ch, bhh) = get_chain_tip(http_origin); @@ -8327,29 +8212,28 @@ fn wait_for_mined( if let Some(last_ibh) = index_block_hashes.last() { if *last_ibh != ibh { index_block_hashes.push(ibh); - eprintln!("Tip is now {}", &ibh); + eprintln!("Tip is now {ibh}"); } } for j in 0..batches * batch_size { - let account_after = get_account(&http_origin, &to_addr(&users[j])); + let account_after = get_account(http_origin, &to_addr(&users[j])); let account_after_nonce = account_after.nonce; account_after_nonces[j] = account_after_nonce; - if account_before_nonces[j] + 1 <= account_after_nonce { + if account_before_nonces[j] < account_after_nonce { all_mined_vec[j] = true; } } - all_mined = all_mined_vec.iter().fold(true, |acc, elem| acc && *elem); + all_mined = all_mined_vec.iter().all(|elem| *elem); if all_mined { break; } } if !all_mined { eprintln!( - "Failed to mine all transactions: nonces = {:?}, expected {:?} + {}", - &account_after_nonces, account_before_nonces, batch_size + "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + {batch_size}" ); panic!(); } @@ -8450,7 +8334,7 @@ fn atlas_stress_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace =
Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -8466,14 +8350,14 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -8548,7 +8432,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8626,14 +8510,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8660,8 +8544,7 @@ fn atlas_stress_integration_test() { } if !all_mined { eprintln!( - "Failed to mine all transactions: nonce = {}, expected {}", - account_after_nonce, + "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", account_before.nonce + (batch_size as u64) ); panic!(); @@ -8682,14 +8565,14 @@ fn atlas_stress_integration_test() { &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_4.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8723,7 +8606,7 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let fqn = format!("janedoe{}.passport", j); + let fqn = format!("janedoe{j}.passport"); let fqn_bytes = fqn.as_bytes().to_vec(); let salt = format!("{:04x}", j); let salt_bytes = salt.as_bytes().to_vec(); @@ -8746,7 +8629,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8755,9 +8638,8 @@ fn atlas_stress_integration_test() { .unwrap(); eprintln!( - "sent preorder for {}:\n{:#?}", - &to_addr(&users[batches * batch_size + j]), - res + "sent preorder for {}:\n{res:#?}", + &to_addr(&users[batches * batch_size + j]) ); if !res.status().is_success() { panic!(""); @@ -8784,10 +8666,10 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let salt = format!("{:04x}", j); + let name = format!("janedoe{j}"); + let salt = format!("{j:04x}"); - let zonefile_hex = format!("facade01{:04x}", j); + let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8816,14 +8698,14 
@@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); } @@ -8850,8 +8732,8 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let zonefile_hex = format!("facade02{:04x}", j); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade02{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8879,14 +8761,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); } @@ -8913,8 +8795,8 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let zonefile_hex = format!("facade03{:04x}", j); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade03{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8945,14 +8827,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); } @@ -8984,8 +8866,8 @@ fn atlas_stress_integration_test() { &[ibh], ) .unwrap(); - if indexes.len() > 0 { - attachment_indexes.insert(ibh.clone(), indexes.clone()); + if !indexes.is_empty() { + attachment_indexes.insert(*ibh, indexes.clone()); } for index in indexes.iter() { @@ -8995,14 +8877,14 @@ fn atlas_stress_integration_test() { params![ibh, u64_to_sql(*index).unwrap()], "content_hash") .unwrap(); - if hashes.len() > 0 { + if !hashes.is_empty() { assert_eq!(hashes.len(), 1); - attachment_hashes.insert((ibh.clone(), *index), hashes.pop()); + attachment_hashes.insert((*ibh, *index), hashes.pop()); } } } } - eprintln!("attachment_indexes = {:?}", &attachment_indexes); + eprintln!("attachment_indexes = {attachment_indexes:?}"); let max_request_time_ms = 100; @@ -9017,12 +8899,10 @@ fn atlas_stress_integration_test() { ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)] .to_vec(); let path = format!( - "{}/v2/attachments/inv?index_block_hash={}&pages_indexes={}", - &http_origin, - ibh, + "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}", attachments_batch .iter() - .map(|a| format!("{}", &a)) + .map(|a| format!("{a}")) .collect::<Vec<String>>() .join(",") ); @@ -9034,40 +8914,34 @@ fn
atlas_stress_integration_test() { if res.status().is_success() { let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap(); - eprintln!( - "attachment inv response for {}: {:?}", - &path, &attachment_inv_response - ); + eprintln!("attachment inv response for {path}: {attachment_inv_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas inventory request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } - for i in 0..l { - if attachments[i] == 0 { + for attachment in attachments.iter().take(l) { + if *attachment == 0 { continue; } let content_hash = attachment_hashes - .get(&(*ibh, attachments[i])) + .get(&(*ibh, *attachment)) .cloned() .unwrap() .unwrap(); - let path = format!("{}/v2/attachments/{}", &http_origin, &content_hash); + let path = format!("{http_origin}/v2/attachments/{content_hash}"); let attempts = 10; let ts_begin = get_epoch_time_ms(); @@ -9076,26 +8950,20 @@ fn atlas_stress_integration_test() { if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); - eprintln!( - "attachment response for {}: {:?}", - &path, &attachment_response - ); + eprintln!("attachment response for {path}: {attachment_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas chunk request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } } @@ -9129,8 +8997,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value (unwrap! (increment) (err u1)) (unwrap! 
(increment) (err u1)) (ok (var-get counter)))) - "# - .to_string(); + "#; let spender_sk = StacksPrivateKey::new(); let spender_addr = to_addr(&spender_sk); @@ -9144,7 +9011,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value conf.estimation.fee_rate_window_size = window_size; conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000000, }); test_observer::spawn(); @@ -9181,7 +9048,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value 110000, conf.burnchain.chain_id, "increment-contract", - &max_contract_src, + max_contract_src, ), ); run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf); @@ -9198,7 +9065,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value i, // nonce i * 100000, // payment conf.burnchain.chain_id, - &spender_addr.into(), + &spender_addr, "increment-contract", "increment-many", &[], @@ -9213,12 +9080,12 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value { // Read from the fee estimation endpoin. - let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: spender_addr.clone().into(), - contract_name: ContractName::try_from("increment-contract").unwrap(), - function_name: ClarityName::try_from("increment-many").unwrap(), + address: spender_addr, + contract_name: ContractName::from("increment-contract"), + function_name: ClarityName::from("increment-many"), function_args: vec![], }); @@ -9255,8 +9122,8 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value let last_cost = response_estimated_costs[i - 1]; assert_eq!(curr_cost, last_cost); - let curr_rate = response_top_fee_rates[i] as f64; - let last_rate = response_top_fee_rates[i - 1] as f64; + let curr_rate = response_top_fee_rates[i]; + let last_rate = response_top_fee_rates[i - 1]; assert!(curr_rate >= last_rate); } @@ -9438,7 +9305,7 @@ fn use_latest_tip_integration_test() { let client = reqwest::blocking::Client::new(); // Post the microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -9452,7 +9319,7 @@ fn use_latest_tip_integration_test() { // Wait for the microblock to be accepted sleep_ms(5_000); - let path = format!("{}/v2/info", &http_origin); + let path = format!("{http_origin}/v2/info"); let mut iter_count = 0; loop { let tip_info = client @@ -9594,26 +9461,26 @@ fn test_flash_block_skip_tenure() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // fault injection: force tenures to take too long - std::env::set_var("STX_TEST_SLOW_TENURE".to_string(), "11000".to_string()); + std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); for i in 0..10 { // build one bitcoin block every 10 seconds - eprintln!("Build bitcoin block +{}", i); + eprintln!("Build bitcoin block +{i}"); btc_regtest_controller.build_next_block(1); sleep_ms(10000); } // at least one tenure was skipped let num_skipped = missed_tenures.load(Ordering::SeqCst); - eprintln!("Skipped {} tenures", &num_skipped); + eprintln!("Skipped {num_skipped} tenures"); assert!(num_skipped > 1); // let's query the miner's account nonce: - eprintln!("Miner account: {}", 
miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); - eprintln!("account = {:?}", &account); + eprintln!("account = {account:?}"); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 2); @@ -9696,15 +9563,15 @@ fn test_problematic_txs_are_not_stored() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: spender_addr_1.clone(), + address: spender_addr_1, amount: 1_000_000_000_000, }); conf.initial_balances.push(InitialBalance { - address: spender_addr_2.clone(), + address: spender_addr_2, amount: 1_000_000_000_000, }); conf.initial_balances.push(InitialBalance { - address: spender_addr_3.clone(), + address: spender_addr_3, amount: 1_000_000_000_000, }); @@ -9772,7 +9639,7 @@ fn test_problematic_txs_are_not_stored() { let exceeds_repeat_factor = edge_repeat_factor + 1; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -9790,7 +9657,7 @@ fn test_problematic_txs_are_not_stored() { let high_repeat_factor = 128 * 1024; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -9840,25 +9707,24 @@ fn test_problematic_txs_are_not_stored() { fn find_new_files(dirp: &str, prev_files: &HashSet<String>) -> (Vec<String>, HashSet<String>) { let dirpp = Path::new(dirp); - debug!("readdir {}", dirp); + debug!("readdir {dirp}"); let cur_files = fs::read_dir(dirp).unwrap(); let mut new_files = vec![]; let mut cur_files_set = HashSet::new(); for cur_file in cur_files.into_iter() { let cur_file = cur_file.unwrap(); let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string(); - test_debug!("file in {}: {}", dirp, &cur_file_fullpath); + test_debug!("file in {dirp}: {cur_file_fullpath}"); cur_files_set.insert(cur_file_fullpath.clone()); if prev_files.contains(&cur_file_fullpath) { - test_debug!("already contains {}", &cur_file_fullpath); + test_debug!("already contains {cur_file_fullpath}"); continue; } - test_debug!("new file {}", &cur_file_fullpath); + test_debug!("new file {cur_file_fullpath}"); new_files.push(cur_file_fullpath); } debug!( - "Checked {} for new files; found {} (all: {})", - dirp, + "Checked {dirp} for new files; found {} (all: {})", new_files.len(), cur_files_set.len() ); @@ -9894,8 +9760,7 @@ fn spawn_follower_node( conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); - conf.burnchain.ast_precheck_size_height = - initial_conf.burnchain.ast_precheck_size_height.clone(); + conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; conf.connection_options.inv_sync_interval = 3; @@ -9923,12 +9788,12 @@ fn test_problematic_blocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); +
fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10000,7 +9865,7 @@ fn test_problematic_blocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10018,7 +9883,7 @@ fn test_problematic_blocks_are_not_mined() { let high_repeat_factor = 3200; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10054,20 +9919,11 @@ fn test_problematic_blocks_are_not_mined() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10116,7 +9972,7 @@ fn test_problematic_blocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10125,31 +9981,25 @@ fn test_problematic_blocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); btc_regtest_controller.build_next_block(1); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let 
new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10162,7 +10012,7 @@ fn test_problematic_blocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10185,10 +10035,8 @@ fn test_problematic_blocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no block contained the tx_high bad transaction, ever let blocks = test_observer::get_blocks(); @@ -10233,8 +10081,7 @@ fn test_problematic_blocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10278,12 +10125,12 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10355,7 +10202,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10372,7 +10219,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let high_repeat_factor = 70; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = 
format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10408,20 +10255,11 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10470,7 +10308,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10484,14 +10322,14 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { loop { sleep_ms(1_000); let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10510,7 +10348,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10521,23 +10359,17 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
- debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10549,7 +10381,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10616,10 +10448,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { break; } eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {})\n", - follower_tip_info.burn_block_height, - follower_tip_info.stacks_tip_height, - bad_block_height + "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); sleep_ms(1000); } @@ -10627,8 +10457,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10669,12 +10498,12 @@ fn test_problematic_microblocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10752,7 +10581,7 @@ fn test_problematic_microblocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -10767,11 +10596,10 @@ fn test_problematic_microblocks_are_not_mined() { .txid(); // something stupidly high over the expression depth - let high_repeat_factor = - 
(AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -10807,24 +10635,12 @@ fn test_problematic_microblocks_are_not_mined() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_exceeds transaction {tx_exceeds_txid}"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10876,7 +10692,7 @@ fn test_problematic_microblocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10885,39 +10701,27 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - info!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_high transaction {}", - &tx_high_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_high transaction {tx_high_txid}"); btc_regtest_controller.build_next_block(1); - info!( - "Mined block after submitting problematic tx_high transaction {}", - &tx_high_txid - ); + info!("Mined block after submitting problematic tx_high transaction {tx_high_txid}"); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let 
sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10930,7 +10734,7 @@ fn test_problematic_microblocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some microblocks, and log problematic microblocks for _i in 0..6 { @@ -10956,10 +10760,8 @@ fn test_problematic_microblocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no microblock contained the tx_high bad transaction, ever let microblocks = test_observer::get_microblocks(); @@ -11004,8 +10806,7 @@ fn test_problematic_microblocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11049,12 +10850,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -11134,7 +10935,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -11149,11 +10950,10 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .txid(); // greatly exceeds AST depth, but is still mineable without a stack overflow - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", 
tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -11189,20 +10989,11 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -11254,7 +11045,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -11267,14 +11058,14 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11293,7 +11084,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11304,24 +11095,18 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
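[Editor's note] For context on what the `repeat` calls above construct: these tests manufacture a "problematic" contract by nesting Clarity tuples one level deeper than the parser allows, with the depth derived from `AST_CALL_STACK_DEPTH_BUFFER + MAX_CALL_STACK_DEPTH + 1` (the diff drops the `as u64` cast on the buffer constant, which is evidently already a `u64`). A sketch of the construction with placeholder constants; the real values live in the stacks-core source:

```rust
// Placeholder values; the real constants are defined in stacks-core.
const AST_CALL_STACK_DEPTH_BUFFER: u64 = 5;
const MAX_CALL_STACK_DEPTH: usize = 64;

fn make_too_deep_clarity_body() -> String {
    // One level deeper than the AST checker accepts.
    let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1;
    let body_start = "{ a : ".repeat(high_repeat_factor as usize); // opens N nested tuples
    let body_end = "} ".repeat(high_repeat_factor as usize); // closes all N of them
    // Yields "{ a : { a : ... u1 ... } }": syntactically valid, pathologically deep.
    format!("{body_start}u1 {body_end}")
}

fn main() {
    println!("{}", make_too_deep_clarity_body());
}
```
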
- debug!("Submit problematic tx_high transaction {}", &tx_high_txid); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic microblocks for _i in 0..6 { @@ -11333,7 +11118,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11356,7 +11141,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // at least one was problematic. // the miner might make multiple microblocks (only some of which are confirmed), so also check // the event observer to see that we actually picked up tx_high - assert!(all_new_files.len() >= 1); + assert!(!all_new_files.is_empty()); // tx_high got mined by the miner let microblocks = test_observer::get_microblocks(); @@ -11381,8 +11166,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .split("0x") .collect(); let bad_block_id_hex = parts[1]; - debug!("bad_block_id_hex = '{}'", &bad_block_id_hex); - Some(StacksBlockId::from_hex(&bad_block_id_hex).unwrap()) + debug!("bad_block_id_hex = '{bad_block_id_hex}'"); + Some(StacksBlockId::from_hex(bad_block_id_hex).unwrap()) }; } } @@ -11420,8 +11205,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11570,9 +11354,8 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st let full_iters_code = full_iters_code_parts.join("\n "); - let iters_mod_code_parts: Vec = (0..iters_mod) - .map(|cnt| format!("0x{:0>2x}", cnt)) - .collect(); + let iters_mod_code_parts: Vec = + (0..iters_mod).map(|cnt| format!("0x{cnt:0>2x}")).collect(); let iters_mod_code = format!("(list {})", iters_mod_code_parts.join(" ")); @@ -11599,7 +11382,7 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st (define-private (crash-me-folder (input (buff 1)) (ctr uint)) (begin ;; full_iters_code - {} + {full_iters_code} (+ u1 ctr) ) ) @@ -11608,20 +11391,17 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st ;; call index-of (iters_256 * 256) times (fold crash-me-folder BUFF_TO_BYTE u0) ;; call index-of iters_mod times - (fold crash-me-folder {} u0) + (fold crash-me-folder {iters_mod_code} u0) (print name) (ok u0) ) 
) (begin - (crash-me \"{}\")) + (crash-me \"large-{nonce}-{addr_prefix}-{num_index_of}\")) ", - full_iters_code, - iters_mod_code, - &format!("large-{}-{}-{}", nonce, &addr_prefix, num_index_of) ); - eprintln!("{}", &code); + eprintln!("{code}"); code } @@ -11636,13 +11416,14 @@ pub fn make_expensive_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; + let num_index_of = 256; for nonce in 0..25 { let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, 256); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{num_index_of}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11650,7 +11431,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) } else { make_contract_publish( @@ -11659,7 +11440,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) }; chain.push(tx); @@ -11673,7 +11454,7 @@ pub fn make_random_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11689,8 +11470,8 @@ pub fn make_random_tx_chain( let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, random_iters); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{random_iters}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11716,7 +11497,7 @@ pub fn make_random_tx_chain( } fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11732,7 +11513,7 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{}-{}-{}", nonce, &addr_prefix, random_iters); + let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); eprintln!("Make tx {}", &contract_name); let tx = make_contract_publish_microblock_only( privk, @@ -11758,10 +11539,7 @@ fn test_competing_miners_build_on_same_chain( return; } - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -11808,9 +11586,8 @@ fn test_competing_miners_build_on_same_chain( confs[i].node.set_bootstrap_nodes( format!( - "{}@{}", + "{}@{p2p_bind}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind ), chain_id, peer_version, @@ -11818,8 +11595,8 @@ fn test_competing_miners_build_on_same_chain( } // use long reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = 
Burnchain::regtest(&conf.get_burn_db_path()); let reward_cycle_len = 100; let prepare_phase_len = 20; let pox_constants = PoxConstants::new( @@ -11856,10 +11633,10 @@ fn test_competing_miners_build_on_same_chain( btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -11879,8 +11656,8 @@ fn test_competing_miners_build_on_same_chain( let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -11888,7 +11665,7 @@ fn test_competing_miners_build_on_same_chain( loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 1: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 1: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -11898,23 +11675,19 @@ fn test_competing_miners_build_on_same_chain( next_block_and_wait(&mut btc_regtest_controller, &blocks_processed[0]); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -11938,7 +11711,7 @@ fn test_competing_miners_build_on_same_chain( let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -11948,7 +11721,7 @@ fn test_competing_miners_build_on_same_chain( // mine quickly -- see if we can induce flash blocks for i in 0..1000 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); } @@ -12023,10 +11796,7 @@ fn microblock_miner_multiple_attempts() { conf.burnchain.max_rbf = 1000000; conf.node.wait_time_for_blocks = 1_000; - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -12076,7 +11846,7 @@ fn microblock_miner_multiple_attempts() { // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); - eprintln!("Miner account: {:?}", &account); + eprintln!("Miner account: {account:?}"); let all_txs: Vec<_> = privks .iter() @@ -12085,10 +11855,9 @@ fn microblock_miner_multiple_attempts() { .collect(); let 
_handle = thread::spawn(move || { - for txi in 0..all_txs.len() { - for j in 0..all_txs[txi].len() { - let tx = &all_txs[txi][j]; - eprintln!("\n\nSubmit tx {},{}\n\n", txi, j); + for (i, txi) in all_txs.iter().enumerate() { + for (j, tx) in txi.iter().enumerate() { + eprintln!("\n\nSubmit tx {i},{j}\n\n"); submit_tx(&http_origin, tx); sleep_ms(1_000); } @@ -12119,12 +11888,13 @@ fn min_txs() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.min_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.min_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); - if fs::metadata("/tmp/activate_vrf_key.min_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.min_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12176,18 +11946,18 @@ fn min_txs() { let _sort_height = channel.get_sortitions_processed(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); submit_tx(&http_origin, &publish); - debug!("Try to build too-small a block {}", &i); + debug!("Try to build too-small a block {i}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); } @@ -12195,12 +11965,12 @@ fn min_txs() { for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); if transactions.len() > 1 { - debug!("Got block: {:?}", &block); + debug!("Got block: {block:?}"); assert!(transactions.len() >= 4); } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.min_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12222,13 +11992,14 @@ fn filter_txs_by_type() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.filter_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.filter_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); conf.miner.txs_to_consider = [MemPoolWalkTxTypes::TokenTransfer].into_iter().collect(); - if fs::metadata("/tmp/activate_vrf_key.filter_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.filter_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12280,13 +12051,13 @@ fn filter_txs_by_type() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12298,7 +12069,7 @@ fn filter_txs_by_type() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + 
info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12313,7 +12084,7 @@ fn filter_txs_by_type() { } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.filter_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12391,13 +12162,13 @@ fn filter_txs_by_origin() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12409,7 +12180,7 @@ fn filter_txs_by_origin() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12479,12 +12250,12 @@ fn bitcoin_reorg_flap() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // stop bitcoind and copy its DB to simulate a chain flap @@ -12496,7 +12267,7 @@ fn bitcoin_reorg_flap() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -12681,8 +12452,7 @@ fn bitcoin_reorg_flap_with_follower() { let mut miner_sort_height = miner_channel.get_sortitions_processed(); let mut follower_sort_height = follower_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); while miner_sort_height < 210 && follower_sort_height < 210 { @@ -12695,8 +12465,7 @@ fn bitcoin_reorg_flap_with_follower() { miner_sort_height = miner_channel.get_sortitions_processed(); follower_sort_height = miner_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); } @@ -12709,7 +12478,7 @@ fn bitcoin_reorg_flap_with_follower() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); diff 
--git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 42b894398df..622e31bdd66 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -123,10 +123,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest (), - G: FnMut(&mut NeonConfig) -> (), - >( + fn new_with_config_modifications( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, mut signer_config_modifier: F, @@ -151,8 +148,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()); @@ -330,10 +326,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()>( +fn setup_stx_btc_node( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], signer_configs: &[SignerConfig], diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 234b73684ac..a704d2f2ee7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -118,13 +118,13 @@ impl SignerTest { for stacker_sk in self.signer_stacks_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -137,7 +137,7 @@ impl SignerTest { let signer_pk = StacksPublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, self.running_nodes.conf.burnchain.chain_id, @@ -246,7 +246,7 @@ impl SignerTest { .get_reward_set_signers(reward_cycle) .expect("Failed to check if reward set is calculated") .map(|reward_set| { - debug!("Signer set: {:?}", reward_set); + debug!("Signer set: {reward_set:?}"); }) .is_some()) }) @@ -304,10 +304,7 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. 
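[Editor's note] One aside on the 70% check just below: it relies on integer arithmetic, so multiplying before dividing matters (writing `7 / 10` first would truncate to zero). A self-contained illustration, not code from the patch:

```rust
/// True when `signatures` crosses the 70% signing threshold.
/// Multiply before dividing: `7 / 10` alone truncates to 0 in integer math.
fn meets_threshold(signatures: usize, num_signers: usize) -> bool {
    signatures >= num_signers * 7 / 10
}

fn main() {
    assert!(meets_threshold(4, 5)); // 5 * 7 / 10 == 3, and 4 >= 3
    assert!(!meets_threshold(2, 5)); // 2 < 3
    assert!(meets_threshold(0, 1)); // 1 * 7 / 10 == 0: tiny signer sets round down
}
```
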
assert!(signature.len() >= num_signers * 7 / 10); - info!( - "Verifying signatures against signers for reward cycle {:?}", - reward_cycle - ); + info!("Verifying signatures against signers for reward cycle {reward_cycle:?}"); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block @@ -789,7 +786,7 @@ fn reloads_signer_set_in() { let send_fee = 180; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |_config| {}, |_| {}, None, @@ -848,7 +845,7 @@ fn reloads_signer_set_in() { } }; if let Some(ref set) = reward_set { - info!("Signer set: {:?}", set); + info!("Signer set: {set:?}"); } Ok(reward_set.is_some()) }) @@ -912,7 +909,7 @@ fn forked_tenure_testing( let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; @@ -1030,7 +1027,7 @@ fn forked_tenure_testing( .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -1038,14 +1035,14 @@ fn forked_tenure_testing( let tip_b = StacksHeaderInfo { anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), microblock_tail: None, - stacks_block_height: tip_b_block.header.chain_length.into(), + stacks_block_height: tip_b_block.header.chain_length, index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed - consensus_hash: tip_b_block.header.consensus_hash.clone(), - burn_header_hash: tip_sn.burn_header_hash.clone(), + consensus_hash: tip_b_block.header.consensus_hash, + burn_header_hash: tip_sn.burn_header_hash, burn_header_height: tip_sn.block_height as u32, burn_header_timestamp: tip_sn.burn_header_timestamp, anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, - burn_view: Some(tip_b_block.header.consensus_hash.clone()), + burn_view: Some(tip_b_block.header.consensus_hash), }; let blocks = test_observer::get_mined_nakamoto_blocks(); @@ -1227,10 +1224,8 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -1339,7 +1334,7 @@ fn bitcoind_forking_test() { let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; - assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); + assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 2); for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); @@ -1466,7 +1461,7 @@ fn multiple_miners() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 
{ &node_1_rpc_bind @@ -1504,7 +1499,7 @@ fn multiple_miners() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1583,10 +1578,7 @@ fn multiple_miners() { let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); - info!( - "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", - &info_1, &info_2 - ); + info!("Issue next block-build request\ninfo 1: {info_1:?}\ninfo 2: {info_2:?}\n"); signer_test.mine_block_wait_on_processing( &[&rl1_coord_channels, &rl2_coord_channels], @@ -1597,10 +1589,8 @@ fn multiple_miners() { btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure - let consensus_hash_set: HashSet<_> = blocks - .iter() - .map(|header| header.consensus_hash.clone()) - .collect(); + let consensus_hash_set: HashSet<_> = + blocks.iter().map(|header| header.consensus_hash).collect(); assert_eq!( consensus_hash_set.len(), blocks.len(), @@ -1667,14 +1657,7 @@ fn get_nakamoto_headers(config: &Config) -> Vec { let nakamoto_block_ids: HashSet<_> = test_observer::get_blocks() .into_iter() .filter_map(|block_json| { - if block_json - .as_object() - .unwrap() - .get("miner_signature") - .is_none() - { - return None; - } + block_json.as_object().unwrap().get("miner_signature")?; let block_id = StacksBlockId::from_hex( &block_json .as_object() @@ -1753,7 +1736,7 @@ fn miner_forking() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1795,7 +1778,7 @@ fn miner_forking() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1816,7 +1799,7 @@ fn miner_forking() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -1904,7 +1887,7 @@ fn miner_forking() { TEST_BROADCAST_STALL.lock().unwrap().replace(false); // wait for a block to be processed (or timeout!) - if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { + if wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)).is_err() { info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); return (sort_tip, false); } @@ -1946,7 +1929,7 @@ fn miner_forking() { .into_iter() .map(|header| { info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); - (header.consensus_hash.clone(), header) + (header.consensus_hash, header) }) .collect(); @@ -1985,9 +1968,7 @@ fn miner_forking() { expects_miner_2_to_be_valid = false; } else { info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" 
=> expects_miner_2_to_be_valid); - assert!(nakamoto_headers - .get(&sortition_data.consensus_hash) - .is_none()); + assert!(!nakamoto_headers.contains_key(&sortition_data.consensus_hash)); assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); won_by_miner_2_but_no_tenure = true; expects_miner_2_to_be_valid = true; @@ -2034,10 +2015,8 @@ fn end_of_tenure() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); @@ -2185,10 +2164,8 @@ fn retry_on_rejection() { let send_fee = 180; let short_timeout = Duration::from_secs(30); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 3)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2198,7 +2175,7 @@ fn retry_on_rejection() { let sortdb = burnchain.open_sortition_db(true).unwrap(); wait_for(30, || { - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(tip.sortition) }) .expect("Timed out waiting for sortition"); @@ -2324,10 +2301,8 @@ fn signers_broadcast_signed_blocks() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2345,8 +2320,8 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, stacks_tip_height: {},{}", - blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + "blocks_mined: {blocks_mined},{blocks_before}, stacks_tip_height: {},{}", + info.stacks_tip_height, info_before.stacks_tip_height ); Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) }) @@ -2388,11 +2363,7 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", - blocks_mined, - blocks_before, - signer_pushed, - signer_pushed_before, + "blocks_mined: {blocks_mined},{blocks_before}, signers_pushed: {signer_pushed},{signer_pushed_before}, stacks_tip_height: {},{}", info.stacks_tip_height, info_before.stacks_tip_height ); @@ -2432,7 +2403,7 @@ fn empty_sortition() { let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = 
SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2613,7 +2584,7 @@ fn mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |_| {}, |node_config| { node_config.miner.pre_nakamoto_mock_signing = true; @@ -2763,7 +2734,7 @@ fn multiple_miners_mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -2807,16 +2778,16 @@ fn multiple_miners_mock_sign_epoch_25() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -2829,7 +2800,7 @@ fn multiple_miners_mock_sign_epoch_25() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -2953,17 +2924,13 @@ fn signer_set_rollover() { let new_num_signers = 4; let new_signer_private_keys: Vec<_> = (0..new_num_signers) - .into_iter() .map(|_| StacksPrivateKey::new()) .collect(); let new_signer_public_keys: Vec<_> = new_signer_private_keys .iter() .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); - let new_signer_addresses: Vec<_> = new_signer_private_keys - .iter() - .map(|sk| tests::to_addr(sk)) - .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; @@ -2972,15 +2939,15 @@ fn signer_set_rollover() { let mut initial_balances = new_signer_addresses .iter() - .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE)) + .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) .collect::>(); - initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); + initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); let run_stamp = 
rand::random(); let rpc_port = 51024; - let rpc_bind = format!("127.0.0.1:{}", rpc_port); + let rpc_bind = format!("127.0.0.1:{rpc_port}"); // Setup the new signers that will take over let new_signer_configs = build_signer_config_tomls( @@ -2997,12 +2964,11 @@ fn signer_set_rollover() { None, ); - let new_spawned_signers: Vec<_> = (0..new_num_signers) - .into_iter() - .map(|i| { + let new_spawned_signers: Vec<_> = new_signer_configs + .iter() + .map(|conf| { info!("spawning signer"); - let signer_config = - SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); + let signer_config = SignerConfig::load_from_str(conf).unwrap(); SpawnedSigner::new(signer_config) }) .collect(); @@ -3047,7 +3013,7 @@ fn signer_set_rollover() { // Verify that naka_conf has our new signer's event observers for toml in &new_signer_configs { - let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + let signer_config = SignerConfig::load_from_str(toml).unwrap(); let endpoint = format!("{}", signer_config.endpoint); assert!(signer_test .running_nodes @@ -3072,7 +3038,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the old signers ----"); let current_signers = signer_test.get_reward_set_signers(reward_cycle); - assert_eq!(current_signers.len(), num_signers as usize); + assert_eq!(current_signers.len(), num_signers); // Verify that the current signers are the same as the old signers for signer in current_signers.iter() { assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); @@ -3117,13 +3083,13 @@ fn signer_set_rollover() { for stacker_sk in new_signer_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -3136,7 +3102,7 @@ fn signer_set_rollover() { let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, signer_test.running_nodes.conf.burnchain.chain_id, @@ -3190,10 +3156,7 @@ fn signer_set_rollover() { assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } - info!( - "---- Mining to the next reward cycle (block {}) -----", - next_cycle_height - ); + info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); signer_test.run_until_burnchain_height_nakamoto( Duration::from_secs(60), next_cycle_height, @@ -3204,7 +3167,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the new signers ----"); let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); - assert_eq!(current_signers.len(), new_num_signers as usize); + assert_eq!(current_signers.len(), new_num_signers); for signer in current_signers.iter() { assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); @@ -3262,13 +3225,12 @@ fn min_gap_between_blocks() { let send_amt = 100; let send_fee = 180; - let mut sender_nonce = 0; let interim_blocks = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let time_between_blocks_ms = 10_000; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - 
vec![(sender_addr.clone(), (send_amt + send_fee) * interim_blocks)], + vec![(sender_addr, (send_amt + send_fee) * interim_blocks)], |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -3294,13 +3256,12 @@ fn min_gap_between_blocks() { // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, - sender_nonce, + interim_block_ix, // same as the sender nonce send_fee, signer_test.running_nodes.conf.burnchain.chain_id, &recipient, send_amt, ); - sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block to be processed"); @@ -3312,7 +3273,7 @@ fn min_gap_between_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!("Mined interim block:{}", interim_block_ix); + info!("Mined interim block:{interim_block_ix}"); } wait_for(60, || { @@ -3426,7 +3387,7 @@ fn duplicate_signers() { }) .filter_map(|message| match message { SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {:?}", &m); + info!("Message(accepted): {m:?}"); Some(m) } _ => { @@ -3503,7 +3464,7 @@ fn multiple_miners_with_nakamoto_blocks() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -3542,7 +3503,7 @@ fn multiple_miners_with_nakamoto_blocks() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3667,10 +3628,7 @@ fn multiple_miners_with_nakamoto_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!( - "Mined interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); } let blocks = get_nakamoto_headers(&conf); @@ -3681,7 +3639,7 @@ fn multiple_miners_with_nakamoto_blocks() { if seen_burn_hashes.contains(&header.burn_header_hash) { continue; } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + seen_burn_hashes.insert(header.burn_header_hash); let header = header.anchored_header.as_stacks_nakamoto().unwrap(); if miner_1_pk @@ -3703,10 +3661,7 @@ fn multiple_miners_with_nakamoto_blocks() { miner_2_tenures += 1; } } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures - ); + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } info!( @@ -3724,10 +3679,7 @@ fn multiple_miners_with_nakamoto_blocks() { peer_1_height, pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); rl2_coord_channels .lock() .expect("Mutex poisoned") @@ -3777,7 +3729,7 @@ fn partial_tenure_fork() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -3812,7 +3764,7 @@ fn partial_tenure_fork() { panic!("Expected epochs to be set"); } }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = 
     let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
@@ -3833,7 +3785,7 @@ fn partial_tenure_fork() {
     let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
     let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
 
-    conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1");
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
 
     conf_node_2.node.set_bootstrap_nodes(
         format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
@@ -4057,14 +4009,11 @@ fn partial_tenure_fork() {
                     blocks = interim_block_ix;
                     break;
                 } else {
-                    panic!("Failed to submit tx: {}", e);
+                    panic!("Failed to submit tx: {e}");
                 }
             }
         }
-        info!(
-            "Attempted to mine interim block {}:{}",
-            btc_blocks_mined, interim_block_ix
-        );
+        info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}");
     }
 
     if miner == 1 {
@@ -4084,13 +4033,11 @@ fn partial_tenure_fork() {
         if miner == 1 {
             assert_eq!(mined_1, mined_before_1 + blocks + 1);
+        } else if miner_2_tenures < min_miner_2_tenures {
+            assert_eq!(mined_2, mined_before_2 + blocks + 1);
         } else {
-            if miner_2_tenures < min_miner_2_tenures {
-                assert_eq!(mined_2, mined_before_2 + blocks + 1);
-            } else {
-                // Miner 2 should have mined 0 blocks after the fork
-                assert_eq!(mined_2, mined_before_2);
-            }
+            // Miner 2 should have mined 0 blocks after the fork
+            assert_eq!(mined_2, mined_before_2);
         }
     }
 
@@ -4110,10 +4057,7 @@ fn partial_tenure_fork() {
     // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2
     // before the fork was initiated
     assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks);
-    assert_eq!(
-        btc_blocks_mined,
-        u64::try_from(miner_1_tenures + miner_2_tenures).unwrap()
-    );
+    assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures);
 
     let sortdb = SortitionDB::open(
         &conf_node_2.get_burn_db_file_path(),
@@ -4179,7 +4123,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
     let short_timeout_secs = 20;
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
         num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
     );
 
     let all_signers: Vec<_> = signer_test
@@ -4366,7 +4310,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() {
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
         num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
     );
 
     let all_signers: Vec<_> = signer_test
@@ -4589,7 +4533,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() {
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
         num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
     );
     let all_signers = signer_test
         .signer_stacks_private_keys
@@ -4803,7 +4747,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
         num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
     );
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
     signer_test.boot_to_epoch_3();
@@ -4817,7 +4761,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
     let sortdb = burnchain.open_sortition_db(true).unwrap();
 
     wait_for(30, || {
-        let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap();
+        let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
         Ok(tip.sortition)
     })
     .expect("Timed out waiting for sortition");
@@ -5103,10 +5047,8 @@ fn continue_after_tenure_extend() {
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let send_amt = 100;
     let send_fee = 180;
-    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
-        num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * 5)],
-    );
+    let mut signer_test: SignerTest<SpawnedSigner> =
+        SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 5)]);
     let timeout = Duration::from_secs(200);
     let coord_channel = signer_test.running_nodes.coord_channel.clone();
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
@@ -5178,17 +5120,16 @@ fn continue_after_tenure_extend() {
     match &parsed.payload {
         TransactionPayload::TenureChange(payload)
             if payload.cause == TenureChangeCause::Extended => {}
-        _ => panic!("Expected tenure extend transaction, got {:?}", parsed),
+        _ => panic!("Expected tenure extend transaction, got {parsed:?}"),
     };
 
     // Verify that the miner can continue mining in the tenure with the tenure extend
     info!("------------------------- Mine After Tenure Extend -------------------------");
-    let mut sender_nonce = 0;
     let mut blocks_processed_before = coord_channel
         .lock()
         .expect("Mutex poisoned")
         .get_stacks_blocks_processed();
-    for _ in 0..5 {
+    for sender_nonce in 0..5 {
         // submit a tx so that the miner will mine an extra block
         let transfer_tx = make_stacks_transfer(
             &sender_sk,
@@ -5198,7 +5139,6 @@
             &recipient,
             send_amt,
         );
-        sender_nonce += 1;
         submit_tx(&http_origin, &transfer_tx);
 
         info!("Submitted transfer tx and waiting for block proposal");
@@ -5280,13 +5220,13 @@ fn signing_in_0th_tenure_of_reward_cycle() {
     assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle);
 
     for signer in &signer_public_keys {
-        let blocks_signed = get_v3_signer(&signer, next_reward_cycle);
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
         assert_eq!(blocks_signed, 0);
     }
 
     info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------");
     for signer in &signer_public_keys {
-        let blocks_signed = get_v3_signer(&signer, next_reward_cycle);
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
         assert_eq!(blocks_signed, 0);
     }
     let blocks_before = signer_test
@@ -5320,7 +5260,7 @@
                 .unwrap()
         })
         .expect("Unknown signer signature");
-        let blocks_signed = get_v3_signer(&signer, next_reward_cycle);
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
         assert_eq!(blocks_signed, 1);
     }
     assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle);
@@ -5363,7 +5303,7 @@ fn multiple_miners_with_custom_chain_id() {
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
         num_signers,
         vec![(
-            sender_addr.clone(),
+            sender_addr,
             (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure,
         )],
         |signer_config| {
@@ -5404,7 +5344,7 @@
                 false
             })
         },
-        Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]),
+        Some(vec![btc_miner_1_pk, btc_miner_2_pk]),
         None,
     );
     let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
@@ -5428,7 +5368,7 @@ fn multiple_miners_with_custom_chain_id() {
     let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
     let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
 
-    conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1");
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
 
     conf_node_2.node.set_bootstrap_nodes(
         format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
@@ -5530,10 +5470,7 @@
                 Ok(blocks_processed > blocks_processed_before)
             })
             .unwrap();
-            info!(
-                "Mined interim block {}:{}",
-                btc_blocks_mined, interim_block_ix
-            );
+            info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}");
         }
 
         let blocks = get_nakamoto_headers(&conf);
@@ -5544,7 +5481,7 @@
             if seen_burn_hashes.contains(&header.burn_header_hash) {
                 continue;
             }
-            seen_burn_hashes.insert(header.burn_header_hash.clone());
+            seen_burn_hashes.insert(header.burn_header_hash);
 
             let header = header.anchored_header.as_stacks_nakamoto().unwrap();
             if miner_1_pk
@@ -5566,10 +5503,7 @@
                 miner_2_tenures += 1;
             }
         }
-        info!(
-            "Miner 1 tenures: {}, Miner 2 tenures: {}",
-            miner_1_tenures, miner_2_tenures
-        );
+        info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}");
     }
 
     info!(
@@ -5587,10 +5521,7 @@
         peer_1_height,
         pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1)
     );
-    assert_eq!(
-        btc_blocks_mined,
-        u64::try_from(miner_1_tenures + miner_2_tenures).unwrap()
-    );
+    assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures);
 
     // Verify both nodes have the correct chain id
     let miner1_info = get_chain_info(&signer_test.running_nodes.conf);
diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs
index aa620d349b3..70d820fbb19 100644
--- a/testnet/stacks-node/src/tests/stackerdb.rs
+++ b/testnet/stacks-node/src/tests/stackerdb.rs
@@ -41,14 +41,13 @@ fn post_stackerdb_chunk(
     slot_version: u32,
 ) -> StackerDBChunkAckData {
     let mut chunk = StackerDBChunkData::new(slot_id, slot_version, data);
-    chunk.sign(&signer).unwrap();
+    chunk.sign(signer).unwrap();
 
     let chunk_body = serde_json::to_string(&chunk).unwrap();
 
     let client = reqwest::blocking::Client::new();
     let path = format!(
-        "{}/v2/stackerdb/{}/{}/chunks",
-        http_origin,
+        "{http_origin}/v2/stackerdb/{}/{}/chunks",
         &StacksAddress::from(stackerdb_contract_id.issuer.clone()),
         stackerdb_contract_id.name
     );
@@ -60,8 +59,8 @@ fn post_stackerdb_chunk(
         .unwrap();
     if res.status().is_success() {
         let ack: StackerDBChunkAckData = res.json().unwrap();
-        info!("Got stackerdb ack: {:?}", &ack);
-        return ack;
+        info!("Got stackerdb ack: {ack:?}");
+        ack
     } else {
         eprintln!("StackerDB post error: {}", res.text().unwrap());
         panic!("");
     }
 }
@@ -76,20 +75,15 @@ fn get_stackerdb_chunk(
 ) -> Vec<u8> {
     let path = if let Some(version) = slot_version {
         format!(
-            "{}/v2/stackerdb/{}/{}/{}/{}",
-            http_origin,
+            "{http_origin}/v2/stackerdb/{}/{}/{slot_id}/{version}",
             StacksAddress::from(stackerdb_contract_id.issuer.clone()),
             stackerdb_contract_id.name,
-            slot_id,
-            version
         )
     } else {
         format!(
-            "{}/v2/stackerdb/{}/{}/{}",
-            http_origin,
+            "{http_origin}/v2/stackerdb/{}/{}/{slot_id}",
             StacksAddress::from(stackerdb_contract_id.issuer.clone()),
-            stackerdb_contract_id.name,
-            slot_id
+            stackerdb_contract_id.name
         )
     };
@@ -97,8 +91,7 @@ fn get_stackerdb_chunk(
     let res = client.get(&path).send().unwrap();
     if res.status().is_success() {
-        let chunk_data: Vec<u8> = res.bytes().unwrap().to_vec();
-        return chunk_data;
+        res.bytes().unwrap().to_vec()
     } else {
         eprintln!("Get chunk error: {}", res.text().unwrap());
         panic!("");
     }
 }
@@ -115,7 +108,7 @@ fn test_stackerdb_load_store() {
     let (mut conf, _) = neon_integration_test_conf();
     test_observer::register_any(&mut conf);
 
-    let privks = vec![
+    let privks = [
         // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R
         StacksPrivateKey::from_hex(
             "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001",
@@ -223,18 +216,18 @@ fn test_stackerdb_load_store() {
     // write some chunks and read them back
     for i in 0..3 {
-        let chunk_str = format!("Hello chunks {}", &i);
+        let chunk_str = format!("Hello chunks {i}");
         let ack = post_stackerdb_chunk(
             &http_origin,
             &contract_id,
             chunk_str.as_bytes().to_vec(),
             &privks[0],
             0,
-            (i + 1) as u32,
+            i + 1,
         );
-        debug!("ACK: {:?}", &ack);
+        debug!("ACK: {ack:?}");
 
-        let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some((i + 1) as u32));
+        let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some(i + 1));
         assert_eq!(data, chunk_str.as_bytes().to_vec());
 
         let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, None);
@@ -252,7 +245,7 @@ fn test_stackerdb_event_observer() {
     let (mut conf, _) = neon_integration_test_conf();
     test_observer::register(&mut conf, &[EventKeyType::StackerDBChunks]);
 
-    let privks = vec![
+    let privks = [
         // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R
         StacksPrivateKey::from_hex(
             "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001",
@@ -383,11 +376,10 @@ fn test_stackerdb_event_observer() {
     // get events, verifying that they're all for the same contract (i.e. this one)
     let stackerdb_events: Vec<_> = test_observer::get_stackerdb_chunks()
         .into_iter()
-        .map(|stackerdb_event| {
+        .flat_map(|stackerdb_event| {
             assert_eq!(stackerdb_event.contract_id, contract_id);
             stackerdb_event.modified_slots
         })
-        .flatten()
         .collect();
 
     assert_eq!(stackerdb_events.len(), 6);
@@ -396,7 +388,7 @@
         assert_eq!(i as u32, event.slot_id);
         assert_eq!(event.slot_version, 1);
 
-        let expected_data = format!("Hello chunks {}", &i);
+        let expected_data = format!("Hello chunks {i}");
         let expected_hash = Sha512Trunc256Sum::from_data(expected_data.as_bytes());
         assert_eq!(event.data, expected_data.as_bytes().to_vec());

From e19573628b31482f7fe6974c0b5e863bdbffec26 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Mon, 4 Nov 2024 11:27:45 -0500
Subject: [PATCH 906/910] feat: remove panic in DB busy handler

Instead of panicking after 5 minutes, just print an error with a
backtrace every 5 minutes. This is sufficient to detect the situation
without the need to crash the node and potentially corrupt chainstate.
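The cadence arithmetic is easy to sanity-check in isolation. Below is a minimal sketch (a hypothetical `should_report` helper, not code from this patch), assuming the handler's ~100 ms average sleep per retry:

```
// Sketch: map rusqlite's retry counter onto a "report every ~5 minutes"
// cadence, assuming each retry sleeps ~100 ms on average (as the patch does).
fn should_report(run_count: u32) -> bool {
    const AVG_SLEEP_TIME_MS: u64 = 100;
    // 5 min * 60 s/min * 1_000 ms/s / 100 ms per retry = 3_000 retries
    const ERROR_COUNT: u32 = (5 * 60 * 1_000 / AVG_SLEEP_TIME_MS) as u32;
    run_count > 0 && run_count % ERROR_COUNT == 0
}

fn main() {
    assert!(!should_report(0)); // never on the first call
    assert!(!should_report(2_999)); // still within the first ~5 minutes
    assert!(should_report(3_000)); // ~5 minutes of estimated waiting
    assert!(should_report(6_000)); // ...and every ~5 minutes after that
}
```

Because the busy handler keeps returning `true`, `rusqlite` keeps retrying: the node now logs the suspected deadlock periodically instead of aborting.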
---
 stacks-common/src/util/db.rs | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs
index 89fe4677c73..53564af597c 100644
--- a/stacks-common/src/util/db.rs
+++ b/stacks-common/src/util/db.rs
@@ -51,26 +51,25 @@ pub fn update_lock_table(conn: &Connection) {
 /// Called by `rusqlite` if we are waiting too long on a database lock
 /// If called too many times, will assume a deadlock and panic
 pub fn tx_busy_handler(run_count: i32) -> bool {
-    const TIMEOUT: Duration = Duration::from_secs(300);
     const AVG_SLEEP_TIME_MS: u64 = 100;
 
+    // Every ~5min, report an error with a backtrace
+    // 5min * 60s/min * 1_000ms/s / 100ms
+    const ERROR_COUNT: u32 = 3_000;
+
     // First, check if this is taking unreasonably long. If so, it's probably a deadlock
     let run_count = run_count.unsigned_abs();
-    let approx_time_elapsed =
-        Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count)));
-    if approx_time_elapsed > TIMEOUT {
-        error!("Deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs();
+    if run_count > 0 && run_count % ERROR_COUNT == 0 {
+        error!("Deadlock detected. Waited 5 minutes (estimated) for database lock.";
             "run_count" => run_count,
             "backtrace" => ?Backtrace::capture()
         );
         for (k, v) in LOCK_TABLE.lock().unwrap().iter() {
             error!("Database '{k}' last locked by {v}");
         }
-        panic!("Deadlock in thread {:?}", thread::current().name());
     }
 
     let mut sleep_time_ms = 2u64.saturating_pow(run_count);
-
     sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms));
 
     if sleep_time_ms > AVG_SLEEP_TIME_MS {

From 04f5c9d68e3d287dddd609b0f242ff220a55fc18 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Mon, 4 Nov 2024 11:55:18 -0500
Subject: [PATCH 907/910] chore: update changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff5fdd588b7..e7caac38fe9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
 ### Changed
 
 - Add index for StacksBlockId to nakamoto block headers table (improves node performance)
+- Remove the panic for reporting DB deadlocks (just error and continue waiting)
 
 ## [3.0.0.0.0]

From 4fc99df7f1dd7d172514aa1d4d22331578839c07 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Mon, 4 Nov 2024 14:00:21 -0500
Subject: [PATCH 908/910] fix: remove duplicate conditions

Additional minor change I noticed when reviewing #5418.

---
 testnet/stacks-node/src/nakamoto_node/relayer.rs | 6 +-----
 testnet/stacks-node/src/neon_node.rs             | 6 +-----
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs
index 441d7ecd2c9..63c931bba3f 100644
--- a/testnet/stacks-node/src/nakamoto_node/relayer.rs
+++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs
@@ -303,13 +303,9 @@ impl RelayerThread {
     /// have we waited for the right conditions under which to start mining a block off of our
     /// chain tip?
-    #[allow(clippy::nonminimal_bool)]
-    #[allow(clippy::eq_op)]
     fn has_waited_for_latest_blocks(&self) -> bool {
         // a network download pass took place
-        (self.min_network_download_passes <= self.last_network_download_passes
-        // a network inv pass took place
-        && self.min_network_download_passes <= self.last_network_download_passes)
+        self.min_network_download_passes <= self.last_network_download_passes
         // we waited long enough for a download pass, but timed out waiting
         || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms()
         // we're not supposed to wait at all
diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs
index efc64bf8e74..8eaefbe432f 100644
--- a/testnet/stacks-node/src/neon_node.rs
+++ b/testnet/stacks-node/src/neon_node.rs
@@ -2936,13 +2936,9 @@ impl RelayerThread {
     /// have we waited for the right conditions under which to start mining a block off of our
     /// chain tip?
-    #[allow(clippy::nonminimal_bool)]
-    #[allow(clippy::eq_op)]
     pub fn has_waited_for_latest_blocks(&self) -> bool {
         // a network download pass took place
-        (self.min_network_download_passes <= self.last_network_download_passes
-        // a network inv pass took place
-        && self.min_network_download_passes <= self.last_network_download_passes)
+        self.min_network_download_passes <= self.last_network_download_passes
         // we waited long enough for a download pass, but timed out waiting
         || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms()
         // we're not supposed to wait at all

From bedef8f45420a8d497b8ef77b73b3ed0e4ad22e2 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Mon, 4 Nov 2024 15:43:37 -0500
Subject: [PATCH 909/910] chore: remove irrefutable if let

This causes a warning in the latest versions of Rust.
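The warning comes from pattern irrefutability: `String`'s `FromStr` implementation uses `Infallible` as its error type, and newer toolchains treat a pattern that only excludes uninhabited variants as irrefutable. A standalone illustration (hypothetical `demo` function, not from this patch):

```
// `str::parse::<String>()` returns Result<String, Infallible>. Since the
// Err variant can never be constructed, the `Ok(..)` pattern always matches,
// and recent rustc versions warn: "irrefutable `if let` pattern".
fn demo(value: &str) {
    if let Ok(s) = value.parse::<String>() {
        println!("parsed: {s}");
    }
}

fn main() {
    demo("1,2,3");
}
```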
---
 stackslib/src/net/api/getattachmentsinv.rs | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs
index 2ea73baf047..5f7dcc0cf8d 100644
--- a/stackslib/src/net/api/getattachmentsinv.rs
+++ b/stackslib/src/net/api/getattachmentsinv.rs
@@ -96,11 +96,13 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler {
             if key == "index_block_hash" {
                 index_block_hash = StacksBlockId::from_hex(&value).ok();
             } else if key == "pages_indexes" {
-                if let Ok(pages_indexes_value) = value.parse::<String>() {
-                    for entry in pages_indexes_value.split(',') {
-                        if let Ok(page_index) = entry.parse::<u32>() {
-                            page_indexes.insert(page_index);
-                        }
+                #[allow(clippy::expect_used)]
+                let pages_indexes_value = value
+                    .parse::<String>()
+                    .expect("parse from Cow is always safe");
+                for entry in pages_indexes_value.split(',') {
+                    if let Ok(page_index) = entry.parse::<u32>() {
+                        page_indexes.insert(page_index);
                     }
                 }
             }

From e234d37eab1d0c7b10f0430cea9f70a604d5de07 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Mon, 4 Nov 2024 15:54:03 -0500
Subject: [PATCH 910/910] chore: better fix to the irrefutable if let warning

---
 stackslib/src/net/api/getattachmentsinv.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs
index 5f7dcc0cf8d..b7fe94baf1d 100644
--- a/stackslib/src/net/api/getattachmentsinv.rs
+++ b/stackslib/src/net/api/getattachmentsinv.rs
@@ -96,10 +96,7 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler {
             if key == "index_block_hash" {
                 index_block_hash = StacksBlockId::from_hex(&value).ok();
             } else if key == "pages_indexes" {
-                #[allow(clippy::expect_used)]
-                let pages_indexes_value = value
-                    .parse::<String>()
-                    .expect("parse from Cow is always safe");
+                let pages_indexes_value = value.to_string();
                 for entry in pages_indexes_value.split(',') {
                     if let Ok(page_index) = entry.parse::<u32>() {
                         page_indexes.insert(page_index);
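The final form sidesteps `FromStr` entirely: converting the decoded value with `to_string()` cannot fail, so there is no `Result` to pattern-match and nothing for the lint to flag. A self-contained sketch of the resulting parsing shape (hypothetical function and names, assuming the handler collects `u32` page indexes from a `Cow<str>` query value):

```
use std::borrow::Cow;
use std::collections::HashSet;

// Sketch: collect the parseable entries of a comma-separated query value,
// mirroring the patched handler; unparseable entries are skipped.
fn parse_page_indexes(value: &Cow<'_, str>) -> HashSet<u32> {
    let mut page_indexes = HashSet::new();
    let pages_indexes_value = value.to_string(); // infallible conversion
    for entry in pages_indexes_value.split(',') {
        if let Ok(page_index) = entry.parse::<u32>() {
            page_indexes.insert(page_index);
        }
    }
    page_indexes
}

fn main() {
    let value: Cow<'_, str> = Cow::Borrowed("1,2,oops,4");
    assert_eq!(parse_page_indexes(&value).len(), 3);
}
```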