diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index 919ebe5056..26fde85f0c 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -135,6 +135,7 @@ jobs:
           - tests::signer::v0::tenure_extend_after_bad_commit
           - tests::signer::v0::block_proposal_max_age_rejections
           - tests::signer::v0::global_acceptance_depends_on_block_announcement
+          - tests::signer::v0::no_reorg_due_to_successive_block_validation_ok
          - tests::nakamoto_integrations::burn_ops_integration_test
          - tests::nakamoto_integrations::check_block_heights
          - tests::nakamoto_integrations::clarity_burn_state
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e34ade9a33..0a0f14bec2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,10 +8,12 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE

 ## [Unreleased]

 ### Added
+
 - Add `tenure_timeout_secs` to the miner for determining when a time-based tenure extend should be attempted.

 ### Changed
+- When a transaction is dropped due to replace-by-fee, the `/drop_mempool_tx` event observer payload now includes `new_txid`, which is the transaction that replaced this dropped transaction. When a transaction is dropped for other reasons, `new_txid` is `null`. [#5381](https://github.com/stacks-network/stacks-core/pull/5381)
 - Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502)

 ## [3.1.0.0.2]
diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs
index 4d27f0e9d1..5f716cea2f 100644
--- a/libsigner/src/v0/messages.rs
+++ b/libsigner/src/v0/messages.rs
@@ -686,6 +686,22 @@ impl BlockResponse {
             BlockResponse::Rejected(rejection) => rejection.signer_signature_hash,
         }
     }
+
+    /// Get the block acceptance data from the block response
+    pub fn as_block_accepted(&self) -> Option<&BlockAccepted> {
+        match self {
+            BlockResponse::Accepted(accepted) => Some(accepted),
+            _ => None,
+        }
+    }
+
+    /// Get the block rejection data from the block response
+    pub fn as_block_rejection(&self) -> Option<&BlockRejection> {
+        match self {
+            BlockResponse::Rejected(rejection) => Some(rejection),
+            _ => None,
+        }
+    }
 }

 impl StacksMessageCodec for BlockResponse {
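The two accessors above exist so that callers can borrow a `BlockResponse`'s payload without matching on the enum at every call site. A minimal usage sketch (the `log_response` helper is hypothetical; the field names come from `BlockAccepted`/`BlockRejection` in this module):

```rust
use libsigner::v0::messages::BlockResponse;

/// Hypothetical helper: log whichever variant a response holds.
fn log_response(response: &BlockResponse) {
    if let Some(accepted) = response.as_block_accepted() {
        println!("accepted: {}", accepted.signer_signature_hash);
    } else if let Some(rejection) = response.as_block_rejection() {
        println!("rejected: {:?}", rejection.reason_code);
    }
}
```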
diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs
index 462f3dc2d2..dbf03c1f91 100644
--- a/stacks-signer/src/chainstate.rs
+++ b/stacks-signer/src/chainstate.rs
@@ -505,7 +505,7 @@ impl SortitionsView {
     /// Get the last block from the given tenure
     /// Returns the last locally accepted block if it is not timed out, otherwise it will return the last globally accepted block.
-    fn get_tenure_last_block_info(
+    pub fn get_tenure_last_block_info(
         consensus_hash: &ConsensusHash,
         signer_db: &SignerDb,
         tenure_last_block_proposal_timeout: Duration,
@@ -517,7 +517,7 @@ impl SortitionsView {

         if let Some(local_info) = last_locally_accepted_block {
             if let Some(signed_over_time) = local_info.signed_self {
-                if signed_over_time + tenure_last_block_proposal_timeout.as_secs()
+                if signed_over_time.saturating_add(tenure_last_block_proposal_timeout.as_secs())
                     > get_epoch_time_secs()
                 {
                     // The last locally accepted block is not timed out, return it
diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs
index 4cdc61471a..0da2c1adcc 100644
--- a/stacks-signer/src/signerdb.rs
+++ b/stacks-signer/src/signerdb.rs
@@ -733,6 +733,18 @@ impl SignerDb {
         try_deserialize(result)
     }

+    /// Return the last block accepted by the signer (highest Stacks height).
+    /// Ties are broken by whichever block was signed more recently.
+    pub fn get_signer_last_accepted_block(&self) -> Result<Option<BlockInfo>, DBError> {
+        let query = "SELECT block_info FROM blocks WHERE state IN (?1, ?2) ORDER BY stacks_height DESC, signed_group DESC, signed_self DESC LIMIT 1";
+        let args = params![
+            &BlockState::GloballyAccepted.to_string(),
+            &BlockState::LocallyAccepted.to_string()
+        ];
+        let result: Option<String> = query_row(&self.db, query, args)?;
+
+        try_deserialize(result)
+    }
+
     /// Return the last accepted block in a tenure (identified by its consensus hash).
     pub fn get_last_accepted_block(
         &self,
@@ -1757,4 +1769,69 @@ mod tests {
                 < block_infos[0].proposed_time
         );
     }
+
+    #[test]
+    fn signer_last_accepted_block() {
+        let db_path = tmp_db_path();
+        let mut db = SignerDb::new(db_path).expect("Failed to create signer db");
+
+        let (mut block_info_1, _block_proposal_1) = create_block_override(|b| {
+            b.block.header.miner_signature = MessageSignature([0x01; 65]);
+            b.block.header.chain_length = 1;
+            b.burn_height = 1;
+        });
+
+        let (mut block_info_2, _block_proposal_2) = create_block_override(|b| {
+            b.block.header.miner_signature = MessageSignature([0x02; 65]);
+            b.block.header.chain_length = 2;
+            b.burn_height = 1;
+        });
+
+        let (mut block_info_3, _block_proposal_3) = create_block_override(|b| {
+            b.block.header.miner_signature = MessageSignature([0x02; 65]);
+            b.block.header.chain_length = 2;
+            b.burn_height = 4;
+        });
+        block_info_3
+            .mark_locally_accepted(false)
+            .expect("Failed to mark block as locally accepted");
+
+        db.insert_block(&block_info_1)
+            .expect("Unable to insert block into db");
+        db.insert_block(&block_info_2)
+            .expect("Unable to insert block into db");
+
+        assert!(db.get_signer_last_accepted_block().unwrap().is_none());
+
+        block_info_1
+            .mark_globally_accepted()
+            .expect("Failed to mark block as globally accepted");
+        db.insert_block(&block_info_1)
+            .expect("Unable to insert block into db");
+
+        assert_eq!(
+            db.get_signer_last_accepted_block().unwrap().unwrap(),
+            block_info_1
+        );
+
+        block_info_2
+            .mark_globally_accepted()
+            .expect("Failed to mark block as globally accepted");
+        block_info_2.signed_self = Some(get_epoch_time_secs());
+        db.insert_block(&block_info_2)
+            .expect("Unable to insert block into db");
+
+        assert_eq!(
+            db.get_signer_last_accepted_block().unwrap().unwrap(),
+            block_info_2
+        );
+
+        db.insert_block(&block_info_3)
+            .expect("Unable to insert block into db");
+
+        assert_eq!(
+            db.get_signer_last_accepted_block().unwrap().unwrap(),
+            block_info_3
+        );
+    }
 }
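The `ORDER BY` clause carries the semantics here: highest Stacks height wins, and ties fall to the block with the most recent global, then local, signing time. A standalone sketch of the same tie-break in plain Rust (hypothetical tuples standing in for `(stacks_height, signed_group, signed_self)`; note `None < Some(_)` for `Option<u64>`, which roughly matches SQLite sorting NULLs lowest):

```rust
fn main() {
    // (stacks_height, signed_group, signed_self) per candidate block.
    let mut candidates: Vec<(u64, Option<u64>, Option<u64>)> = vec![
        (2, Some(100), None),     // globally accepted earlier
        (2, Some(200), Some(50)), // globally accepted later -> wins the tie
        (1, None, Some(300)),     // lower height loses regardless of recency
    ];
    // Mirror ORDER BY stacks_height DESC, signed_group DESC, signed_self DESC LIMIT 1.
    candidates.sort();
    assert_eq!(*candidates.last().unwrap(), (2, Some(200), Some(50)));
}
```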
diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs
index 2037a25def..92b7a6ed53 100644
--- a/stacks-signer/src/tests/chainstate.rs
+++ b/stacks-signer/src/tests/chainstate.rs
@@ -452,8 +452,12 @@ fn check_sortition_timeout() {
     fs::create_dir_all(signer_db_dir).unwrap();
     let mut signer_db = SignerDb::new(signer_db_path).unwrap();

+    let block_sk = StacksPrivateKey::from_seed(&[0, 1]);
+    let block_pk = StacksPublicKey::from_private(&block_sk);
+    let block_pkh = Hash160::from_node_public_key(&block_pk);
+
     let mut sortition = SortitionState {
-        miner_pkh: Hash160([0; 20]),
+        miner_pkh: block_pkh,
         miner_pubkey: None,
         prior_sortition: ConsensusHash([0; 20]),
         parent_tenure_id: ConsensusHash([0; 20]),
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 476e2a5898..af5572c070 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -298,36 +298,113 @@ impl Signer {
         let valid = block_info.valid?;
         let response = if valid {
             debug!("{self}: Accepting block {}", block_info.block.block_id());
-            let signature = self
-                .private_key
-                .sign(block_info.signer_signature_hash().bits())
-                .expect("Failed to sign block");
-            BlockResponse::accepted(
-                block_info.signer_signature_hash(),
-                signature,
-                self.signer_db.calculate_tenure_extend_timestamp(
-                    self.proposal_config.tenure_idle_timeout,
-                    &block_info.block,
-                    true,
-                ),
-            )
+            self.create_block_acceptance(&block_info.block)
         } else {
             debug!("{self}: Rejecting block {}", block_info.block.block_id());
-            BlockResponse::rejected(
-                block_info.signer_signature_hash(),
-                RejectCode::RejectedInPriorRound,
-                &self.private_key,
-                self.mainnet,
-                self.signer_db.calculate_tenure_extend_timestamp(
-                    self.proposal_config.tenure_idle_timeout,
-                    &block_info.block,
-                    false,
-                ),
-            )
+            self.create_block_rejection(RejectCode::RejectedInPriorRound, &block_info.block)
         };
         Some(response)
     }

+    /// Create a block acceptance response for a block
+    pub fn create_block_acceptance(&self, block: &NakamotoBlock) -> BlockResponse {
+        let signature = self
+            .private_key
+            .sign(block.header.signer_signature_hash().bits())
+            .expect("Failed to sign block");
+        BlockResponse::accepted(
+            block.header.signer_signature_hash(),
+            signature,
+            self.signer_db.calculate_tenure_extend_timestamp(
+                self.proposal_config.tenure_idle_timeout,
+                block,
+                true,
+            ),
+        )
+    }
+
+    /// Create a block rejection response for a block with the given reject code
+    pub fn create_block_rejection(
+        &self,
+        reject_code: RejectCode,
+        block: &NakamotoBlock,
+    ) -> BlockResponse {
+        BlockResponse::rejected(
+            block.header.signer_signature_hash(),
+            reject_code,
+            &self.private_key,
+            self.mainnet,
+            self.signer_db.calculate_tenure_extend_timestamp(
+                self.proposal_config.tenure_idle_timeout,
+                block,
+                false,
+            ),
+        )
+    }
+
+    /// Check if the block should be rejected based on sortition state.
+    /// Returns a block rejection response if the block is invalid, `None` otherwise.
+    fn check_block_against_sortition_state(
+        &mut self,
+        stacks_client: &StacksClient,
+        sortition_state: &mut Option<SortitionsView>,
+        block: &NakamotoBlock,
+        miner_pubkey: &Secp256k1PublicKey,
+    ) -> Option<BlockResponse> {
+        let signer_signature_hash = block.header.signer_signature_hash();
+        let block_id = block.block_id();
+        // Get sortition view if we don't have it
+        if sortition_state.is_none() {
+            *sortition_state =
+                SortitionsView::fetch_view(self.proposal_config.clone(), stacks_client)
+                    .inspect_err(|e| {
+                        warn!(
+                            "{self}: Failed to update sortition view: {e:?}";
+                            "signer_sighash" => %signer_signature_hash,
+                            "block_id" => %block_id,
+                        )
+                    })
+                    .ok();
+        }
+
+        // Check if proposal can be rejected now if not valid against sortition view
+        if let Some(sortition_state) = sortition_state {
+            match sortition_state.check_proposal(
+                stacks_client,
+                &mut self.signer_db,
+                block,
+                miner_pubkey,
+                true,
+            ) {
+                // Error validating block
+                Err(e) => {
+                    warn!(
+                        "{self}: Error checking block proposal: {e:?}";
+                        "signer_sighash" => %signer_signature_hash,
+                        "block_id" => %block_id,
+                    );
+                    Some(self.create_block_rejection(RejectCode::ConnectivityIssues, block))
+                }
+                // Block proposal is bad
+                Ok(false) => {
+                    warn!(
+                        "{self}: Block proposal invalid";
+                        "signer_sighash" => %signer_signature_hash,
+                        "block_id" => %block_id,
+                    );
+                    Some(self.create_block_rejection(RejectCode::SortitionViewMismatch, block))
+                }
+                // Block proposal passed check, still don't know if valid
+                Ok(true) => None,
+            }
+        } else {
+            warn!(
+                "{self}: Cannot validate block, no sortition view";
+                "signer_sighash" => %signer_signature_hash,
+                "block_id" => %block_id,
+            );
+            Some(self.create_block_rejection(RejectCode::NoSortitionView, block))
+        }
+    }
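The helper collapses to a small decision table, summarized in this standalone sketch (the `CheckOutcome` enum is a stand-in for the real `check_proposal` result, not an API in the codebase):

```rust
/// Stand-in for how a sortition-view check can land.
enum CheckOutcome {
    NoView,      // no sortition view could be fetched
    Error,       // check_proposal returned Err(_)
    Invalid,     // check_proposal returned Ok(false)
    PassedSoFar, // check_proposal returned Ok(true)
}

/// Reject code the signer would broadcast for each outcome, if any.
fn reject_code_for(outcome: CheckOutcome) -> Option<&'static str> {
    match outcome {
        CheckOutcome::NoView => Some("NoSortitionView"),
        CheckOutcome::Error => Some("ConnectivityIssues"),
        CheckOutcome::Invalid => Some("SortitionViewMismatch"),
        CheckOutcome::PassedSoFar => None, // still needs full validation
    }
}
```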
+
     /// Handle block proposal messages submitted to signers stackerdb
     fn handle_block_proposal(
         &mut self,
@@ -425,73 +502,12 @@ impl Signer {
         }

         // Check if proposal can be rejected now if not valid against sortition view
-        let block_response = if let Some(sortition_state) = sortition_state {
-            match sortition_state.check_proposal(
-                stacks_client,
-                &mut self.signer_db,
-                &block_proposal.block,
-                miner_pubkey,
-                true,
-            ) {
-                // Error validating block
-                Err(e) => {
-                    warn!(
-                        "{self}: Error checking block proposal: {e:?}";
-                        "signer_sighash" => %signer_signature_hash,
-                        "block_id" => %block_proposal.block.block_id(),
-                    );
-                    Some(BlockResponse::rejected(
-                        block_proposal.block.header.signer_signature_hash(),
-                        RejectCode::ConnectivityIssues,
-                        &self.private_key,
-                        self.mainnet,
-                        self.signer_db.calculate_tenure_extend_timestamp(
-                            self.proposal_config.tenure_idle_timeout,
-                            &block_proposal.block,
-                            false,
-                        ),
-                    ))
-                }
-                // Block proposal is bad
-                Ok(false) => {
-                    warn!(
-                        "{self}: Block proposal invalid";
-                        "signer_sighash" => %signer_signature_hash,
-                        "block_id" => %block_proposal.block.block_id(),
-                    );
-                    Some(BlockResponse::rejected(
-                        block_proposal.block.header.signer_signature_hash(),
-                        RejectCode::SortitionViewMismatch,
-                        &self.private_key,
-                        self.mainnet,
-                        self.signer_db.calculate_tenure_extend_timestamp(
-                            self.proposal_config.tenure_idle_timeout,
-                            &block_proposal.block,
-                            false,
-                        ),
-                    ))
-                }
-                // Block proposal passed check, still don't know if valid
-                Ok(true) => None,
-            }
-        } else {
-            warn!(
-                "{self}: Cannot validate block, no sortition view";
-                "signer_sighash" => %signer_signature_hash,
-                "block_id" => %block_proposal.block.block_id(),
-            );
-            Some(BlockResponse::rejected(
-                block_proposal.block.header.signer_signature_hash(),
-                RejectCode::NoSortitionView,
-                &self.private_key,
-                self.mainnet,
-                self.signer_db.calculate_tenure_extend_timestamp(
-                    self.proposal_config.tenure_idle_timeout,
-                    &block_proposal.block,
-                    false,
-                ),
-            ))
-        };
+        let block_response = self.check_block_against_sortition_state(
+            stacks_client,
+            sortition_state,
+            &block_proposal.block,
+            miner_pubkey,
+        );

         #[cfg(any(test, feature = "testing"))]
         let block_response =
@@ -524,6 +540,8 @@ impl Signer {
             "block_height" => block_proposal.block.header.chain_length,
             "burn_height" => block_proposal.burn_height,
         );
+        #[cfg(any(test, feature = "testing"))]
+        self.test_stall_block_validation_submission();
         match stacks_client.submit_block_for_validation(block_info.block.clone()) {
             Ok(_) => {
                 self.submitted_block_proposal =
@@ -563,6 +581,63 @@ impl Signer {
             }
         }
     }
+
+    /// WARNING: Do NOT call this function until `check_proposal` or block proposal validation has succeeded.
+    ///
+    /// Re-verify a block's chain length against the last signed block within signerdb.
+    /// This is required in case a block has been approved since the initial checks of the block validation endpoint.
+    fn check_block_against_signer_db_state(
+        &self,
+        proposed_block: &NakamotoBlock,
+    ) -> Option<BlockResponse> {
+        let signer_signature_hash = proposed_block.header.signer_signature_hash();
+        let proposed_block_consensus_hash = proposed_block.header.consensus_hash;
+
+        match self.signer_db.get_signer_last_accepted_block() {
+            Ok(Some(last_block_info)) => {
+                if proposed_block.header.chain_length <= last_block_info.block.header.chain_length {
+                    // We do not allow reorgs at any time within the same consensus hash OR of globally accepted blocks
+                    let non_reorgable_block = last_block_info.block.header.consensus_hash
+                        == proposed_block_consensus_hash
+                        || last_block_info.state == BlockState::GloballyAccepted;
+                    // Is the reorg timeout requirement exceeded?
+                    let reorg_timeout_exceeded = last_block_info
+                        .signed_self
+                        .map(|signed_over_time| {
+                            signed_over_time.saturating_add(
+                                self.proposal_config
+                                    .tenure_last_block_proposal_timeout
+                                    .as_secs(),
+                            ) <= get_epoch_time_secs()
+                        })
+                        .unwrap_or(false);
+                    if non_reorgable_block || !reorg_timeout_exceeded {
+                        warn!(
+                            "Miner's block proposal does not confirm as many blocks as we expect";
+                            "proposed_block_consensus_hash" => %proposed_block_consensus_hash,
+                            "proposed_block_signer_sighash" => %signer_signature_hash,
+                            "proposed_chain_length" => proposed_block.header.chain_length,
+                            "expected_at_least" => last_block_info.block.header.chain_length + 1,
+                        );
+                        return Some(self.create_block_rejection(
+                            RejectCode::SortitionViewMismatch,
+                            proposed_block,
+                        ));
+                    }
+                }
+                None
+            }
+            Ok(_) => None,
+            Err(e) => {
+                warn!("{self}: Failed to check block against signer db: {e}";
+                    "signer_sighash" => %signer_signature_hash,
+                    "block_id" => %proposed_block.block_id()
+                );
+                Some(self.create_block_rejection(RejectCode::ConnectivityIssues, proposed_block))
+            }
+        }
+    }
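The timeout arm is the only path that tolerates a lower-or-equal chain length, so the arithmetic is worth spelling out. A standalone sketch with hypothetical timestamps (real values come from `signed_self` and `tenure_last_block_proposal_timeout`):

```rust
use std::time::Duration;

fn main() {
    // Hypothetical: last accepted block signed at t=1000, 30s timeout.
    let signed_over_time: u64 = 1_000;
    let timeout = Duration::from_secs(30);
    let deadline = signed_over_time.saturating_add(timeout.as_secs());

    // At t=1029 the last signed block is still protected: reject the reorg.
    assert!(!(deadline <= 1_029));
    // At t=1030 the timeout has elapsed; a locally accepted block from a
    // different tenure may now be reorged without a rejection.
    assert!(deadline <= 1_030);
}
```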
+
     /// Handle the block validate ok response. Returns our block response if we have one
     fn handle_block_validate_ok(
         &mut self,
@@ -608,40 +683,54 @@ impl Signer {
                 return None;
             }
         };
-        if let Err(e) = block_info.mark_locally_accepted(false) {
-            if !block_info.has_reached_consensus() {
-                warn!("{self}: Failed to mark block as locally accepted: {e:?}",);
-                return None;
+
+        if let Some(block_response) = self.check_block_against_signer_db_state(&block_info.block) {
+            // The signer db state has changed. We no longer view this block as valid. Override the validation response.
+            if let Err(e) = block_info.mark_locally_rejected() {
+                if !block_info.has_reached_consensus() {
+                    warn!("{self}: Failed to mark block as locally rejected: {e:?}");
+                }
+            };
+            debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}");
+            let res = self
+                .stackerdb
+                .send_message_with_retry::<SignerMessage>(block_response.into());
+
+            match res {
+                Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"),
+                Ok(ack) if !ack.accepted => warn!(
+                    "{self}: Block rejection not accepted by stacker-db: {:?}",
+                    ack.reason
+                ),
+                Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"),
            }
-            block_info.signed_self.get_or_insert(get_epoch_time_secs());
-        }
-        // Record the block validation time but do not consider stx transfers or boot contract calls
-        block_info.validation_time_ms = if block_validate_ok.cost.is_zero() {
-            Some(0)
+            self.signer_db
+                .insert_block(&block_info)
+                .unwrap_or_else(|e| self.handle_insert_block_error(e));
+            None
         } else {
-            Some(block_validate_ok.validation_time_ms)
-        };
-
-        let signature = self
-            .private_key
-            .sign(&signer_signature_hash.0)
-            .expect("Failed to sign block");
+            if let Err(e) = block_info.mark_locally_accepted(false) {
+                if !block_info.has_reached_consensus() {
+                    warn!("{self}: Failed to mark block as locally accepted: {e:?}",);
+                    return None;
+                }
+                block_info.signed_self.get_or_insert(get_epoch_time_secs());
+            }
+            // Record the block validation time but do not consider stx transfers or boot contract calls
+            block_info.validation_time_ms = if block_validate_ok.cost.is_zero() {
+                Some(0)
+            } else {
+                Some(block_validate_ok.validation_time_ms)
+            };

-        self.signer_db
-            .insert_block(&block_info)
-            .unwrap_or_else(|e| self.handle_insert_block_error(e));
-        let accepted = BlockAccepted::new(
-            block_info.signer_signature_hash(),
-            signature,
-            self.signer_db.calculate_tenure_extend_timestamp(
-                self.proposal_config.tenure_idle_timeout,
-                &block_info.block,
-                true,
-            ),
-        );
-        // have to save the signature _after_ the block info
-        self.handle_block_signature(stacks_client, &accepted);
-        Some(BlockResponse::Accepted(accepted))
+            self.signer_db
+                .insert_block(&block_info)
+                .unwrap_or_else(|e| self.handle_insert_block_error(e));
+            let block_response = self.create_block_acceptance(&block_info.block);
+            // have to save the signature _after_ the block info
+            self.handle_block_signature(stacks_client, block_response.as_block_accepted()?);
+            Some(block_response)
+        }
     }
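This override exists because validation is asynchronous: between submitting a proposal to the node and receiving the validate-ok event, the signer may have accepted a competing block at the same height, and signing both would assist a reorg. A compressed sketch of the decision (simplified; the real check also exempts timed-out, non-globally-accepted blocks from other tenures):

```rust
/// Hypothetical reduction of the re-check at validate-ok time.
fn should_override(proposed_height: u64, last_accepted_height: u64) -> bool {
    proposed_height <= last_accepted_height
}

fn main() {
    assert!(should_override(10, 10)); // same height -> reject, do not sign
    assert!(!should_override(11, 10)); // builds on top -> accept
}
```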

     /// Handle the block validate reject response. Returns our block response if we have one
@@ -791,19 +880,12 @@ impl Signer {
             "signer_sighash" => %signature_sighash,
             "block_id" => %block_proposal.block.block_id(),
         );
-        let rejection = BlockResponse::rejected(
-            block_proposal.block.header.signer_signature_hash(),
-            RejectCode::ConnectivityIssues,
-            &self.private_key,
-            self.mainnet,
-            self.signer_db.calculate_tenure_extend_timestamp(
-                self.proposal_config.tenure_idle_timeout,
-                &block_proposal.block,
-                false,
-            ),
-        );
+        let rejection =
+            self.create_block_rejection(RejectCode::ConnectivityIssues, &block_proposal.block);
         if let Err(e) = block_info.mark_locally_rejected() {
-            warn!("{self}: Failed to mark block as locally rejected: {e:?}",);
+            if !block_info.has_reached_consensus() {
+                warn!("{self}: Failed to mark block as locally rejected: {e:?}");
+            }
         };
         debug!("{self}: Broadcasting a block response to stacks node: {rejection:?}");
         let res = self
diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs
index 0b9cdcc569..b9ea43fae5 100644
--- a/stacks-signer/src/v0/tests.rs
+++ b/stacks-signer/src/v0/tests.rs
@@ -41,6 +41,10 @@ pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock<TestFlag<bool>> = LazyLock::new(
 /// A global variable that can be used to skip broadcasting the block to the network
 pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock<TestFlag<bool>> = LazyLock::new(TestFlag::default);

+/// A global variable that can be used to pause the block validation submission
+pub static TEST_STALL_BLOCK_VALIDATION_SUBMISSION: LazyLock<TestFlag<bool>> =
+    LazyLock::new(TestFlag::default);
+
 impl Signer {
     /// Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set
     pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool {
@@ -81,7 +85,9 @@ impl Signer {
                 "consensus_hash" => %block_proposal.block.header.consensus_hash
             );
             if let Err(e) = block_info.mark_locally_rejected() {
-                warn!("{self}: Failed to mark block as locally rejected: {e:?}",);
+                if !block_info.has_reached_consensus() {
+                    warn!("{self}: Failed to mark block as locally rejected: {e:?}");
+                }
             };
             // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject
             // as invalid since we rejected in a prior round if this crops up again)
@@ -89,17 +95,7 @@ impl Signer {
             self.signer_db
                 .insert_block(block_info)
                 .unwrap_or_else(|e| self.handle_insert_block_error(e));
-            Some(BlockResponse::rejected(
-                block_proposal.block.header.signer_signature_hash(),
-                RejectCode::TestingDirective,
-                &self.private_key,
-                self.mainnet,
-                self.signer_db.calculate_tenure_extend_timestamp(
-                    self.proposal_config.tenure_idle_timeout,
-                    &block_proposal.block,
-                    false,
-                ),
-            ))
+            Some(self.create_block_rejection(RejectCode::TestingDirective, &block_proposal.block))
         } else {
             block_response
         }
@@ -138,4 +134,16 @@ impl Signer {
         }
         false
     }
+
+    /// Stall the block validation submission if the TEST_STALL_BLOCK_VALIDATION_SUBMISSION flag is set
+    pub fn test_stall_block_validation_submission(&self) {
+        if TEST_STALL_BLOCK_VALIDATION_SUBMISSION.get() {
+            // Do an extra check just so we don't log EVERY time.
+            warn!("{self}: Block validation submission is stalled due to testing directive");
+            while TEST_STALL_BLOCK_VALIDATION_SUBMISSION.get() {
+                std::thread::sleep(std::time::Duration::from_millis(10));
+            }
+            warn!("{self}: Block validation submission is no longer stalled due to testing directive. Continuing...");
+        }
+    }
 }
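A sketch of how an integration test might drive the new flag to widen the race window between proposal submission and validation (assumes `TestFlag::set`, the setter used with the other `TEST_*` flags; orchestration details elided):

```rust
use stacks_signer::v0::tests::TEST_STALL_BLOCK_VALIDATION_SUBMISSION;

// Hold every signer just before it submits a proposal for validation...
TEST_STALL_BLOCK_VALIDATION_SUBMISSION.set(true);
// ...let a competing block at the same height be proposed and accepted...
// (test-specific mining/signing steps elided)
// ...then release the stalled submissions and assert that no reorg occurs.
TEST_STALL_BLOCK_VALIDATION_SUBMISSION.set(false);
```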
diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml
index 70e82105a0..cf0ae6c1f8 100644
--- a/stackslib/Cargo.toml
+++ b/stackslib/Cargo.toml
@@ -12,7 +12,7 @@ keywords = [ "stacks", "stx", "bitcoin", "crypto", "blockstack", "decentralized"
 readme = "README.md"
 resolver = "2"
 edition = "2021"
-rust-version = "1.61"
+rust-version = "1.80"

 [lib]
 name = "blockstack_lib"
diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs
index aadec8a519..afb80b2f47 100644
--- a/stackslib/src/blockstack_cli.rs
+++ b/stackslib/src/blockstack_cli.rs
@@ -375,7 +375,7 @@ fn handle_contract_publish(
 ) -> Result<String, CliError> {
     let mut args = args_slice.to_vec();

-    if args.len() >= 1 && args[0] == "-h" {
+    if !args.is_empty() && args[0] == "-h" {
         return Err(CliError::Message(format!("USAGE:\n {}", PUBLISH_USAGE)));
     }
     if args.len() != 5 {
@@ -433,7 +433,7 @@ fn handle_contract_call(
     clarity_version: ClarityVersion,
 ) -> Result<String, CliError> {
     let mut args = args_slice.to_vec();
-    if args.len() >= 1 && args[0] == "-h" {
+    if !args.is_empty() && args[0] == "-h" {
         return Err(CliError::Message(format!("USAGE:\n {}", CALL_USAGE)));
     }
     if args.len() < 6 {
@@ -518,7 +518,7 @@ fn handle_token_transfer(
     chain_id: u32,
 ) -> Result<String, CliError> {
     let mut args = args_slice.to_vec();
-    if args.len() >= 1 && args[0] == "-h" {
+    if !args.is_empty() && args[0] == "-h" {
         return Err(CliError::Message(format!(
             "USAGE:\n {}",
             TOKEN_TRANSFER_USAGE
@@ -575,7 +575,7 @@ fn handle_token_transfer(
 }

 fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result<String, CliError> {
-    if args.len() >= 1 && args[0] == "-h" {
+    if !args.is_empty() && args[0] == "-h" {
         return Err(CliError::Message(format!("USAGE:\n {}", GENERATE_USAGE)));
     }

@@ -606,7 +606,7 @@ fn generate_secret_key(args: &[String], version: TransactionVersion) -> Result<
 }

 fn get_addresses(args: &[String], version: TransactionVersion) -> Result<String, CliError> {
-    if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 {
+    if (!args.is_empty() && args[0] == "-h") || args.len() != 1 {
         return Err(CliError::Message(format!("USAGE:\n {}", ADDRESSES_USAGE)));
     }

@@ -645,7 +645,7 @@ fn get_addresses(args: &[String], version: TransactionVersion) -> Result<String
 }

 fn decode_transaction(args: &[String], _version: TransactionVersion) -> Result<String, CliError> {
-    if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 {
+    if (!args.is_empty() && args[0] == "-h") || args.len() != 1 {
         return Err(CliError::Message(format!(
             "Usage: {}\n",
             DECODE_TRANSACTION_USAGE
@@ -683,7 +683,7 @@ fn decode_transaction(args: &[String], _version: TransactionVersion) -> Result<
 }

 fn decode_header(args: &[String], _version: TransactionVersion) -> Result<String, CliError> {
-    if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 {
+    if (!args.is_empty() && args[0] == "-h") || args.len() != 1 {
         return Err(CliError::Message(format!(
             "Usage: {}\n",
             DECODE_HEADER_USAGE
@@ -722,7 +722,7 @@ fn decode_header(args: &[String], _version: TransactionVersion) -> Result<Strin
 }

 fn decode_block(args: &[String], _version: TransactionVersion) -> Result<String, CliError> {
-    if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 {
+    if (!args.is_empty() && args[0] == "-h") || args.len() != 1 {
         return Err(CliError::Message(format!(
             "Usage: {}\n",
             DECODE_BLOCK_USAGE
@@ -759,7 +759,7 @@ fn decode_block(args: &[String], _version: TransactionVersion) -> Result<String
 }

 fn decode_microblock(args: &[String], _version: TransactionVersion) -> Result<String, CliError> {
-    if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 {
+    if (!args.is_empty() && args[0] == "-h") || args.len() != 1 {
         return Err(CliError::Message(format!(
             "Usage: {}\n",
             DECODE_MICROBLOCK_USAGE
@@ -798,7 +798,7 @@ fn decode_microblock(args: &[String], _version: TransactionVersion) -> Result<S
 }

 fn decode_microblocks(args: &[String], _version: TransactionVersion) -> Result<String, CliError> {
-    if (args.len() >= 1 && args[0] == "-h") || args.len() != 1 {
+    if (!args.is_empty() && args[0] == "-h") || args.len() != 1 {
         return Err(CliError::Message(format!(
             "Usage: {}\n",
             DECODE_MICROBLOCKS_USAGE
diff --git a/stackslib/src/burnchains/affirmation.rs b/stackslib/src/burnchains/affirmation.rs
index fc7398c9ff..88ad745800 100644
--- a/stackslib/src/burnchains/affirmation.rs
+++ b/stackslib/src/burnchains/affirmation.rs
@@ -378,6 +378,10 @@ impl AffirmationMap {
         self.affirmations.len()
     }

+    pub fn is_empty(&self) -> bool {
+        self.affirmations.is_empty()
+    }
+
     pub fn as_slice(&self) -> &[AffirmationMapEntry] {
         &self.affirmations
     }
@@ -876,7 +880,7 @@ fn inner_find_heaviest_block_commit_ptr(
     test_debug!("ancestors = {:?}", &ancestors);
     test_debug!("ancestor_confirmations = {:?}", &ancestor_confirmations);

-    if ancestor_confirmations.len() == 0 {
+    if ancestor_confirmations.is_empty() {
         // empty prepare phase
         test_debug!("Prepare-phase has no block-commits");
         return None;
diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs
index 7481fc8ce5..24e0ef8f9d 100644
--- a/stackslib/src/burnchains/bitcoin/address.rs
+++ b/stackslib/src/burnchains/bitcoin/address.rs
@@ -317,7 +317,7 @@ impl SegwitBitcoinAddress {
             None
         }?;

-        if quintets.len() == 0 || quintets.len() > 65 {
+        if quintets.is_empty() || quintets.len() > 65 {
             test_debug!("Invalid prog length: {}", quintets.len());
             return None;
         }
diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs
index fad0132235..4198bf3278 100644
--- a/stackslib/src/burnchains/bitcoin/bits.rs
+++ b/stackslib/src/burnchains/bitcoin/bits.rs
@@ -39,7 +39,7 @@ use crate::chainstate::stacks::{
 };

 /// Parse a script into its structured constituent opcodes and data and collect them
-pub fn parse_script<'a>(script: &'a Script) -> Vec<Instruction<'a>> {
+pub fn parse_script(script: &Script) -> Vec<Instruction<'_>> {
     // we will have to accept non-minimal pushdata since there's at least one OP_RETURN
     // in the transaction stream that has this property already.
script.iter(false).collect() @@ -93,7 +93,7 @@ impl BitcoinTxInputStructured { segwit: bool, input_txid: (Txid, u32), ) -> Option { - if num_sigs < 1 || pubkey_pushbytes.len() < 1 || pubkey_pushbytes.len() < num_sigs { + if num_sigs < 1 || pubkey_pushbytes.is_empty() || pubkey_pushbytes.len() < num_sigs { test_debug!( "Not a multisig script: num_sigs = {}, num_pubkeys <= {}", num_sigs, @@ -153,7 +153,7 @@ impl BitcoinTxInputStructured { pubkey_vecs: &[Vec], input_txid: (Txid, u32), ) -> Option { - if num_sigs < 1 || pubkey_vecs.len() < 1 || pubkey_vecs.len() < num_sigs { + if num_sigs < 1 || pubkey_vecs.is_empty() || pubkey_vecs.len() < num_sigs { test_debug!( "Not a multisig script: num_sigs = {}, num_pubkeys <= {}", num_sigs, diff --git a/stackslib/src/burnchains/bitcoin/blocks.rs b/stackslib/src/burnchains/bitcoin/blocks.rs index b4b5dc24e4..b9623bd210 100644 --- a/stackslib/src/burnchains/bitcoin/blocks.rs +++ b/stackslib/src/burnchains/bitcoin/blocks.rs @@ -381,7 +381,7 @@ impl BitcoinBlockParser { tx: &Transaction, epoch_id: StacksEpochId, ) -> Option> { - if tx.output.len() == 0 { + if tx.output.is_empty() { return None; } @@ -656,7 +656,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("185c112401590b11acdfea6bb26d2a8e37cb31f24a0c89dbb8cc14b3d6271fb1").unwrap()), vtxindex, - opcode: '+' as u8, + opcode: b'+', data: hex_bytes("fae543ff5672fb607fe15e16b1c3ef38737c631c7c5d911c6617993c21fba731363f1cfe").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -703,7 +703,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("eb2e84a45cf411e528185a98cd5fb45ed349843a83d39fd4dff2de47adad8c8f").unwrap()), vtxindex, - opcode: '~' as u8, + opcode: b'~', data: hex_bytes("7061747269636b7374616e6c6579322e6964").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -746,7 +746,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("b908952b30ccfdfa59985dc1ffdd2a22ef054d20fa253510d2af7797dddee459").unwrap()), vtxindex, - opcode: ':' as u8, + opcode: b':', data: hex_bytes("666f6f2e74657374").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -777,7 +777,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("16751ca54407b922e3072830cf4be58c5562a6dc350f6703192b673c4cc86182").unwrap()), vtxindex, - opcode: '?' as u8, + opcode: b'?', data: hex_bytes("9fab7f294936ddb6524a48feff691ecbd0ca9e8f107d845c417a5438d1cb441e827c5126").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -827,7 +827,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("185c112401590b11acdfea6bb26d2a8e37cb31f24a0c89dbb8cc14b3d6271fb1").unwrap()), vtxindex, - opcode: '+' as u8, + opcode: b'+', data: hex_bytes("fae543ff5672fb607fe15e16b1c3ef38737c631c7c5d911c6617993c21fba731363f1cfe").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -865,7 +865,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("eb2e84a45cf411e528185a98cd5fb45ed349843a83d39fd4dff2de47adad8c8f").unwrap()), vtxindex, - opcode: '~' as u8, + opcode: b'~', data: hex_bytes("7061747269636b7374616e6c6579322e6964").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -898,7 +898,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("b908952b30ccfdfa59985dc1ffdd2a22ef054d20fa253510d2af7797dddee459").unwrap()), vtxindex, - opcode: ':' as u8, + opcode: b':', data: hex_bytes("666f6f2e74657374").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -929,7 +929,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("16751ca54407b922e3072830cf4be58c5562a6dc350f6703192b673c4cc86182").unwrap()), vtxindex, - opcode: '?' 
as u8, + opcode: b'?', data: hex_bytes("9fab7f294936ddb6524a48feff691ecbd0ca9e8f107d845c417a5438d1cb441e827c5126").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -962,7 +962,7 @@ mod tests { data_amt: 0, txid: to_txid(&hex_bytes("8b8a12909d48fd86c06e92270133d320498fb36caa0fdcb3292a8bba99669ebd").unwrap()), vtxindex, - opcode: '&' as u8, + opcode: b'&', data: hex_bytes("0000cd73fa046543210000000000aa000174657374").unwrap(), inputs: vec![ BitcoinTxInputRaw { @@ -1039,7 +1039,7 @@ mod tests { // NAME_REGISTRATION with segwit p2wpkh-p2sh input txid: to_txid(&hex_bytes("b908952b30ccfdfa59985dc1ffdd2a22ef054d20fa253510d2af7797dddee459").unwrap()), vtxindex: 1, - opcode: ':' as u8, + opcode: b':', data: hex_bytes("666f6f2e74657374").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1082,7 +1082,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("13f2c54dbbe3d4d6ed6c9fd1a68fe3c4238ec5de50316d102a106553b57b8728").unwrap()), vtxindex: 2, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1110,7 +1110,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("7c7c60ae8617daeb351da01d0f683633e6778eb39b69e6e652b24ca0ce230291").unwrap()), vtxindex: 4, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1138,7 +1138,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("ae1cf8b812cf28ea96c7343dc7ee9ff2d8dfb2f441ab11c886dfcd56a0a1a2b4").unwrap()), vtxindex: 7, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1166,7 +1166,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("12fed1db482a35dba87535a13089692cea35a71bfb159b21d0a04be41219b2bd").unwrap()), vtxindex: 10, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { @@ -1194,7 +1194,7 @@ mod tests { // TOKEN_TRANSFER txid: to_txid(&hex_bytes("78035609a8733f214555cfec29e3eee1d24014863dc9f9d98092f6fbc5df63e8").unwrap()), vtxindex: 13, - opcode: '$' as u8, + opcode: b'$', data: hex_bytes("7c503a2e30a905cb515cfbc291766dfa00000000000000000000000000535441434b530000000000000064").unwrap(), inputs: vec![ BitcoinTxInputStructured { diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 15fb15b52a..af9bc24864 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -705,7 +705,7 @@ impl BitcoinIndexer { e })?; - if reorg_headers.len() == 0 { + if reorg_headers.is_empty() { // chain shrank considerably info!( "Missing Bitcoin headers in block range {}-{} -- did the Bitcoin chain shrink?", @@ -736,7 +736,7 @@ impl BitcoinIndexer { })?; assert!( - canonical_headers.len() > 0, + !canonical_headers.is_empty(), "BUG: uninitialized canonical SPV headers DB" ); @@ -1379,7 +1379,7 @@ mod test { spv_client .insert_block_headers_before(start_block - 1, hdrs) .unwrap(); - } else if hdrs.len() > 0 { + } else if !hdrs.is_empty() { test_debug!("insert at {}: {:?}", 0, &hdrs); spv_client.test_write_block_headers(0, hdrs).unwrap(); } @@ -1552,7 +1552,7 @@ mod test { spv_client 
                .insert_block_headers_before(start_block - 1, hdrs)
                .unwrap();
-        } else if hdrs.len() > 0 {
+        } else if !hdrs.is_empty() {
             test_debug!("insert at {}: {:?}", 0, &hdrs);
             spv_client.test_write_block_headers(0, hdrs).unwrap();
         }
diff --git a/stackslib/src/burnchains/bitcoin/network.rs b/stackslib/src/burnchains/bitcoin/network.rs
index 3411885ddb..3541fd6521 100644
--- a/stackslib/src/burnchains/bitcoin/network.rs
+++ b/stackslib/src/burnchains/bitcoin/network.rs
@@ -355,7 +355,7 @@ impl BitcoinIndexer {

     /// Send a GetData message
     pub fn send_getdata(&mut self, block_hashes: &Vec<Sha256dHash>) -> Result<(), btc_error> {
-        assert!(block_hashes.len() > 0);
+        assert!(!block_hashes.is_empty());
         let getdata_invs = block_hashes
             .iter()
             .map(|h| btc_message_blockdata::Inventory {
diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs
index 1ac16333d1..861baed580 100644
--- a/stackslib/src/burnchains/bitcoin/spv.rs
+++ b/stackslib/src/burnchains/bitcoin/spv.rs
@@ -132,7 +132,7 @@ impl FromColumn<Sha256dHash> for Sha256dHash {
 }

 impl FromRow<BlockHeader> for BlockHeader {
-    fn from_row<'a>(row: &'a Row) -> Result<BlockHeader, db_error> {
+    fn from_row(row: &Row) -> Result<BlockHeader, db_error> {
         let version: u32 = row.get_unwrap("version");
         let prev_blockhash: Sha256dHash = Sha256dHash::from_column(row, "prev_blockhash")?;
         let merkle_root: Sha256dHash = Sha256dHash::from_column(row, "merkle_root")?;
@@ -225,7 +225,7 @@ impl SpvClient {
         &mut self.headers_db
     }

-    pub fn tx_begin<'a>(&'a mut self) -> Result<DBTx<'a>, btc_error> {
+    pub fn tx_begin(&mut self) -> Result<DBTx<'_>, btc_error> {
         if !self.readwrite {
             return Err(db_error::ReadOnly.into());
         }
@@ -529,7 +529,7 @@ impl SpvClient {
         headers: &Vec<LoneBlockHeader>,
         check_txcount: bool,
     ) -> Result<(), btc_error> {
-        if headers.len() == 0 {
+        if headers.is_empty() {
             return Ok(());
         }

@@ -741,8 +741,8 @@ impl SpvClient {
     }

     /// Insert a block header
-    fn insert_block_header<'a>(
-        tx: &mut DBTx<'a>,
+    fn insert_block_header(
+        tx: &mut DBTx<'_>,
         header: BlockHeader,
         height: u64,
     ) -> Result<(), btc_error> {
@@ -945,7 +945,7 @@ impl SpvClient {
     ) -> Result<(), btc_error> {
         assert!(self.readwrite, "SPV header DB is open read-only");

-        if block_headers.len() == 0 {
+        if block_headers.is_empty() {
             // no-op
             return Ok(());
         }
@@ -996,7 +996,7 @@ impl SpvClient {
         block_headers: Vec<LoneBlockHeader>,
     ) -> Result<(), btc_error> {
         assert!(self.readwrite, "SPV header DB is open read-only");
-        if block_headers.len() == 0 {
+        if block_headers.is_empty() {
             // no-op
             return Ok(());
         }
@@ -1137,7 +1137,7 @@ impl SpvClient {
         ]);
         let max_target_bits = BlockHeader::compact_target_from_u256(&max_target);

-        let parent_header = if headers_in_range.len() > 0 {
+        let parent_header = if !headers_in_range.is_empty() {
             headers_in_range[0]
         } else {
             match self.read_block_header(current_header_height - 1)? {
diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs
index b688097d70..caeefe538c 100644
--- a/stackslib/src/burnchains/burnchain.rs
+++ b/stackslib/src/burnchains/burnchain.rs
@@ -130,7 +130,7 @@ impl BurnchainStateTransition {

         block_total_burns.sort();

-        if block_total_burns.len() == 0 {
+        if block_total_burns.is_empty() {
             return Some(0);
         } else if block_total_burns.len() == 1 {
             return Some(block_total_burns[0]);
diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs
index dfc7e8c08a..1f42881ac2 100644
--- a/stackslib/src/burnchains/db.rs
+++ b/stackslib/src/burnchains/db.rs
@@ -84,7 +84,7 @@ pub struct BlockCommitMetadata {
 }

 impl FromColumn<AffirmationMap> for AffirmationMap {
-    fn from_column<'a>(row: &'a Row, col_name: &str) -> Result<AffirmationMap, DBError> {
+    fn from_column(row: &Row, col_name: &str) -> Result<AffirmationMap, DBError> {
         let txt: String = row.get_unwrap(col_name);
         let am = AffirmationMap::decode(&txt).ok_or(DBError::ParseError)?;
         Ok(am)
@@ -92,13 +92,13 @@ impl FromColumn<AffirmationMap> for AffirmationMap {
 }

 impl FromRow<AffirmationMap> for AffirmationMap {
-    fn from_row<'a>(row: &'a Row) -> Result<AffirmationMap, DBError> {
+    fn from_row(row: &Row) -> Result<AffirmationMap, DBError> {
         AffirmationMap::from_column(row, "affirmation_map")
     }
 }

 impl FromRow<BlockCommitMetadata> for BlockCommitMetadata {
-    fn from_row<'a>(row: &'a Row) -> Result<BlockCommitMetadata, DBError> {
+    fn from_row(row: &Row) -> Result<BlockCommitMetadata, DBError> {
         let burn_block_hash = BurnchainHeaderHash::from_column(row, "burn_block_hash")?;
         let txid = Txid::from_column(row, "txid")?;
         let block_height = u64::from_column(row, "block_height")?;
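For readers outside the codebase: `FromRow` and `FromColumn` are the row-mapping traits from `stackslib`'s `util_lib::db`, which is why the de-lifetimed signatures above still name a target type parameter. A condensed sketch of their shape (paraphrased; the error type is simplified to a stand-in):

```rust
use rusqlite::Row;

// Stand-in for util_lib::db::Error.
type DbError = rusqlite::Error;

/// Build a whole `T` from a SQL row.
pub trait FromRow<T> {
    fn from_row(row: &Row) -> Result<T, DbError>;
}

/// Build a `T` from a single named column of a SQL row.
pub trait FromColumn<T> {
    fn from_column(row: &Row, column_name: &str) -> Result<T, DbError>;
}
```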
@@ -311,7 +311,7 @@ const BURNCHAIN_DB_INDEXES: &[&str] = &[
     "CREATE INDEX IF NOT EXISTS index_block_commit_metadata_burn_block_hash_anchor_block ON block_commit_metadata(burn_block_hash,anchor_block);",
 ];

-impl<'a> BurnchainDBTransaction<'a> {
+impl BurnchainDBTransaction<'_> {
     /// Store a burnchain block header into the burnchain database.
     /// Returns the row ID on success.
     pub(crate) fn store_burnchain_db_entry(
@@ -451,7 +451,7 @@ impl<'a> BurnchainDBTransaction<'a> {
                 })
                 .collect()
         };
-        if commits.len() == 0 {
+        if commits.is_empty() {
             test_debug!("No block-commits for block {}", hdr.block_height);
             return Ok(());
         }
@@ -1103,7 +1103,7 @@ impl BurnchainDB {
         &self.conn
     }

-    pub fn tx_begin<'a>(&'a mut self) -> Result<BurnchainDBTransaction<'a>, BurnchainError> {
+    pub fn tx_begin(&mut self) -> Result<BurnchainDBTransaction<'_>, BurnchainError> {
         let sql_tx = tx_begin_immediate(&mut self.conn)?;
         Ok(BurnchainDBTransaction { sql_tx })
     }
diff --git a/stackslib/src/burnchains/tests/affirmation.rs b/stackslib/src/burnchains/tests/affirmation.rs
index 12cfd1aac9..eaa872876e 100644
--- a/stackslib/src/burnchains/tests/affirmation.rs
+++ b/stackslib/src/burnchains/tests/affirmation.rs
@@ -246,7 +246,7 @@ pub fn make_simple_key_register(
             &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(),
         )
         .unwrap(),
-        memo: vec![01, 02, 03, 04, 05],
+        memo: vec![1, 2, 3, 4, 5],

         txid: next_txid(),
         vtxindex,
@@ -389,16 +389,11 @@ pub fn make_reward_cycle_with_vote(
                     );

                     if let Some(ref parent_commit) = parent_commits[i].as_ref() {
+                        assert!(parent_commit.block_height != block_commit.block_height);
                         assert!(
-                            parent_commit.block_height as u64 != block_commit.block_height as u64
-                        );
-                        assert!(
-                            parent_commit.block_height as u64
-                                == block_commit.parent_block_ptr as u64
-                        );
-                        assert!(
-                            parent_commit.vtxindex as u64 == block_commit.parent_vtxindex as u64
+                            parent_commit.block_height == u64::from(block_commit.parent_block_ptr)
                         );
+                        assert!(parent_commit.vtxindex == u32::from(block_commit.parent_vtxindex));
                     }

                     parent_commits[i] = Some(block_commit.clone());
diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs
index 8d72d4efa9..7f6be5bcf8 100644
--- a/stackslib/src/burnchains/tests/burnchain.rs
+++ b/stackslib/src/burnchains/tests/burnchain.rs
@@ -99,7 +99,7 @@ fn test_process_block_ops() {
             &hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap(),
         )
         .unwrap(),
-        memo: vec![01, 02, 03, 04, 05],
+        memo: vec![1, 2, 3, 4, 5],

        txid: Txid::from_bytes(
            &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap(),
@@ -119,7 +119,7 @@ fn test_process_block_ops() {
             &hex_bytes("bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c").unwrap(),
         )
         .unwrap(),
-        memo: vec![01, 02, 03, 04, 05],
+        memo: vec![1, 2, 3, 4, 5],

        txid: Txid::from_bytes(
            &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7").unwrap(),
@@ -139,7 +139,7 @@ fn test_process_block_ops() {
             &hex_bytes("de8af7037e522e65d2fe2d63fb1b764bfea829df78b84444338379df13144a02").unwrap(),
         )
         .unwrap(),
-        memo: vec![01, 02, 03, 04, 05],
+        memo: vec![1, 2, 3, 4, 5],

        txid: Txid::from_bytes(
            &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c").unwrap(),
@@ -464,11 +464,8 @@ fn test_process_block_ops() {
         123,
     ));

-    let initial_snapshot = BlockSnapshot::initial(
-        first_block_height,
-        &first_burn_hash,
-        first_block_height as u64,
-    );
+    let initial_snapshot =
+        BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height);

     // process up to 124
     {
@@ -574,7 +571,7 @@ fn test_process_block_ops() {
         acc
     });

-    let next_sortition = block_ops_124.len() > 0 && burn_total > 0;
+    let next_sortition = !block_ops_124.is_empty() && burn_total > 0;

     let mut block_124_snapshot = BlockSnapshot {
         accumulated_coinbase_ustx: 400_000_000,
@@ -733,11 +730,8 @@ fn test_burn_snapshot_sequence() {
    // insert all
operations let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); - let mut prev_snapshot = BlockSnapshot::initial( - first_block_height, - &first_burn_hash, - first_block_height as u64, - ); + let mut prev_snapshot = + BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height); let mut all_stacks_block_hashes = vec![]; for i in 0..32 { diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 6c474bba44..c8f568b5bf 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -602,14 +602,14 @@ fn test_get_commit_at() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 1, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -656,13 +656,13 @@ fn test_get_commit_at() { assert_eq!(cmt, cmts[4]); // fork off the last stored commit block - let fork_hdr = BurnchainHeaderHash([90 as u8; 32]); + let fork_hdr = BurnchainHeaderHash([90; 32]); let fork_block_header = BurnchainBlockHeader { block_height: 5, block_hash: fork_hdr, - parent_block_hash: BurnchainHeaderHash([4 as u8; 32]), + parent_block_hash: BurnchainHeaderHash([4; 32]), num_txs: 1, - timestamp: 4 as u64, + timestamp: 4, }; let mut fork_cmt = cmts[4].clone(); @@ -716,14 +716,14 @@ fn test_get_set_check_anchor_block() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 1, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -802,14 +802,14 @@ fn test_update_block_descendancy() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 3, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -926,14 +926,14 @@ fn test_update_block_descendancy_with_fork() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 3, - timestamp: i as u64, + timestamp: i, }; headers.push(block_header.clone()); @@ -943,14 +943,14 @@ fn test_update_block_descendancy_with_fork() { for i in 0..5 { let hdr = BurnchainHeaderHash([(i + 128 + 1) as u8; 32]); let block_header = BurnchainBlockHeader { - block_height: (first_height + i) as u64, + block_height: first_height + i, block_hash: hdr, parent_block_hash: parent_block_header .as_ref() .map(|blk| blk.block_hash.clone()) .unwrap_or(first_block_header.block_hash.clone()), num_txs: 3, - timestamp: i as u64, + timestamp: i, }; 
        fork_headers.push(block_header.clone());
diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs
index 544acb65e3..ab3763dac0 100644
--- a/stackslib/src/burnchains/tests/mod.rs
+++ b/stackslib/src/burnchains/tests/mod.rs
@@ -178,7 +178,7 @@ impl TestMiner {
     }

     pub fn next_VRF_key(&mut self) -> VRFPrivateKey {
-        let pk = if self.vrf_keys.len() == 0 {
+        let pk = if self.vrf_keys.is_empty() {
             // first key is simply the 32-byte hash of the secret state
             let mut buf: Vec<u8> = vec![];
             for i in 0..self.privks.len() {
@@ -204,7 +204,7 @@ impl TestMiner {
     }

     pub fn next_microblock_privkey(&mut self) -> StacksPrivateKey {
-        let pk = if self.microblock_privks.len() == 0 {
+        let pk = if self.microblock_privks.is_empty() {
             // first key is simply the 32-byte hash of the secret state
             let mut buf: Vec<u8> = vec![];
             for i in 0..self.privks.len() {
@@ -644,7 +644,6 @@ impl TestBurnchainBlock {
     }

     pub fn mine_pox<
-        'a,
         T: BlockEventDispatcher,
         N: CoordinatorNotices,
         R: RewardSetProvider,
@@ -655,7 +654,7 @@ impl TestBurnchainBlock {
         &self,
         db: &mut SortitionDB,
         burnchain: &Burnchain,
-        coord: &mut ChainsCoordinator<'a, T, N, R, CE, FE, B>,
+        coord: &mut ChainsCoordinator<'_, T, N, R, CE, FE, B>,
     ) -> BlockSnapshot {
         let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir);
         let parent_hdr = indexer
@@ -783,7 +782,6 @@ impl TestBurnchainFork {
     }

     pub fn mine_pending_blocks_pox<
-        'a,
         T: BlockEventDispatcher,
         N: CoordinatorNotices,
         R: RewardSetProvider,
@@ -794,7 +792,7 @@ impl TestBurnchainFork {
         &mut self,
         db: &mut SortitionDB,
         burnchain: &Burnchain,
-        coord: &mut ChainsCoordinator<'a, T, N, R, CE, FE, B>,
+        coord: &mut ChainsCoordinator<'_, T, N, R, CE, FE, B>,
     ) -> BlockSnapshot {
         let mut snapshot = {
             let ic = db.index_conn();
@@ -858,7 +856,7 @@ fn process_next_sortition(
     let mut next_commits = vec![];
     let mut next_prev_keys = vec![];

-    if prev_keys.len() > 0 {
+    if !prev_keys.is_empty() {
         assert_eq!(miners.len(), prev_keys.len());

         // make a Stacks block (hash) for each of the prior block's keys
diff --git a/stackslib/src/chainstate/burn/db/mod.rs b/stackslib/src/chainstate/burn/db/mod.rs
index cbee114603..9136a36f6d 100644
--- a/stackslib/src/chainstate/burn/db/mod.rs
+++ b/stackslib/src/chainstate/burn/db/mod.rs
@@ -53,7 +53,7 @@ impl_byte_array_from_column_only!(TrieHash);
 impl_byte_array_from_column_only!(MessageSignature);

 impl FromColumn<VRFPublicKey> for VRFPublicKey {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<VRFPublicKey, db_error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<VRFPublicKey, db_error> {
         let pubkey_hex: String = row.get_unwrap(column_name);
         match VRFPublicKey::from_hex(&pubkey_hex) {
             Some(pubk) => Ok(pubk),
@@ -63,7 +63,7 @@ impl FromColumn<VRFPublicKey> for VRFPublicKey {
 }

 impl FromColumn<StacksAddress> for StacksAddress {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<StacksAddress, db_error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<StacksAddress, db_error> {
         let address_str: String = row.get_unwrap(column_name);
         match Self::from_string(&address_str) {
             Some(a) => Ok(a),
@@ -73,14 +73,14 @@ impl FromColumn<StacksAddress> for StacksAddress {
 }

 impl FromColumn<PrincipalData> for PrincipalData {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<PrincipalData, db_error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<PrincipalData, db_error> {
         let address_str: String = row.get_unwrap(column_name);
         Self::parse(&address_str).map_err(|_| db_error::ParseError)
     }
 }

 impl FromColumn<PoxAddress> for PoxAddress {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<PoxAddress, db_error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<PoxAddress, db_error> {
         let address_str: String = row.get_unwrap(column_name);
        match Self::from_db_string(&address_str) {
            Some(a) => Ok(a),
@@ -90,7 +90,7 @@
 }

 impl FromColumn<BitcoinAddress> for BitcoinAddress {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<BitcoinAddress, db_error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<BitcoinAddress, db_error> {
         let address_str: String = row.get_unwrap(column_name);
         match Self::from_string(&address_str) {
             Some(a) => Ok(a),
diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs
index 0aacd2816a..17e2546389 100644
--- a/stackslib/src/chainstate/burn/db/processing.rs
+++ b/stackslib/src/chainstate/burn/db/processing.rs
@@ -35,7 +35,7 @@ use crate::chainstate::stacks::index::{Error as MARFError, MARFValue, MarfTrieId
 use crate::core::INITIAL_MINING_BONUS_WINDOW;
 use crate::util_lib::db::Error as DBError;

-impl<'a> SortitionHandleTx<'a> {
+impl SortitionHandleTx<'_> {
     /// Run a blockstack operation's "check()" method and return the result.
     fn check_transaction(
         &mut self,
@@ -379,7 +379,7 @@ mod tests {
                 "a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a",
             )
             .unwrap(),
-            memo: vec![01, 02, 03, 04, 05],
+            memo: vec![1, 2, 3, 4, 5],

            txid: Txid::from_bytes_be(
                &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562")
diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs
index baef7211f8..74317ba639 100644
--- a/stackslib/src/chainstate/burn/db/sortdb.rs
+++ b/stackslib/src/chainstate/burn/db/sortdb.rs
@@ -88,7 +88,7 @@ use crate::util_lib::db::{
     u64_to_sql, DBConn, DBTx, Error as db_error, FromColumn, FromRow, IndexDBConn, IndexDBTx,
 };

-const BLOCK_HEIGHT_MAX: u64 = ((1 as u64) << 63) - 1;
+const BLOCK_HEIGHT_MAX: u64 = (1 << 63) - 1;

 pub const REWARD_WINDOW_START: u64 = 144 * 15;
 pub const REWARD_WINDOW_END: u64 = 144 * 90 + REWARD_WINDOW_START;
@@ -96,25 +96,25 @@ pub type BlockHeaderCache = HashMap<ConsensusHash, (Option<BlockHeaderHash>, ConsensusHash)>;

 impl FromRow<SortitionId> for SortitionId {
-    fn from_row<'a>(row: &'a Row) -> Result<SortitionId, db_error> {
+    fn from_row(row: &Row) -> Result<SortitionId, db_error> {
         SortitionId::from_column(row, "sortition_id")
     }
 }

 impl FromRow<ConsensusHash> for ConsensusHash {
-    fn from_row<'a>(row: &'a Row) -> Result<ConsensusHash, db_error> {
+    fn from_row(row: &Row) -> Result<ConsensusHash, db_error> {
         ConsensusHash::from_column(row, "consensus_hash")
     }
 }

 impl FromRow<BurnchainHeaderHash> for BurnchainHeaderHash {
-    fn from_row<'a>(row: &'a Row) -> Result<BurnchainHeaderHash, db_error> {
+    fn from_row(row: &Row) -> Result<BurnchainHeaderHash, db_error> {
         BurnchainHeaderHash::from_column(row, "burn_header_hash")
     }
 }

 impl FromRow<MissedBlockCommit> for MissedBlockCommit {
-    fn from_row<'a>(row: &'a Row) -> Result<MissedBlockCommit, db_error> {
+    fn from_row(row: &Row) -> Result<MissedBlockCommit, db_error> {
         let intended_sortition = SortitionId::from_column(row, "intended_sortition_id")?;
         let input_json: String = row.get_unwrap("input");
         let input =
@@ -130,7 +130,7 @@ impl FromRow<MissedBlockCommit> for MissedBlockCommit {
 }

 impl FromRow<BlockSnapshot> for BlockSnapshot {
-    fn from_row<'a>(row: &'a Row) -> Result<BlockSnapshot, db_error> {
+    fn from_row(row: &Row) -> Result<BlockSnapshot, db_error> {
         let block_height = u64::from_column(row, "block_height")?;
         let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
         let burn_header_timestamp = u64::from_column(row, "burn_header_timestamp")?;
@@ -211,7 +211,7 @@ impl FromRow<BlockSnapshot> for BlockSnapshot {
 }

 impl FromRow<LeaderKeyRegisterOp> for LeaderKeyRegisterOp {
-    fn from_row<'a>(row: &'a Row) -> Result<LeaderKeyRegisterOp, db_error> {
+    fn from_row(row: &Row) -> Result<LeaderKeyRegisterOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let vtxindex: u32 = row.get_unwrap("vtxindex");
         let block_height = u64::from_column(row, "block_height")?;
@@ -240,7 +240,7 @@ impl FromRow<LeaderKeyRegisterOp> for LeaderKeyRegisterOp {
 }

 impl FromRow<LeaderBlockCommitOp> for LeaderBlockCommitOp {
-    fn from_row<'a>(row: &'a Row) -> Result<LeaderBlockCommitOp, db_error> {
+    fn from_row(row: &Row) -> Result<LeaderBlockCommitOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let vtxindex: u32 = row.get_unwrap("vtxindex");
         let block_height = u64::from_column(row, "block_height")?;
@@ -314,7 +314,7 @@ impl FromRow<LeaderBlockCommitOp> for LeaderBlockCommitOp {
 }

 impl FromRow<StackStxOp> for StackStxOp {
-    fn from_row<'a>(row: &'a Row) -> Result<StackStxOp, db_error> {
+    fn from_row(row: &Row) -> Result<StackStxOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let vtxindex: u32 = row.get_unwrap("vtxindex");
         let block_height = u64::from_column(row, "block_height")?;
@@ -357,7 +357,7 @@ impl FromRow<StackStxOp> for StackStxOp {
 }

 impl FromRow<DelegateStxOp> for DelegateStxOp {
-    fn from_row<'a>(row: &'a Row) -> Result<DelegateStxOp, db_error> {
+    fn from_row(row: &Row) -> Result<DelegateStxOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let vtxindex: u32 = row.get_unwrap("vtxindex");
         let block_height = u64::from_column(row, "block_height")?;
@@ -389,7 +389,7 @@ impl FromRow<DelegateStxOp> for DelegateStxOp {
 }

 impl FromRow<TransferStxOp> for TransferStxOp {
-    fn from_row<'a>(row: &'a Row) -> Result<TransferStxOp, db_error> {
+    fn from_row(row: &Row) -> Result<TransferStxOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let vtxindex: u32 = row.get_unwrap("vtxindex");
         let block_height = u64::from_column(row, "block_height")?;
@@ -417,7 +417,7 @@ impl FromRow<TransferStxOp> for TransferStxOp {
 }

 impl FromRow<VoteForAggregateKeyOp> for VoteForAggregateKeyOp {
-    fn from_row<'a>(row: &'a Row) -> Result<VoteForAggregateKeyOp, db_error> {
+    fn from_row(row: &Row) -> Result<VoteForAggregateKeyOp, db_error> {
         let txid = Txid::from_column(row, "txid")?;
         let vtxindex: u32 = row.get_unwrap("vtxindex");
         let block_height = u64::from_column(row, "block_height")?;
@@ -450,7 +450,7 @@ impl FromRow<VoteForAggregateKeyOp> for VoteForAggregateKeyOp {
 }

 impl FromColumn<ASTRules> for ASTRules {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<ASTRules, db_error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<ASTRules, db_error> {
         let x: u8 = row.get_unwrap(column_name);
         let ast_rules = ASTRules::from_u8(x).ok_or(db_error::ParseError)?;
         Ok(ast_rules)
@@ -458,7 +458,7 @@ impl FromColumn<ASTRules> for ASTRules {
 }

 impl FromRow<(ASTRules, u64)> for (ASTRules, u64) {
-    fn from_row<'a>(row: &'a Row) -> Result<(ASTRules, u64), db_error> {
+    fn from_row(row: &Row) -> Result<(ASTRules, u64), db_error> {
         let ast_rules = ASTRules::from_column(row, "ast_rule_id")?;
         let height = u64::from_column(row, "block_height")?;
         Ok((ast_rules, height))
@@ -479,7 +479,7 @@ pub struct InitialMiningBonus {
 }

 impl FromRow<AcceptedStacksBlockHeader> for AcceptedStacksBlockHeader {
-    fn from_row<'a>(row: &'a Row) -> Result<AcceptedStacksBlockHeader, db_error> {
+    fn from_row(row: &Row) -> Result<AcceptedStacksBlockHeader, db_error> {
         let tip_consensus_hash = ConsensusHash::from_column(row, "tip_consensus_hash")?;
         let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?;
         let block_hash = BlockHeaderHash::from_column(row, "stacks_block_hash")?;
@@ -495,7 +495,7 @@ impl FromRow<AcceptedStacksBlockHeader> for AcceptedStacksBlockHeader {
 }

 impl FromRow<StacksEpoch> for StacksEpoch {
-    fn from_row<'a>(row: &'a Row) -> Result<StacksEpoch, db_error> {
+    fn from_row(row: &Row) -> Result<StacksEpoch, db_error> {
         let epoch_id_u32: u32 = row.get_unwrap("epoch_id");
         let epoch_id = StacksEpochId::try_from(epoch_id_u32).map_err(|_| db_error::ParseError)?;
@@ -1533,7 +1533,7 @@ impl SortitionHandle for SortitionHandleConn<'_> {
     }
 }

-impl<'a> SortitionHandleTx<'a> {
+impl SortitionHandleTx<'_> {
     pub fn set_stacks_block_accepted(
         &mut self,
         consensus_hash: &ConsensusHash,
@@ -1604,7 +1604,7 @@ impl<'a> SortitionHandleTx<'a> {
             anchor_block,
             reward_set.rewarded_addresses.len()
         );
-        if reward_set.rewarded_addresses.len() == 0 {
+        if reward_set.rewarded_addresses.is_empty() {
             return Ok(None);
         }

@@ -2646,7 +2646,7 @@ impl<'a> SortitionHandleConn<'a> {

 // Connection methods
 impl SortitionDB {
     /// Begin a transaction.
-    pub fn tx_begin<'a>(&'a mut self) -> Result<SortitionDBTx<'a>, db_error> {
+    pub fn tx_begin(&mut self) -> Result<SortitionDBTx<'_>, db_error> {
         if !self.readwrite {
             return Err(db_error::ReadOnly);
         }
@@ -2663,7 +2663,7 @@ impl SortitionDB {
     }

     /// Make an indexed connection
-    pub fn index_conn<'a>(&'a self) -> SortitionDBConn<'a> {
+    pub fn index_conn(&self) -> SortitionDBConn<'_> {
         SortitionDBConn::new(
             &self.marf,
             SortitionDBTxContext {
@@ -2739,7 +2739,7 @@ impl SortitionDB {
         ))
     }

-    pub fn conn<'a>(&'a self) -> &'a Connection {
+    pub fn conn(&self) -> &Connection {
         self.marf.sqlite_conn()
     }

@@ -3556,8 +3556,8 @@ impl SortitionDB {
     }

     #[cfg(any(test, feature = "testing"))]
-    pub fn override_ast_rule_height<'a>(
-        tx: &mut DBTx<'a>,
+    pub fn override_ast_rule_height(
+        tx: &mut DBTx<'_>,
         ast_rules: ASTRules,
         height: u64,
     ) -> Result<(), db_error> {
@@ -3587,7 +3587,7 @@ impl SortitionDB {
             NO_PARAMS,
         )?;

-        assert!(ast_rule_sets.len() > 0);
+        assert!(!ast_rule_sets.is_empty());
         let mut last_height = ast_rule_sets[0].1;
         let mut last_rules = ast_rule_sets[0].0;
         for (ast_rules, ast_rule_height) in ast_rule_sets.into_iter() {
@@ -3699,7 +3699,7 @@ impl SortitionDB {
     }
 }

-impl<'a> SortitionDBTx<'a> {
+impl SortitionDBTx<'_> {
     pub fn find_sortition_tip_affirmation_map(
         &mut self,
         chain_tip: &SortitionId,
@@ -3720,7 +3720,7 @@ impl<'a> SortitionDBTx<'a> {
     }
 }

-impl<'a> SortitionDBConn<'a> {
+impl SortitionDBConn<'_> {
     pub fn as_handle<'b>(&'b self, chain_tip: &SortitionId) -> SortitionHandleConn<'b> {
         SortitionHandleConn {
             index: self.index,
@@ -4049,7 +4049,7 @@ impl SortitionDB {

     fn parse_last_anchor_block_hash(s: Option<String>) -> Option<BlockHeaderHash> {
         s.map(|s| {
-            if s == "" {
+            if s.is_empty() {
                 None
             } else {
                 Some(BlockHeaderHash::from_hex(&s).expect("BUG: Bad BlockHeaderHash stored in DB"))
@@ -4060,7 +4060,7 @@ impl SortitionDB {

     fn parse_last_anchor_block_txid(s: Option<String>) -> Option<Txid> {
         s.map(|s| {
-            if s == "" {
+            if s.is_empty() {
                 None
             } else {
                 Some(Txid::from_hex(&s).expect("BUG: Bad Txid stored in DB"))
@@ -4593,10 +4593,10 @@ impl SortitionDB {

         // remove the first entry -- it's always `n` based on the way we construct it, while the
         // heaviest affirmation map just has nothing.
-        if am.len() > 0 {
-            Ok(AffirmationMap::new(am.as_slice()[1..].to_vec()))
-        } else {
+        if am.is_empty() {
             Ok(AffirmationMap::empty())
+        } else {
+            Ok(AffirmationMap::new(am.as_slice()[1..].to_vec()))
         }
     }

@@ -4719,7 +4719,7 @@ impl SortitionDB {
     }

     /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly.
-    pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> {
+    pub fn index_handle_at_tip(&self) -> SortitionHandleConn<'_> {
         let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap();
         self.index_handle(&sortition_id)
     }
@@ -4737,7 +4737,7 @@ impl SortitionDB {

     /// Open a tx handle at the burn chain tip
     /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly.
- pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { + pub fn tx_begin_at_tip(&mut self) -> SortitionHandleTx<'_> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.tx_handle_begin(&sortition_id).unwrap() } @@ -5239,7 +5239,7 @@ impl SortitionDB { cache: &mut BlockHeaderCache, header_data: &Vec<(ConsensusHash, Option<BlockHeaderHash>)>, ) { - if header_data.len() > 0 { + if !header_data.is_empty() { let mut i = header_data.len() - 1; while i > 0 { let cur_consensus_hash = &header_data[i].0; @@ -5394,7 +5394,7 @@ impl SortitionDB { } } -impl<'a> SortitionHandleTx<'a> { +impl SortitionHandleTx<'_> { /// Append a snapshot to a chain tip, and update various chain tip statistics. /// Returns the new state root of this fork. /// `initialize_bonus` - if Some(..), then this snapshot is the first mined snapshot, @@ -5881,7 +5881,7 @@ impl<'a> SortitionHandleTx<'a> { "SELECT 1 FROM snapshots WHERE burn_header_hash = ?1 AND pox_valid = 1 LIMIT 1", &[&snapshot.burn_header_hash], )?; - if all_valid_sortitions.len() > 0 { + if !all_valid_sortitions.is_empty() { error!("FATAL: Tried to insert snapshot {:?}, but already have pox-valid sortition for {:?}", &snapshot, &snapshot.burn_header_hash); panic!(); } @@ -6118,7 +6118,10 @@ impl<'a> SortitionHandleTx<'a> { if let Some(mut reward_set) = reward_info.known_selected_anchor_block_owned() { // record payouts separately from the remaining addresses, since some of them // could have just been consumed. - if reward_set.rewarded_addresses.len() > 0 { + if reward_set.rewarded_addresses.is_empty() { + // no payouts + pox_payout_addrs = vec![]; + } else { // if we have a reward set, then we must also have produced a recipient // info for this block let mut recipients_to_remove: Vec<_> = recipient_info @@ -6136,9 +6139,6 @@ impl<'a> SortitionHandleTx<'a> { "BUG: Attempted to remove used address from reward set, but failed to do so safely"); } pox_payout_addrs = addrs; - } else { - // no payouts - pox_payout_addrs = vec![]; } keys.push(db_keys::pox_reward_set_size().to_string()); @@ -6321,7 +6321,7 @@ impl<'a> SortitionHandleTx<'a> { } } - if tied.len() == 0 { + if tied.is_empty() { return None; } if tied.len() == 1 { @@ -6608,7 +6608,7 @@ pub mod tests { use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; - impl<'a> SortitionHandleTx<'a> { + impl SortitionHandleTx<'_> { /// Update the canonical Stacks tip (testing only) pub fn test_update_canonical_stacks_tip( &mut self, @@ -7066,7 +7066,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7145,7 +7145,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7369,7 +7369,7 @@ pub mod tests { ) .unwrap(), public_key: public_key.clone(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -7422,7 +7422,7 @@ pub mod tests { for i in 0..255 { let sortition_id = SortitionId([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, i as u8, + 0, 0, 0, 0, 0, i, ]); let parent_sortition_id = if i == 0 { last_snapshot.sortition_id.clone() @@ -7459,7 +7459,7 @@ pub mod tests
{ 0, 0, 0, - i - 1 as u8, + i - 1, ]) }; @@ -7471,7 +7471,7 @@ pub mod tests { burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: BurnchainHeaderHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, i as u8, + 0, 0, 0, 0, 0, 0, i, ]) .unwrap(), sortition_id, @@ -7508,7 +7508,7 @@ pub mod tests { 0, 0, 0, - (if i == 0 { 0xff } else { i - 1 }) as u8, + (if i == 0 { 0xff } else { i - 1 }), ]) .unwrap(), consensus_hash: ConsensusHash::from_bytes(&[ @@ -7531,12 +7531,12 @@ pub mod tests { 0, 0, 0, - (i + 1) as u8, + i + 1, ]) .unwrap(), ops_hash: OpsHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, i as u8, + 0, 0, 0, 0, 0, 0, i, ]) .unwrap(), total_burn: i as u64, @@ -7717,7 +7717,7 @@ pub mod tests { let snapshot_row = BlockSnapshot { accumulated_coinbase_ustx: 0, pox_valid: true, - block_height: i as u64 + 1, + block_height: i + 1, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: BurnchainHeaderHash::from_bytes(&[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -7789,7 +7789,7 @@ pub mod tests { 0, 0, 0, 0, 0, 0, i as u8, ]) .unwrap(), - total_burn: i as u64, + total_burn: i, sortition: true, sortition_hash: SortitionHash::initial(), winning_block_txid: Txid::from_hex( @@ -7801,7 +7801,7 @@ pub mod tests { ) .unwrap(), index_root: TrieHash::from_empty_data(), - num_sortitions: i as u64 + 1, + num_sortitions: i + 1, stacks_block_accepted: false, stacks_block_height: 0, arrival_index: 0, @@ -7824,7 +7824,7 @@ pub mod tests { last_snapshot = snapshot_row; last_snapshot.index_root = index_root; // should succeed within the tx - let ch = tx.get_consensus_at(i as u64 + 1).unwrap().unwrap(); + let ch = tx.get_consensus_at(i + 1).unwrap().unwrap(); assert_eq!(ch, last_snapshot.consensus_hash); tx.commit().unwrap(); @@ -7864,7 +7864,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -10080,7 +10080,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -10635,10 +10635,10 @@ pub mod tests { .map(|op| BlockstackOperationType::LeaderBlockCommit(op.clone())) }) .collect(); - let winner = if commit_set.len() > 0 { - commit_set[0].clone() - } else { + let winner = if commit_set.is_empty() { None + } else { + commit_set[0].clone() }; let burn_header_hash = headers[i + 1].block_hash.clone(); let burn_block_height = headers[i + 1].block_height; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index 7b1f13040c..0d94c7e78d 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -366,7 +366,7 @@ impl BurnSamplePoint { /// Calculate the ranges between 0 and 2**256 - 1 over which each point in the burn sample /// applies, so we can later select which block to use. 
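The snapshot-test hunks above are two mechanical lints: `vec![01, 02, ...]` becomes `vec![1, 2, ...]` (clippy's `zero_prefixed_literal`, since a leading zero suggests octal without meaning it in Rust), and `i as u8` loses its cast wherever `i` is already `u8` (clippy's `unnecessary_cast`). A self-contained sketch of both, with made-up values:

```rust
fn main() {
    // clippy::zero_prefixed_literal: 01 is just 1 in Rust (no octal
    // interpretation), so the leading zero only misleads readers.
    let memo: Vec<u8> = vec![1, 2, 3, 4, 5];
    assert_eq!(memo, vec![0x01, 0x02, 0x03, 0x04, 0x05]);

    // clippy::unnecessary_cast: once the loop variable is typed u8, the
    // `as u8` is a no-op and can be dropped, as in the tests above.
    for i in 0u8..4 {
        assert_eq!([i as u8; 32], [i; 32]);
    }
}
```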
fn make_sortition_ranges(burn_sample: &mut Vec<BurnSamplePoint>) { - if burn_sample.len() == 0 { + if burn_sample.is_empty() { // empty sample return; } @@ -818,7 +818,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -843,7 +843,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -868,7 +868,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("eb54704f71d4a2d1128d60ffccced547054b52250ada6f3e7356165714f44d4c") diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index d13ad4980b..4156375a5a 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -62,13 +62,13 @@ impl_byte_array_newtype!(SortitionHash, u8, 32); #[derive(Debug, Clone, PartialEq)] #[repr(u8)] pub enum Opcodes { - LeaderBlockCommit = '[' as u8, - LeaderKeyRegister = '^' as u8, - StackStx = 'x' as u8, - PreStx = 'p' as u8, - TransferStx = '$' as u8, - DelegateStx = '#' as u8, - VoteForAggregateKey = 'v' as u8, + LeaderBlockCommit = b'[', + LeaderKeyRegister = b'^', + StackStx = b'x', + PreStx = b'p', + TransferStx = b'$', + DelegateStx = b'#', + VoteForAggregateKey = b'v', } // a burnchain block snapshot @@ -350,8 +350,8 @@ impl ConsensusHashExtensions for ConsensusHash { ) -> Result<Vec<ConsensusHash>, db_error> { let mut i = 0; let mut prev_chs = vec![]; - while i < 64 && block_height - (((1 as u64) << i) - 1) >= first_block_height { - let prev_block: u64 = block_height - (((1 as u64) << i) - 1); + while i < 64 && block_height - ((1 << i) - 1) >= first_block_height { + let prev_block: u64 = block_height - ((1 << i) - 1); let prev_ch = sort_tx .get_consensus_at(prev_block) .unwrap_or_else(|_| { @@ -366,7 +366,7 @@ impl ConsensusHashExtensions for ConsensusHash { prev_chs.push(prev_ch.clone()); i += 1; - if block_height < (((1 as u64) << i) - 1) { + if block_height < ((1 << i) - 1) { break; } } diff --git a/stackslib/src/chainstate/burn/operations/delegate_stx.rs b/stackslib/src/chainstate/burn/operations/delegate_stx.rs index 130a42784b..ad5c268878 100644 --- a/stackslib/src/chainstate/burn/operations/delegate_stx.rs +++ b/stackslib/src/chainstate/burn/operations/delegate_stx.rs @@ -136,7 +136,7 @@ impl DelegateStxOp { return Err(op_error::InvalidInput); } - if outputs.len() == 0 { + if outputs.is_empty() { warn!( "Invalid tx: inputs: {}, outputs: {}", tx.num_signers(), @@ -230,24 +230,24 @@ impl StacksMessageCodec for DelegateStxOp { .map_err(|e| codec_error::WriteError(e))?; if let Some((index, _)) = self.reward_addr { - fd.write_all(&(1 as u8).to_be_bytes()) + fd.write_all(&1_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; fd.write_all(&index.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } else { - fd.write_all(&(0 as u8).to_be_bytes()) + fd.write_all(&0_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; - fd.write_all(&(0 as u32).to_be_bytes()) + fd.write_all(&0_u32.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } if let Some(height) = self.until_burn_height { - fd.write_all(&(1 as u8).to_be_bytes()) + fd.write_all(&1_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; fd.write_all(&height.to_be_bytes()) .map_err(|e|
codec_error::WriteError(e))?; } else { - fd.write_all(&(0 as u8).to_be_bytes()) + fd.write_all(&0_u8.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; } Ok(()) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cf85a02829..136e4d4a75 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -293,7 +293,7 @@ impl LeaderBlockCommitOp { return Err(op_error::InvalidInput); } - if outputs.len() == 0 { + if outputs.is_empty() { warn!( "Invalid tx: inputs: {}, outputs: {}", tx.num_signers(), @@ -832,7 +832,7 @@ impl LeaderBlockCommitOp { /// Check the epoch marker in a block-commit to make sure it matches the right epoch. /// Valid in Stacks 2.05+ fn check_epoch_commit_marker(&self, marker: u8) -> Result<(), op_error> { - if self.memo.len() < 1 { + if self.memo.is_empty() { debug!( "Invalid block commit"; "reason" => "no epoch marker byte given", @@ -860,7 +860,7 @@ impl LeaderBlockCommitOp { } StacksEpochId::Epoch20 => { // no-op, but log for helping node operators watch for old nodes - if self.memo.len() < 1 { + if self.memo.is_empty() { debug!( "Soon-to-be-invalid block commit"; "reason" => "no epoch marker byte given", @@ -1960,7 +1960,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -1982,7 +1982,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -2070,7 +2070,7 @@ mod tests { block_height: (i + 1 + first_block_height as usize) as u64, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: block_header_hashes[i].clone(), - sortition_id: SortitionId(block_header_hashes[i as usize].0.clone()), + sortition_id: SortitionId(block_header_hashes[i].0.clone()), parent_sortition_id: prev_snapshot.sortition_id.clone(), parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(), consensus_hash: ConsensusHash::from_bytes(&[ @@ -2500,7 +2500,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -2522,7 +2522,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("9410df84e2b440055c33acb075a0687752df63fe8fe84aeec61abe469f0448c7") @@ -2605,7 +2605,7 @@ mod tests { block_height: (i + 1 + first_block_height as usize) as u64, burn_header_timestamp: get_epoch_time_secs(), burn_header_hash: block_header_hashes[i].clone(), - sortition_id: SortitionId(block_header_hashes[i as usize].0.clone()), + sortition_id: SortitionId(block_header_hashes[i].0.clone()), parent_sortition_id: prev_snapshot.sortition_id.clone(), parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(), consensus_hash: ConsensusHash::from_bytes(&[ @@ -3558,7 +3558,7 @@ mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid([0x01; 32]), vtxindex: 456, block_height: first_block_height + 1, diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs 
b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index a0406544f1..f4d9d4ddd9 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -284,7 +284,7 @@ pub mod tests { result: Some(LeaderKeyRegisterOp { consensus_hash: ConsensusHash::from_bytes(&hex_bytes("2222222222222222222222222222222222222222").unwrap()).unwrap(), public_key: VRFPublicKey::from_bytes(&hex_bytes("a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a").unwrap()).unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be(&hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562").unwrap()).unwrap(), vtxindex, @@ -491,7 +491,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") @@ -627,7 +627,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes( @@ -656,7 +656,7 @@ pub mod tests { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes( diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 67de678659..5d12c5e67f 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -119,7 +119,7 @@ impl PreStxOp { }; let outputs = tx.get_recipients(); - assert!(outputs.len() > 0); + assert!(!outputs.is_empty()); let output = outputs[0] .as_ref() @@ -317,7 +317,7 @@ impl StackStxOp { })?; let outputs = tx.get_recipients(); - assert!(outputs.len() > 0); + assert!(!outputs.is_empty()); let first_output = outputs[0].as_ref().ok_or_else(|| { warn!("Invalid tx: failed to decode first output"); @@ -869,7 +869,7 @@ mod tests { auth_id: Some(0u32), }; let op_bytes = { - let mut bytes = ['T' as u8, '3' as u8].to_vec(); + let mut bytes = [b'T', b'3'].to_vec(); op.consensus_serialize(&mut bytes) .expect("Expected to be able to serialize op into bytes"); bytes diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index 9d1d562d9c..a36849518e 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -177,7 +177,7 @@ impl TransferStxOp { })?; let outputs = tx.get_recipients(); - assert!(outputs.len() > 0); + assert!(!outputs.is_empty()); let output = outputs[0] .as_ref() diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs index 648859abc6..3e547366cf 100644 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs @@ -340,7 +340,7 @@ mod tests { assert_eq!(vote_op.signer_index, signer_index); assert_eq!(&vote_op.aggregate_key, &aggregate_key); - assert_eq!(vote_op.round, round as u32); + assert_eq!(vote_op.round, round); assert_eq!(vote_op.reward_cycle, reward_cycle); } diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index 2187cb736c..6b357eece1 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ 
b/stackslib/src/chainstate/burn/sortition.rs @@ -132,7 +132,7 @@ impl BlockSnapshot { VRF_seed: &VRFSeed, sortition_hash: &SortitionHash, ) -> Option { - if dist.len() == 0 { + if dist.is_empty() { // no winners return None; } @@ -592,7 +592,7 @@ impl BlockSnapshot { ) }; - if state_transition.burn_dist.len() == 0 { + if state_transition.burn_dist.is_empty() { // no burns happened debug!( "No burns happened in block"; @@ -1099,18 +1099,18 @@ mod test { for i in 0..100 { let header = BurnchainBlockHeader { block_height: prev_block_header.block_height + 1, - block_hash: BurnchainHeaderHash([i as u8; 32]), + block_hash: BurnchainHeaderHash([i; 32]), parent_block_hash: prev_block_header.block_hash.clone(), num_txs: 0, - timestamp: prev_block_header.timestamp + (i as u64) + 1, + timestamp: prev_block_header.timestamp + u64::from(i) + 1, }; - let sortition_hash = SortitionHash([i as u8; 32]); + let sortition_hash = SortitionHash([i; 32]); let commit_winner = LeaderBlockCommitOp { sunset_burn: 0, - block_header_hash: BlockHeaderHash([i as u8; 32]), - new_seed: VRFSeed([i as u8; 32]), + block_header_hash: BlockHeaderHash([i; 32]), + new_seed: VRFSeed([i; 32]), parent_block_ptr: 0, parent_vtxindex: 0, key_block_ptr: 0, @@ -1120,11 +1120,11 @@ mod test { burn_fee: 100, input: (Txid([0; 32]), 0), - apparent_sender: BurnchainSigner(format!("signer {}", i)), - txid: Txid([i as u8; 32]), + apparent_sender: BurnchainSigner(format!("signer {i}")), + txid: Txid([i; 32]), vtxindex: 0, block_height: header.block_height, - burn_parent_modulus: (i % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_parent_modulus: i % BURN_BLOCK_MINED_AT_MODULUS as u8, burn_header_hash: header.block_hash.clone(), treatment: vec![], }; diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 209c6b8ef0..122aedbefb 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -326,7 +326,7 @@ impl OnChainRewardSetProvider<'static, DummyEventDispatcher> { } } -impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider<'a, T> { +impl<T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider<'_, T> { fn get_reward_set( &self, cycle_start_burn_height: u64, @@ -394,7 +394,7 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider } } -impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { +impl<T: BlockEventDispatcher> OnChainRewardSetProvider<'_, T> { fn get_reward_set_epoch2( &self, // Todo: `current_burn_height` is a misleading name: should be the `cycle_start_burn_height` @@ -634,12 +634,12 @@ impl< } } -impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader> - ChainsCoordinator<'a, T, (), U, (), (), B> +impl<T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader> + ChainsCoordinator<'_, T, (), U, (), (), B> { /// Create a coordinator for testing, with some parameters defaulted to None #[cfg(test)] - pub fn test_new( + pub fn test_new<'a>( burnchain: &Burnchain, chain_id: u32, path: &str, @@ -659,7 +659,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader /// Create a coordinator for testing allowing for all configurable params #[cfg(test)] - pub fn test_new_full( + pub fn test_new_full<'a>( burnchain: &Burnchain, chain_id: u32, path: &str, @@ -910,7 +910,7 @@ pub fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { let mut burn_amt = 0; for op in ops.iter() { if let BlockstackOperationType::LeaderBlockCommit(commit) = op { - if commit.commit_outs.len() == 0 { + if
commit.commit_outs.is_empty() { continue; } let amt_per_address = commit.burn_fee / (commit.commit_outs.len() as u64); @@ -1112,14 +1112,13 @@ pub fn static_get_stacks_tip_affirmation_map( } impl< - 'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider, CE: CostEstimator + ?Sized, FE: FeeEstimator + ?Sized, B: BurnchainHeaderReader, - > ChainsCoordinator<'a, T, N, U, CE, FE, B> + > ChainsCoordinator<'_, T, N, U, CE, FE, B> { /// Process new Stacks blocks. If we get stuck for want of a missing PoX anchor block, return /// its hash. @@ -2773,7 +2772,7 @@ impl< } sortition_db_handle.commit()?; - if unorphan_blocks.len() > 0 { + if !unorphan_blocks.is_empty() { revalidated_stacks_block = true; let ic = self.sortition_db.index_conn(); let mut chainstate_db_tx = self.chain_state_db.db_tx_begin()?; @@ -3104,7 +3103,7 @@ impl< } } - if !found && staging_block_chs.len() > 0 { + if !found && !staging_block_chs.is_empty() { // we have seen this block before, but in a different consensus fork. // queue it for re-processing -- it might still be valid if it's in a reward // cycle that exists on the new PoX fork. diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index f203ea5e28..0863708122 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -991,7 +991,7 @@ fn make_stacks_block_with_input( parent_vtxindex, txid: next_txid(), - vtxindex: (1 + key_index) as u32, + vtxindex: 1 + key_index, block_height: 0, burn_parent_modulus: (BURN_BLOCK_MINED_AT_MODULUS - 1) as u8, burn_header_hash: BurnchainHeaderHash([0; 32]), diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 1bb5e44192..c6dd44ac39 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -84,7 +84,7 @@ macro_rules! inf_or_debug { }) } -impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { +impl<T: BlockEventDispatcher> OnChainRewardSetProvider<'_, T> { /// Read a reward_set written while updating .signers /// `debug_log` should be set to true if the reward set loading should /// log messages as `debug!` instead of `error!` or `info!`.
This allows @@ -615,14 +615,13 @@ pub fn get_nakamoto_next_recipients( } impl< - 'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider, CE: CostEstimator + ?Sized, FE: FeeEstimator + ?Sized, B: BurnchainHeaderReader, - > ChainsCoordinator<'a, T, N, U, CE, FE, B> + > ChainsCoordinator<'_, T, N, U, CE, FE, B> { /// Get the first nakamoto reward cycle fn get_first_nakamoto_reward_cycle(&self) -> u64 { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0525717981..e0b3375452 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -82,7 +82,7 @@ use crate::util_lib::db::{query_rows, u64_to_sql}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; use crate::util_lib::strings::StacksString; -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn get_blocks_at_height(&self, height: u64) -> Vec<NakamotoBlock> { let sql = "SELECT data FROM nakamoto_staging_blocks WHERE height = ?1"; let args = rusqlite::params![&u64_to_sql(height).unwrap()]; @@ -568,7 +568,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { peer.check_nakamoto_migration(); } -impl<'a> TestPeer<'a> { +impl TestPeer<'_> { pub fn mine_single_block_tenure( &mut self, sender_key: &StacksPrivateKey, @@ -625,7 +625,7 @@ impl<'a> TestPeer<'a> { &mut test_signers, miner_setup, |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { + if blocks_so_far.is_empty() { let stx_transfer = make_token_transfer( chainstate, sortdb, @@ -1005,7 +1005,7 @@ fn block_info_tests(use_primary_testnet: bool) { let (last_2x_block_id, last_2x_block_ht) = get_tip_info(&mut peer); peer.mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() > 0 { + if !blocks_so_far.is_empty() { return vec![]; } info!("Producing first nakamoto block, publishing our three contracts"); @@ -2318,9 +2318,9 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a debug!("{}: {:?}", i, &matured_reward); if i < 10 { - assert_eq!(matured_reward.parent_miner.coinbase, 3600_000_000); + assert_eq!(matured_reward.parent_miner.coinbase, 3_600_000_000); } else { - assert_eq!(matured_reward.parent_miner.coinbase, 1000_000_000); + assert_eq!(matured_reward.parent_miner.coinbase, 1_000_000_000); } if i < 11 { @@ -2353,9 +2353,9 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a let miner_reward = &matured_reward.latest_miners[0]; if i < 9 { - assert_eq!(miner_reward.coinbase, 3600_000_000); + assert_eq!(miner_reward.coinbase, 3_600_000_000); } else { - assert_eq!(miner_reward.coinbase, 1000_000_000); + assert_eq!(miner_reward.coinbase, 1_000_000_000); } if i < 10 { // epoch2 @@ -3243,7 +3243,7 @@ fn test_stacks_on_burnchain_ops() { until_burn_height: None, // mocked - txid: Txid([i as u8; 32]), + txid: Txid([i; 32]), vtxindex: 11, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3263,7 +3263,7 @@ fn test_stacks_on_burnchain_ops() { auth_id: Some(i as u32), // mocked - txid: Txid([(i as u8) | 0x80; 32]), + txid: Txid([i | 0x80; 32]), vtxindex: 12, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3275,7 +3275,7 @@ fn test_stacks_on_burnchain_ops() { memo: vec![0x2], // mocked - txid: Txid([(i as u8) | 0x40; 32]), + txid: Txid([i | 0x40; 32]), vtxindex: 13, block_height: block_height + 1,
burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3294,7 +3294,7 @@ fn test_stacks_on_burnchain_ops() { )), // mocked - txid: Txid([(i as u8) | 0xc0; 32]), + txid: Txid([i | 0xc0; 32]), vtxindex: 14, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), @@ -3307,7 +3307,7 @@ fn test_stacks_on_burnchain_ops() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - bitpatterns.insert(consensus_hash.clone(), i as u8); + bitpatterns.insert(consensus_hash.clone(), i); tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); @@ -3337,11 +3337,11 @@ fn test_stacks_on_burnchain_ops() { .unwrap(); let mut expected_burnchain_txids = HashSet::new(); - for j in (i as u64).saturating_sub(6)..i { - expected_burnchain_txids.insert(Txid([j as u8; 32])); - expected_burnchain_txids.insert(Txid([(j as u8) | 0x80; 32])); - expected_burnchain_txids.insert(Txid([(j as u8) | 0x40; 32])); - expected_burnchain_txids.insert(Txid([(j as u8) | 0xc0; 32])); + for j in i.saturating_sub(6)..i { + expected_burnchain_txids.insert(Txid([j; 32])); + expected_burnchain_txids.insert(Txid([j | 0x80; 32])); + expected_burnchain_txids.insert(Txid([j | 0x40; 32])); + expected_burnchain_txids.insert(Txid([j | 0xc0; 32])); } assert_eq!(processed_burnchain_txids, expected_burnchain_txids); @@ -3441,7 +3441,7 @@ fn test_stacks_on_burnchain_ops() { sort_tip.consensus_hash ); assert!(last_block.header.consensus_hash == sort_tip.consensus_hash); - assert_eq!(highest_tenure.coinbase_height, 12 + i); + assert_eq!(highest_tenure.coinbase_height, 12 + u64::from(i)); assert_eq!(highest_tenure.cause, TenureChangeCause::Extended); assert_eq!( highest_tenure.num_blocks_confirmed, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 22172795c2..8ae062ed00 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1098,7 +1098,7 @@ impl NakamotoBlock { /// Return Some(tenure-change-payload) if it's a tenure change /// Return None if not pub fn try_get_tenure_change_payload(&self) -> Option<&TenureChangePayload> { - if self.txs.len() == 0 { + if self.txs.is_empty() { return None; } if let TransactionPayload::TenureChange(ref tc) = &self.txs[0].payload { @@ -1145,7 +1145,7 @@ impl NakamotoBlock { }) .collect::<Vec<_>>(); - if tenure_change_positions.len() == 0 { + if tenure_change_positions.is_empty() { return Ok(false); } @@ -1246,7 +1246,7 @@ impl NakamotoBlock { }) .collect::<Vec<_>>(); - if coinbase_positions.len() == 0 && tenure_change_positions.len() == 0 { + if coinbase_positions.is_empty() && tenure_change_positions.is_empty() { // can't be a first block in a tenure return Ok(false); } @@ -1264,7 +1264,7 @@ impl NakamotoBlock { return Err(()); } - if coinbase_positions.len() == 1 && tenure_change_positions.len() == 0 { + if coinbase_positions.len() == 1 && tenure_change_positions.is_empty() { // coinbase unaccompanied by a tenure change warn!("Invalid block -- have coinbase without tenure change"; "consensus_hash" => %self.header.consensus_hash, @@ -1274,7 +1274,7 @@ impl NakamotoBlock { return Err(()); } - if coinbase_positions.len() == 0 && tenure_change_positions.len() == 1 { + if coinbase_positions.is_empty() && tenure_change_positions.len() == 1 { // this is possibly a block with a tenure-extend transaction.
// It must be the first tx if tenure_change_positions[0] != 0 { @@ -1864,11 +1864,11 @@ impl NakamotoChainState { /// /// It returns Err(..) on DB error, or if the child block does not connect to the parent. /// The caller should keep calling this until it gets Ok(None) - pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>( + pub fn process_next_nakamoto_block<T: BlockEventDispatcher>( stacks_chain_state: &mut StacksChainState, sort_db: &mut SortitionDB, canonical_sortition_tip: &SortitionId, - dispatcher_opt: Option<&'a T>, + dispatcher_opt: Option<&T>, ) -> Result<Option<StacksEpochReceipt>, ChainstateError> { #[cfg(test)] fault_injection::stall_block_processing(); @@ -3032,7 +3032,7 @@ impl NakamotoChainState { let args = params![tenure_start_block_id]; let proof_bytes: Option<String> = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { - if bytes.len() == 0 { + if bytes.is_empty() { // no VRF proof return Ok(None); } @@ -4564,7 +4564,7 @@ impl NakamotoChainState { } // if any, append auto unlock events to the coinbase receipt - if auto_unlock_events.len() > 0 { + if !auto_unlock_events.is_empty() { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction if let Some(receipt) = tx_receipts.get_mut(0) { diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 025f4b49dc..dad10f62e0 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -750,7 +750,7 @@ impl NakamotoBlockBuilder { } } -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { /// Determine if we have a particular block with the given index hash. /// Returns Ok(true) if so /// Returns Ok(false) if not @@ -812,7 +812,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { } } -impl<'a> NakamotoStagingBlocksTx<'a> { +impl NakamotoStagingBlocksTx<'_> { /// Add a shadow block. /// Fails if there are any non-shadow blocks present in the tenure.
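A side note on `try_get_tenure_change_payload` above: the diff keeps the two-step shape (an `is_empty` guard, then `txs[0]`). For what it's worth, the same check can be written in one step with `slice::first`; a simplified sketch where `Tx`/`Payload` are stand-ins, not the stackslib definitions:

```rust
enum Payload {
    TenureChange(u32),
    Other,
}

struct Tx {
    payload: Payload,
}

// txs.first() returns None for an empty slice, so the ? operator subsumes
// the explicit is_empty() guard used in the diff.
fn tenure_change(txs: &[Tx]) -> Option<&u32> {
    match &txs.first()?.payload {
        Payload::TenureChange(tc) => Some(tc),
        _ => None,
    }
}

fn main() {
    let txs = [
        Tx { payload: Payload::TenureChange(7) },
        Tx { payload: Payload::Other },
    ];
    assert_eq!(tenure_change(&txs), Some(&7));
    assert_eq!(tenure_change(&[]), None);
}
```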
pub fn add_shadow_block(&self, shadow_block: &NakamotoBlock) -> Result<(), ChainstateError> { diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0ac5ddcfde..9190bf99af 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -185,8 +185,8 @@ impl NakamotoStagingBlocksConn { pub struct NakamotoStagingBlocksConnRef<'a>(&'a rusqlite::Connection); -impl<'a> NakamotoStagingBlocksConnRef<'a> { - pub fn conn(&self) -> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { + pub fn conn(&self) -> NakamotoStagingBlocksConnRef<'_> { NakamotoStagingBlocksConnRef(self.0) } } @@ -200,7 +200,7 @@ impl Deref for NakamotoStagingBlocksConnRef<'_> { pub struct NakamotoStagingBlocksTx<'a>(rusqlite::Transaction<'a>); -impl<'a> NakamotoStagingBlocksTx<'a> { +impl NakamotoStagingBlocksTx<'_> { pub fn commit(self) -> Result<(), rusqlite::Error> { self.0.commit() } @@ -217,17 +217,17 @@ impl<'a> Deref for NakamotoStagingBlocksTx<'a> { } } -impl<'a> DerefMut for NakamotoStagingBlocksTx<'a> { +impl DerefMut for NakamotoStagingBlocksTx<'_> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// Open a Blob handle to a Nakamoto block -fn inner_open_nakamoto_block<'a>( - conn: &'a Connection, + fn inner_open_nakamoto_block( + conn: &Connection, rowid: i64, readwrite: bool, -) -> Result<Blob<'a>, ChainstateError> { +) -> Result<Blob<'_>, ChainstateError> { let blob = conn.blob_open( rusqlite::DatabaseName::Main, "nakamoto_staging_blocks", @@ -240,11 +240,11 @@ impl NakamotoStagingBlocksConn { /// Open a Blob handle to a Nakamoto block - pub fn open_nakamoto_block<'a>( - &'a self, + pub fn open_nakamoto_block( + &self, rowid: i64, readwrite: bool, - ) -> Result<Blob<'a>, ChainstateError> { + ) -> Result<Blob<'_>, ChainstateError> { inner_open_nakamoto_block(self.deref(), rowid, readwrite) } } @@ -511,7 +511,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { } } -impl<'a> NakamotoStagingBlocksTx<'a> { +impl NakamotoStagingBlocksTx<'_> { /// Notify the staging database that a given stacks block has been processed. /// This will update the attachable status for children blocks, as well as marking the stacks /// block itself as processed. @@ -689,17 +689,15 @@ impl<'a> NakamotoStagingBlocksTx<'a> { impl StacksChainState { /// Begin a transaction against the staging blocks DB. /// Note that this DB is (or will eventually be) in a separate database from the headers.
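The `NakamotoStagingBlocksTx`/`ConnRef` types above are thin newtypes over rusqlite handles: `Deref`/`DerefMut` forward the inner connection's API while the wrapper carries the staging-specific methods. A minimal sketch of the pattern, using `String` in place of `rusqlite::Transaction` so it runs standalone:

```rust
use std::ops::{Deref, DerefMut};

// Newtype wrapper in the style of NakamotoStagingBlocksTx: owns the inner
// handle, adds a consuming commit(), and Derefs to the inner type.
struct StagingTx(String);

impl StagingTx {
    fn commit(self) -> String {
        self.0
    }
}

impl Deref for StagingTx {
    type Target = String;
    fn deref(&self) -> &String {
        &self.0
    }
}

impl DerefMut for StagingTx {
    fn deref_mut(&mut self) -> &mut String {
        &mut self.0
    }
}

fn main() {
    let mut tx = StagingTx(String::from("BEGIN"));
    tx.push_str("; INSERT"); // DerefMut exposes the inner API directly
    assert_eq!(tx.len(), 13); // Deref: &StagingTx -> &String
    assert_eq!(tx.commit(), "BEGIN; INSERT");
}
```

The design choice: callers get the full rusqlite surface for free, while the consuming `commit(self)` makes it a compile error to use the transaction after it ends.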
- pub fn staging_db_tx_begin<'a>( - &'a mut self, - ) -> Result<NakamotoStagingBlocksTx<'a>, ChainstateError> { + pub fn staging_db_tx_begin(&mut self) -> Result<NakamotoStagingBlocksTx<'_>, ChainstateError> { let tx = tx_begin_immediate(&mut self.nakamoto_staging_blocks_conn)?; Ok(NakamotoStagingBlocksTx(tx)) } /// Begin a tx to both the headers DB and the staging DB - pub fn headers_and_staging_tx_begin<'a>( - &'a mut self, - ) -> Result<(rusqlite::Transaction<'a>, NakamotoStagingBlocksTx<'a>), ChainstateError> { + pub fn headers_and_staging_tx_begin( + &mut self, + ) -> Result<(rusqlite::Transaction<'_>, NakamotoStagingBlocksTx<'_>), ChainstateError> { let header_tx = self .state_index .storage_tx() @@ -709,9 +707,9 @@ impl StacksChainState { } /// Open a connection to the headers DB, and open a tx to the staging DB - pub fn headers_conn_and_staging_tx_begin<'a>( - &'a mut self, - ) -> Result<(&'a rusqlite::Connection, NakamotoStagingBlocksTx<'a>), ChainstateError> { + pub fn headers_conn_and_staging_tx_begin( + &mut self, + ) -> Result<(&rusqlite::Connection, NakamotoStagingBlocksTx<'_>), ChainstateError> { let header_conn = self.state_index.sqlite_conn(); let staging_tx = tx_begin_immediate(&mut self.nakamoto_staging_blocks_conn)?; Ok((header_conn, NakamotoStagingBlocksTx(staging_tx))) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 6fd559da69..7b5e35a0fd 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -264,7 +264,7 @@ impl TestSigners { let aggregate_public_key: Vec<u8> = rand::thread_rng().sample_iter(Standard).take(33).collect(); - self.aggregate_public_key = aggregate_public_key.clone(); + self.aggregate_public_key.clone_from(&aggregate_public_key); aggregate_public_key } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 3a02c5e3d5..bd415b68b0 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -94,7 +94,7 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::StacksString; -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn get_all_blocks_in_tenure( &self, tenure_id_consensus_hash: &ConsensusHash, diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index fbbc1c6cde..cd21c7eeaa 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -291,7 +291,7 @@ impl TestMiner { } } -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn get_any_normal_tenure(&self) -> Result<Option<ConsensusHash>, ChainstateError> { let qry = "SELECT consensus_hash FROM nakamoto_staging_blocks WHERE obtain_method != ?1 ORDER BY RANDOM() LIMIT 1"; let args = params![&NakamotoBlockObtainMethod::Shadow.to_string()]; @@ -444,7 +444,7 @@ impl TestStacksNode { /// Record the nakamoto blocks as a new tenure pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec<NakamotoBlock>) { if let Some(last_tenure) = self.nakamoto_blocks.last_mut() { - if tenure_blocks.len() > 0 { + if !tenure_blocks.is_empty() { // this tenure is overwriting the last tenure if last_tenure.first().unwrap().header.consensus_hash == tenure_blocks.first().unwrap().header.consensus_hash @@ -743,7 +743,7 @@ impl TestStacksNode { let mut next_block_txs = block_builder(miner, chainstate, sortdb, &blocks);
txs.append(&mut next_block_txs); - if txs.len() == 0 { + if txs.is_empty() { break; } @@ -1088,7 +1088,7 @@ impl TestStacksNode { } } -impl<'a> TestPeer<'a> { +impl TestPeer<'_> { /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or /// Stacks 2.x block. /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) @@ -2129,7 +2129,7 @@ impl<'a> TestPeer<'a> { // get_nakamoto_tenure_length // compare the DB to the block's ancestors let ancestors = Self::load_nakamoto_tenure(chainstate, &block.block_id()); - assert!(ancestors.len() > 0); + assert!(!ancestors.is_empty()); assert_eq!( ancestors.len(), NakamotoChainState::get_nakamoto_tenure_length(chainstate.db(), &block.block_id()) diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 1dff530ba7..d1255d8549 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -313,7 +313,7 @@ impl StacksMessageCodec for StacksBlock { }?; // there must be at least one transaction (the coinbase) - if txs.len() == 0 { + if txs.is_empty() { warn!("Invalid block: Zero-transaction block"); return Err(codec_error::DeserializeError( "Invalid block: zero transactions".to_string(), @@ -429,7 +429,7 @@ impl StacksBlock { /// Find and return the coinbase transaction. It's always the first transaction. /// If there are 0 coinbase txs, or more than 1, then return None pub fn get_coinbase_tx(&self) -> Option<StacksTransaction> { - if self.txs.len() == 0 { + if self.txs.is_empty() { return None; } match self.txs[0].payload { @@ -444,14 +444,14 @@ impl StacksBlock { let mut txids = HashMap::new(); for (i, tx) in txs.iter().enumerate() { let txid = tx.txid(); - if txids.get(&txid).is_some() { + if txids.contains_key(&txid) { warn!( "Duplicate tx {}: at index {} and {}", txid, txids.get(&txid).unwrap(), i ); - test_debug!("{:?}", &tx); + test_debug!("{tx:?}"); return false; } txids.insert(txid, i); @@ -831,7 +831,7 @@ impl StacksMessageCodec for StacksMicroblock { read_next(&mut bound_read) }?; - if txs.len() == 0 { + if txs.is_empty() { warn!("Invalid microblock: zero transactions"); return Err(codec_error::DeserializeError( "Invalid microblock: zero transactions".to_string(), @@ -1296,7 +1296,7 @@ mod test { .unwrap(), ) .unwrap(), - memo: vec![01, 02, 03, 04, 05], + memo: vec![1, 2, 3, 4, 5], txid: Txid::from_bytes_be( &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562") diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 4d4e875ba3..58701a2861 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -354,7 +354,7 @@ fn cost_2_contract_is_arithmetic_only() { impl BurnStateDB for TestSimBurnStateDB { fn get_tip_burn_block_height(&self) -> Option<u32> { - Some(self.height as u32) + Some(self.height) } fn get_tip_sortition_id(&self) -> Option<SortitionId> { @@ -587,7 +587,7 @@ impl HeadersDB for TestSimHeadersDB { let burn_block_height = self.get_burn_block_height_for_block(id_bhh)?
as u64; Some( BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64 + burn_block_height - - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u64, + - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, ) } } @@ -607,7 +607,7 @@ impl HeadersDB for TestSimHeadersDB { None } else { Some( - (BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32 + input_height as u32) + (BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + input_height) .try_into() .unwrap(), ) @@ -3047,7 +3047,7 @@ fn test_vote_too_many_confirms() { .0, Value::Response(ResponseData { committed: true, - data: Value::UInt(i as u128).into() + data: Value::UInt(i).into() }) ); } @@ -3061,10 +3061,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "vote-proposal", - &symbols_from_values(vec![ - Value::UInt(i as u128), - Value::UInt(USTX_PER_HOLDER) - ]), + &symbols_from_values(vec![Value::UInt(i), Value::UInt(USTX_PER_HOLDER)]), ) .unwrap() .0, @@ -3079,7 +3076,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "confirm-votes", - &symbols_from_values(vec![Value::UInt(i as u128)]) + &symbols_from_values(vec![Value::UInt(i)]) ) .unwrap() .0, @@ -3093,10 +3090,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "withdraw-votes", - &symbols_from_values(vec![ - Value::UInt(i as u128), - Value::UInt(USTX_PER_HOLDER), - ]), + &symbols_from_values(vec![Value::UInt(i), Value::UInt(USTX_PER_HOLDER)]), ) .unwrap() .0; @@ -3122,7 +3116,7 @@ fn test_vote_too_many_confirms() { None, COST_VOTING_CONTRACT_TESTNET.clone(), "confirm-miners", - &symbols_from_values(vec![Value::UInt(i as u128)]) + &symbols_from_values(vec![Value::UInt(i)]) ) .unwrap() .0, diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 64782c67d6..47b57cdd2c 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -294,7 +294,7 @@ pub fn check_pox_print_event( } // assert_eq!(inner_tuple.data_map.get(inner_key), Some(&inner_val)); } - if missing.len() > 0 || wrong.len() > 0 { + if !missing.is_empty() || !wrong.is_empty() { eprintln!("missing:\n{:#?}", &missing); eprintln!("wrong:\n{:#?}", &wrong); assert!(false); @@ -382,7 +382,7 @@ pub fn check_stacking_state_invariants( let mut cycle_indexes = HashMap::new(); - if reward_indexes.len() > 0 || expect_indexes { + if !reward_indexes.is_empty() || expect_indexes { assert_eq!( reward_indexes.len() as u128, lock_period, @@ -3631,7 +3631,7 @@ fn test_pox_2_getters() { .expect_optional() .unwrap(); assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); - assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(bob_delegation_amt, LOCKUP_AMT); assert!(bob_pox_addr_opt.is_none()); let allowance = data @@ -3679,7 +3679,7 @@ fn test_pox_2_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(partial_stacked, LOCKUP_AMT as u128); + assert_eq!(partial_stacked, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-now") @@ -3695,7 +3695,7 @@ fn test_pox_2_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(rejected, LOCKUP_AMT as u128); + assert_eq!(rejected, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-future") @@ -3848,10 +3848,9 @@ fn test_get_pox_addrs() { if tenure_id <= 1 { // record the first reward cycle when tokens get stacked - lockup_reward_cycle = 1 - + (burnchain - .block_height_to_reward_cycle(tip_burn_block_height) - .unwrap()) as u64; + lockup_reward_cycle = 1 + burnchain + 
.block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); eprintln!( "\nlockup reward cycle: {}\ncur reward cycle: {}\n", lockup_reward_cycle, cur_reward_cycle @@ -4145,10 +4144,9 @@ fn test_stack_with_segwit() { if tenure_id <= 1 { // record the first reward cycle when tokens get stacked - lockup_reward_cycle = 1 - + (burnchain - .block_height_to_reward_cycle(tip_burn_block_height) - .unwrap()) as u64; + lockup_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); eprintln!( "\nlockup reward cycle: {}\ncur reward cycle: {}\n", lockup_reward_cycle, cur_reward_cycle @@ -4460,7 +4458,7 @@ fn test_pox_2_delegate_stx_addr_validation() { alice_delegation_addr, charlie_address.to_account_principal() ); - assert_eq!(alice_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(alice_delegation_amt, LOCKUP_AMT); assert!(alice_pox_addr_opt.is_some()); let alice_pox_addr = alice_pox_addr_opt.unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 8a173c6adc..5c52297969 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -3278,7 +3278,7 @@ fn pox_3_getters() { .expect_optional() .unwrap(); assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); - assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(bob_delegation_amt, LOCKUP_AMT); assert!(bob_pox_addr_opt.is_none()); let allowance = data @@ -3326,7 +3326,7 @@ fn pox_3_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(partial_stacked, LOCKUP_AMT as u128); + assert_eq!(partial_stacked, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-now") @@ -3334,7 +3334,7 @@ fn pox_3_getters() { .unwrap() .expect_u128() .unwrap(); - assert_eq!(rejected, LOCKUP_AMT as u128); + assert_eq!(rejected, LOCKUP_AMT); let rejected = data .get("get-total-pox-rejection-next") @@ -4430,7 +4430,7 @@ fn pox_3_delegate_stx_addr_validation() { alice_delegation_addr, charlie_address.to_account_principal() ); - assert_eq!(alice_delegation_amt, LOCKUP_AMT as u128); + assert_eq!(alice_delegation_amt, LOCKUP_AMT); assert!(alice_pox_addr_opt.is_some()); let alice_pox_addr = alice_pox_addr_opt.unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 14dc9e75ab..072f1d33ef 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -6378,7 +6378,7 @@ fn stack_increase(use_nakamoto: bool) { &pox_addr, lock_period, &signing_pk, - block_height as u64, + block_height, Some(signature), u128::MAX, 1, @@ -8930,7 +8930,7 @@ pub fn tenure_with_txs( test_signers, |_miner, _chainstate, _sort_dbconn, _blocks| { info!("Building nakamoto block. 
Blocks len {}", _blocks.len()); - if _blocks.len() == 0 { + if _blocks.is_empty() { txs.to_vec() } else { vec![] diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 42682ea7c8..0ad5687f12 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -123,7 +123,7 @@ impl FromRow<MinerPaymentSchedule> for MinerPaymentSchedule { } impl FromRow<MinerReward> for MinerReward { - fn from_row<'a>(row: &'a Row) -> Result<MinerReward, db_error> { + fn from_row(row: &Row) -> Result<MinerReward, db_error> { let address = StacksAddress::from_column(row, "address")?; let recipient_str: Option<String> = row.get_unwrap("recipient"); let recipient = recipient_str @@ -503,8 +503,8 @@ impl StacksChainState { } /// Store a matured miner reward for subsequent query in Clarity, without doing any validation - fn inner_insert_matured_miner_reward<'a>( - tx: &mut DBTx<'a>, + fn inner_insert_matured_miner_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, reward: &MinerReward, @@ -516,7 +516,7 @@ impl StacksChainState { &(*parent_block_id).into(), &(*child_block_id).into(), )?; - if cur_rewards.len() > 0 { + if !cur_rewards.is_empty() { let mut present = false; for rw in cur_rewards.iter() { if (rw.is_parent() && reward.is_parent()) || (rw.is_child() && reward.is_child()) { @@ -564,8 +564,8 @@ impl StacksChainState { /// Store a parent block's matured reward. This is the share of the streamed tx fees produced /// by the miner who mined this block, and nothing else. - pub fn insert_matured_parent_miner_reward<'a>( - tx: &mut DBTx<'a>, + pub fn insert_matured_parent_miner_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, parent_reward: &MinerReward, @@ -594,8 +594,8 @@ impl StacksChainState { /// Store a child block's matured miner reward. This is the block's coinbase, anchored tx fees, and /// share of the confirmed streamed tx fees - pub fn insert_matured_child_miner_reward<'a>( - tx: &mut DBTx<'a>, + pub fn insert_matured_child_miner_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, child_reward: &MinerReward, @@ -625,8 +625,8 @@ impl StacksChainState { /// Store a child block's matured user burn-support reward. This is the share of the /// block's coinbase, anchored tx fees, and share of the confirmed streamed tx fees that go to /// the user burn-support sender - pub fn insert_matured_child_user_reward<'a>( - tx: &mut DBTx<'a>, + pub fn insert_matured_child_user_reward( + tx: &mut DBTx<'_>, parent_block_id: &StacksBlockId, child_block_id: &StacksBlockId, child_reward: &MinerReward, @@ -724,8 +724,8 @@ impl StacksChainState { } /// Get the scheduled miner rewards in a particular Stacks fork at a particular height. - pub fn get_scheduled_block_rewards_in_fork_at_height<'a>( - tx: &mut StacksDBTx<'a>, + pub fn get_scheduled_block_rewards_in_fork_at_height( + tx: &mut StacksDBTx<'_>, tip: &StacksHeaderInfo, block_height: u64, ) -> Result<Vec<MinerPaymentSchedule>, Error> { @@ -868,9 +868,9 @@ impl StacksChainState { // of all participants' burns. let coinbase_reward = participant .coinbase - .checked_mul(this_burn_total as u128) + .checked_mul(this_burn_total) .expect("FATAL: STX coinbase reward overflow") - / (burn_total as u128). + / burn_total; // process poison -- someone can steal a fraction of the total coinbase if they can present // evidence that the miner forked the microblock stream.
The remainder of the coinbase is @@ -1003,7 +1003,7 @@ impl StacksChainState { let reward_height = tip_stacks_height - MINER_REWARD_MATURITY; - assert!(latest_matured_miners.len() > 0); + assert!(!latest_matured_miners.is_empty()); assert!(latest_matured_miners[0].vtxindex == 0); assert!(latest_matured_miners[0].miner); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 7c56c839e9..c8800131e6 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -344,7 +344,7 @@ impl StagingBlock { } impl FromRow<StagingMicroblock> for StagingMicroblock { - fn from_row<'a>(row: &'a Row) -> Result<StagingMicroblock, db_error> { + fn from_row(row: &Row) -> Result<StagingMicroblock, db_error> { let anchored_block_hash: BlockHeaderHash = BlockHeaderHash::from_column(row, "anchored_block_hash")?; let consensus_hash: ConsensusHash = ConsensusHash::from_column(row, "consensus_hash")?; @@ -373,7 +373,7 @@ impl FromRow<StagingMicroblock> for StagingMicroblock { } impl FromRow<StagingBlock> for StagingBlock { - fn from_row<'a>(row: &'a Row) -> Result<StagingBlock, db_error> { + fn from_row(row: &Row) -> Result<StagingBlock, db_error> { let anchored_block_hash: BlockHeaderHash = BlockHeaderHash::from_column(row, "anchored_block_hash")?; let parent_anchored_block_hash: BlockHeaderHash = @@ -939,7 +939,7 @@ impl StacksChainState { 0 => Ok(None), 1 => { let blob = blobs.pop().unwrap(); - if blob.len() == 0 { + if blob.is_empty() { // cleared Ok(None) } else { @@ -1044,7 +1044,7 @@ impl StacksChainState { block_hash, )? { Some(staging_block) => { - if staging_block.block_data.len() == 0 { + if staging_block.block_data.is_empty() { return Ok(None); } @@ -1258,7 +1258,7 @@ impl StacksChainState { } ret.reverse(); - if ret.len() > 0 { + if !ret.is_empty() { // should start with 0 if ret[0].header.sequence != 0 { warn!("Invalid microblock stream from {}/{} to {}: sequence does not start with 0, but with {}", @@ -1341,7 +1341,7 @@ impl StacksChainState { let staging_microblocks = query_rows::<StagingMicroblock, _>(blocks_conn, &sql, args).map_err(Error::DBError)?; - if staging_microblocks.len() == 0 { + if staging_microblocks.is_empty() { // haven't seen any microblocks that descend from this block yet test_debug!( "No microblocks built on {} up to {}", @@ -1436,7 +1436,7 @@ impl StacksChainState { ret.push(mblock); } - if fork_poison.is_none() && ret.len() == 0 { + if fork_poison.is_none() && ret.is_empty() { // just as if there were no blocks loaded Ok(None) } else { @@ -1543,8 +1543,8 @@ impl StacksChainState { /// Store a preprocessed block, queuing it up for subsequent processing. /// The caller should at least verify that the block is attached to some fork in the burn /// chain. - fn store_staging_block<'a>( - tx: &mut DBTx<'a>, + fn store_staging_block( + tx: &mut DBTx<'_>, blocks_path: &str, consensus_hash: &ConsensusHash, block: &StacksBlock, @@ -1586,8 +1586,8 @@ impl StacksChainState { ) .map_err(Error::DBError)?; let parent_not_in_staging_blocks = - has_parent_rows.len() == 0 && block.header.parent_block != FIRST_STACKS_BLOCK_HASH; + has_parent_rows.is_empty() && block.header.parent_block != FIRST_STACKS_BLOCK_HASH; - if has_unprocessed_parent_rows.len() > 0 || parent_not_in_staging_blocks { + if !has_unprocessed_parent_rows.is_empty() || parent_not_in_staging_blocks { // still have unprocessed parent OR its parent is not in staging_blocks at all -- this block is not attachable debug!( "Store non-attachable anchored block {}/{}", @@ -1665,8 +1665,8 @@ impl StacksChainState { /// order, this method does not check that.
/// The consensus_hash and anchored_block_hash correspond to the _parent_ Stacks block. /// Microblocks ought to only be stored if they are first confirmed to have been signed. - pub fn store_staging_microblock<'a>( - tx: &mut DBTx<'a>, + pub fn store_staging_microblock( + tx: &mut DBTx<'_>, parent_consensus_hash: &ConsensusHash, parent_anchored_block_hash: &BlockHeaderHash, microblock: &StacksMicroblock, @@ -1753,7 +1753,7 @@ impl StacksChainState { ) -> Result<Option<bool>, Error> { StacksChainState::read_i64s(blocks_conn, "SELECT processed FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", &[block_hash, consensus_hash]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(None) } else if processed.len() == 1 { @@ -1786,7 +1786,7 @@ impl StacksChainState { ) -> Result<bool, Error> { StacksChainState::read_i64s(blocks_conn, "SELECT orphaned FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2", &[block_hash, consensus_hash]) .and_then(|orphaned| { - if orphaned.len() == 0 { + if orphaned.is_empty() { Ok(false) } else if orphaned.len() == 1 { @@ -1810,7 +1810,7 @@ impl StacksChainState { ) -> Result<Option<bool>, Error> { StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE anchored_block_hash = ?1 AND microblock_hash = ?2 AND consensus_hash = ?3", &[&parent_block_hash, microblock_hash, &parent_consensus_hash]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(None) } else if processed.len() == 1 { @@ -1884,7 +1884,7 @@ impl StacksChainState { FROM staging_blocks JOIN staging_microblocks ON staging_blocks.parent_anchored_block_hash = staging_microblocks.anchored_block_hash AND staging_blocks.parent_consensus_hash = staging_microblocks.consensus_hash WHERE staging_blocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2 AND staging_microblocks.orphaned = 0", &[child_index_block_hash, &parent_microblock_hash]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(false) } else if processed.len() == 1 { @@ -2165,13 -2165,12 @@ impl StacksChainState { /// Used to see if we have the block data for an unaffirmed PoX anchor block /// (hence the test_debug! macros referring to PoX anchor blocks) fn has_stacks_block_for(chainstate_conn: &DBConn, block_commit: LeaderBlockCommitOp) -> bool { - StacksChainState::get_known_consensus_hashes_for_block( + !StacksChainState::get_known_consensus_hashes_for_block( chainstate_conn, &block_commit.block_header_hash, ) .expect("FATAL: failed to query staging blocks DB") - .len() - > 0 + .is_empty() } /// Find the canonical affirmation map. Handle unaffirmed anchor blocks by simply seeing if we @@ -2295,8 +2294,8 @@ impl StacksChainState { /// Mark an anchored block as orphaned and both orphan and delete its descendant microblock data. /// The blocks database will eventually delete all orphaned data. - fn delete_orphaned_epoch_data<'a>( - tx: &mut DBTx<'a>, + fn delete_orphaned_epoch_data( + tx: &mut DBTx<'_>, blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, @@ -2357,8 +2356,8 @@ impl StacksChainState { /// fork but processable on another (i.e. the same block can show up in two different PoX /// forks, but will only be valid in at most one of them). /// This does not restore any block data; it merely makes it possible to go re-process them.
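The `store_staging_*`/`delete_orphaned_*` signature rewrites above all drop a function-level `<'a>` whose only job was to name the `DBTx` parameter's lifetime; with `'_` the compiler infers it and the signature reads the same. A standalone sketch (the `Tx` type here is hypothetical, not stackslib's `DBTx`):

```rust
struct Tx<'a> {
    log: &'a mut Vec<String>,
}

// Before: fn store<'a>(tx: &mut Tx<'a>, row: &str). The named 'a is never
// used elsewhere in the signature, so the anonymous lifetime is equivalent,
// mirroring the `tx: &mut DBTx<'_>` form adopted above.
fn store(tx: &mut Tx<'_>, row: &str) {
    tx.log.push(row.to_string());
}

fn main() {
    let mut log = Vec::new();
    let mut tx = Tx { log: &mut log };
    store(&mut tx, "staging_microblock");
    drop(tx); // release the &mut borrow before inspecting the log
    assert_eq!(log, ["staging_microblock"]);
}
```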
- pub fn forget_orphaned_epoch_data<'a>( - tx: &mut DBTx<'a>, + pub fn forget_orphaned_epoch_data( + tx: &mut DBTx<'_>, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, ) -> Result<(), Error> { @@ -2384,9 +2383,9 @@ impl StacksChainState { /// Mark its children as attachable. /// Idempotent. /// sort_tx_opt is required if accept is true - fn set_block_processed<'a, 'b>( - tx: &mut DBTx<'a>, - mut sort_tx_opt: Option<&mut SortitionHandleTx<'b>>, + fn set_block_processed( + tx: &mut DBTx<'_>, + mut sort_tx_opt: Option<&mut SortitionHandleTx<'_>>, blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, @@ -2506,8 +2505,8 @@ impl StacksChainState { } #[cfg(test)] - fn set_block_orphaned<'a>( - tx: &mut DBTx<'a>, + fn set_block_orphaned( + tx: &mut DBTx<'_>, blocks_path: &str, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, @@ -2571,8 +2570,8 @@ impl StacksChainState { /// Drop a trail of staging microblocks. Mark them as orphaned and delete their data. /// Also, orphan any anchored children blocks that build off of the now-orphaned microblocks. - fn drop_staging_microblocks<'a>( - tx: &mut DBTx<'a>, + fn drop_staging_microblocks( + tx: &mut DBTx<'_>, consensus_hash: &ConsensusHash, anchored_block_hash: &BlockHeaderHash, invalid_block_hash: &BlockHeaderHash, @@ -2638,8 +2637,8 @@ impl StacksChainState { /// Mark a range of a stream of microblocks as confirmed. /// All the corresponding blocks must have been validated and proven contiguous. - fn set_microblocks_processed<'a>( - tx: &mut DBTx<'a>, + fn set_microblocks_processed( + tx: &mut DBTx<'_>, child_consensus_hash: &ConsensusHash, child_anchored_block_hash: &BlockHeaderHash, last_microblock_hash: &BlockHeaderHash, @@ -2717,7 +2716,7 @@ impl StacksChainState { StacksBlockHeader::make_index_block_hash(&parent_consensus_hash, &parent_block_hash); StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence = ?2", &[&parent_index_block_hash, &seq]) .and_then(|processed| { - if processed.len() == 0 { + if processed.is_empty() { Ok(false) } else if processed.len() == 1 { @@ -2788,7 +2787,7 @@ impl StacksChainState { min_seq: u16, ) -> Result { StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 LIMIT 1", &[&parent_index_block_hash, &min_seq]) - .and_then(|processed| Ok(processed.len() > 0)) + .and_then(|processed| Ok(!processed.is_empty())) } /// Do we have a given microblock as a descendant of a given anchored block? 
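Reviewer note: the dominant change in `blocks.rs` so far is replacing `len() == 0` / `len() > 0` comparisons with `is_empty()`. This is clippy's `len_zero` lint; the two forms are equivalent for slices and `Vec`, but `is_empty()` states the intent directly and also works for containers with no cheap `len()`. A minimal sketch of the pattern, illustrative only and not code from this diff:

```rust
// The `len_zero` cleanup behind these hunks: prefer `is_empty()` over
// comparing `len()` against zero.
fn describe(rows: &[i64]) -> &'static str {
    // Before: if rows.len() == 0 { ... }
    if rows.is_empty() {
        "no rows"
    } else if rows.len() == 1 {
        "exactly one row"
    } else {
        "multiple rows"
    }
}

fn main() {
    assert_eq!(describe(&[]), "no rows");
    assert_eq!(describe(&[42]), "exactly one row");
    assert_eq!(describe(&[1, 2]), "multiple rows");
}
```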
@@ -2801,7 +2800,7 @@ impl StacksChainState { microblock_hash: &BlockHeaderHash, ) -> Result { StacksChainState::read_i64s(&self.db(), "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 LIMIT 1", &[parent_index_block_hash, microblock_hash]) - .and_then(|processed| Ok(processed.len() > 0)) + .and_then(|processed| Ok(!processed.is_empty())) } /// Do we have any microblock available to serve in any capacity, given its parent anchored block's @@ -2816,7 +2815,7 @@ impl StacksChainState { "SELECT processed FROM staging_microblocks WHERE index_block_hash = ?1 LIMIT 1", &[&parent_index_block_hash], ) - .and_then(|processed| Ok(processed.len() > 0)) + .and_then(|processed| Ok(!processed.is_empty())) } /// Given an index block hash, get the consensus hash and block hash @@ -3017,7 +3016,7 @@ impl StacksChainState { microblocks.to_owned() }; - if signed_microblocks.len() == 0 { + if signed_microblocks.is_empty() { if anchored_block_header.parent_microblock == EMPTY_MICROBLOCK_PARENT_HASH && anchored_block_header.parent_microblock_sequence == 0 { @@ -3742,8 +3741,8 @@ impl StacksChainState { /// Call this method repeatedly to remove long chains of orphaned blocks and microblocks from /// staging. /// Returns true if an orphan block was processed - fn process_next_orphaned_staging_block<'a>( - blocks_tx: &mut DBTx<'a>, + fn process_next_orphaned_staging_block( + blocks_tx: &mut DBTx<'_>, blocks_path: &str, ) -> Result { test_debug!("Find next orphaned block"); @@ -3753,7 +3752,7 @@ impl StacksChainState { let sql = "SELECT * FROM staging_blocks WHERE processed = 0 AND orphaned = 1 ORDER BY RANDOM() LIMIT 1"; let mut rows = query_rows::(blocks_tx, sql, NO_PARAMS).map_err(Error::DBError)?; - if rows.len() == 0 { + if rows.is_empty() { test_debug!("No orphans to remove"); return Ok(false); } @@ -3839,8 +3838,8 @@ impl StacksChainState { /// can process, as well as its parent microblocks that it confirms /// Returns Some(microblocks, staging block) if we found a sequence of blocks to process. /// Returns None if not. - fn find_next_staging_block<'a>( - blocks_tx: &mut StacksDBTx<'a>, + fn find_next_staging_block( + blocks_tx: &mut StacksDBTx<'_>, blocks_path: &str, sort_tx: &mut SortitionHandleTx, ) -> Result, StagingBlock)>, Error> { @@ -3958,7 +3957,7 @@ impl StacksChainState { &candidate.anchored_block_hash, )? { Some(bytes) => { - if bytes.len() == 0 { + if bytes.is_empty() { error!( "CORRUPTION: No block data for {}/{}", &candidate.consensus_hash, &candidate.anchored_block_hash @@ -4646,8 +4645,8 @@ impl StacksChainState { /// Process matured miner rewards for this block. /// Returns the number of liquid uSTX created -- i.e. the coinbase - pub fn process_matured_miner_rewards<'a, 'b>( - clarity_tx: &mut ClarityTx<'a, 'b>, + pub fn process_matured_miner_rewards( + clarity_tx: &mut ClarityTx<'_, '_>, miner_share: &MinerReward, users_share: &[MinerReward], parent_share: &MinerReward, @@ -4667,8 +4666,8 @@ impl StacksChainState { /// Process all STX that unlock at this block height. 
/// Return the total number of uSTX unlocked in this block - pub fn process_stx_unlocks<'a, 'b>( - clarity_tx: &mut ClarityTx<'a, 'b>, + pub fn process_stx_unlocks( + clarity_tx: &mut ClarityTx<'_, '_>, ) -> Result<(u128, Vec), Error> { let mainnet = clarity_tx.config.mainnet; let lockup_contract_id = boot_code_id("lockup", mainnet); @@ -5486,7 +5485,9 @@ impl StacksChainState { ) }; - let (last_microblock_hash, last_microblock_seq) = if microblocks.len() > 0 { + let (last_microblock_hash, last_microblock_seq) = if microblocks.is_empty() { + (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0) + } else { let _first_mblock_hash = microblocks[0].block_hash(); let num_mblocks = microblocks.len(); let last_microblock_hash = microblocks[num_mblocks - 1].block_hash(); @@ -5502,8 +5503,6 @@ impl StacksChainState { parent_block_hash ); (last_microblock_hash, last_microblock_seq) - } else { - (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0) }; if last_microblock_hash != block.header.parent_microblock @@ -5701,7 +5700,7 @@ impl StacksChainState { }; // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + if !lockup_events.is_empty() { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction if let Some(receipt) = tx_receipts.get_mut(0) { @@ -5713,7 +5712,7 @@ impl StacksChainState { } } // if any, append auto unlock events to the coinbase receipt - if auto_unlock_events.len() > 0 { + if !auto_unlock_events.is_empty() { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction if let Some(receipt) = tx_receipts.get_mut(0) { @@ -6065,11 +6064,11 @@ impl StacksChainState { /// Return a poison microblock transaction payload if the microblock stream contains a /// deliberate miner fork (this is NOT consensus-critical information, but is instead meant for /// consumption by future miners). - pub fn process_next_staging_block<'a, T: BlockEventDispatcher>( + pub fn process_next_staging_block( &mut self, burnchain_dbconn: &DBConn, sort_tx: &mut SortitionHandleTx, - dispatcher_opt: Option<&'a T>, + dispatcher_opt: Option<&T>, ) -> Result<(Option, Option), Error> { let blocks_path = self.blocks_path.clone(); let (mut chainstate_tx, clarity_instance) = self.chainstate_tx_begin()?; @@ -6467,12 +6466,12 @@ impl StacksChainState { /// found. For each chain tip produced, return the header info, receipts, parent microblock /// stream execution cost, and block execution cost. A value of None will be returned for the /// epoch receipt if the block was invalid. 
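Reviewer note: the `last_microblock_hash` hunk above does more than switch to `is_empty()`; it flips the branch order so the trivial "no microblocks" case is handled first and the long branch no longer sits behind a condition. A toy sketch of the same restructuring, under invented names:

```rust
// Illustrative only: handle the empty case up front, then let the
// substantive branch read without a negation in the condition.
fn last_item_and_count(items: &[u32]) -> (u32, usize) {
    if items.is_empty() {
        // short, exceptional case first (the diff returns a sentinel hash here)
        (0, 0)
    } else {
        let last = items[items.len() - 1];
        (last, items.len())
    }
}

fn main() {
    assert_eq!(last_item_and_count(&[]), (0, 0));
    assert_eq!(last_item_and_count(&[7, 9]), (9, 2));
}
```

The same inversion appears later in this diff for `mblock_opt` in `miner.rs`.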
- pub fn process_blocks<'a, T: BlockEventDispatcher>( + pub fn process_blocks( &mut self, burnchain_db_conn: &DBConn, mut sort_tx: SortitionHandleTx, max_blocks: usize, - dispatcher_opt: Option<&'a T>, + dispatcher_opt: Option<&T>, ) -> Result, Option)>, Error> { // first, clear out orphans let blocks_path = self.blocks_path.clone(); @@ -9630,17 +9629,14 @@ pub mod test { ) .unwrap() .is_none()); - assert!( - StacksChainState::load_block_bytes( - &chainstate.blocks_path, - &consensus_hashes[i + 1], - &blocks[i + 1].block_hash() - ) - .unwrap() - .unwrap() - .len() - > 0 - ); + assert!(!StacksChainState::load_block_bytes( + &chainstate.blocks_path, + &consensus_hashes[i + 1], + &blocks[i + 1].block_hash() + ) + .unwrap() + .unwrap() + .is_empty()); for mblock in microblocks[i + 1].iter() { let staging_mblock = StacksChainState::load_staging_microblock( @@ -9653,7 +9649,7 @@ pub mod test { .unwrap(); assert!(!staging_mblock.processed); assert!(!staging_mblock.orphaned); - assert!(staging_mblock.block_data.len() > 0); + assert!(!staging_mblock.block_data.is_empty()); } } @@ -9976,7 +9972,7 @@ pub mod test { .unwrap(); mblocks.push(next_mblock); } - if mblock_ptr.len() == 0 { + if mblock_ptr.is_empty() { break; } } @@ -10266,10 +10262,7 @@ pub mod test { SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); - assert_eq!( - tip.block_height, - first_stacks_block_height + (tenure_id as u64) - ); + assert_eq!(tip.block_height, first_stacks_block_height + tenure_id); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index 98f41bf9c7..92584e362a 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -35,7 +35,7 @@ use crate::util_lib::db::{ }; impl FromRow for StacksBlockHeader { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: u8 = row.get_unwrap("version"); let total_burn_str: String = row.get_unwrap("total_burn"); let total_work_str: String = row.get_unwrap("total_work"); @@ -80,7 +80,7 @@ impl FromRow for StacksBlockHeader { } impl FromRow for StacksMicroblockHeader { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: u8 = row.get_unwrap("version"); let sequence: u16 = row.get_unwrap("sequence"); let prev_block = BlockHeaderHash::from_column(row, "prev_block")?; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index cfcaa62b52..31159137ac 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -413,7 +413,7 @@ impl StacksHeaderInfo { } impl FromRow for DBConfig { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let version: String = row.get_unwrap("version"); let mainnet_i64: i64 = row.get_unwrap("mainnet"); let chain_id_i64: i64 = row.get_unwrap("chain_id"); @@ -430,7 +430,7 @@ impl FromRow for DBConfig { } impl FromRow for StacksHeaderInfo { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let block_height: u64 = u64::from_column(row, "block_height")?; let index_root = TrieHash::from_column(row, "index_root")?; let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; @@ -485,7 +485,7 @@ pub struct ClarityTx<'a, 'b> { pub config: DBConfig, } -impl<'a, 'b> ClarityConnection for ClarityTx<'a, 'b> { +impl 
ClarityConnection for ClarityTx<'_, '_> { fn with_clarity_db_readonly_owned(&mut self, to_do: F) -> R where F: FnOnce(ClarityDatabase) -> (R, ClarityDatabase), @@ -1069,11 +1069,7 @@ impl StacksChainState { Ok(db_config.version != CHAINSTATE_VERSION) } - fn apply_schema_migrations<'a>( - tx: &DBTx<'a>, - mainnet: bool, - chain_id: u32, - ) -> Result<(), Error> { + fn apply_schema_migrations(tx: &DBTx<'_>, mainnet: bool, chain_id: u32) -> Result<(), Error> { if !Self::need_schema_migrations(tx, mainnet, chain_id)? { return Ok(()); } @@ -1145,7 +1141,7 @@ impl StacksChainState { Ok(()) } - fn add_indexes<'a>(tx: &DBTx<'a>) -> Result<(), Error> { + fn add_indexes(tx: &DBTx<'_>) -> Result<(), Error> { for cmd in CHAINSTATE_INDEXES { tx.execute_batch(cmd)?; } @@ -1331,7 +1327,7 @@ impl StacksChainState { } let mut allocation_events: Vec = vec![]; - if boot_data.initial_balances.len() > 0 { + if !boot_data.initial_balances.is_empty() { warn!( "Seeding {} balances coming from the config", boot_data.initial_balances.len() @@ -1550,7 +1546,7 @@ impl StacksChainState { StacksChainState::parse_genesis_address(&entry.owner, mainnet); let zonefile_hash = { - if entry.zonefile_hash.len() == 0 { + if entry.zonefile_hash.is_empty() { Value::buff_from(vec![]).unwrap() } else { let buffer = Hash160::from_hex(&entry.zonefile_hash) @@ -1907,25 +1903,25 @@ impl StacksChainState { /// Begin a transaction against the (indexed) stacks chainstate DB. /// Does not create a Clarity instance. - pub fn index_tx_begin<'a>(&'a mut self) -> StacksDBTx<'a> { + pub fn index_tx_begin(&mut self) -> StacksDBTx<'_> { StacksDBTx::new(&mut self.state_index, ()) } - pub fn index_conn<'a>(&'a self) -> StacksDBConn<'a> { + pub fn index_conn(&self) -> StacksDBConn<'_> { StacksDBConn::new(&self.state_index, ()) } /// Begin a transaction against the underlying DB /// Does not create a Clarity instance, and does not affect the MARF. - pub fn db_tx_begin<'a>(&'a mut self) -> Result, Error> { + pub fn db_tx_begin(&mut self) -> Result, Error> { self.state_index.storage_tx().map_err(Error::DBError) } /// Simultaneously begin a transaction against both the headers and blocks. /// Used when considering a new block to append the chain state. - pub fn chainstate_tx_begin<'a>( - &'a mut self, - ) -> Result<(ChainstateTx<'a>, &'a mut ClarityInstance), Error> { + pub fn chainstate_tx_begin( + &mut self, + ) -> Result<(ChainstateTx<'_>, &mut ClarityInstance), Error> { let config = self.config(); let blocks_path = self.blocks_path.clone(); let clarity_instance = &mut self.clarity_state; @@ -2592,8 +2588,8 @@ impl StacksChainState { /// Append a Stacks block to an existing Stacks block, and grant the miner the block reward. /// Return the new Stacks header info. - pub fn advance_tip<'a>( - headers_tx: &mut StacksDBTx<'a>, + pub fn advance_tip( + headers_tx: &mut StacksDBTx<'_>, parent_tip: &StacksBlockHeader, parent_consensus_hash: &ConsensusHash, new_tip: &StacksBlockHeader, diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index da8e7da505..e56624b84f 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -979,7 +979,7 @@ impl StacksChainState { TransactionPayload::TokenTransfer(ref addr, ref amount, ref memo) => { // post-conditions are not allowed for this variant, since they're non-sensical. // Their presence in this variant makes the transaction invalid. 
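Reviewer note: the other recurring cleanup in these files is clippy's `needless_lifetimes`: named lifetime parameters are dropped wherever the compiler can infer them, writing `'_` in argument and return position (`index_tx_begin(&mut self) -> StacksDBTx<'_>`, `tx: &DBTx<'_>`) and eliding unused names on impl blocks (`impl ClarityConnection for ClarityTx<'_, '_>`). A self-contained sketch with toy types, illustrative only:

```rust
// Illustrative only -- the `needless_lifetimes` pattern used throughout
// this diff, shown on a toy transaction type.
struct Db {
    rows: Vec<String>,
}

struct DbTx<'a> {
    db: &'a mut Db,
}

impl Db {
    // Before: pub fn tx_begin<'a>(&'a mut self) -> DbTx<'a>
    fn tx_begin(&mut self) -> DbTx<'_> {
        DbTx { db: self }
    }
}

// Before: fn store_row<'a>(tx: &mut DbTx<'a>, row: &str)
fn store_row(tx: &mut DbTx<'_>, row: &str) {
    tx.db.rows.push(row.to_string());
}

// Before: impl<'a> std::fmt::Debug for DbTx<'a>
impl std::fmt::Debug for DbTx<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "DbTx({} rows)", self.db.rows.len())
    }
}

fn main() {
    let mut db = Db { rows: vec![] };
    let mut tx = db.tx_begin();
    store_row(&mut tx, "block");
    assert_eq!(format!("{:?}", tx), "DbTx(1 rows)");
}
```

The elision is purely syntactic; borrow checking is unchanged, which is why these hunks touch only signatures.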
- if tx.post_conditions.len() > 0 { + if !tx.post_conditions.is_empty() { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); info!("{}", &msg; "txid" => %tx.txid()); @@ -1391,7 +1391,7 @@ impl StacksChainState { TransactionPayload::PoisonMicroblock(ref mblock_header_1, ref mblock_header_2) => { // post-conditions are not allowed for this variant, since they're non-sensical. // Their presence in this variant makes the transaction invalid. - if tx.post_conditions.len() > 0 { + if !tx.post_conditions.is_empty() { let msg = format!("Invalid Stacks transaction: PoisonMicroblock transactions do not support post-conditions"); info!("{}", &msg); @@ -1423,7 +1423,7 @@ impl StacksChainState { TransactionPayload::TenureChange(ref payload) => { // post-conditions are not allowed for this variant, since they're non-sensical. // Their presence in this variant makes the transaction invalid. - if tx.post_conditions.len() > 0 { + if !tx.post_conditions.is_empty() { let msg = format!("Invalid Stacks transaction: TenureChange transactions do not support post-conditions"); info!("{msg}"); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index a871b98944..b39de26c18 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -259,7 +259,7 @@ impl UnconfirmedState { let mut num_new_mblocks = 0; let mut have_state = self.have_state; - if mblocks.len() > 0 { + if !mblocks.is_empty() { let cur_cost = self.cost_so_far.clone(); // NOTE: we *must* commit the clarity_tx now that it's begun. diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 28d1ee483d..b917dffe41 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -239,7 +239,7 @@ pub trait MarfConnection { } } -impl<'a, T: MarfTrieId> MarfConnection for MarfTransaction<'a, T> { +impl MarfConnection for MarfTransaction<'_, T> { fn with_conn(&mut self, exec: F) -> R where F: FnOnce(&mut TrieStorageConnection) -> R, @@ -529,7 +529,7 @@ impl<'a, T: MarfTrieId> MarfTransaction<'a, T> { Some(WriteChainTip { ref block_hash, .. }) => Ok(block_hash.clone()), }?; - if keys.len() == 0 { + if keys.is_empty() { return Ok(()); } @@ -1348,7 +1348,7 @@ impl MARF { ) -> Result<(), Error> { assert_eq!(keys.len(), values.len()); - if keys.len() == 0 { + if keys.is_empty() { return Ok(()); } @@ -1394,7 +1394,7 @@ impl MARF { // instance methods impl MARF { - pub fn begin_tx<'a>(&'a mut self) -> Result, Error> { + pub fn begin_tx(&mut self) -> Result, Error> { let storage = self.storage.transaction()?; Ok(MarfTransaction { storage, @@ -1456,7 +1456,7 @@ impl MARF { Some(WriteChainTip { ref block_hash, .. 
}) => Ok(block_hash.clone()), }?; - if keys.len() == 0 { + if keys.is_empty() { return Ok(()); } @@ -1620,7 +1620,7 @@ impl MARF { } /// Make a raw transaction to the underlying storage - pub fn storage_tx<'a>(&'a mut self) -> Result, db_error> { + pub fn storage_tx(&mut self) -> Result, db_error> { self.storage.sqlite_tx() } diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index faf7885ea4..b689035675 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -372,7 +372,7 @@ impl TrieCursor { /// last ptr visited pub fn ptr(&self) -> TriePtr { // should always be true by construction - assert!(self.node_ptrs.len() > 0); + assert!(!self.node_ptrs.is_empty()); self.node_ptrs[self.node_ptrs.len() - 1].clone() } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 85e91ebefb..4d399c9f70 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -610,7 +610,7 @@ impl TrieMerkleProof { // need the target node's root trie ptr, unless this is the first proof (in which case // it's a junction proof) - if proof.len() > 0 { + if !proof.is_empty() { let root_ptr = storage.root_trieptr(); let (root_node, _) = storage.read_nodetype(&root_ptr)?; @@ -706,7 +706,7 @@ impl TrieMerkleProof { return None; } - if hashes.len() == 0 { + if hashes.is_empty() { // special case -- if this shunt proof has no hashes (i.e. this is a leaf from the first // block), then we can safely skip this step trace!( @@ -839,7 +839,7 @@ impl TrieMerkleProof { ) -> Result>, Error> { trace!("make_segment_proof: ptrs = {:?}", &ptrs); - assert!(ptrs.len() > 0); + assert!(!ptrs.is_empty()); assert_eq!(ptrs[0], storage.root_trieptr()); for i in 1..ptrs.len() { assert!(!is_backptr(ptrs[i].id())); @@ -1004,7 +1004,7 @@ impl TrieMerkleProof { /// * segment proof 0 must end in a leaf /// * all segment proofs must end in a Node256 (a root) fn is_proof_well_formed(proof: &Vec>, expected_path: &TrieHash) -> bool { - if proof.len() == 0 { + if proof.is_empty() { trace!("Proof is empty"); return false; } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index d4a9bbe86c..d8d1b9133a 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -103,7 +103,7 @@ impl BlockMap for TrieFileStorage { } } -impl<'a, T: MarfTrieId> BlockMap for TrieStorageConnection<'a, T> { +impl BlockMap for TrieStorageConnection<'_, T> { type TrieId = T; fn get_block_hash(&self, id: u32) -> Result { @@ -142,7 +142,7 @@ impl<'a, T: MarfTrieId> BlockMap for TrieStorageConnection<'a, T> { } } -impl<'a, T: MarfTrieId> BlockMap for TrieStorageTransaction<'a, T> { +impl BlockMap for TrieStorageTransaction<'_, T> { type TrieId = T; fn get_block_hash(&self, id: u32) -> Result { @@ -925,7 +925,7 @@ impl TrieRAM { data.push((root_node, root_hash)); - while frontier.len() > 0 { + while !frontier.is_empty() { let next_ptr = frontier .pop_front() .expect("BUG: no ptr in non-empty frontier"); @@ -1162,7 +1162,7 @@ enum SqliteConnection<'a> { Tx(Transaction<'a>), } -impl<'a> Deref for SqliteConnection<'a> { +impl Deref for SqliteConnection<'_> { type Target = Connection; fn deref(&self) -> &Connection { match self { @@ -1322,7 +1322,7 @@ impl TrieStorageTransientData { } impl TrieFileStorage { - pub fn connection<'a>(&'a mut self) 
-> TrieStorageConnection<'a, T> { + pub fn connection(&mut self) -> TrieStorageConnection<'_, T> { TrieStorageConnection { db: SqliteConnection::ConnRef(&self.db), db_path: &self.db_path, @@ -1338,7 +1338,7 @@ impl TrieFileStorage { } } - pub fn transaction<'a>(&'a mut self) -> Result, Error> { + pub fn transaction(&mut self) -> Result, Error> { if self.readonly() { return Err(Error::ReadOnlyError); } @@ -1363,7 +1363,7 @@ impl TrieFileStorage { &self.db } - pub fn sqlite_tx<'a>(&'a mut self) -> Result, db_error> { + pub fn sqlite_tx(&mut self) -> Result, db_error> { tx_begin_immediate(&mut self.db) } @@ -1956,7 +1956,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { } } -impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { +impl TrieStorageConnection<'_, T> { pub fn readonly(&self) -> bool { self.data.readonly } diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 50efc260ab..7f92bb678d 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -107,16 +107,16 @@ fn marf_insert_different_leaf_different_path_different_block_100() { for i in 0..100 { debug!("insert {}", i); - let block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); let path_bytes = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, i as u8, + 23, 24, 25, 26, 27, 28, 29, 30, i, ]; marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &block_header) .unwrap(); let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -133,26 +133,26 @@ fn marf_insert_different_leaf_different_path_different_block_100() { debug!("---------"); for i in 0..100 { - let block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); let path_bytes = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, i as u8, + 23, 24, 25, 26, 27, 28, 29, 30, i, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); let leaf = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &path) .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); assert_eq!(marf.borrow_storage_backend().get_cur_block(), block_header); merkle_test_marf( &mut marf.borrow_storage_backend(), &block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -190,13 +190,13 @@ fn marf_insert_same_leaf_different_block_100() { let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &next_block_header) .unwrap(); let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = 
TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -213,8 +213,8 @@ fn marf_insert_same_leaf_different_block_100() { debug!("---------"); for i in 0..100 { - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); let leaf = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, @@ -223,7 +223,7 @@ fn marf_insert_same_leaf_different_block_100() { .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); assert_eq!( marf.borrow_storage_backend().get_cur_block(), next_block_header @@ -233,7 +233,7 @@ fn marf_insert_same_leaf_different_block_100() { &mut marf.borrow_storage_backend(), &next_block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -266,16 +266,16 @@ fn marf_insert_leaf_sequence_2() { for i in 0..2 { let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let prior_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let prior_block_header = BlockHeaderHash::from_bytes(&[i; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); marf.commit().unwrap(); marf.begin(&prior_block_header, &next_block_header).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -287,14 +287,14 @@ fn marf_insert_leaf_sequence_2() { debug!("---------"); for i in 0..2 { - let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i + 1; 32]).unwrap(); let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); let leaf = MARF::get_path( &mut marf.borrow_storage_backend(), &last_block_header, @@ -303,7 +303,7 @@ fn marf_insert_leaf_sequence_2() { .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); assert_eq!( marf.borrow_storage_backend().get_cur_block(), next_block_header @@ -313,7 +313,7 @@ fn marf_insert_leaf_sequence_2() { &mut marf.borrow_storage_backend(), &last_block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -343,17 +343,17 @@ fn marf_insert_leaf_sequence_100() { for i in 1..101 { let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); marf.commit().unwrap(); - let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i; 32]).unwrap(); marf.begin(&last_block_header, &next_block_header).unwrap(); last_block_header = next_block_header; - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } marf.commit().unwrap(); @@ -365,26 +365,26 @@ fn marf_insert_leaf_sequence_100() { let mut f = marf.borrow_storage_backend(); for i in 1..101 { - let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); + let next_block_header = BlockHeaderHash::from_bytes(&[i; 32]).unwrap(); let path_bytes = [ - i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; let path = TrieHash::from_bytes(&path_bytes).unwrap(); - let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); + let value = TrieLeaf::new(&vec![], &[i; 40].to_vec()); eprintln!("Finding value inserted at {}", &next_block_header); let leaf = MARF::get_path(&mut f, &last_block_header, &path) .unwrap() .unwrap(); - assert_eq!(leaf.data.to_vec(), [i as u8; 40].to_vec()); + assert_eq!(leaf.data.to_vec(), [i; 40].to_vec()); merkle_test_marf( &mut f, &last_block_header, &path_bytes.to_vec(), - &[i as u8; 40].to_vec(), + &[i; 40].to_vec(), None, ); } @@ -846,7 +846,7 @@ fn marf_merkle_verify_backptrs() { marf.begin(&block_header_1, &block_header_2).unwrap(); marf.insert_raw( TrieHash::from_bytes(&path_2[..]).unwrap(), - TrieLeaf::new(&vec![], &[20 as u8; 40].to_vec()), + TrieLeaf::new(&vec![], &[20; 40].to_vec()), ) .unwrap(); @@ -864,14 +864,14 @@ fn marf_merkle_verify_backptrs() { marf.begin(&block_header_2, &block_header_3).unwrap(); marf.insert_raw( TrieHash::from_bytes(&path_3[..]).unwrap(), - TrieLeaf::new(&vec![], &[21 as u8; 40].to_vec()), + TrieLeaf::new(&vec![], &[21; 40].to_vec()), ) .unwrap(); debug!("----------------"); debug!( "Merkle verify {:?} from {:?}", - &to_hex(&[21 as u8; 40]), + &to_hex(&[21; 40]), block_header_3 ); debug!("----------------"); @@ -882,7 +882,7 @@ fn marf_merkle_verify_backptrs() { &mut marf.borrow_storage_backend(), &block_header_3, &path_3, - &[21 as u8; 40].to_vec(), + &[21; 40].to_vec(), None, ); if let Some(root_hashes) = last_root_hashes.take() { diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index d122e11db0..f563d507a7 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -62,7 +62,7 @@ where let (root, root_hash) = Trie::read_root(s).unwrap(); frontier.push((root, root_hash, 0)); - while frontier.len() > 0 { + while !frontier.is_empty() { let (next, next_hash, depth) = frontier.pop().unwrap(); let (ptrs, path_len) = match next { TrieNodeType::Leaf(ref leaf_data) => { @@ -248,16 +248,16 @@ pub fn make_node_path( // update parent match parent { TrieNodeType::Node256(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Node48(ref mut data) => { - 
assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Node16(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Node4(ref mut data) => { - assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr as u32))) + assert!(data.insert(&TriePtr::new(node_id, chr, node_ptr))) } TrieNodeType::Leaf(_) => panic!("can't insert into leaf"), }; @@ -270,7 +270,7 @@ pub fn make_node_path( .unwrap(); nodes.push(parent.clone()); - node_ptrs.push(TriePtr::new(node_id, chr, node_ptr as u32)); + node_ptrs.push(TriePtr::new(node_id, chr, node_ptr)); hashes.push(TrieHash::from_data(&[(seg_id + 1) as u8; 32])); parent = node; @@ -292,26 +292,18 @@ pub fn make_node_path( // update parent match parent { - TrieNodeType::Node256(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), - TrieNodeType::Node48(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), - TrieNodeType::Node16(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), - TrieNodeType::Node4(ref mut data) => assert!(data.insert(&TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32 - ))), + TrieNodeType::Node256(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } + TrieNodeType::Node48(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } + TrieNodeType::Node16(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } + TrieNodeType::Node4(ref mut data) => { + assert!(data.insert(&TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr))) + } TrieNodeType::Leaf(_) => panic!("can't insert into leaf"), }; @@ -323,11 +315,7 @@ pub fn make_node_path( .unwrap(); nodes.push(parent.clone()); - node_ptrs.push(TriePtr::new( - TrieNodeID::Leaf as u8, - child_chr, - child_ptr as u32, - )); + node_ptrs.push(TriePtr::new(TrieNodeID::Leaf as u8, child_chr, child_ptr)); hashes.push(TrieHash::from_data(&[(seg_id + 1) as u8; 32])); (nodes, node_ptrs, hashes) diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs b/stackslib/src/chainstate/stacks/index/test/node.rs index 227adda439..45e07014a3 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -3938,7 +3938,7 @@ fn read_write_node256() { assert!(wres.is_ok()); let root_ptr = trie_io.root_ptr(); - let rres = trie_io.read_nodetype(&TriePtr::new(TrieNodeID::Node256 as u8, 0, root_ptr as u32)); + let rres = trie_io.read_nodetype(&TriePtr::new(TrieNodeID::Node256 as u8, 0, root_ptr)); assert!(rres.is_ok()); assert_eq!(rres.unwrap(), (node256.as_trie_node_type(), hash)); diff --git a/stackslib/src/chainstate/stacks/index/test/storage.rs b/stackslib/src/chainstate/stacks/index/test/storage.rs index fdd3e30191..ebd97fd5c7 100644 --- a/stackslib/src/chainstate/stacks/index/test/storage.rs +++ b/stackslib/src/chainstate/stacks/index/test/storage.rs @@ -81,8 +81,8 @@ fn trie_cmp( let mut frontier_1 = VecDeque::new(); let mut frontier_2 = VecDeque::new(); - assert!(t1.data().len() > 0); - assert!(t2.data().len() > 0); + assert!(!t1.data().is_empty()); + assert!(!t2.data().is_empty()); let 
(n1_data, n1_hash) = t1.data()[0].clone(); let (n2_data, n2_hash) = t2.data()[0].clone(); @@ -99,7 +99,7 @@ fn trie_cmp( frontier_1.push_back((n1_data, n1_hash)); frontier_2.push_back((n2_data, n2_hash)); - while frontier_1.len() > 0 && frontier_2.len() > 0 { + while !frontier_1.is_empty() && !frontier_2.is_empty() { if frontier_1.len() != frontier_2.len() { debug!("frontier len mismatch"); return false; diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index 9bac45508c..8625527a16 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -474,7 +474,7 @@ fn trie_cursor_promote_node4_to_node16() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -490,12 +490,12 @@ fn trie_cursor_promote_node4_to_node16() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -635,7 +635,7 @@ fn trie_cursor_promote_node16_to_node48() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -652,12 +652,12 @@ fn trie_cursor_promote_node16_to_node48() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -742,7 +742,7 @@ fn trie_cursor_promote_node16_to_node48() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -759,12 +759,12 @@ fn trie_cursor_promote_node16_to_node48() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -905,7 +905,7 @@ fn trie_cursor_promote_node48_to_node256() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -922,12 +922,12 @@ fn trie_cursor_promote_node48_to_node256() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - 
merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -1012,7 +1012,7 @@ fn trie_cursor_promote_node48_to_node256() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -1028,12 +1028,12 @@ fn trie_cursor_promote_node48_to_node256() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } @@ -1118,7 +1118,7 @@ fn trie_cursor_promote_node48_to_node256() { Trie::test_try_attach_leaf( &mut f, &mut c, - &mut TrieLeaf::new(&vec![], &[128 + j as u8; 40].to_vec()), + &mut TrieLeaf::new(&vec![], &[128 + j; 40].to_vec()), &mut node, ) .unwrap() @@ -1135,12 +1135,12 @@ fn trie_cursor_promote_node48_to_node256() { ) .unwrap() .unwrap(), - TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j as u8; 40].to_vec()) + TrieLeaf::new(&path[k + 1..].to_vec(), &[128 + j; 40].to_vec()) ); // without a MARF commit, merkle tests will fail in deferred mode if f.hash_calculation_mode != TrieHashCalculationMode::Deferred { - merkle_test(&mut f, &path.to_vec(), &[(j + 128) as u8; 40].to_vec()); + merkle_test(&mut f, &path.to_vec(), &[j + 128; 40].to_vec()); } } } diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 65e41cf3ed..af52906fb9 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -281,7 +281,7 @@ impl Trie { ))); } - value.path = cur_leaf.path_bytes().clone(); + value.path.clone_from(cur_leaf.path_bytes()); let leaf_hash = get_leaf_hash(value); @@ -341,7 +341,7 @@ impl Trie { ) -> Result { // can only work if we're not at the end of the path, and the current node has a path assert!(!cursor.eop()); - assert!(cur_leaf_data.path.len() > 0); + assert!(!cur_leaf_data.path.is_empty()); // switch from lazy expansion to path compression -- // * the current and new leaves will have unique suffixes @@ -361,11 +361,8 @@ impl Trie { // update current leaf (path changed) and save it let cur_leaf_disk_ptr = cur_leaf_ptr.ptr(); - let cur_leaf_new_ptr = TriePtr::new( - TrieNodeID::Leaf as u8, - cur_leaf_chr, - cur_leaf_disk_ptr as u32, - ); + let cur_leaf_new_ptr = + TriePtr::new(TrieNodeID::Leaf as u8, cur_leaf_chr, cur_leaf_disk_ptr); assert!(cur_leaf_path.len() <= cur_leaf_data.path.len()); let _sav_cur_leaf_data = cur_leaf_data.clone(); @@ -563,7 +560,7 @@ impl Trie { // append this leaf to the Trie let new_node_disk_ptr = storage.last_ptr()?; - let ret = TriePtr::new(new_node.id(), node_ptr.chr(), new_node_disk_ptr as u32); + let ret = TriePtr::new(new_node.id(), node_ptr.chr(), new_node_disk_ptr); storage.write_nodetype(new_node_disk_ptr, &new_node, new_node_hash)?; // update the cursor so its path of nodes and ptrs accurately reflects that we would have @@ -639,7 +636,7 @@ impl Trie { let new_cur_node_ptr = TriePtr::new( cur_node_cur_ptr.id(), new_cur_node_chr, - new_cur_node_disk_ptr as u32, + new_cur_node_disk_ptr, ); node.set_path(new_cur_node_path); @@ -873,13 +870,13 @@ impl Trie { cursor: 
&TrieCursor, update_skiplist: bool, ) -> Result<(), Error> { - assert!(cursor.node_ptrs.len() > 0); + assert!(!cursor.node_ptrs.is_empty()); let mut ptrs = cursor.node_ptrs.clone(); trace!("update_root_hash: ptrs = {:?}", &ptrs); let mut child_ptr = ptrs.pop().unwrap(); - if ptrs.len() == 0 { + if ptrs.is_empty() { // root node was already updated by trie operations, but it will have the wrong hash. // we need to "fix" the root node so it mixes in its ancestor hashes. trace!("Fix up root node so it mixes in its ancestor hashes"); diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 8134db9d44..c3b44c4c82 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -399,7 +399,7 @@ pub fn write_trie_blob_to_unconfirmed( } /// Open a trie blob. Returns a Blob<'a> readable/writeable handle to it. -pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result, Error> { +pub fn open_trie_blob(conn: &Connection, block_id: u32) -> Result, Error> { let blob = conn.blob_open( DatabaseName::Main, "marf_data", @@ -411,7 +411,7 @@ pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result readable handle to it. -pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Result, Error> { +pub fn open_trie_blob_readonly(conn: &Connection, block_id: u32) -> Result, Error> { let blob = conn.blob_open( DatabaseName::Main, "marf_data", diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 76422e69ff..eae3e1f14d 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -120,7 +120,7 @@ impl MinerStatus { } pub fn is_blocked(&self) -> bool { - if self.blockers.len() > 0 { + if !self.blockers.is_empty() { debug!("Miner: blocked by {:?}", &self.blockers); true } else { @@ -876,7 +876,7 @@ impl<'a> StacksMicroblockBuilder<'a> { ) -> Result { let miner_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(miner_key)); - if txs.len() == 0 { + if txs.is_empty() { return Err(Error::NoTransactionsToMine); } @@ -1387,7 +1387,7 @@ impl<'a> StacksMicroblockBuilder<'a> { }, ); - if to_drop_and_blacklist.len() > 0 { + if !to_drop_and_blacklist.is_empty() { debug!( "Dropping and blacklisting {} problematic transaction(s)", &to_drop_and_blacklist.len() @@ -1427,8 +1427,16 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.num_mined = num_txs; mem_pool.drop_txs(&invalidated_txs)?; - event_dispatcher.mempool_txs_dropped(invalidated_txs, MemPoolDropReason::TOO_EXPENSIVE); - event_dispatcher.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); + event_dispatcher.mempool_txs_dropped( + invalidated_txs, + None, + MemPoolDropReason::TOO_EXPENSIVE, + ); + event_dispatcher.mempool_txs_dropped( + to_drop_and_blacklist, + None, + MemPoolDropReason::PROBLEMATIC, + ); if blocked { debug!( @@ -1463,7 +1471,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } } -impl<'a> Drop for StacksMicroblockBuilder<'a> { +impl Drop for StacksMicroblockBuilder<'_> { fn drop(&mut self) { debug!( "Drop StacksMicroblockBuilder"; @@ -1953,7 +1961,7 @@ impl StacksBlockBuilder { parent_microblocks.len() ); - if parent_microblocks.len() == 0 { + if parent_microblocks.is_empty() { self.set_parent_microblock(&EMPTY_MICROBLOCK_PARENT_HASH, 0); } else { let num_mblocks = parent_microblocks.len(); @@ -2114,12 +2122,12 @@ impl StacksBlockBuilder { let block = 
builder.mine_anchored_block(&mut epoch_tx); let size = builder.bytes_so_far; - let mblock_opt = if mblock_txs.len() > 0 { + let mblock_opt = if mblock_txs.is_empty() { + None + } else { builder.micro_txs.append(&mut mblock_txs); let mblock = builder.mine_next_microblock()?; Some(mblock) - } else { - None }; let cost = builder.epoch_finish(epoch_tx)?; @@ -2513,7 +2521,7 @@ impl StacksBlockBuilder { } } - if to_drop_and_blacklist.len() > 0 { + if !to_drop_and_blacklist.is_empty() { let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist); } @@ -2543,8 +2551,12 @@ impl StacksBlockBuilder { mempool.drop_txs(&invalidated_txs)?; if let Some(observer) = event_observer { - observer.mempool_txs_dropped(invalidated_txs, MemPoolDropReason::TOO_EXPENSIVE); - observer.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); + observer.mempool_txs_dropped(invalidated_txs, None, MemPoolDropReason::TOO_EXPENSIVE); + observer.mempool_txs_dropped( + to_drop_and_blacklist, + None, + MemPoolDropReason::PROBLEMATIC, + ); } if let Err(e) = result { diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index c637f2078d..bcf7611695 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -801,7 +801,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { ) .unwrap(); - if parent_mblock_stream.len() > 0 { + if !parent_mblock_stream.is_empty() { if tenure_id != 5 { assert_eq!( anchored_block.0.header.parent_microblock, @@ -1058,7 +1058,9 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { ) .unwrap(); - if parent_mblock_stream.len() > 0 { + if parent_mblock_stream.is_empty() { + assert_eq!(tenure_id, 0); + } else { // force the block to confirm a microblock stream, even if it would result in // an invalid block. 
test_debug!( @@ -1074,8 +1076,6 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { parent_mblock_stream.last().unwrap().block_hash() ); test_debug!("New block hash is {}", &anchored_block.0.block_hash()); - } else { - assert_eq!(tenure_id, 0); } (anchored_block.0, parent_mblock_stream) @@ -1256,7 +1256,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { &parent_tip, tip.total_burn, vrf_proof, - Hash160([0 as u8; 20]), + Hash160([0; 20]), &coinbase_tx, BlockBuilderSettings::limited(), None, @@ -1637,7 +1637,7 @@ fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { &parent_tip, tip.total_burn, vrf_proof, - Hash160([0 as u8; 20]), + Hash160([0; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, @@ -1728,7 +1728,7 @@ fn test_build_anchored_blocks_zero_fee_transaction() { &parent_tip, vrf_proof, tip.total_burn, - Hash160([0 as u8; 20]), + Hash160([0; 20]), ) .unwrap(); @@ -3355,10 +3355,10 @@ fn test_build_microblock_stream_forks_with_descendants() { for burn_op in burn_ops.iter_mut() { if let BlockstackOperationType::LeaderBlockCommit(ref mut op) = burn_op { // patch it up - op.parent_block_ptr = (*parent_block_ptrs + op.parent_block_ptr = *parent_block_ptrs .borrow() .get(&stacks_block.header.parent_block) - .unwrap()) as u32; + .unwrap(); } } @@ -4841,7 +4841,7 @@ fn test_fee_order_mismatch_nonce_order() { &parent_tip, tip.total_burn, vrf_proof, - Hash160([0 as u8; 20]), + Hash160([0; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 4e1b774ba7..4859451cb1 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -2513,7 +2513,7 @@ fn assert_chainstate_blocks_eq(test_name_1: &str, test_name_2: &str) { } for i in 0..all_microblocks_1.len() { - if all_microblocks_1[i].2.len() == 0 { + if all_microblocks_1[i].2.is_empty() { continue; } @@ -2650,7 +2650,25 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { &block_commit_op, ); - if microblocks.len() > 0 { + if microblocks.is_empty() { + // process all the blocks we can + test_debug!( + "Process Stacks block {} and {} microblocks in {}", + &stacks_block.block_hash(), + microblocks.len(), + &node_name + ); + let tip_info_list = node + .chainstate + .process_blocks_at_tip( + connect_burnchain_db(&miner_trace.burn_node.burnchain).conn(), + &mut miner_trace.burn_node.sortdb, + expected_num_blocks, + ) + .unwrap(); + + num_processed += tip_info_list.len(); + } else { for mblock in microblocks.iter() { preprocess_stacks_block_data( &mut node, @@ -2680,24 +2698,6 @@ fn miner_trace_replay_randomized(miner_trace: &mut TestMinerTrace) { num_processed += tip_info_list.len(); } - } else { - // process all the blocks we can - test_debug!( - "Process Stacks block {} and {} microblocks in {}", - &stacks_block.block_hash(), - microblocks.len(), - &node_name - ); - let tip_info_list = node - .chainstate - .process_blocks_at_tip( - connect_burnchain_db(&miner_trace.burn_node.burnchain).conn(), - &mut miner_trace.burn_node.sortdb, - expected_num_blocks, - ) - .unwrap(); - - num_processed += tip_info_list.len(); } } } @@ -2857,7 +2857,7 @@ pub fn mine_invalid_token_transfers_block( ); builder.force_mine_tx(clarity_tx, &tx1).unwrap(); - if miner.spent_at_nonce.get(&1).is_none() { + if !miner.spent_at_nonce.contains_key(&1) { miner.spent_at_nonce.insert(1, 11111); } 
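Reviewer note: the `spent_at_nonce` hunks here, and the `contains_key` changes in `tests/mod.rs` below, replace `map.get(&k).is_none()` / `is_some()` with `contains_key`, which asks only the question being answered instead of fetching a value and discarding it. A minimal sketch, illustrative only:

```rust
use std::collections::HashMap;

// Illustrative only: insert a default amount the first time a nonce is seen.
fn record_default(spent_at_nonce: &mut HashMap<u64, u64>, nonce: u64, amount: u64) {
    // Before: if spent_at_nonce.get(&nonce).is_none() { ... }
    if !spent_at_nonce.contains_key(&nonce) {
        spent_at_nonce.insert(nonce, amount);
    }
}

fn main() {
    let mut spent = HashMap::new();
    record_default(&mut spent, 1, 11111);
    record_default(&mut spent, 1, 99999); // no-op: key already present
    assert_eq!(spent.get(&1), Some(&11111));
}
```

In new code the same intent is often written with the entry API, `spent.entry(nonce).or_insert(amount)`, but that is a behavioural rewrite rather than the lint-level cleanup this diff makes.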
@@ -2871,7 +2871,7 @@ pub fn mine_invalid_token_transfers_block( ); builder.force_mine_tx(clarity_tx, &tx2).unwrap(); - if miner.spent_at_nonce.get(&2).is_none() { + if !miner.spent_at_nonce.contains_key(&2) { miner.spent_at_nonce.insert(2, 22222); } diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 798e4c2b05..714800b1a9 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -86,7 +86,7 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let mut dir_queue = VecDeque::new(); dir_queue.push_back("/".to_string()); - while dir_queue.len() > 0 { + while !dir_queue.is_empty() { let next_dir = dir_queue.pop_front().unwrap(); let next_src_dir = path_join(&src_dir, &next_dir); let next_dest_dir = path_join(&dest_dir, &next_dir); @@ -214,7 +214,7 @@ impl TestMinerTrace { let mut num_blocks = 0; for p in self.points.iter() { for miner_id in p.stacks_blocks.keys() { - if p.stacks_blocks.get(miner_id).is_some() { + if p.stacks_blocks.contains_key(miner_id) { num_blocks += 1; } } @@ -227,7 +227,7 @@ impl TestMinerTrace { let mut num_sortitions = 0; for p in self.points.iter() { for miner_id in p.fork_snapshots.keys() { - if p.fork_snapshots.get(miner_id).is_some() { + if p.fork_snapshots.contains_key(miner_id) { num_sortitions += 1; } } @@ -974,7 +974,7 @@ pub fn get_last_microblock_header( let last_microblock_header_opt = match last_microblocks_opt { Some(last_microblocks) => { - if last_microblocks.len() == 0 { + if last_microblocks.is_empty() { None } else { let l = last_microblocks.len() - 1; diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index c9af312cc8..d813dbcf01 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -34,6 +34,7 @@ use crate::chainstate::stacks::{TransactionPayloadID, *}; use crate::codec::Error as CodecError; use crate::core::*; use crate::net::Error as net_error; +use crate::util_lib::boot::boot_code_addr; impl StacksMessageCodec for TransactionContractCall { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { @@ -1031,6 +1032,16 @@ impl StacksTransaction { _ => false, } } + + /// Is this a phantom transaction? 
+ pub fn is_phantom(&self) -> bool { + let boot_address = boot_code_addr(self.is_mainnet()).into(); + if let TransactionPayload::TokenTransfer(address, amount, _) = &self.payload { + *address == boot_address && *amount == 0 + } else { + false + } + } } impl StacksTransactionSigner { @@ -3384,7 +3395,7 @@ mod test { .consensus_serialize(&mut contract_call_bytes) .unwrap(); - let mut transaction_contract_call = vec![0xff as u8]; + let mut transaction_contract_call = vec![0xff]; transaction_contract_call.append(&mut contract_call_bytes.clone()); assert!( @@ -3489,14 +3500,14 @@ mod test { let asset_name = ClarityName::try_from("hello-asset").unwrap(); let mut asset_name_bytes = vec![ // length - asset_name.len() as u8, + asset_name.len(), ]; asset_name_bytes.extend_from_slice(&asset_name.to_string().as_str().as_bytes()); let contract_name = ContractName::try_from("hello-world").unwrap(); let mut contract_name_bytes = vec![ // length - contract_name.len() as u8, + contract_name.len(), ]; contract_name_bytes.extend_from_slice(&contract_name.to_string().as_str().as_bytes()); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 7758abc41b..f67ab22eaa 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -35,7 +35,7 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{bytes_to_hex, Hash160, Sha512Trunc256Sum}; -use stacks_common::util::{cargo_workspace, get_epoch_time_ms, log}; +use stacks_common::util::{get_epoch_time_ms, log}; use crate::burnchains::{Address, PoxConstants, Txid}; use crate::chainstate::stacks::boot::{ @@ -173,7 +173,7 @@ trait ClarityStorage { headers_db: &'a dyn HeadersDB, burn_db: &'a dyn BurnStateDB, ) -> ClarityDatabase<'a>; - fn get_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a>; + fn get_analysis_db(&mut self) -> AnalysisDatabase<'_>; } impl ClarityStorage for WritableMarfStore<'_> { @@ -185,7 +185,7 @@ impl ClarityStorage for WritableMarfStore<'_> { self.as_clarity_db(headers_db, burn_db) } - fn get_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a> { + fn get_analysis_db(&mut self) -> AnalysisDatabase<'_> { self.as_analysis_db() } } @@ -199,7 +199,7 @@ impl ClarityStorage for MemoryBackingStore { self.as_clarity_db() } - fn get_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a> { + fn get_analysis_db(&mut self) -> AnalysisDatabase<'_> { self.as_analysis_db() } } @@ -708,7 +708,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { - Some((height * 600 + 1231006505) as u64) + Some(height * 600 + 1231006505) } else { None } @@ -717,7 +717,7 @@ impl HeadersDB for CLIHeadersDB { fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { - Some((height * 10 + 1713799973) as u64) + Some(height * 10 + 1713799973) } else { None } @@ -995,7 +995,7 @@ pub fn add_serialized_output(result: &mut serde_json::Value, value: Value) { /// Returns (process-exit-code, Option) pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option) { - if args.len() < 1 { + if args.is_empty() { print_usage(invoked_by); return (1, None); } @@ -1950,6 +1950,8 @@ pub fn invoke_command(invoked_by: &str, args: &[String]) -> (i32, Option 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check tokens (idempotency)"); let invoked = invoke_command( @@ -2079,7 
+2081,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("launch tokens"); let invoked = invoke_command( @@ -2096,7 +2098,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check names"); let invoked = invoke_command( @@ -2112,7 +2114,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check names with different contract ID"); let invoked = invoke_command( @@ -2130,7 +2132,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("check names with analysis"); let invoked = invoke_command( @@ -2147,7 +2149,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!(result["analysis"] != json!(null)); eprintln!("check names with cost"); @@ -2165,7 +2167,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!(result["costs"] != json!(null)); assert!(result["assets"] == json!(null)); @@ -2186,7 +2188,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!(result["costs"] != json!(null)); assert!(result["assets"] != json!(null)); @@ -2207,8 +2209,8 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); - assert!(result["events"].as_array().unwrap().len() == 0); + assert!(!result["message"].as_str().unwrap().is_empty()); + assert!(result["events"].as_array().unwrap().is_empty()); assert_eq!(result["output"], json!({"UInt": 1000})); eprintln!("eval tokens"); @@ -2344,7 +2346,7 @@ mod test { let result = invoked.1.unwrap(); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); eprintln!("launch tokens"); let invoked = invoke_command( @@ -2364,7 +2366,7 @@ mod test { eprintln!("{}", serde_json::to_string(&result).unwrap()); assert_eq!(exit, 0); - assert!(result["message"].as_str().unwrap().len() > 0); + assert!(!result["message"].as_str().unwrap().is_empty()); assert!( result["assets"]["tokens"]["S1G2081040G2081040G2081040G208105NK8PE5"] ["S1G2081040G2081040G2081040G208105NK8PE5.tokens-ft::tokens"] diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index fdf2b9894f..08782e315b 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -175,9 +175,9 @@ macro_rules! 
using { }}; } -impl<'a, 'b> ClarityBlockConnection<'a, 'b> { +impl ClarityBlockConnection<'_, '_> { #[cfg(test)] - pub fn new_test_conn( + pub fn new_test_conn<'a, 'b>( datastore: WritableMarfStore<'a>, header_db: &'b dyn HeadersDB, burn_state_db: &'b dyn BurnStateDB, @@ -647,7 +647,7 @@ impl ClarityInstance { } } -impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { +impl ClarityConnection for ClarityBlockConnection<'_, '_> { /// Do something with ownership of the underlying DB that involves only reading. fn with_clarity_db_readonly_owned(&mut self, to_do: F) -> R where @@ -711,7 +711,7 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { } } -impl<'a> PreCommitClarityBlock<'a> { +impl PreCommitClarityBlock<'_> { pub fn commit(self) { debug!("Committing Clarity block connection"; "index_block" => %self.commit_to); self.datastore @@ -720,7 +720,7 @@ impl<'a> PreCommitClarityBlock<'a> { } } -impl<'a, 'b> ClarityBlockConnection<'a, 'b> { +impl<'a> ClarityBlockConnection<'a, '_> { /// Rolls back all changes in the current block by /// (1) dropping all writes from the current MARF tip, /// (2) rolling back side-storage @@ -837,9 +837,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // instantiate costs 2 contract... let cost_2_code = if mainnet { - &*BOOT_CODE_COSTS_2 + BOOT_CODE_COSTS_2 } else { - &*BOOT_CODE_COSTS_2_TESTNET + BOOT_CODE_COSTS_2_TESTNET }; let payload = TransactionPayload::SmartContract( @@ -1028,7 +1028,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { } /////////////////// .costs-3 //////////////////////// - let cost_3_code = &*BOOT_CODE_COSTS_3; + let cost_3_code = BOOT_CODE_COSTS_3; let payload = TransactionPayload::SmartContract( TransactionSmartContract { @@ -1638,7 +1638,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { } } -impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { +impl ClarityConnection for ClarityTransactionConnection<'_, '_> { /// Do something with ownership of the underlying DB that involves only reading. fn with_clarity_db_readonly_owned(&mut self, to_do: F) -> R where @@ -1677,7 +1677,7 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { } } -impl<'a, 'b> Drop for ClarityTransactionConnection<'a, 'b> { +impl Drop for ClarityTransactionConnection<'_, '_> { fn drop(&mut self) { if thread::panicking() { // if the thread is panicking, we've likely lost our cost_tracker handle, @@ -1697,7 +1697,7 @@ impl<'a, 'b> Drop for ClarityTransactionConnection<'a, 'b> { } } -impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { +impl TransactionConnection for ClarityTransactionConnection<'_, '_> { fn with_abort_callback( &mut self, to_do: F, @@ -1771,7 +1771,7 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { } } -impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { +impl ClarityTransactionConnection<'_, '_> { /// Do something to the underlying DB that involves writing. 
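The repeated `impl<'a, 'b> ... for Type<'a, 'b>` to `impl ... for Type<'_, '_>` rewrites in these hunks apply clippy's `needless_lifetimes` fix: when an impl's lifetime parameters are only mentioned in the self type, the anonymous lifetime `'_` can stand in for them. A minimal sketch of the before/after shape:

```rust
struct Conn<'a, 'b> {
    name: &'a str,
    tag: &'b str,
}

trait Describe {
    fn describe(&self) -> String;
}

// Before: impl<'a, 'b> Describe for Conn<'a, 'b> { ... }
// After: the lifetimes are never referenced in the body, so elide them.
impl Describe for Conn<'_, '_> {
    fn describe(&self) -> String {
        format!("{} ({})", self.name, self.tag)
    }
}

fn main() {
    let c = Conn { name: "clarity", tag: "block" };
    assert_eq!(c.describe(), "clarity (block)");
}
```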
pub fn with_clarity_db(&mut self, to_do: F) -> Result where @@ -1947,7 +1947,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -1955,8 +1955,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2000,7 +2000,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2008,8 +2008,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2061,7 +2061,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2069,8 +2069,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2173,7 +2173,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2181,8 +2181,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2229,7 +2229,7 @@ mod tests { } let mut marf = clarity_instance.destroy(); - let mut conn = marf.begin_read_only(Some(&StacksBlockId([1 as u8; 32]))); + let mut conn = marf.begin_read_only(Some(&StacksBlockId([1; 32]))); assert!(conn.get_contract_hash(&contract_identifier).is_ok()); } @@ -2242,7 +2242,7 @@ mod tests { { let mut conn = clarity_instance.begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2276,7 +2276,7 @@ mod tests { let mut marf = clarity_instance.destroy(); - let mut conn = marf.begin(&StacksBlockId::sentinel(), &StacksBlockId([0 as u8; 32])); + let mut conn = marf.begin(&StacksBlockId::sentinel(), &StacksBlockId([0; 32])); // should not be in the marf. 
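The many `StacksBlockId([0 as u8; 32])` to `StacksBlockId([0; 32])` changes in these tests drop a redundant cast: the newtype's field already fixes the array element type, so the untyped literal infers to `u8` on its own. A tiny illustration with a stand-in newtype:

```rust
// Stand-in for StacksBlockId: a 32-byte identifier newtype.
struct BlockId([u8; 32]);

fn main() {
    // Before: BlockId([0 as u8; 32]) — the cast is inert, since the field
    // type forces the literal to be u8 anyway.
    let id = BlockId([0; 32]);
    assert_eq!(id.0.len(), 32);
}
```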
assert_eq!( conn.get_contract_hash(&contract_identifier).unwrap_err(), @@ -2314,7 +2314,7 @@ mod tests { confirmed_clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2336,7 +2336,7 @@ mod tests { // make an unconfirmed block off of the confirmed block { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2369,7 +2369,7 @@ mod tests { // contract is still there, in unconfirmed status { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2388,7 +2388,7 @@ mod tests { // rolled back (but that should only drop the current TrieRAM) { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2406,7 +2406,7 @@ mod tests { // contract is now absent, now that we did a rollback of unconfirmed state { let mut conn = clarity_instance.begin_unconfirmed( - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2421,7 +2421,7 @@ mod tests { } let mut marf = clarity_instance.destroy(); - let mut conn = marf.begin_unconfirmed(&StacksBlockId([0 as u8; 32])); + let mut conn = marf.begin_unconfirmed(&StacksBlockId([0; 32])); // should not be in the marf. assert_eq!( @@ -2452,7 +2452,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2460,8 +2460,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2672,7 +2672,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2680,8 +2680,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2833,7 +2833,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -2841,8 +2841,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ); @@ -2883,8 +2883,8 @@ mod tests { { let mut conn = clarity_instance.begin_block( - &StacksBlockId([1 as u8; 32]), - &StacksBlockId([2 as u8; 32]), + &StacksBlockId([1; 32]), + &StacksBlockId([2; 32]), &TEST_HEADER_DB, &burn_state_db, ); diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 3a8636b3b5..56a1fde107 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -262,7 +262,7 @@ impl MarfedKV { self.marf.sqlite_conn() } - pub fn index_conn<'a, C>(&'a self, context: C) -> IndexDBConn<'a, C, StacksBlockId> { + pub fn index_conn(&self, context: C) -> 
IndexDBConn<'_, C, StacksBlockId> { IndexDBConn { index: &self.marf, context, @@ -280,7 +280,7 @@ pub struct ReadOnlyMarfStore<'a> { marf: &'a mut MARF, } -impl<'a> ReadOnlyMarfStore<'a> { +impl ReadOnlyMarfStore<'_> { pub fn as_clarity_db<'b>( &'b mut self, headers_db: &'b dyn HeadersDB, @@ -289,7 +289,7 @@ impl<'a> ReadOnlyMarfStore<'a> { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db<'b>(&'b mut self) -> AnalysisDatabase<'b> { + pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { AnalysisDatabase::new(self) } @@ -301,7 +301,7 @@ impl<'a> ReadOnlyMarfStore<'a> { } } -impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { +impl ClarityBackingStore for ReadOnlyMarfStore<'_> { fn get_side_store(&mut self) -> &Connection { self.marf.sqlite_conn() } @@ -546,7 +546,7 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { } } -impl<'a> WritableMarfStore<'a> { +impl WritableMarfStore<'_> { pub fn as_clarity_db<'b>( &'b mut self, headers_db: &'b dyn HeadersDB, @@ -555,7 +555,7 @@ impl<'a> WritableMarfStore<'a> { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db<'b>(&'b mut self) -> AnalysisDatabase<'b> { + pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { AnalysisDatabase::new(self) } @@ -625,7 +625,7 @@ impl<'a> WritableMarfStore<'a> { } } -impl<'a> ClarityBackingStore for WritableMarfStore<'a> { +impl ClarityBackingStore for WritableMarfStore<'_> { fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult { self.marf .check_ancestor_block_hash(&bhh) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 0bce54dcfb..e901e8d908 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -157,7 +157,7 @@ impl GetTenureStartId for MARF { pub struct HeadersDBConn<'a>(pub StacksDBConn<'a>); -impl<'a> HeadersDB for HeadersDBConn<'a> { +impl HeadersDB for HeadersDBConn<'_> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, @@ -328,7 +328,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { } } -impl<'a> HeadersDB for ChainstateTx<'a> { +impl HeadersDB for ChainstateTx<'_> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, @@ -1205,7 +1205,7 @@ impl MemoryBackingStore { memory_marf } - pub fn as_clarity_db<'a>(&'a mut self) -> ClarityDatabase<'a> { + pub fn as_clarity_db(&mut self) -> ClarityDatabase<'_> { ClarityDatabase::new(self, &NULL_HEADER_DB, &NULL_BURN_STATE_DB) } @@ -1219,7 +1219,7 @@ impl MemoryBackingStore { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db<'a>(&'a mut self) -> AnalysisDatabase<'a> { + pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { AnalysisDatabase::new(self) } } diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 4fe887f2c3..dc5b33fd31 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -81,7 +81,7 @@ fn setup_tracked_cost_test( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &burn_state_db, ) @@ -89,8 +89,8 @@ fn setup_tracked_cost_test( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &burn_state_db, ); @@ -107,8 +107,8 @@ fn 
setup_tracked_cost_test( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([1 as u8; 32]), - &StacksBlockId([2 as u8; 32]), + &StacksBlockId([1; 32]), + &StacksBlockId([2; 32]), &TEST_HEADER_DB, &burn_state_db, ); @@ -145,8 +145,8 @@ fn setup_tracked_cost_test( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([2 as u8; 32]), - &StacksBlockId([3 as u8; 32]), + &StacksBlockId([2; 32]), + &StacksBlockId([3; 32]), &TEST_HEADER_DB, &burn_state_db, ); @@ -221,7 +221,7 @@ fn test_tracked_costs( { let mut conn = clarity_instance.begin_block( - &StacksBlockId([3 as u8; 32]), + &StacksBlockId([3; 32]), &StacksBlockId([4 + prog_id as u8; 32]), &TEST_HEADER_DB, &burn_state_db, diff --git a/stackslib/src/clarity_vm/tests/ast.rs b/stackslib/src/clarity_vm/tests/ast.rs index 2074fa7636..edeaf9d553 100644 --- a/stackslib/src/clarity_vm/tests/ast.rs +++ b/stackslib/src/clarity_vm/tests/ast.rs @@ -35,7 +35,7 @@ fn dependency_edge_counting_runtime( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) @@ -43,8 +43,8 @@ fn dependency_edge_counting_runtime( let mut cost_track = clarity_instance .begin_block( - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 29c57b2e92..030b62af93 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -204,7 +204,7 @@ where let mut tip = first_block.clone(); if epoch >= StacksEpochId::Epoch2_05 { - let next_block = StacksBlockId([1 as u8; 32]); + let next_block = StacksBlockId([1; 32]); let mut clarity_conn = clarity_instance.begin_block(&tip, &next_block, &TEST_HEADER_DB, &TEST_BURN_STATE_DB); clarity_conn.initialize_epoch_2_05().unwrap(); @@ -213,7 +213,7 @@ where } if epoch >= StacksEpochId::Epoch21 { - let next_block = StacksBlockId([2 as u8; 32]); + let next_block = StacksBlockId([2; 32]); let mut clarity_conn = clarity_instance.begin_block(&tip, &next_block, &TEST_HEADER_DB, &TEST_BURN_STATE_DB); clarity_conn.initialize_epoch_2_1().unwrap(); @@ -223,7 +223,7 @@ where let mut marf_kv = clarity_instance.destroy(); - let mut store = marf_kv.begin(&tip, &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&tip, &StacksBlockId([3; 32])); to_do(OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), @@ -1052,7 +1052,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity let mut clarity_inst = ClarityInstance::new(use_mainnet, chain_id, marf_kv); let mut block_conn = clarity_inst.begin_block( &StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -1111,7 +1111,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; let without_interposing_5 = { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([2; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1134,7 +1134,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; let 
without_interposing_10 = { - let mut store = marf_kv.begin(&StacksBlockId([2 as u8; 32]), &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([2; 32]), &StacksBlockId([3; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1163,7 +1163,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; { - let mut store = marf_kv.begin(&StacksBlockId([3 as u8; 32]), &StacksBlockId([4 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([3; 32]), &StacksBlockId([4; 32])); let mut db = store.as_clarity_db(&TEST_HEADER_DB, burn_db); db.begin(); db.set_variable_unknown_descriptor( @@ -1194,7 +1194,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity } let with_interposing_5 = { - let mut store = marf_kv.begin(&StacksBlockId([4 as u8; 32]), &StacksBlockId([5 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([4; 32]), &StacksBlockId([5; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), @@ -1218,7 +1218,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity }; let with_interposing_10 = { - let mut store = marf_kv.begin(&StacksBlockId([5 as u8; 32]), &StacksBlockId([6 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([5; 32]), &StacksBlockId([6; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1304,7 +1304,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi let mut clarity_inst = ClarityInstance::new(use_mainnet, chain_id, marf_kv); let mut block_conn = clarity_inst.begin_block( &StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -1479,7 +1479,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi }; { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([2; 32])); let mut db = store.as_clarity_db(&TEST_HEADER_DB, burn_db); db.begin(); @@ -1517,7 +1517,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi } let le_cost_without_interception = { - let mut store = marf_kv.begin(&StacksBlockId([2 as u8; 32]), &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([2; 32]), &StacksBlockId([3; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, @@ -1578,7 +1578,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi ]; { - let mut store = marf_kv.begin(&StacksBlockId([3 as u8; 32]), &StacksBlockId([4 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([3; 32]), &StacksBlockId([4; 32])); let mut db = store.as_clarity_db(&TEST_HEADER_DB, burn_db); db.begin(); @@ -1618,7 +1618,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi } { - let mut store = marf_kv.begin(&StacksBlockId([4 as u8; 32]), &StacksBlockId([5 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([4; 32]), &StacksBlockId([5; 32])); let mut owned_env = OwnedEnvironment::new_max_limit( store.as_clarity_db(&TEST_HEADER_DB, burn_db), StacksEpochId::Epoch20, diff --git 
a/stackslib/src/clarity_vm/tests/events.rs b/stackslib/src/clarity_vm/tests/events.rs index 7037e8dcf3..3e09b6b924 100644 --- a/stackslib/src/clarity_vm/tests/events.rs +++ b/stackslib/src/clarity_vm/tests/events.rs @@ -88,7 +88,7 @@ fn helper_execute_epoch( &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, ), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), ); let mut owned_env = OwnedEnvironment::new_max_limit( diff --git a/stackslib/src/clarity_vm/tests/forking.rs b/stackslib/src/clarity_vm/tests/forking.rs index c74cb0c8b0..22a3f07321 100644 --- a/stackslib/src/clarity_vm/tests/forking.rs +++ b/stackslib/src/clarity_vm/tests/forking.rs @@ -195,7 +195,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc Error::Runtime(x, _) => assert_eq!( x, RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( - vec![2 as u8; 32].as_slice() + vec![2; 32].as_slice() )) ), _ => panic!("Unexpected error"), @@ -287,7 +287,7 @@ fn with_separate_forks_environment( let mut marf_kv = MarfedKV::temporary(); { - let mut store = marf_kv.begin(&StacksBlockId::sentinel(), &StacksBlockId([0 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId::sentinel(), &StacksBlockId([0; 32])); store .as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB) .initialize(); @@ -295,7 +295,7 @@ fn with_separate_forks_environment( } { - let mut store = marf_kv.begin(&StacksBlockId([0 as u8; 32]), &StacksBlockId([1 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([0; 32]), &StacksBlockId([1; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, @@ -307,7 +307,7 @@ fn with_separate_forks_environment( // Now, we can do our forking. { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([2; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, @@ -317,7 +317,7 @@ fn with_separate_forks_environment( } { - let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([3 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([1; 32]), &StacksBlockId([3; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, @@ -327,7 +327,7 @@ fn with_separate_forks_environment( } { - let mut store = marf_kv.begin(&StacksBlockId([2 as u8; 32]), &StacksBlockId([4 as u8; 32])); + let mut store = marf_kv.begin(&StacksBlockId([2; 32]), &StacksBlockId([4; 32])); let mut owned_env = OwnedEnvironment::new( store.as_clarity_db(&TEST_HEADER_DB, &TEST_BURN_STATE_DB), epoch, diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 6dfb902fe4..6e2255446a 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -48,7 +48,7 @@ use crate::clarity_vm::database::MemoryBackingStore; use crate::util_lib::boot::boot_code_id; fn test_block_headers(n: u8) -> StacksBlockId { - StacksBlockId([n as u8; 32]) + StacksBlockId([n; 32]) } pub const TEST_BURN_STATE_DB_AST_PRECHECK: UnitTestBurnStateDB = UnitTestBurnStateDB { @@ -131,7 +131,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac let mut gb = clarity.begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0xfe as u8; 32]), + &StacksBlockId([0xfe; 32]), 
&TEST_HEADER_DB, burn_db, ); @@ -197,8 +197,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac { let mut block = new_block( &mut clarity, - &StacksBlockId([0xfe as u8; 32]), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0xfe; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ); @@ -697,7 +697,7 @@ pub fn rollback_log_memory_test( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -706,8 +706,8 @@ pub fn rollback_log_memory_test( { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -768,7 +768,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -777,8 +777,8 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -847,7 +847,7 @@ pub fn argument_memory_test( clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -856,8 +856,8 @@ pub fn argument_memory_test( { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -924,7 +924,7 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -933,8 +933,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); @@ -1043,7 +1043,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0; 32]), &TEST_HEADER_DB, burn_db, ) @@ -1052,8 +1052,8 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ { let mut conn = new_block( &mut clarity_instance, - &StacksBlockId([0 as u8; 32]), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([0; 32]), + &StacksBlockId([1; 32]), &TEST_HEADER_DB, burn_db, ); diff --git a/stackslib/src/clarity_vm/tests/simple_tests.rs b/stackslib/src/clarity_vm/tests/simple_tests.rs index 0b84b209da..a73489bb95 100644 --- a/stackslib/src/clarity_vm/tests/simple_tests.rs +++ b/stackslib/src/clarity_vm/tests/simple_tests.rs @@ -30,7 +30,7 @@ where { let mut store = marf_kv.begin( &StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH), - &StacksBlockId([1 as u8; 32]), + &StacksBlockId([1; 32]), ); let mut owned_env = OwnedEnvironment::new( @@ -65,7 +65,7 @@ fn test_at_unknown_block() { Error::Runtime(x, _) => assert_eq!( x, 
RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( - vec![2 as u8; 32].as_slice() + vec![2; 32].as_slice() )) ), _ => panic!("Unexpected error"), diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index cbe27e1585..f1b41b803a 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1438,7 +1438,7 @@ impl BurnchainConfigFile { // check magic bytes and set if not defined let mainnet_magic = ConfigFile::mainnet().burnchain.unwrap().magic_bytes; if self.magic_bytes.is_none() { - self.magic_bytes = mainnet_magic.clone(); + self.magic_bytes.clone_from(&mainnet_magic); } if self.magic_bytes != mainnet_magic { return Err(format!( diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 93ba692678..865f99d3b0 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -390,7 +390,12 @@ pub trait ProposalCallbackReceiver: Send { pub trait MemPoolEventDispatcher { fn get_proposal_callback_receiver(&self) -> Option>; - fn mempool_txs_dropped(&self, txids: Vec, reason: MemPoolDropReason); + fn mempool_txs_dropped( + &self, + txids: Vec, + new_txid: Option, + reason: MemPoolDropReason, + ); fn mined_block_event( &self, target_burn_height: u64, @@ -582,13 +587,13 @@ impl MemPoolWalkSettings { } impl FromRow for Txid { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { row.get(0).map_err(db_error::SqliteError) } } impl FromRow for MemPoolTxMetadata { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let tenure_consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; let tenure_block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?; @@ -624,7 +629,7 @@ impl FromRow for MemPoolTxMetadata { } impl FromRow for MemPoolTxInfo { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let md = MemPoolTxMetadata::from_row(row)?; let tx_bytes: Vec = row.get_unwrap("tx"); let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]) @@ -639,7 +644,7 @@ impl FromRow for MemPoolTxInfo { } impl FromRow for MemPoolTxInfoPartial { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let txid = Txid::from_column(row, "txid")?; let fee_rate: Option = match row.get("fee_rate") { Ok(rate) => Some(rate), @@ -662,7 +667,7 @@ impl FromRow for MemPoolTxInfoPartial { } impl FromRow<(u64, u64)> for (u64, u64) { - fn from_row<'a>(row: &'a Row) -> Result<(u64, u64), db_error> { + fn from_row(row: &Row) -> Result<(u64, u64), db_error> { let t1: i64 = row.get_unwrap(0); let t2: i64 = row.get_unwrap(1); if t1 < 0 || t2 < 0 { @@ -1214,6 +1219,12 @@ impl CandidateCache { fn len(&self) -> usize { self.cache.len() + self.next.len() } + + /// Is the cache empty? 
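The `is_empty` helper defined just below keeps the cache's emptiness check consistent with its two-part `len()`, and is annotated so `cargo-mutants` skips it in test builds (mutating a trivial delegation yields no useful signal). A self-contained sketch of the pattern:

```rust
use std::collections::VecDeque;

// Sketch of a cache backed by two queues, mirroring the candidate cache:
// `is_empty` must agree with `len` across both parts.
struct TwoQueueCache<T> {
    cache: VecDeque<T>,
    next: VecDeque<T>,
}

impl<T> TwoQueueCache<T> {
    fn len(&self) -> usize {
        self.cache.len() + self.next.len()
    }

    // Equivalent to `self.len() == 0`, without summing the lengths.
    fn is_empty(&self) -> bool {
        self.cache.is_empty() && self.next.is_empty()
    }
}

fn main() {
    let mut c: TwoQueueCache<u32> = TwoQueueCache {
        cache: VecDeque::new(),
        next: VecDeque::new(),
    };
    assert!(c.is_empty());
    c.next.push_back(7);
    assert_eq!(c.len(), 1);
    assert!(!c.is_empty());
}
```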
+ #[cfg_attr(test, mutants::skip)] + fn is_empty(&self) -> bool { + self.cache.is_empty() && self.next.is_empty() + } } /// Evaluates the pair of nonces, to determine an order @@ -1835,13 +1846,10 @@ impl MemPoolDB { continue; } - let do_consider = if settings.filter_origins.len() > 0 { - settings + let do_consider = settings.filter_origins.is_empty() + || settings .filter_origins - .contains(&tx_info.metadata.origin_address) - } else { - true - }; + .contains(&tx_info.metadata.origin_address); if !do_consider { debug!("Will skip mempool tx, since it does not have an allowed origin"; @@ -1933,7 +1941,7 @@ impl MemPoolDB { drop(query_stmt_null); drop(query_stmt_fee); - if retry_store.len() > 0 { + if !retry_store.is_empty() { let tx = self.tx_begin()?; for (address, nonce) in retry_store.into_iter() { nonce_cache.update(address, nonce, &tx); @@ -1953,7 +1961,7 @@ impl MemPoolDB { &self.db } - pub fn tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn tx_begin(&mut self) -> Result, db_error> { let tx = tx_begin_immediate(&mut self.db)?; Ok(MemPoolTx::new( tx, @@ -2229,7 +2237,7 @@ impl MemPoolDB { // broadcast drop event if a tx is being replaced if let (Some(prior_tx), Some(event_observer)) = (prior_tx, event_observer) { - event_observer.mempool_txs_dropped(vec![prior_tx.txid], replace_reason); + event_observer.mempool_txs_dropped(vec![prior_tx.txid], Some(txid), replace_reason); }; Ok(()) @@ -2275,7 +2283,7 @@ impl MemPoolDB { if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE accept_time < ?1"; let txids = query_rows(tx, sql, args)?; - event_observer.mempool_txs_dropped(txids, MemPoolDropReason::STALE_COLLECT); + event_observer.mempool_txs_dropped(txids, None, MemPoolDropReason::STALE_COLLECT); } let sql = "DELETE FROM mempool WHERE accept_time < ?1"; @@ -2297,7 +2305,7 @@ impl MemPoolDB { if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE height < ?1"; let txids = query_rows(tx, sql, args)?; - event_observer.mempool_txs_dropped(txids, MemPoolDropReason::STALE_COLLECT); + event_observer.mempool_txs_dropped(txids, None, MemPoolDropReason::STALE_COLLECT); } let sql = "DELETE FROM mempool WHERE height < ?1"; @@ -2572,11 +2580,7 @@ impl MemPoolDB { /// Blacklist transactions from the mempool /// Do not call directly; it's `pub` only for testing - pub fn inner_blacklist_txs<'a>( - tx: &DBTx<'a>, - txids: &[Txid], - now: u64, - ) -> Result<(), db_error> { + pub fn inner_blacklist_txs(tx: &DBTx<'_>, txids: &[Txid], now: u64) -> Result<(), db_error> { for txid in txids { let sql = "INSERT OR REPLACE INTO tx_blacklist (txid, arrival_time) VALUES (?1, ?2)"; let args = params![txid, &u64_to_sql(now)?]; @@ -2587,8 +2591,8 @@ impl MemPoolDB { /// garbage-collect the tx blacklist -- delete any transactions whose blacklist timeout has /// been exceeded - pub fn garbage_collect_tx_blacklist<'a>( - tx: &DBTx<'a>, + pub fn garbage_collect_tx_blacklist( + tx: &DBTx<'_>, now: u64, timeout: u64, max_size: u64, @@ -2649,7 +2653,7 @@ impl MemPoolDB { /// Inner code body for dropping transactions. /// Note that the bloom filter will *NOT* be updated. That's the caller's job, if desired. 
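The call sites above thread the replacing transaction's id through the widened `mempool_txs_dropped` callback: `Some(txid)` when replace-by-fee evicts a prior transaction, `None` for stale-transaction garbage collection. A sketch of an observer consuming that shape, with stand-in types in place of the real `Txid` and `MemPoolDropReason`:

```rust
// Stand-in types; the real trait is MemPoolEventDispatcher in mempool.rs.
#[derive(Debug, Clone, Copy)]
struct Txid(u64);

#[derive(Debug, Clone, Copy)]
enum DropReason {
    ReplaceByFee,
    StaleCollect,
}

trait MemPoolEvents {
    fn mempool_txs_dropped(&self, txids: Vec<Txid>, new_txid: Option<Txid>, reason: DropReason);
}

struct LogObserver;

impl MemPoolEvents for LogObserver {
    fn mempool_txs_dropped(&self, txids: Vec<Txid>, new_txid: Option<Txid>, reason: DropReason) {
        for dropped in txids {
            // `new_txid` names the replacing transaction, when there is one.
            println!("dropped {dropped:?} (reason {reason:?}, replaced by {new_txid:?})");
        }
    }
}

fn main() {
    let obs = LogObserver;
    obs.mempool_txs_dropped(vec![Txid(1)], Some(Txid(2)), DropReason::ReplaceByFee);
    obs.mempool_txs_dropped(vec![Txid(3)], None, DropReason::StaleCollect);
}
```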
- fn inner_drop_txs<'a>(tx: &DBTx<'a>, txids: &[Txid]) -> Result<(), db_error> { + fn inner_drop_txs(tx: &DBTx<'_>, txids: &[Txid]) -> Result<(), db_error> { let sql = "DELETE FROM mempool WHERE txid = ?"; for txid in txids.iter() { tx.execute(sql, &[txid])?; diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 0f43c40756..899f9d4a2f 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -219,10 +219,10 @@ pub const BLOCK_LIMIT_MAINNET_21: ExecutionCost = ExecutionCost { // Block limit for the testnet in Stacks 2.0. pub const HELIUM_BLOCK_LIMIT_20: ExecutionCost = ExecutionCost { - write_length: 15_0_000_000, - write_count: 5_0_000, + write_length: 150_000_000, + write_count: 50_000, read_length: 1_000_000_000, - read_count: 5_0_000, + read_count: 50_000, // allow much more runtime in helium blocks than mainnet runtime: 100_000_000_000, }; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index e41f9bda1a..963820a741 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1211,10 +1211,10 @@ fn test_iterate_candidates_concurrent_write_lock() { mempool_tx.commit().unwrap(); } - assert!(expected_addr_nonces.len() > 0); + assert!(!expected_addr_nonces.is_empty()); let all_addr_nonces = db_get_all_nonces(mempool.conn()).unwrap(); - assert_eq!(all_addr_nonces.len(), 0); + assert!(all_addr_nonces.is_empty()); // start a thread that holds a write-lock on the mempool let write_thread = std::thread::spawn(move || { @@ -1266,7 +1266,7 @@ fn test_iterate_candidates_concurrent_write_lock() { assert_eq!(all_addr_nonces.len(), expected_addr_nonces.len()); for (addr, nonce) in all_addr_nonces { - assert!(expected_addr_nonces.get(&addr).is_some()); + assert!(expected_addr_nonces.contains_key(&addr)); assert_eq!(nonce, 24); } } @@ -1992,8 +1992,7 @@ fn test_txtags() { let txtags = mempool.get_txtags(&seed).unwrap(); let len_txtags = all_txtags.len(); - let last_txtags = - &all_txtags[len_txtags.saturating_sub(BLOOM_COUNTER_DEPTH as usize)..len_txtags]; + let last_txtags = &all_txtags[len_txtags.saturating_sub(BLOOM_COUNTER_DEPTH)..len_txtags]; let mut expected_txtag_set = HashSet::new(); for txtags in last_txtags.iter() { @@ -2240,7 +2239,7 @@ fn test_find_next_missing_transactions() { txid.clone(), tx_bytes, tx_fee, - block_height as u64, + block_height, &origin_addr, origin_nonce, &sponsor_addr, @@ -2375,9 +2374,9 @@ fn test_find_next_missing_transactions() { ) .unwrap(); assert!(txs.len() <= page_size as usize); - assert!(num_visited <= page_size as u64); + assert!(num_visited <= page_size); - if txs.len() == 0 { + if txs.is_empty() { assert!(next_page_opt.is_none()); break; } @@ -2414,9 +2413,9 @@ fn test_find_next_missing_transactions() { eprintln!("find_next_missing_transactions with empty bloom filter took {} ms to serve {} transactions", ts_after.saturating_sub(ts_before), page_size); assert!(txs.len() <= page_size as usize); - assert!(num_visited <= page_size as u64); + assert!(num_visited <= page_size); - if txs.len() == 0 { + if txs.is_empty() { assert!(next_page_opt.is_none()); break; } diff --git a/stackslib/src/cost_estimates/fee_medians.rs b/stackslib/src/cost_estimates/fee_medians.rs index 8db75ba134..38d200d8a2 100644 --- a/stackslib/src/cost_estimates/fee_medians.rs +++ b/stackslib/src/cost_estimates/fee_medians.rs @@ -200,7 +200,7 @@ impl FeeEstimator for WeightedMedianFeeRateEstimator { maybe_add_minimum_fee_rate(&mut working_fee_rates, self.full_block_weight); // If fee rates non-empty, 
then compute an update. - if working_fee_rates.len() > 0 { + if !working_fee_rates.is_empty() { // Values must be sorted. working_fee_rates.sort_by(|a, b| { a.fee_rate @@ -244,7 +244,7 @@ pub fn fee_rate_estimate_from_sorted_weighted_fees( for rate_and_weight in sorted_fee_rates { cumulative_weight += rate_and_weight.weight as f64; let percentile_n: f64 = - (cumulative_weight as f64 - rate_and_weight.weight as f64 / 2f64) / total_weight as f64; + (cumulative_weight - rate_and_weight.weight as f64 / 2f64) / total_weight; percentiles.push(percentile_n); } assert_eq!(percentiles.len(), sorted_fee_rates.len()); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 00f5aeb97e..730303cbd2 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1023,7 +1023,7 @@ check if the associated microblocks can be downloaded } i += 1; let line = line.unwrap().trim().to_string(); - if line.len() == 0 { + if line.is_empty() { continue; } let vals: Vec<_> = line.split(" => ").map(|x| x.trim()).collect(); @@ -1585,7 +1585,7 @@ check if the associated microblocks can be downloaded null_event_dispatcher, ) .unwrap(); - if receipts.len() == 0 { + if receipts.is_empty() { break; } } diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index 7f1aa9db26..6db895249c 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -388,7 +388,7 @@ pub fn test_convert_uint256_to_f64() { let original = ((Uint512::from_uint256(&Uint256::max()) * Uint512::from_u64(10)) / Uint512::from_u64(100)) .to_uint256(); - assert_approx_eq!(convert_uint256_to_f64_percentage(original, 7), 10 as f64); + assert_approx_eq!(convert_uint256_to_f64_percentage(original, 7), 10_f64); let original = ((Uint512::from_uint256(&Uint256::max()) * Uint512::from_u64(122)) / Uint512::from_u64(1000)) diff --git a/stackslib/src/net/api/getheaders.rs b/stackslib/src/net/api/getheaders.rs index d0ae36a0bb..98a9fb3062 100644 --- a/stackslib/src/net/api/getheaders.rs +++ b/stackslib/src/net/api/getheaders.rs @@ -242,7 +242,7 @@ impl HttpChunkGenerator for StacksHeaderStream { // then write ']' test_debug!("Opening header stream"); self.total_bytes += 1; - return Ok(vec!['[' as u8]); + return Ok(vec![b'[']); } if self.num_headers == 0 { test_debug!("End of header stream"); @@ -270,7 +270,7 @@ impl HttpChunkGenerator for StacksHeaderStream { self.num_headers -= 1; if self.num_headers > 0 { - header_bytes.push(',' as u8); + header_bytes.push(b','); } else { self.end_of_stream = true; } @@ -298,7 +298,7 @@ impl HttpChunkGenerator for StacksHeaderStream { test_debug!("Corking header stream"); self.corked = true; self.total_bytes += 1; - return Ok(vec![']' as u8]); + return Ok(vec![b']']); } test_debug!("Header stream terminated"); diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs index 24c3c87d71..9888b5563f 100644 --- a/stackslib/src/net/api/gettenure.rs +++ b/stackslib/src/net/api/gettenure.rs @@ -304,7 +304,7 @@ impl HttpChunkGenerator for NakamotoTenureStream { fn generate_next_chunk(&mut self) -> Result, String> { let next_block_chunk = self.block_stream.generate_next_chunk()?; - if next_block_chunk.len() > 0 { + if !next_block_chunk.is_empty() { // have block data to send return Ok(next_block_chunk); } @@ -358,7 +358,7 @@ impl StacksHttpResponse { let ptr = &mut tenure_bytes.as_slice(); let mut blocks = vec![]; - while ptr.len() > 0 { + while !ptr.is_empty() { let block = NakamotoBlock::consensus_deserialize(ptr)?; blocks.push(block); } diff --git 
a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index c832695103..515836814a 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -359,7 +359,9 @@ impl NakamotoBlockProposal { while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { std::thread::sleep(std::time::Duration::from_millis(10)); } - info!("Block validation is no longer stalled due to testing directive."); + info!( + "Block validation is no longer stalled due to testing directive. Continuing..." + ); } } let start = Instant::now(); @@ -562,7 +564,10 @@ impl NakamotoBlockProposal { // Clone signatures from block proposal // These have already been validated by `validate_nakamoto_block_burnchain()`` block.header.miner_signature = self.block.header.miner_signature.clone(); - block.header.signer_signature = self.block.header.signer_signature.clone(); + block + .header + .signer_signature + .clone_from(&self.block.header.signer_signature); // Clone the timestamp from the block proposal, which has already been validated block.header.timestamp = self.block.header.timestamp; diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 25da52a66d..185fe16a64 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -175,7 +175,7 @@ impl HttpChunkGenerator for StacksMemPoolStream { "max_txs" => self.max_txs ); - if next_txs.len() > 0 { + if !next_txs.is_empty() { // have another tx to send let chunk = next_txs[0].serialize_to_vec(); if let Some(next_last_randomized_txid) = next_last_randomized_txid_opt { diff --git a/stackslib/src/net/api/tests/gettenure.rs b/stackslib/src/net/api/tests/gettenure.rs index c4f179acc9..a6a23fb4af 100644 --- a/stackslib/src/net/api/tests/gettenure.rs +++ b/stackslib/src/net/api/tests/gettenure.rs @@ -191,7 +191,7 @@ fn test_stream_nakamoto_tenure() { let ptr = &mut all_block_bytes.as_slice(); let mut blocks = vec![]; - while ptr.len() > 0 { + while !ptr.is_empty() { let block = NakamotoBlock::consensus_deserialize(ptr).unwrap(); blocks.push(block); } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 481d0b2047..231ffe3366 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -186,7 +186,13 @@ impl MemPoolEventDispatcher for ProposalTestObserver { Some(Box::new(Arc::clone(&self.proposal_observer))) } - fn mempool_txs_dropped(&self, txids: Vec, reason: mempool::MemPoolDropReason) {} + fn mempool_txs_dropped( + &self, + txids: Vec, + new_txid: Option, + reason: mempool::MemPoolDropReason, + ) { + } fn mined_block_event( &self, diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index 6954024844..8f921525a3 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -178,7 +178,7 @@ fn test_stream_mempool_txs() { txid.clone(), tx_bytes, tx_fee, - block_height as u64, + block_height, &origin_addr, origin_nonce, &sponsor_addr, diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index 53340946f3..d11dd9995d 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -127,14 +127,14 @@ pub enum AttachmentInstanceStatus { } impl FromRow for Attachment { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result 
{ let content: Vec = row.get_unwrap("content"); Ok(Attachment { content }) } } impl FromRow for AttachmentInstance { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let hex_content_hash: String = row.get_unwrap("content_hash"); let attachment_index: u32 = row.get_unwrap("attachment_index"); let block_height = @@ -160,7 +160,7 @@ impl FromRow for AttachmentInstance { } impl FromRow<(u32, u32)> for (u32, u32) { - fn from_row<'a>(row: &'a Row) -> Result<(u32, u32), db_error> { + fn from_row(row: &Row) -> Result<(u32, u32), db_error> { let t1: u32 = row.get_unwrap(0); let t2: u32 = row.get_unwrap(1); Ok((t1, t2)) @@ -445,7 +445,7 @@ impl AtlasDB { &self.conn } - pub fn tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn tx_begin(&mut self) -> Result, db_error> { if !self.readwrite { return Err(db_error::ReadOnly); } diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index a9dad242a5..f877a0da3a 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -102,7 +102,7 @@ impl AttachmentsDownloader { let mut events_to_deregister = vec![]; // Handle initial batch - if self.initial_batch.len() > 0 { + if !self.initial_batch.is_empty() { let mut resolved = self.enqueue_initial_attachments(&mut network.atlasdb)?; resolved_attachments.append(&mut resolved); } @@ -703,7 +703,7 @@ impl BatchedDNSLookupsState { let mut state = BatchedDNSLookupsResults::default(); for url_str in urls.drain(..) { - if url_str.len() == 0 { + if url_str.is_empty() { continue; } let url = match url_str.parse_to_block_url() { @@ -932,7 +932,7 @@ impl BatchedRequestsState } }); - if pending_requests.len() > 0 { + if !pending_requests.is_empty() { // We need to keep polling for (event_id, request) in pending_requests.drain() { state.remaining.insert(event_id, request); @@ -1314,10 +1314,11 @@ impl ReliabilityReport { } pub fn score(&self) -> u32 { - match self.total_requests_sent { - 0 => 0 as u32, - n => self.total_requests_success * 1000 / (n * 1000) + n, + let n = self.total_requests_sent; + if n == 0 { + return n; } + self.total_requests_success * 1000 / (n * 1000) + n } } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index d1c2185d10..e8deeeda47 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1181,7 +1181,8 @@ impl ConversationP2P { &mut self, stacker_db_data: &StackerDBHandshakeData, ) { - self.db_smart_contracts = stacker_db_data.smart_contracts.clone(); + self.db_smart_contracts + .clone_from(&stacker_db_data.smart_contracts); } /// Forget about this peer's stacker DB replication state @@ -1442,7 +1443,7 @@ impl ConversationP2P { peer_dbconn, self.network_id, epoch.network_epoch, - (get_epoch_time_secs() as u64).saturating_sub(self.connection.options.max_neighbor_age), + get_epoch_time_secs().saturating_sub(self.connection.options.max_neighbor_age), MAX_NEIGHBORS_DATA_LEN, chain_view.burn_block_height, false, @@ -3252,7 +3253,7 @@ mod test { for i in prev_snapshot.block_height..chain_view.burn_block_height + 1 { let mut next_snapshot = prev_snapshot.clone(); - let big_i = Uint256::from_u64(i as u64); + let big_i = Uint256::from_u64(i); let mut big_i_bytes_32 = [0u8; 32]; let mut big_i_bytes_20 = [0u8; 20]; big_i_bytes_32.copy_from_slice(&big_i.to_u8_slice()); @@ -5575,7 +5576,7 @@ mod test { let getblocksdata_1 = GetBlocksInv { consensus_hash: convo_1_ancestor.consensus_hash, - num_blocks: 10 as u16, + num_blocks: 10, }; let getblocksdata_1_msg = convo_1 
.sign_message( diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index 6c7c5fb9e8..9476c042f5 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -1426,7 +1426,7 @@ impl StacksMessage { /// Sign the StacksMessage. The StacksMessage must _not_ have any relayers (i.e. we're /// originating this message). pub fn sign(&mut self, seq: u32, private_key: &Secp256k1PrivateKey) -> Result<(), net_error> { - if self.relayers.len() > 0 { + if !self.relayers.is_empty() { return Err(net_error::InvalidMessage); } self.preamble.seq = seq; @@ -1682,7 +1682,7 @@ pub mod test { } // short message shouldn't parse, but should EOF - if write_buf.len() > 0 { + if !write_buf.is_empty() { let mut short_buf = write_buf.clone(); let short_len = short_buf.len() - 1; short_buf.truncate(short_len); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 5ce72ef594..954b16ced8 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -689,7 +689,7 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ } Err(net_error::UnderflowError(_)) => { // not enough data to form a preamble yet - if bytes_consumed == 0 && bytes.len() > 0 { + if bytes_consumed == 0 && !bytes.is_empty() { // preamble is too long return Err(net_error::DeserializeError( "Preamble size would exceed maximum allowed size".to_string(), @@ -773,7 +773,7 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ self.payload_ptr = 0; self.buf = trailer; - if self.buf.len() > 0 { + if !self.buf.is_empty() { test_debug!( "Buffer has {} bytes remaining: {:?}", self.buf.len(), @@ -956,7 +956,7 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ // we can buffer bytes faster than we can process messages, so be sure to drain the buffer // before returning. - if self.buf.len() > 0 { + if !self.buf.is_empty() { loop { let mut consumed_message = false; @@ -1093,7 +1093,7 @@ impl ConnectionOutbox
<P: ProtocolFamily>
{ } fn begin_next_message(&mut self) -> Option { - if self.outbox.len() == 0 { + if self.outbox.is_empty() { // nothing to send return None; } @@ -1110,7 +1110,7 @@ impl ConnectionOutbox
<P: ProtocolFamily>
{ } fn finish_message(&mut self) { - assert!(self.outbox.len() > 0); + assert!(!self.outbox.is_empty()); // wake up any receivers when (if) we get a reply let mut inflight_message = self.outbox.pop_front(); @@ -1562,7 +1562,7 @@ mod test { let mut i = 0; // push the message, and force pipes to go out of scope to close the write end - while pipes.len() > 0 { + while !pipes.is_empty() { let mut p = pipes.remove(0); protocol.write_message(&mut p, &messages[i]).unwrap(); i += 1; @@ -1725,7 +1725,7 @@ mod test { let mut rhs = vec![]; // push the message, and force pipes to go out of scope to close the write end - while handles.len() > 0 { + while !handles.is_empty() { let mut rh = handles.remove(0); protocol.write_message(&mut rh, &messages[i]).unwrap(); i += 1; @@ -1989,12 +1989,12 @@ mod test { let mut serialized_ping = vec![]; ping.consensus_serialize(&mut serialized_ping).unwrap(); assert_eq!( - conn.outbox.socket_out_buf[0..(conn.outbox.socket_out_ptr as usize)], - serialized_ping[0..(conn.outbox.socket_out_ptr as usize)] + conn.outbox.socket_out_buf[0..conn.outbox.socket_out_ptr], + serialized_ping[0..conn.outbox.socket_out_ptr] ); let mut half_ping = - conn.outbox.socket_out_buf.clone()[0..(conn.outbox.socket_out_ptr as usize)].to_vec(); + conn.outbox.socket_out_buf.clone()[0..conn.outbox.socket_out_ptr].to_vec(); let mut ping_buf_05 = vec![0; 2 * ping_size - (ping_size + ping_size / 2)]; // flush the remaining half-ping @@ -2097,7 +2097,7 @@ mod test { let pinger = thread::spawn(move || { let mut i = 0; - while pipes.len() > 0 { + while !pipes.is_empty() { let mut p = pipes.remove(0); i += 1; @@ -2203,7 +2203,7 @@ mod test { let pinger = thread::spawn(move || { let mut rhs = vec![]; - while handle_vec.len() > 0 { + while !handle_vec.is_empty() { let mut handle = handle_vec.remove(0); handle.flush().unwrap(); rhs.push(handle); @@ -2317,7 +2317,7 @@ mod test { let pinger = thread::spawn(move || { let mut rhs = vec![]; - while handle_vec.len() > 0 { + while !handle_vec.is_empty() { let mut handle = handle_vec.remove(0); handle.flush().unwrap(); rhs.push(handle); diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 7d6450011c..35471183f3 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -50,7 +50,7 @@ pub const PEERDB_VERSION: &str = "3"; const NUM_SLOTS: usize = 8; impl FromColumn for PeerAddress { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let addrbytes_bin: String = row.get_unwrap(column_name); if addrbytes_bin.len() != 128 { error!("Unparsable peer address {}", addrbytes_bin); @@ -74,7 +74,7 @@ impl FromColumn for PeerAddress { } impl FromRow for QualifiedContractIdentifier { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let cid_str: String = row.get_unwrap("smart_contract_id"); let cid = QualifiedContractIdentifier::parse(&cid_str).map_err(|_e| db_error::ParseError)?; @@ -157,7 +157,7 @@ impl LocalPeer { info!( "Will be authenticating p2p messages with the following"; "public key" => &Secp256k1PublicKey::from_private(&pkey).to_hex(), - "services" => &to_hex(&(services as u16).to_be_bytes()), + "services" => &to_hex(&services.to_be_bytes()), "Stacker DBs" => stacker_dbs.iter().map(|cid| format!("{}", &cid)).collect::>().join(",") ); @@ -203,7 +203,7 @@ impl LocalPeer { } impl FromRow for LocalPeer { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let network_id: u32 = row.get_unwrap("network_id"); let 
parent_network_id: u32 = row.get_unwrap("parent_network_id"); let nonce_hex: String = row.get_unwrap("nonce"); @@ -253,7 +253,7 @@ impl FromRow for LocalPeer { } impl FromRow for ASEntry4 { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let prefix: u32 = row.get_unwrap("prefix"); let mask: u8 = row.get_unwrap("mask"); let asn: u32 = row.get_unwrap("asn"); @@ -269,7 +269,7 @@ impl FromRow for ASEntry4 { } impl FromRow for Neighbor { - fn from_row<'a>(row: &'a Row) -> Result { + fn from_row(row: &Row) -> Result { let peer_version: u32 = row.get_unwrap("peer_version"); let network_id: u32 = row.get_unwrap("network_id"); let addrbytes: PeerAddress = PeerAddress::from_column(row, "addrbytes")?; @@ -812,7 +812,7 @@ impl PeerDB { &self.conn } - pub fn tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn tx_begin(&mut self) -> Result, db_error> { if !self.readwrite { return Err(db_error::ReadOnly); } @@ -876,7 +876,7 @@ impl PeerDB { /// Re-key and return the new local peer pub fn rekey(&mut self, new_expire_block: u64) -> Result { - if new_expire_block > ((1 as u64) << 63) - 1 { + if new_expire_block > (1 << 63) - 1 { return Err(db_error::Overflow); } @@ -1244,7 +1244,7 @@ impl PeerDB { let empty_key = StacksPublicKey::from_private(&StacksPrivateKey::new()); let mut empty_neighbor = Neighbor::empty(&nk, &empty_key, 0); - empty_neighbor.allowed = allow_deadline as i64; + empty_neighbor.allowed = allow_deadline; debug!("Preemptively allow peer {:?}", &nk); if !PeerDB::try_insert_peer(tx, &empty_neighbor, &[])? { @@ -2821,7 +2821,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: (now_secs + 600) as i64, denied: -1, asn: (34567 + i) as u32, @@ -2841,7 +2841,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: 0, denied: -1, asn: (34567 + i) as u32, @@ -2925,7 +2925,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: -1, denied: -1, asn: (34567 + i) as u32, @@ -2946,7 +2946,7 @@ mod test { }, public_key: Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), expire_block: (i + 23456) as u64, - last_contact_time: (1552509642 + (i as u64)) as u64, + last_contact_time: (1552509642 + (i as u64)), allowed: -1, denied: -1, asn: (34567 + i) as u32, diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 9c3a90fa35..b610f2a156 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -150,7 +150,7 @@ impl DNSResolver { } }; - if addrs.len() == 0 { + if addrs.is_empty() { return DNSResponse::error(req, "DNS resolve error: got zero addresses".to_string()); } test_debug!("{}:{} resolved to {:?}", &req.host, req.port, &addrs); diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index 6ca1e91b5a..d58321118e 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -335,7 +335,7 @@ impl BlockDownloader { self.pox_id = pox_id.clone(); self.dns_lookups.clear(); for url_str in urls.into_iter() { - if 
url_str.len() == 0 { + if url_str.is_empty() { continue; } let url = url_str.parse_to_block_url()?; // NOTE: should always succeed, since a UrlString shouldn't decode unless it's a valid URL or the empty string @@ -537,7 +537,7 @@ impl BlockDownloader { }); // are we done? - if pending_block_requests.len() == 0 { + if pending_block_requests.is_empty() { self.state = BlockDownloaderState::GetMicroblocksBegin; return Ok(true); } @@ -626,7 +626,7 @@ impl BlockDownloader { Some(http_response) => { match StacksHttpResponse::decode_microblocks(http_response) { Ok(microblocks) => { - if microblocks.len() == 0 { + if microblocks.is_empty() { // we wouldn't have asked for a 0-length stream info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url; "consensus_hash" => %block_key.consensus_hash @@ -675,7 +675,7 @@ impl BlockDownloader { }); // are we done? - if pending_microblock_requests.len() == 0 { + if pending_microblock_requests.is_empty() { self.state = BlockDownloaderState::Done; return Ok(true); } @@ -997,7 +997,7 @@ impl BlockDownloader { if microblocks { // being requested now? for (_, reqs) in self.microblocks_to_try.iter() { - if reqs.len() > 0 { + if !reqs.is_empty() { if reqs[0].index_block_hash == *index_hash { return true; } @@ -1012,7 +1012,7 @@ impl BlockDownloader { } } else { for (_, reqs) in self.blocks_to_try.iter() { - if reqs.len() > 0 { + if !reqs.is_empty() { if reqs[0].index_block_hash == *index_hash { return true; } @@ -1060,10 +1060,10 @@ impl PeerNetwork { match self.events.get(neighbor_key) { Some(ref event_id) => match self.peers.get(event_id) { Some(ref convo) => { - if convo.data_url.len() > 0 { - Some(convo.data_url.clone()) - } else { + if convo.data_url.is_empty() { None + } else { + Some(convo.data_url.clone()) } } None => None, @@ -1455,7 +1455,7 @@ impl PeerNetwork { ); continue; }; - if data_url.len() == 0 { + if data_url.is_empty() { // peer doesn't yet know its public IP address, and isn't given a data URL // directly debug!( @@ -1578,7 +1578,7 @@ impl PeerNetwork { let (need_blocks, block_sortition_height, microblock_sortition_height) = match self.block_downloader { Some(ref mut downloader) => ( - downloader.blocks_to_try.len() == 0, + downloader.blocks_to_try.is_empty(), downloader.block_sortition_height, downloader.microblock_sortition_height, ), @@ -1653,7 +1653,7 @@ impl PeerNetwork { } } - if next_microblocks_to_try.len() == 0 { + if next_microblocks_to_try.is_empty() { // have no microblocks to try in the first place, so just advance to the // next batch debug!( @@ -1705,7 +1705,7 @@ impl PeerNetwork { let requests = next_blocks_to_try.remove(&height).expect( "BUG: hashmap both contains and does not contain sortition height", ); - if requests.len() == 0 { + if requests.is_empty() { height += 1; continue; } @@ -1767,7 +1767,7 @@ impl PeerNetwork { let requests = next_microblocks_to_try.remove(&mblock_height).expect( "BUG: hashmap both contains and does not contain sortition height", ); - if requests.len() == 0 { + if requests.is_empty() { debug!("No microblock requests for {}", mblock_height); mblock_height += 1; continue; @@ -1843,7 +1843,7 @@ impl PeerNetwork { } } - if downloader.blocks_to_try.len() == 0 { + if downloader.blocks_to_try.is_empty() { // nothing in this range, so advance sortition range to try for next time next_block_sortition_height = next_block_sortition_height + (network.burnchain.pox_constants.reward_cycle_length as u64); @@ -1852,7 +1852,7 @@ impl PeerNetwork { 
&network.local_peer, next_block_sortition_height ); } - if downloader.microblocks_to_try.len() == 0 { + if downloader.microblocks_to_try.is_empty() { // nothing in this range, so advance sortition range to try for next time next_microblock_sortition_height = next_microblock_sortition_height + (network.burnchain.pox_constants.reward_cycle_length as u64); @@ -1920,7 +1920,7 @@ impl PeerNetwork { match requestables.pop_front() { Some(requestable) => { if let Some(Some(ref sockaddrs)) = dns_lookups.get(requestable.get_url()) { - assert!(sockaddrs.len() > 0); + assert!(!sockaddrs.is_empty()); let peerhost = match PeerHost::try_from_url(requestable.get_url()) { Some(ph) => ph, @@ -2176,12 +2176,12 @@ impl PeerNetwork { let mut microblocks_empty = vec![]; for (height, requests) in downloader.blocks_to_try.iter() { - if requests.len() == 0 { + if requests.is_empty() { blocks_empty.push(*height); } } for (height, requests) in downloader.microblocks_to_try.iter() { - if requests.len() == 0 { + if requests.is_empty() { microblocks_empty.push(*height); } } @@ -2272,9 +2272,8 @@ impl PeerNetwork { debug!("Re-trying blocks:"); for (height, requests) in downloader.blocks_to_try.iter() { assert!( - requests.len() > 0, - "Empty block requests at height {}", - height + !requests.is_empty(), + "Empty block requests at height {height}" ); debug!( " Height {}: anchored block {} available from {} peers: {:?}", @@ -2289,9 +2288,8 @@ impl PeerNetwork { } for (height, requests) in downloader.microblocks_to_try.iter() { assert!( - requests.len() > 0, - "Empty microblock requests at height {}", - height + !requests.is_empty(), + "Empty microblock requests at height {height}" ); debug!( " Height {}: microblocks {} available from {} peers: {:?}", diff --git a/stackslib/src/net/http/response.rs b/stackslib/src/net/http/response.rs index cf059d608e..3ebed7e9d2 100644 --- a/stackslib/src/net/http/response.rs +++ b/stackslib/src/net/http/response.rs @@ -133,7 +133,7 @@ impl HttpResponseContents { HttpResponseContents::RAM(ref mut buf) => { // dump directly into the pipewrite // TODO: zero-copy? 
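The assert! rewrites above also switch to Rust 2021 inline format arguments. A minimal sketch, separate from the patch, of the two equivalent forms:

```rust
// Since Rust 2021, format strings may name in-scope variables directly
// instead of passing them as trailing positional arguments.
fn main() {
    let height: u64 = 1852;
    let requests: Vec<u32> = vec![7];

    // Older style: positional argument.
    assert!(!requests.is_empty(), "Empty block requests at height {}", height);

    // Rewritten style: the identifier is captured from scope.
    assert!(!requests.is_empty(), "Empty block requests at height {height}");

    println!("height {height}: {} request(s)", requests.len());
}
```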
- if buf.len() > 0 { + if !buf.is_empty() { fd.write_all(&buf[..]).map_err(Error::WriteError)?; buf.clear(); } diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 56c0994b46..1688b95b25 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -80,12 +80,12 @@ pub enum TipRequest { impl TipRequest {} -impl ToString for TipRequest { - fn to_string(&self) -> String { +impl fmt::Display for TipRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Self::UseLatestAnchoredTip => "".to_string(), - Self::UseLatestUnconfirmedTip => "latest".to_string(), - Self::SpecificTip(ref tip) => format!("{}", tip), + Self::UseLatestAnchoredTip => write!(f, ""), + Self::UseLatestUnconfirmedTip => write!(f, "latest"), + Self::SpecificTip(ref tip) => write!(f, "{tip}"), } } } @@ -316,7 +316,7 @@ impl HttpRequestContentsExtensions for HttpRequestContents { /// Use a particular tip request fn for_tip(mut self, tip_req: TipRequest) -> Self { if tip_req != TipRequest::UseLatestAnchoredTip { - self.query_arg("tip".to_string(), format!("{}", &tip_req.to_string())) + self.query_arg("tip".to_string(), tip_req.to_string()) } else { let _ = self.take_query_arg(&"tip".to_string()); self @@ -475,11 +475,11 @@ impl StacksHttpRequest { } let (decoded_path, _) = decode_request_path(&preamble.path_and_query_str)?; let full_query_string = contents.get_full_query_string(); - if full_query_string.len() > 0 { - preamble.path_and_query_str = format!("{}?{}", &decoded_path, &full_query_string); + preamble.path_and_query_str = if full_query_string.is_empty() { + decoded_path } else { - preamble.path_and_query_str = decoded_path; - } + format!("{decoded_path}?{full_query_string}") + }; Ok(Self { preamble, @@ -1039,7 +1039,7 @@ impl StacksHttp { let payload = match handler.try_parse_request( preamble, &captures, - if query.len() > 0 { Some(&query) } else { None }, + if query.is_empty() { None } else { Some(&query) }, body, ) { Ok(p) => p, @@ -1078,7 +1078,7 @@ impl StacksHttp { let payload = match request.try_parse_request( preamble, &captures, - if query.len() > 0 { Some(&query) } else { None }, + if query.is_empty() { None } else { Some(&query) }, body, ) { Ok(p) => p, diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 32cb92a387..782ce8a876 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -175,8 +175,8 @@ impl PeerBlocksInv { assert!(block_height >= self.first_block_height); let sortition_height = block_height - self.first_block_height; - self.num_sortitions = if self.num_sortitions < sortition_height + (bitlen as u64) { - sortition_height + (bitlen as u64) + self.num_sortitions = if self.num_sortitions < sortition_height + bitlen { + sortition_height + bitlen } else { self.num_sortitions }; @@ -2402,7 +2402,7 @@ impl PeerNetwork { &network.local_peer, inv_state.block_sortition_start, ); - if !inv_state.hint_learned_data && inv_state.block_stats.len() > 0 { + if !inv_state.hint_learned_data && !inv_state.block_stats.is_empty() { // did a full scan without learning anything new inv_state.last_rescanned_at = get_epoch_time_secs(); inv_state.hint_do_rescan = false; @@ -2729,7 +2729,7 @@ impl PeerNetwork { // always-allowed peer? 
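The TipRequest change above swaps a hand-written ToString impl for a fmt::Display implementation; the standard library's blanket `impl<T: fmt::Display> ToString for T` then supplies `to_string()` for free. An illustrative sketch with a simplified enum (the String tip stands in for the real StacksBlockId):

```rust
use std::fmt;

enum TipRequest {
    UseLatestAnchoredTip,
    UseLatestUnconfirmedTip,
    SpecificTip(String),
}

// Implementing Display makes the type usable in format!/write! directly,
// and ToString comes along via the blanket impl.
impl fmt::Display for TipRequest {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::UseLatestAnchoredTip => write!(f, ""),
            Self::UseLatestUnconfirmedTip => write!(f, "latest"),
            Self::SpecificTip(tip) => write!(f, "{tip}"),
        }
    }
}

fn main() {
    let req = TipRequest::SpecificTip("deadbeef".into());
    // No separate ToString impl needed:
    assert_eq!(req.to_string(), "deadbeef");
    println!("tip={req}");
}
```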
let mut finished_always_allowed_inv_sync = false; - if always_allowed.len() == 0 { + if always_allowed.is_empty() { // vacuously, we are done so we can return return true; } diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index e832b70184..c103f16eb7 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -241,7 +241,7 @@ impl InvGenerator { tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { let tip_block_id = StacksBlockId::new(tip_block_ch, tip_block_bh); - if self.processed_tenures.get(&tip_block_id).is_none() { + if !self.processed_tenures.contains_key(&tip_block_id) { // this tip has no known table. // does it have an ancestor with a table? If so, then move its ancestor's table to this // tip. Otherwise, make a new table. diff --git a/stackslib/src/net/mempool/mod.rs b/stackslib/src/net/mempool/mod.rs index 2a4232ad2f..27253180d4 100644 --- a/stackslib/src/net/mempool/mod.rs +++ b/stackslib/src/net/mempool/mod.rs @@ -170,7 +170,7 @@ impl MempoolSync { continue; } // has a data URL? - if convo.data_url.len() == 0 { + if convo.data_url.is_empty() { continue; } // already resolved? diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3b9c1518d2..415f74c739 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -637,7 +637,7 @@ pub struct RPCHandlerArgs<'a> { pub coord_comms: Option<&'a CoordinatorChannels>, } -impl<'a> RPCHandlerArgs<'a> { +impl RPCHandlerArgs<'_> { pub fn get_estimators_ref( &self, ) -> Option<(&dyn CostEstimator, &dyn FeeEstimator, &dyn CostMetric)> { @@ -1800,7 +1800,7 @@ impl NetworkResult { } retain }); - mblocks.len() > 0 + !mblocks.is_empty() }); newer .confirmed_microblocks @@ -1828,7 +1828,7 @@ impl NetworkResult { } retain }); - if tx_data.len() == 0 { + if tx_data.is_empty() { continue; } @@ -1850,9 +1850,9 @@ impl NetworkResult { } retain }); - block_data.blocks.len() > 0 + !block_data.blocks.is_empty() }); - if block_list.len() == 0 { + if block_list.is_empty() { continue; } @@ -1873,9 +1873,9 @@ impl NetworkResult { } retain }); - mblock_data.microblocks.len() > 0 + !mblock_data.microblocks.is_empty() }); - if microblock_data.len() == 0 { + if microblock_data.is_empty() { continue; } @@ -1896,9 +1896,9 @@ impl NetworkResult { } retain }); - naka_blocks.blocks.len() > 0 + !naka_blocks.blocks.is_empty() }); - if nakamoto_block_data.len() == 0 { + if nakamoto_block_data.is_empty() { continue; } @@ -1927,7 +1927,7 @@ impl NetworkResult { retain }); - blk_data.blocks.len() > 0 + !blk_data.blocks.is_empty() }); self.uploaded_microblocks.retain_mut(|ref mut mblock_data| { mblock_data.microblocks.retain(|mblk| { @@ -1938,7 +1938,7 @@ impl NetworkResult { retain }); - mblock_data.microblocks.len() > 0 + !mblock_data.microblocks.is_empty() }); self.uploaded_nakamoto_blocks.retain(|nblk| { let retain = !newer_naka_blocks.contains(&nblk.block_id()); @@ -2067,38 +2067,37 @@ impl NetworkResult { } pub fn has_blocks(&self) -> bool { - self.blocks.len() > 0 || self.pushed_blocks.len() > 0 + !self.blocks.is_empty() || !self.pushed_blocks.is_empty() } pub fn has_microblocks(&self) -> bool { - self.confirmed_microblocks.len() > 0 - || self.pushed_microblocks.len() > 0 - || self.uploaded_microblocks.len() > 0 + !self.confirmed_microblocks.is_empty() + || !self.pushed_microblocks.is_empty() + || !self.uploaded_microblocks.is_empty() } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 - || self.pushed_nakamoto_blocks.len() > 0 - || 
self.uploaded_nakamoto_blocks.len() > 0 + !self.nakamoto_blocks.is_empty() + || !self.pushed_nakamoto_blocks.is_empty() + || !self.uploaded_nakamoto_blocks.is_empty() } pub fn has_transactions(&self) -> bool { - self.pushed_transactions.len() > 0 - || self.uploaded_transactions.len() > 0 - || self.synced_transactions.len() > 0 + !self.pushed_transactions.is_empty() + || !self.uploaded_transactions.is_empty() + || !self.synced_transactions.is_empty() } pub fn has_attachments(&self) -> bool { - self.attachments.len() > 0 + !self.attachments.is_empty() } pub fn has_stackerdb_chunks(&self) -> bool { self.stacker_db_sync_results .iter() - .fold(0, |acc, x| acc + x.chunks_to_store.len()) - > 0 - || self.uploaded_stackerdb_chunks.len() > 0 - || self.pushed_stackerdb_chunks.len() > 0 + .any(|x| !x.chunks_to_store.is_empty()) + || !self.uploaded_stackerdb_chunks.is_empty() + || !self.pushed_stackerdb_chunks.is_empty() } pub fn transactions(&self) -> Vec { @@ -2973,7 +2972,7 @@ pub mod test { debug!("Not setting aggregate public key"); } // add test-specific boot code - if conf.setup_code.len() > 0 { + if !conf.setup_code.is_empty() { let receipt = clarity_tx.connection().as_transaction(|clarity| { let boot_code_addr = boot_code_test_addr(); let boot_code_account = StacksAccount { @@ -3852,7 +3851,7 @@ pub mod test { &mut self, microblocks: &Vec, ) -> Result { - assert!(microblocks.len() > 0); + assert!(!microblocks.is_empty()); let sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); let res = { diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index 0289875f11..ebf83af962 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -195,7 +195,7 @@ pub trait NeighborWalkDB { ) .map_err(net_error::DBError)?; - if neighbors.len() == 0 { + if neighbors.is_empty() { debug!( "{:?}: No neighbors available in the peer DB newer than {}!", network.get_local_peer(), @@ -205,7 +205,7 @@ pub trait NeighborWalkDB { &network.peerdb_conn(), network.get_local_peer().network_id, )?; - if seed_nodes.len() == 0 { + if seed_nodes.is_empty() { return Err(net_error::NoSuchNeighbor); } return Ok(seed_nodes); @@ -261,7 +261,7 @@ pub trait NeighborWalkDB { }) .collect(); - if next_neighbors.len() == 0 { + if next_neighbors.is_empty() { return Err(net_error::NoSuchNeighbor); } @@ -295,7 +295,7 @@ impl PeerDBNeighborWalk { let mut slots = PeerDB::peer_slots(conn, nk.network_id, &nk.addrbytes, nk.port) .map_err(net_error::DBError)?; - if slots.len() == 0 { + if slots.is_empty() { // not present return Ok(None); } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 450dc04463..cc3fd73db8 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -118,7 +118,7 @@ impl PeerNetwork { fn new_outbound_or_pingback_walk( &self, ) -> Result, net_error> { - if self.get_walk_pingbacks().len() == 0 { + if self.get_walk_pingbacks().is_empty() { debug!( "{:?}: no walk pingbacks, so instantiate a normal neighbor walk", self.get_local_peer() diff --git a/stackslib/src/net/neighbors/neighbor.rs b/stackslib/src/net/neighbors/neighbor.rs index 4a8b7baf18..64a033ce9c 100644 --- a/stackslib/src/net/neighbors/neighbor.rs +++ b/stackslib/src/net/neighbors/neighbor.rs @@ -49,9 +49,9 @@ impl Neighbor { /// Update this peer in the DB. /// If there's no DB entry for this peer, then do nothing. 
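The has_stackerdb_chunks rewrite above replaces a fold-and-compare with Iterator::any, which stops at the first non-empty result instead of counting everything. A runnable sketch (SyncResult is a hypothetical stand-in for the real sync-result type):

```rust
struct SyncResult {
    chunks_to_store: Vec<u8>,
}

// Before: fold walks the whole slice to build a total, then compares to 0.
fn has_chunks_fold(results: &[SyncResult]) -> bool {
    results.iter().fold(0, |acc, x| acc + x.chunks_to_store.len()) > 0
}

// After: any short-circuits on the first non-empty item.
fn has_chunks_any(results: &[SyncResult]) -> bool {
    results.iter().any(|x| !x.chunks_to_store.is_empty())
}

fn main() {
    let results = vec![
        SyncResult { chunks_to_store: vec![1, 2, 3] },
        SyncResult { chunks_to_store: vec![] },
    ];
    assert_eq!(has_chunks_fold(&results), has_chunks_any(&results));
}
```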
/// Updates last-contact-time to now, since this is only called when we get back a Handshake - pub fn save_update<'a>( + pub fn save_update( &mut self, - tx: &DBTx<'a>, + tx: &DBTx<'_>, stacker_dbs: Option<&[QualifiedContractIdentifier]>, ) -> Result<(), net_error> { self.last_contact_time = get_epoch_time_secs(); @@ -66,9 +66,9 @@ impl Neighbor { /// Updates last-contact-time to now, since this is only called when we get back a Handshake /// Return true if saved. /// Return false if not saved -- i.e. the frontier is full and we should try evicting neighbors. - pub fn save<'a>( + pub fn save( &mut self, - tx: &DBTx<'a>, + tx: &DBTx<'_>, stacker_dbs: Option<&[QualifiedContractIdentifier]>, ) -> Result { self.last_contact_time = get_epoch_time_secs(); diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 955eeb0685..f16483b361 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -348,7 +348,7 @@ impl NeighborWalk { network: &PeerNetwork, ) -> Result, net_error> { let event_ids: Vec<_> = network.iter_peer_event_ids().collect(); - if event_ids.len() == 0 { + if event_ids.is_empty() { debug!( "{:?}: failed to begin inbound neighbor walk: no one's connected to us", network.get_local_peer() @@ -429,7 +429,7 @@ impl NeighborWalk { comms: NC, network: &PeerNetwork, ) -> Result, net_error> { - if network.get_walk_pingbacks().len() == 0 { + if network.get_walk_pingbacks().is_empty() { debug!("{:?}: no walk pingbacks", network.get_local_peer()); return Err(net_error::NoSuchNeighbor); } @@ -1043,7 +1043,7 @@ impl NeighborWalk { } } - if still_pending.len() > 0 { + if !still_pending.is_empty() { // try again self.pending_neighbor_addrs = Some(still_pending); return Ok(false); @@ -1390,7 +1390,7 @@ impl NeighborWalk { exclude: Option<&Neighbor>, ) -> Option { let mut rnd = thread_rng(); - if frontier.len() == 0 || (exclude.is_some() && frontier.len() == 1) { + if frontier.is_empty() || (exclude.is_some() && frontier.len() == 1) { return None; } // select a random neighbor index, if exclude is set, and matches this @@ -1456,7 +1456,7 @@ impl NeighborWalk { let mut rnd = thread_rng(); // step to a node in cur_neighbor's frontier, per MHRWDA - let next_neighbor_opt = if self.frontier.len() == 0 { + let next_neighbor_opt = if self.frontier.is_empty() { // stay here for now -- we don't yet know this neighbor's // frontier if self.walk_outbound { @@ -1467,7 +1467,7 @@ impl NeighborWalk { } else { // continuing the walk let next_neighbor = - Self::pick_random_neighbor(&self.frontier, None).expect("BUG: empty frontier size"); // won't panic since self.frontier.len() > 0 + Self::pick_random_neighbor(&self.frontier, None).expect("BUG: empty frontier size"); // won't panic since !self.frontier.is_empty() let walk_prob: f64 = rnd.gen(); if walk_prob < self @@ -1603,7 +1603,7 @@ impl NeighborWalk { } self.network_pingbacks = still_pending; - if self.network_pingbacks.len() > 0 { + if !self.network_pingbacks.is_empty() { // still connecting debug!( "{:?}: Still trying to pingback-handshake with {} neighbors", diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index c724a681b8..9300fa9150 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -474,7 +474,7 @@ impl PeerNetwork { ); let pub_ip = connection_opts.public_ip_address.clone(); let pub_ip_learned = pub_ip.is_none(); - local_peer.public_ip_address = pub_ip.clone(); + local_peer.public_ip_address.clone_from(&pub_ip); if 
connection_opts.disable_inbound_handshakes { debug!("{:?}: disable inbound handshakes", &local_peer); @@ -765,7 +765,7 @@ impl PeerNetwork { } /// Create a transaction against the PeerDB - pub fn peerdb_tx_begin<'a>(&'a mut self) -> Result, db_error> { + pub fn peerdb_tx_begin(&mut self) -> Result, db_error> { self.peerdb.tx_begin() } @@ -1745,7 +1745,7 @@ impl PeerNetwork { self.can_register_peer(nk, outbound).and_then(|_| { let other_events = self.get_pubkey_events(pubkh); - if other_events.len() > 0 { + if !other_events.is_empty() { for event_id in other_events.into_iter() { if let Some(convo) = self.peers.get(&event_id) { // only care if we're trying to connect in the same direction @@ -2551,7 +2551,7 @@ impl PeerNetwork { // flush each outgoing conversation let mut relay_handles = std::mem::replace(&mut self.relay_handles, HashMap::new()); for (event_id, handle_list) in relay_handles.iter_mut() { - if handle_list.len() == 0 { + if handle_list.is_empty() { debug!("No handles for event {}", event_id); drained.push(*event_id); continue; @@ -2563,7 +2563,7 @@ impl PeerNetwork { event_id ); - while handle_list.len() > 0 { + while !handle_list.is_empty() { debug!("Flush {} relay handles", handle_list.len()); let res = self.with_p2p_convo(*event_id, |_network, convo, client_sock| { if let Some(handle) = handle_list.front_mut() { @@ -2654,7 +2654,7 @@ impl PeerNetwork { /// Return Err(..) on failure #[cfg_attr(test, mutants::skip)] fn begin_learn_public_ip(&mut self) -> Result { - if self.peers.len() == 0 { + if self.peers.is_empty() { return Err(net_error::NoSuchNeighbor); } @@ -3268,13 +3268,13 @@ impl PeerNetwork { } let reward_cycle_start = self.antientropy_start_reward_cycle; - let reward_cycle_finish = - self.antientropy_start_reward_cycle - .saturating_sub(self.connection_opts.inv_reward_cycles) as u64; + let reward_cycle_finish = self + .antientropy_start_reward_cycle + .saturating_sub(self.connection_opts.inv_reward_cycles); self.antientropy_start_reward_cycle = reward_cycle_finish; - if neighbor_keys.len() == 0 { + if neighbor_keys.is_empty() { return; } @@ -4118,7 +4118,8 @@ impl PeerNetwork { /// Get the local peer from the peer DB, but also preserve the public IP address pub fn load_local_peer(&self) -> Result { let mut lp = PeerDB::get_local_peer(&self.peerdb.conn())?; - lp.public_ip_address = self.local_peer.public_ip_address.clone(); + lp.public_ip_address + .clone_from(&self.local_peer.public_ip_address); Ok(lp) } @@ -5656,7 +5657,7 @@ mod test { p2p.process_connecting_sockets(&mut p2p_poll_state); let mut banned = p2p.process_bans().unwrap(); - if banned.len() > 0 { + if !banned.is_empty() { test_debug!("Banned {} peer(s)", banned.len()); } @@ -5686,7 +5687,7 @@ mod test { } let banned = rx.recv().unwrap(); - assert!(banned.len() >= 1); + assert!(!banned.is_empty()); p2p_thread.join().unwrap(); test_debug!("dispatcher thread joined"); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 38a76bf4b6..96edb12c2a 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -269,11 +269,11 @@ impl PeerNetwork { while num_outbound - (ret.len() as u64) > self.connection_opts.soft_num_neighbors { let mut weighted_sample: HashMap = HashMap::new(); for (org, neighbor_info) in org_neighbors.iter() { - if neighbor_info.len() > 0 { + if !neighbor_info.is_empty() { weighted_sample.insert(*org, neighbor_info.len()); } } - if weighted_sample.len() == 0 { + if weighted_sample.is_empty() { // nothing to do break; } @@ -449,7 +449,7 @@ impl PeerNetwork { 
#[cfg(test)] { - if pruned_by_ip.len() > 0 || pruned_by_org.len() > 0 { + if !pruned_by_ip.is_empty() || !pruned_by_org.is_empty() { let (mut inbound, mut outbound) = self.dump_peer_table(); inbound.sort(); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 86358e7be2..9121bac2c9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -319,7 +319,7 @@ impl RelayerStats { } // prune stale - while relayed.len() > 0 { + while !relayed.is_empty() { let head_ts = match relayed.front() { Some((ts, _)) => *ts, None => { @@ -342,7 +342,7 @@ impl RelayerStats { let mut to_remove = vec![]; for (ts, old_nk) in self.recent_updates.iter() { self.recent_messages.remove(old_nk); - if self.recent_messages.len() <= (MAX_RELAYER_STATS as usize) - 1 { + if self.recent_messages.len() <= MAX_RELAYER_STATS - 1 { break; } to_remove.push(*ts); @@ -406,7 +406,7 @@ impl RelayerStats { // look up ASNs let mut asns = HashMap::new(); for nk in neighbors.iter() { - if asns.get(nk).is_none() { + if !asns.contains_key(nk) { match PeerDB::asn_lookup(conn, &nk.addrbytes)? { Some(asn) => asns.insert((*nk).clone(), asn), None => asns.insert((*nk).clone(), 0), @@ -516,10 +516,10 @@ impl RelayerStats { if norm <= 1 { // there is one or zero options - if rankings_vec.len() > 0 { - return vec![rankings_vec[0].0.clone()]; - } else { + if rankings_vec.is_empty() { return vec![]; + } else { + return vec![rankings_vec[0].0.clone()]; } } @@ -1150,7 +1150,7 @@ impl Relayer { for (anchored_block_hash, (relayers, mblocks_map)) in new_microblocks.into_iter() { for (_, mblock) in mblocks_map.into_iter() { - if mblocks_data.get(&anchored_block_hash).is_none() { + if !mblocks_data.contains_key(&anchored_block_hash) { mblocks_data.insert(anchored_block_hash.clone(), vec![]); } @@ -1437,7 +1437,7 @@ impl Relayer { for (consensus_hash, microblock_stream, _download_time) in network_result.confirmed_microblocks.iter() { - if microblock_stream.len() == 0 { + if microblock_stream.is_empty() { continue; } let anchored_block_hash = microblock_stream[0].header.prev_block.clone(); @@ -1798,7 +1798,7 @@ impl Relayer { } } - if accepted_blocks.len() > 0 { + if !accepted_blocks.is_empty() { pushed_blocks.push(AcceptedNakamotoBlocks { relayers: relayers.clone(), blocks: accepted_blocks, @@ -2078,7 +2078,9 @@ impl Relayer { Relayer::preprocess_pushed_microblocks(&sort_ic, network_result, chainstate)?; bad_neighbors.append(&mut new_bad_neighbors); - if new_blocks.len() > 0 || new_microblocks.len() > 0 || new_confirmed_microblocks.len() > 0 + if !new_blocks.is_empty() + || !new_microblocks.is_empty() + || !new_confirmed_microblocks.is_empty() { info!( "Processing newly received Stacks blocks: {}, microblocks: {}, confirmed microblocks: {}", @@ -2237,7 +2239,7 @@ impl Relayer { } filtered_tx_data.push((relayers, tx)); } - if filtered_tx_data.len() > 0 { + if !filtered_tx_data.is_empty() { filtered_pushed_transactions.insert(nk, filtered_tx_data); } } @@ -2608,7 +2610,7 @@ impl Relayer { let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); let available = Relayer::load_blocks_available_data(sortdb, new_block_chs) .unwrap_or(BlocksAvailableMap::new()); - if available.len() > 0 { + if !available.is_empty() { debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { warn!("Failed to advertize new blocks: {:?}", &e); @@ -2622,7 +2624,7 @@ impl Relayer { .collect(); let mblocks_available = 
Relayer::load_blocks_available_data(sortdb, new_mblock_chs) .unwrap_or(BlocksAvailableMap::new()); - if mblocks_available.len() > 0 { + if !mblocks_available.is_empty() { debug!( "{:?}: Confirmed microblock streams available: {}", &_local_peer, @@ -2637,7 +2639,7 @@ impl Relayer { } // have the p2p thread forward all new unconfirmed microblocks - if new_microblocks.len() > 0 { + if !new_microblocks.is_empty() { debug!( "{:?}: Unconfirmed microblocks: {}", &_local_peer, @@ -2685,7 +2687,7 @@ impl Relayer { // attempt to relay messages (note that this is all best-effort). // punish bad peers - if bad_block_neighbors.len() > 0 { + if !bad_block_neighbors.is_empty() { debug!( "{:?}: Ban {} peers", &_local_peer, @@ -2776,7 +2778,7 @@ impl Relayer { for blocks_and_relayers in accepted_blocks.into_iter() { let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; - if blocks.len() == 0 { + if blocks.is_empty() { continue; } @@ -2817,7 +2819,7 @@ impl Relayer { &relayers ); - if relay_blocks.len() == 0 { + if relay_blocks.is_empty() { continue; } @@ -2883,7 +2885,7 @@ impl Relayer { .unwrap_or(u64::MAX); // don't panic if we somehow receive more than u64::MAX blocks // punish bad peers - if bad_neighbors.len() > 0 { + if !bad_neighbors.is_empty() { debug!("{:?}: Ban {} peers", &local_peer, bad_neighbors.len()); if let Err(e) = self.p2p.ban_peers(bad_neighbors) { warn!("Failed to ban bad-block peers: {:?}", &e); @@ -2891,7 +2893,7 @@ impl Relayer { } // relay if not IBD - if !ibd && accepted_blocks.len() > 0 { + if !ibd && !accepted_blocks.is_empty() { self.relay_epoch3_blocks(local_peer, sortdb, accepted_blocks); } num_new_nakamoto_blocks @@ -2932,7 +2934,7 @@ impl Relayer { ) .unwrap_or(vec![]); - if new_txs.len() > 0 { + if !new_txs.is_empty() { debug!( "{:?}: Send {} transactions to neighbors", &_local_peer, diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 60a2d37275..e515934738 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -481,7 +481,7 @@ impl ConversationHttp { self.pending_response.is_none() && self.connection.inbox_len() == 0 && self.connection.outbox_len() == 0 - && self.reply_streams.len() == 0 + && self.reply_streams.is_empty() } /// Is the conversation out of pending data? 
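Most hunks in this patch are the same clippy::len_zero cleanup: `is_empty()` instead of comparing `len()` against zero. A small, self-contained illustration of the idiom across the container types touched here (Vec, HashMap, VecDeque):

```rust
use std::collections::{HashMap, VecDeque};

fn main() {
    let handles: Vec<u32> = vec![];
    let peers: HashMap<String, u16> = HashMap::new();
    let mut outbox: VecDeque<&str> = VecDeque::new();
    outbox.push_back("ping");

    // Before: handles.len() == 0 / peers.len() == 0
    assert!(handles.is_empty());
    assert!(peers.is_empty());

    // Before: while outbox.len() > 0 { ... }
    while !outbox.is_empty() {
        let _msg = outbox.pop_front();
    }
}
```

The rewrite is behavior-preserving; `is_empty()` simply states intent directly and is the form clippy expects.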
diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index b7261c14db..78f0f6fbb5 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -779,7 +779,7 @@ mod test { let mut resp = vec![]; match sock.read_to_end(&mut resp) { Ok(_) => { - if resp.len() == 0 { + if resp.is_empty() { test_debug!("Client {} did not receive any data", i); client_sx.send(Err(net_error::PermanentlyDrained)).unwrap(); return; @@ -1106,7 +1106,7 @@ mod test { }, |client_id, http_response_bytes_res| { match http_response_bytes_res { - Ok(bytes) => bytes.len() == 0, // should not have gotten any data + Ok(bytes) => bytes.is_empty(), // should not have gotten any data Err(net_error::PermanentlyDrained) => true, Err(err) => { // should have failed diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 0ecdc8199e..0faf5bbe03 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -191,7 +191,7 @@ fn inner_get_slot_validation( query_row(conn, &sql, args).map_err(|e| e.into()) } -impl<'a> StackerDBTx<'a> { +impl StackerDBTx<'_> { pub fn commit(self) -> Result<(), db_error> { self.sql_tx.commit().map_err(db_error::from) } @@ -527,10 +527,7 @@ impl StackerDBs { /// Open a transaction on the Stacker DB. /// The config would be obtained from a DBSelector instance - pub fn tx_begin<'a>( - &'a mut self, - config: StackerDBConfig, - ) -> Result, db_error> { + pub fn tx_begin(&mut self, config: StackerDBConfig) -> Result, db_error> { let sql_tx = tx_begin_immediate(&mut self.conn)?; Ok(StackerDBTx { sql_tx, config }) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 9d1b25af51..899990402d 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -347,7 +347,7 @@ impl StackerDBs { &e ); } - } else if (new_config != stackerdb_config && new_config.signers.len() > 0) + } else if (new_config != stackerdb_config && !new_config.signers.is_empty()) || (new_config == stackerdb_config && new_config.signers.len() != self.get_slot_versions(&stackerdb_contract_id)?.len()) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 237f582d26..7dfeb809c7 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -225,7 +225,7 @@ impl StackerDBSync { let mut eviction_index = None; if self.last_eviction_time + 60 < get_epoch_time_secs() { self.last_eviction_time = get_epoch_time_secs(); - if self.replicas.len() > 0 { + if !self.replicas.is_empty() { eviction_index = Some(thread_rng().gen_range(0..self.replicas.len())); } } @@ -558,7 +558,7 @@ impl StackerDBSync { self.chunk_fetch_priorities .retain(|(chunk, ..)| chunk.slot_id != slot_id); - if self.chunk_fetch_priorities.len() > 0 { + if !self.chunk_fetch_priorities.is_empty() { let next_chunk_fetch_priority = self.next_chunk_fetch_priority % self.chunk_fetch_priorities.len(); self.next_chunk_fetch_priority = next_chunk_fetch_priority; @@ -611,7 +611,7 @@ impl StackerDBSync { self.chunk_push_priorities .retain(|(chunk, ..)| chunk.chunk_data.slot_id != slot_id); - if self.chunk_push_priorities.len() > 0 { + if !self.chunk_push_priorities.is_empty() { let next_chunk_push_priority = self.next_chunk_push_priority % self.chunk_push_priorities.len(); self.next_chunk_push_priority = next_chunk_push_priority; @@ -700,7 +700,7 @@ impl StackerDBSync { /// Returns Err(NoSuchNeighbor) if we don't have anyone to talk to /// Returns Err(..) 
on DB query error pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.replicas.len() == 0 { + if self.replicas.is_empty() { // find some from the peer DB let replicas = self.find_qualified_replicas(network)?; self.replicas = replicas; @@ -713,7 +713,7 @@ impl StackerDBSync { network.get_num_p2p_convos(); "replicas" => ?self.replicas ); - if self.replicas.len() == 0 { + if self.replicas.is_empty() { // nothing to do return Err(net_error::NoSuchNeighbor); } @@ -776,7 +776,7 @@ impl StackerDBSync { } } } - Ok(self.connected_replicas.len() > 0) + Ok(!self.connected_replicas.is_empty()) } /// Finish up connecting to our replicas. @@ -866,7 +866,7 @@ impl StackerDBSync { return Ok(false); } - if self.connected_replicas.len() == 0 { + if self.connected_replicas.is_empty() { // no one to talk to debug!( "{:?}: {}: connect_try_finish: no valid replicas", @@ -996,7 +996,7 @@ impl StackerDBSync { /// Return Ok(true) if we processed all requested chunks /// Return Ok(false) if there are still some requests to make pub fn getchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_fetch_priorities.len() == 0 { + if self.chunk_fetch_priorities.is_empty() { // done debug!( "{:?}: {}: getchunks_begin: no chunks prioritized", @@ -1083,7 +1083,7 @@ impl StackerDBSync { self.next_chunk_fetch_priority = cur_priority; - Ok(self.chunk_fetch_priorities.len() == 0) + Ok(self.chunk_fetch_priorities.is_empty()) } /// Collect chunk replies from neighbors @@ -1157,13 +1157,13 @@ impl StackerDBSync { /// Returns true if there are no more chunks to push. /// Returns false if there are pub fn pushchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_push_priorities.len() == 0 && self.push_round != self.rounds { + if self.chunk_push_priorities.is_empty() && self.push_round != self.rounds { // only do this once per round let priorities = self.make_chunk_push_schedule(&network)?; self.chunk_push_priorities = priorities; self.push_round = self.rounds; } - if self.chunk_push_priorities.len() == 0 { + if self.chunk_push_priorities.is_empty() { // done debug!( "{:?}:{}: pushchunks_begin: no chunks prioritized", @@ -1334,7 +1334,7 @@ impl StackerDBSync { network: &PeerNetwork, ) -> Result<(), net_error> { // figure out the new expected versions - let mut expected_versions = vec![0u32; self.num_slots as usize]; + let mut expected_versions = vec![0u32; self.num_slots]; for (_, chunk_inv) in self.chunk_invs.iter() { for (slot_id, slot_version) in chunk_inv.slot_versions.iter().enumerate() { expected_versions[slot_id] = (*slot_version).max(expected_versions[slot_id]); diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 5f6e8a7bed..511201f245 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -135,7 +135,7 @@ fn setup_stackerdb(peer: &mut TestPeer, idx: usize, fill: bool, num_slots: usize thread_rng().fill(&mut inner_data[..]); let mut chunk_data = StackerDBChunkData::new(i as u32, 1, inner_data); - chunk_data.sign(&pks[i as usize]).unwrap(); + chunk_data.sign(&pks[i]).unwrap(); let chunk_md = chunk_data.get_slot_metadata(); tx.try_replace_chunk(contract_id, &chunk_md, &chunk_data.data) @@ -167,13 +167,13 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { let chunk_metadata = peer .network .stackerdbs - .get_slot_metadata(&peer.config.stacker_dbs[idx], i as u32) + .get_slot_metadata(&peer.config.stacker_dbs[idx], i) 
.unwrap() .unwrap(); let chunk = peer .network .stackerdbs - .get_latest_chunk(&peer.config.stacker_dbs[idx], i as u32) + .get_latest_chunk(&peer.config.stacker_dbs[idx], i) .unwrap() .unwrap_or(vec![]); ret.push((chunk_metadata, chunk)); @@ -246,14 +246,14 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { assert_eq!(peer_1_db_chunks.len(), 1); assert_eq!(peer_1_db_chunks[0].0.slot_id, 0); assert_eq!(peer_1_db_chunks[0].0.slot_version, 1); - assert!(peer_1_db_chunks[0].1.len() > 0); + assert!(!peer_1_db_chunks[0].1.is_empty()); // verify that peer 2 did NOT get the data let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); assert_eq!(peer_2_db_chunks.len(), 1); assert_eq!(peer_2_db_chunks[0].0.slot_id, 0); assert_eq!(peer_2_db_chunks[0].0.slot_version, 0); - assert!(peer_2_db_chunks[0].1.len() == 0); + assert!(peer_2_db_chunks[0].1.is_empty()); let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); @@ -362,14 +362,14 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { assert_eq!(peer_1_db_chunks.len(), 1); assert_eq!(peer_1_db_chunks[0].0.slot_id, 0); assert_eq!(peer_1_db_chunks[0].0.slot_version, 1); - assert!(peer_1_db_chunks[0].1.len() > 0); + assert!(!peer_1_db_chunks[0].1.is_empty()); // verify that peer 2 did NOT get the data let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); assert_eq!(peer_2_db_chunks.len(), 1); assert_eq!(peer_2_db_chunks[0].0.slot_id, 0); assert_eq!(peer_2_db_chunks[0].0.slot_version, 0); - assert!(peer_2_db_chunks[0].1.len() == 0); + assert!(peer_2_db_chunks[0].1.is_empty()); let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); @@ -404,7 +404,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); - if sync_res.stale.len() > 0 { + if !sync_res.stale.is_empty() { peer_1_stale = true; } } @@ -433,7 +433,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); - if sync_res.stale.len() > 0 { + if !sync_res.stale.is_empty() { peer_2_stale = true; } } @@ -593,7 +593,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port for i in 0..10 { assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); - assert!(peer_1_db_chunks[i].1.len() > 0); + assert!(!peer_1_db_chunks[i].1.is_empty()); } // verify that peer 2 did NOT get the data @@ -602,7 +602,7 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port for i in 0..10 { assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); - assert!(peer_2_db_chunks[i].1.len() == 0); + assert!(peer_2_db_chunks[i].1.is_empty()); } let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); @@ -725,7 +725,7 @@ fn test_stackerdb_push_relayer() { for i in 0..10 { assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); - assert!(peer_1_db_chunks[i].1.len() > 0); + assert!(!peer_1_db_chunks[i].1.is_empty()); } // verify that peer 2 and 3 did NOT get the data @@ -734,7 +734,7 @@ fn test_stackerdb_push_relayer() { for i in 0..10 { assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); 
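These StackerDB tests also drop `as u32` / `as usize` casts where the loop variable already has the type the callee expects. A sketch of the pattern (get_slot is a hypothetical function, not from the patch):

```rust
fn get_slot(slot_id: u32) -> String {
    format!("slot-{slot_id}")
}

fn main() {
    let keys = ["k0", "k1", "k2"];

    // `i` is already usize here, so `keys[i as usize]` would be a redundant cast.
    for i in 0..keys.len() {
        let _ = keys[i];
    }

    // Iterating over a u32 range avoids `i as u32` at each call site.
    for i in 0u32..3 {
        let _ = get_slot(i);
    }
}
```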
assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); - assert!(peer_2_db_chunks[i].1.len() == 0); + assert!(peer_2_db_chunks[i].1.is_empty()); } let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); @@ -742,7 +742,7 @@ fn test_stackerdb_push_relayer() { for i in 0..10 { assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); - assert!(peer_3_db_chunks[i].1.len() == 0); + assert!(peer_3_db_chunks[i].1.is_empty()); } let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); @@ -921,7 +921,7 @@ fn test_stackerdb_push_relayer_late_chunks() { for i in 0..10 { assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); - assert!(peer_1_db_chunks[i].1.len() > 0); + assert!(!peer_1_db_chunks[i].1.is_empty()); } // verify that peer 2 and 3 did NOT get the data @@ -930,7 +930,7 @@ fn test_stackerdb_push_relayer_late_chunks() { for i in 0..10 { assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); - assert!(peer_2_db_chunks[i].1.len() == 0); + assert!(peer_2_db_chunks[i].1.is_empty()); } let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); @@ -938,7 +938,7 @@ fn test_stackerdb_push_relayer_late_chunks() { for i in 0..10 { assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); - assert!(peer_3_db_chunks[i].1.len() == 0); + assert!(peer_3_db_chunks[i].1.is_empty()); } let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); @@ -1124,7 +1124,7 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, for j in 0..10 { assert_eq!(peer_db_chunks[j].0.slot_id, j as u32); assert_eq!(peer_db_chunks[j].0.slot_version, 1); - assert!(peer_db_chunks[j].1.len() > 0); + assert!(!peer_db_chunks[j].1.is_empty()); } } else { // everyone else gets nothing @@ -1136,7 +1136,7 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, for j in 0..10 { assert_eq!(peer_db_chunks[j].0.slot_id, j as u32); assert_eq!(peer_db_chunks[j].0.slot_version, 0); - assert!(peer_db_chunks[j].1.len() == 0); + assert!(peer_db_chunks[j].1.is_empty()); } } diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a479dad07a..7469d3c33b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -102,7 +102,7 @@ impl NakamotoDownloadStateMachine { } } -impl<'a> NakamotoStagingBlocksConnRef<'a> { +impl NakamotoStagingBlocksConnRef<'_> { pub fn load_nakamoto_tenure( &self, tip: &StacksBlockId, @@ -474,8 +474,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .unwrap() .unwrap(); - assert!(unconfirmed_tenure.len() > 0); - assert!(last_confirmed_tenure.len() > 0); + assert!(!unconfirmed_tenure.is_empty()); + assert!(!last_confirmed_tenure.is_empty()); assert_eq!( unconfirmed_tenure.first().as_ref().unwrap().block_id(), @@ -1182,7 +1182,7 @@ fn test_tenure_start_end_from_inventory() { for (i, wt) in wanted_tenures.iter().enumerate() { if i >= (rc_len - 1).into() { // nothing here - assert!(available.get(&wt.tenure_id_consensus_hash).is_none()); + assert!(!available.contains_key(&wt.tenure_id_consensus_hash)); continue; } diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 4bcf52605c..3aec8d5e5d 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -316,7 +316,7 @@ fn test_http_request_type_codec() { 
str::from_utf8(&expected_bytes).unwrap() ); - if expected_http_body.len() > 0 { + if !expected_http_body.is_empty() { expected_http_preamble.set_content_type(HttpContentType::Bytes); expected_http_preamble.set_content_length(expected_http_body.len() as u32) } @@ -767,11 +767,11 @@ fn test_http_response_type_codec() { match preamble { StacksHttpPreamble::Response(ref mut req) => { assert_eq!(req.headers.len(), 5); - assert!(req.headers.get("access-control-allow-headers").is_some()); - assert!(req.headers.get("access-control-allow-methods").is_some()); - assert!(req.headers.get("access-control-allow-origin").is_some()); - assert!(req.headers.get("server").is_some()); - assert!(req.headers.get("date").is_some()); + assert!(req.headers.contains_key("access-control-allow-headers")); + assert!(req.headers.contains_key("access-control-allow-methods")); + assert!(req.headers.contains_key("access-control-allow-origin")); + assert!(req.headers.contains_key("server")); + assert!(req.headers.contains_key("date")); req.headers.clear(); } StacksHttpPreamble::Request(_) => { diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index e31b6dc593..aed43bdcba 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1248,7 +1248,7 @@ fn test_inv_sync_start_reward_cycle() { let mut peer_1 = TestPeer::new(peer_1_config); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; for i in 0..num_blocks { let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); peer_1.next_burnchain_block(burn_ops.clone()); @@ -1298,7 +1298,7 @@ fn test_inv_sync_check_peer_epoch2x_synced() { let mut peer_1 = TestPeer::new(peer_1_config); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; for i in 0..num_blocks { let (burn_ops, stacks_block, microblocks) = peer_1.make_default_tenure(); peer_1.next_burnchain_block(burn_ops.clone()); @@ -1340,7 +1340,7 @@ fn test_sync_inv_2_peers_plain() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) @@ -1517,7 +1517,7 @@ fn test_sync_inv_2_peers_stale() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) @@ -1625,7 +1625,7 @@ fn test_sync_inv_2_peers_unstable() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 2) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { let sn = @@ -1838,7 +1838,7 @@ fn test_sync_inv_2_peers_different_pox_vectors() { peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); - let num_blocks = (GETPOXINV_MAX_BITLEN * 3) as u64; + let num_blocks = GETPOXINV_MAX_BITLEN * 3; let first_stacks_block_height = { let sn = diff --git a/stackslib/src/net/tests/inv/nakamoto.rs 
b/stackslib/src/net/tests/inv/nakamoto.rs index 5f889cde3e..3a29d453ae 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -596,7 +596,7 @@ fn check_inv_state( tenure_inv.get(bit.try_into().unwrap()).unwrap_or(false) }; - let burn_block_height = (*tenure_rc as u64) * u64::from(rc_len) + (bit as u64); + let burn_block_height = *tenure_rc * u64::from(rc_len) + (bit as u64); if burn_block_height < nakamoto_start_burn_height { // inv doesn't cover epoch 2 assert!( @@ -912,7 +912,7 @@ fn test_nakamoto_inv_sync_state_machine() { .map(|e_id| *e_id) .collect(); - if event_ids.len() > 0 && other_event_ids.len() > 0 { + if !event_ids.is_empty() && !other_event_ids.is_empty() { break; } } @@ -938,7 +938,7 @@ fn test_nakamoto_inv_sync_state_machine() { loop { let _ = other_peer.step_with_ibd(false); let ev_ids: Vec<_> = other_peer.network.iter_peer_event_ids().collect(); - if ev_ids.len() == 0 { + if ev_ids.is_empty() { // disconnected panic!("Disconnected"); } @@ -1043,7 +1043,7 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .map(|e_id| *e_id) .collect(); - if event_ids.len() > 0 && other_event_ids.len() > 0 { + if !event_ids.is_empty() && !other_event_ids.is_empty() { break; } } diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index d3f30aca19..558dddb63e 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -307,7 +307,7 @@ fn test_mempool_sync_2_peers() { // peer 2 has none of the old ones for tx in peer_2_mempool_txs { assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - assert!(old_txs.get(&tx.tx.txid()).is_none()); + assert!(!old_txs.contains_key(&tx.tx.txid())); } } @@ -1144,7 +1144,7 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { .map(|e_id| *e_id) .collect(); - if event_ids.len() > 0 && other_event_ids.len() > 0 { + if !event_ids.is_empty() && !other_event_ids.is_empty() { break; } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 53d6ec9fa1..3a07ed006c 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -352,10 +352,10 @@ impl NakamotoBootPlan { /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
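The boot_nakamoto_peers signatures just below elide their named lifetime with `'_`, the same clippy::needless_lifetimes cleanup applied throughout this patch. A minimal sketch under simplified types (Observer and Peer are stand-ins, not the real TestEventObserver/TestPeer):

```rust
struct Observer;
struct Peer<'a> {
    observer: Option<&'a Observer>,
}

// Before: fn boot<'a>(observer: Option<&'a Observer>) -> Peer<'a>
// After: with a single input lifetime, the anonymous '_ in the return type
// says "borrowed from the input" without naming the lifetime.
fn boot(observer: Option<&Observer>) -> Peer<'_> {
    Peer { observer }
}

fn main() {
    let obs = Observer;
    let peer = boot(Some(&obs));
    assert!(peer.observer.is_some());
}
```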
- fn boot_nakamoto_peers<'a>( + fn boot_nakamoto_peers( mut self, - observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec>) { + observer: Option<&TestEventObserver>, + ) -> (TestPeer<'_>, Vec>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); @@ -662,11 +662,11 @@ impl NakamotoBootPlan { debug!("========================\n\n"); } - pub fn boot_into_nakamoto_peers<'a>( + pub fn boot_into_nakamoto_peers( self, boot_plan: Vec, - observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec>) { + observer: Option<&TestEventObserver>, + ) -> (TestPeer<'_>, Vec>) { let test_signers = self.test_signers.clone(); let pox_constants = self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); @@ -690,7 +690,7 @@ impl NakamotoBootPlan { match plan_tenure { NakamotoBootTenure::NoSortition(boot_steps) => { - assert!(boot_steps.len() > 0); + assert!(!boot_steps.is_empty()); // just extend the last sortition let (burn_ops, tenure_change_extend, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::Extended); @@ -732,7 +732,7 @@ impl NakamotoBootPlan { match next_step { NakamotoBootStep::TenureExtend(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); if let Some(last_block) = last_block_opt { let tenure_extension = tenure_change.extend( next_consensus_hash.clone(), @@ -749,7 +749,7 @@ impl NakamotoBootPlan { debug!("\n\nExtend current tenure in empty tenure {} (blocks so far: {}, blocks_since_last_tenure = {}, steps so far: {})\n\n", &next_consensus_hash, blocks_so_far.len(), blocks_since_last_tenure, i); } NakamotoBootStep::Block(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); debug!("\n\nMake block {} with {} transactions in empty tenure {}\n\n", blocks_so_far.len(), transactions.len(), &next_consensus_hash); txs.extend_from_slice(&transactions[..]); num_expected_transactions += transactions.len(); @@ -789,7 +789,7 @@ impl NakamotoBootPlan { all_blocks.push(blocks); } NakamotoBootTenure::Sortition(boot_steps) => { - assert!(boot_steps.len() > 0); + assert!(!boot_steps.is_empty()); let (burn_ops, mut tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); let (burn_ht, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); @@ -833,7 +833,7 @@ impl NakamotoBootPlan { match next_step { NakamotoBootStep::TenureExtend(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); if let Some(last_block) = last_block_opt { let tenure_extension = tenure_change.extend( consensus_hash.clone(), @@ -850,7 +850,7 @@ impl NakamotoBootPlan { debug!("\n\nExtend current tenure {} (blocks so far: {}, steps so far: {})\n\n", &consensus_hash, blocks_so_far.len(), i); } NakamotoBootStep::Block(transactions) => { - assert!(transactions.len() > 0); + assert!(!transactions.is_empty()); debug!("\n\nMake block {} with {} transactions in tenure {}\n\n", blocks_so_far.len(), transactions.len(), &consensus_hash); txs.extend_from_slice(&transactions[..]); num_expected_transactions += transactions.len(); @@ -1017,11 +1017,11 @@ impl NakamotoBootPlan { (peer, other_peers) } - pub fn boot_into_nakamoto_peer<'a>( + pub fn boot_into_nakamoto_peer( self, boot_plan: Vec, - observer: Option<&'a TestEventObserver>, - ) -> TestPeer<'a> { + observer: Option<&TestEventObserver>, + ) -> TestPeer<'_> { self.boot_into_nakamoto_peers(boot_plan, 
observer).0 } } diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index 6a1ef7a4e9..d1be0fdf70 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -581,7 +581,7 @@ fn test_step_walk_1_neighbor_bootstrapping() { assert_eq!(w.result.replaced_neighbors.len(), 0); // peer 2 never gets added to peer 1's frontier - assert!(w.frontier.get(&neighbor_2.addr).is_none()); + assert!(!w.frontier.contains_key(&neighbor_2.addr)); } None => {} }; @@ -597,7 +597,7 @@ fn test_step_walk_1_neighbor_bootstrapping() { i += 1; } - debug!("Completed walk round {} step(s)", i); + debug!("Completed walk round {i} step(s)"); // peer 1 contacted peer 2 let stats_1 = peer_1 @@ -673,7 +673,7 @@ fn test_step_walk_1_neighbor_behind() { assert_eq!(w.result.replaced_neighbors.len(), 0); // peer 1 never gets added to peer 2's frontier - assert!(w.frontier.get(&neighbor_1.addr).is_none()); + assert!(!w.frontier.contains_key(&neighbor_1.addr)); } None => {} }; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index c4d4f7ee31..1106721e38 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -819,7 +819,7 @@ fn http_rpc(peer_http: u16, request: StacksHttpRequest) -> Result { - if resp.len() == 0 { + if resp.is_empty() { test_debug!("Client did not receive any data"); return Err(net_error::PermanentlyDrained); } @@ -1200,11 +1200,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( let original_block_data = original_blocks_and_microblocks.borrow(); let mut next_idx = idx.borrow_mut(); let data_to_push = { - if block_data.len() > 0 { - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } else { + if block_data.is_empty() { // start over (can happen if a message gets // dropped due to a timeout) test_debug!("Reset block transmission (possible timeout)"); @@ -1213,6 +1209,10 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( let (consensus_hash, block, microblocks) = block_data[*next_idx].clone(); Some((consensus_hash, block, microblocks)) + } else { + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) } }; @@ -1259,7 +1259,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( if pushed_block && pushed_microblock { block_data.remove(*next_idx); - if block_data.len() > 0 { + if !block_data.is_empty() { *next_idx = thread_rng().gen::() % block_data.len(); } *sent_blocks = false; @@ -2123,8 +2123,8 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { let ((tip_consensus_hash, tip_block, _), idx) = { let block_data = blocks_and_microblocks.borrow(); - let idx = blocks_idx.borrow(); - (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) + let idx: usize = *blocks_idx.borrow(); + (block_data[idx.saturating_sub(1)].clone(), idx) }; if idx > 0 { @@ -2596,7 +2596,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { peers[1].network.pending_messages.iter() { debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); - if pending.len() >= 1 { + if !pending.is_empty() { update_sortition = true; } } diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 231e0a91af..e7f1c256a4 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -1149,7 +1149,7 @@ impl 
PeerNetwork {
     unsolicited: HashMap<usize, Vec<StacksMessage>>,
 ) -> PendingMessages {
     unsolicited.into_iter().filter_map(|(event_id, messages)| {
-        if messages.len() == 0 {
+        if messages.is_empty() {
             // no messages for this event
             return None;
         }
@@ -1256,7 +1256,7 @@ impl PeerNetwork {
                 }
                 true
             });
-            messages.len() > 0
+            !messages.is_empty()
         });
         unsolicited
     }
@@ -1283,7 +1283,7 @@ impl PeerNetwork {
         buffer: bool,
     ) -> HashMap<(usize, NeighborKey), Vec<StacksMessage>> {
         unsolicited.retain(|(event_id, neighbor_key), messages| {
-            if messages.len() == 0 {
+            if messages.is_empty() {
                 // no messages for this node
                 return false;
             }
@@ -1319,7 +1319,7 @@ impl PeerNetwork {
             }
             true
         });
-            messages.len() > 0
+            !messages.is_empty()
         });
         unsolicited
     }
diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs
index dab873fa90..2e9e5c4b1c 100644
--- a/stackslib/src/util_lib/db.rs
+++ b/stackslib/src/util_lib/db.rs
@@ -162,15 +162,15 @@ impl From<sqlite_error> for Error {
 }
 
 pub trait FromRow<T> {
-    fn from_row<'a>(row: &'a Row) -> Result<T, Error>;
+    fn from_row(row: &Row) -> Result<T, Error>;
 }
 
 pub trait FromColumn<T> {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<T, Error>;
+    fn from_column(row: &Row, column_name: &str) -> Result<T, Error>;
 }
 
 impl FromRow<u64> for u64 {
-    fn from_row<'a>(row: &'a Row) -> Result<u64, Error> {
+    fn from_row(row: &Row) -> Result<u64, Error> {
         let x: i64 = row.get(0)?;
         if x < 0 {
             return Err(Error::ParseError);
@@ -180,28 +180,28 @@ impl FromRow<u64> for u64 {
 }
 
 impl FromRow<u32> for u32 {
-    fn from_row<'a>(row: &'a Row) -> Result<u32, Error> {
+    fn from_row(row: &Row) -> Result<u32, Error> {
         let x: u32 = row.get(0)?;
         Ok(x)
     }
 }
 
 impl FromRow<String> for String {
-    fn from_row<'a>(row: &'a Row) -> Result<String, Error> {
+    fn from_row(row: &Row) -> Result<String, Error> {
         let x: String = row.get(0)?;
         Ok(x)
     }
 }
 
 impl FromRow<Vec<u8>> for Vec<u8> {
-    fn from_row<'a>(row: &'a Row) -> Result<Vec<u8>, Error> {
+    fn from_row(row: &Row) -> Result<Vec<u8>, Error> {
         let x: Vec<u8> = row.get(0)?;
         Ok(x)
     }
 }
 
 impl FromColumn<u64> for u64 {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<u64, Error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<u64, Error> {
         let x: i64 = row.get(column_name)?;
         if x < 0 {
             return Err(Error::ParseError);
@@ -211,7 +211,7 @@ impl FromColumn<u64> for u64 {
 }
 
 impl FromRow<StacksAddress> for StacksAddress {
-    fn from_row<'a>(row: &'a Row) -> Result<StacksAddress, Error> {
+    fn from_row(row: &Row) -> Result<StacksAddress, Error> {
         let addr_str: String = row.get(0)?;
         let addr = StacksAddress::from_string(&addr_str).ok_or(Error::ParseError)?;
         Ok(addr)
@@ -219,7 +219,7 @@ impl FromRow<StacksAddress> for StacksAddress {
 }
 
 impl FromColumn<Option<u64>> for u64 {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<Option<u64>, Error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<Option<u64>, Error> {
         let x: Option<i64> = row.get(column_name)?;
         match x {
             Some(x) => {
@@ -234,31 +234,28 @@ impl FromColumn<Option<u64>> for u64 {
 }
 
 impl FromRow<i64> for i64 {
-    fn from_row<'a>(row: &'a Row) -> Result<i64, Error> {
+    fn from_row(row: &Row) -> Result<i64, Error> {
         let x: i64 = row.get(0)?;
         Ok(x)
     }
 }
 
 impl FromColumn<i64> for i64 {
-    fn from_column<'a>(row: &'a Row, column_name: &str) -> Result<i64, Error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<i64, Error> {
         let x: i64 = row.get(column_name)?;
         Ok(x)
     }
 }
 
 impl FromColumn<QualifiedContractIdentifier> for QualifiedContractIdentifier {
-    fn from_column<'a>(
-        row: &'a Row,
-        column_name: &str,
-    ) -> Result<QualifiedContractIdentifier, Error> {
+    fn from_column(row: &Row, column_name: &str) -> Result<QualifiedContractIdentifier, Error> {
         let value: String = row.get(column_name)?;
         QualifiedContractIdentifier::parse(&value).map_err(|_| Error::ParseError)
     }
 }
 
 impl FromRow<bool> for bool {
-    fn from_row<'a>(row: &'a Row) -> Result<bool, Error> {
+    fn from_row(row: &Row) -> Result<bool, Error> {
         let x: bool = row.get(0)?;
         Ok(x)
     }
@@ -266,7 +263,7 @@ impl FromRow<bool> for bool {
 
 /// Make public keys loadable from a sqlite database
database impl FromColumn for Secp256k1PublicKey { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let pubkey_hex: String = row.get(column_name)?; let pubkey = Secp256k1PublicKey::from_hex(&pubkey_hex).map_err(|_e| Error::ParseError)?; Ok(pubkey) @@ -275,7 +272,7 @@ impl FromColumn for Secp256k1PublicKey { /// Make private keys loadable from a sqlite database impl FromColumn for Secp256k1PrivateKey { - fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { + fn from_column(row: &Row, column_name: &str) -> Result { let privkey_hex: String = row.get(column_name)?; let privkey = Secp256k1PrivateKey::from_hex(&privkey_hex).map_err(|_e| Error::ParseError)?; @@ -510,14 +507,14 @@ where let mut rows = stmt.query(sql_args)?; let mut row_data = vec![]; while let Some(row) = rows.next().map_err(|e| Error::SqliteError(e))? { - if row_data.len() > 0 { + if !row_data.is_empty() { return Err(Error::Overflow); } let i: i64 = row.get(0)?; row_data.push(i); } - if row_data.len() == 0 { + if row_data.is_empty() { return Err(Error::NotFoundError); } @@ -630,7 +627,7 @@ impl<'a, C, T: MarfTrieId> IndexDBConn<'a, C, T> { } } -impl<'a, C, T: MarfTrieId> Deref for IndexDBConn<'a, C, T> { +impl Deref for IndexDBConn<'_, C, T> { type Target = DBConn; fn deref(&self) -> &DBConn { self.conn() @@ -664,7 +661,7 @@ pub fn tx_busy_handler(run_count: i32) -> bool { /// Begin an immediate-mode transaction, and handle busy errors with exponential backoff. /// Handling busy errors when the tx begins is preferable to doing it when the tx commits, since /// then we don't have to worry about any extra rollback logic. -pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Error> { +pub fn tx_begin_immediate(conn: &mut Connection) -> Result, Error> { tx_begin_immediate_sqlite(conn).map_err(Error::from) } @@ -672,7 +669,7 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Erro /// Handling busy errors when the tx begins is preferable to doing it when the tx commits, since /// then we don't have to worry about any extra rollback logic. /// Same as `tx_begin_immediate` except that it returns a rusqlite error.
-pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result, sqlite_error> { +pub fn tx_begin_immediate_sqlite(conn: &mut Connection) -> Result, sqlite_error> { conn.busy_handler(Some(tx_busy_handler))?; let tx = Transaction::new(conn, TransactionBehavior::Immediate)?; update_lock_table(tx.deref()); @@ -945,7 +942,7 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { } } -impl<'a, C: Clone, T: MarfTrieId> Drop for IndexDBTx<'a, C, T> { +impl Drop for IndexDBTx<'_, C, T> { fn drop(&mut self) { if let Some((ref parent, ref child)) = self.block_linkage { let index_tx = self diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index ac5c0224d8..14882c2fb9 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -216,7 +216,7 @@ pub mod pox4 { ); result .expect("FATAL: failed to execute contract call") - .expect_buff(32 as usize) + .expect_buff(32) .expect("FATAL: expected buff result") }) } diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs index 0486e6bf81..d1fb48c86b 100644 --- a/stackslib/src/util_lib/strings.rs +++ b/stackslib/src/util_lib/strings.rs @@ -58,7 +58,7 @@ pub struct StacksString(Vec); pub struct VecDisplay<'a, T: fmt::Display>(pub &'a [T]); -impl<'a, T: fmt::Display> fmt::Display for VecDisplay<'a, T> { +impl fmt::Display for VecDisplay<'_, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "[")?; for (ix, val) in self.0.iter().enumerate() { @@ -139,7 +139,7 @@ impl StacksMessageCodec for UrlString { } // must be a valid block URL, or empty string - if self.as_bytes().len() > 0 { + if !self.as_bytes().is_empty() { let _ = self.parse_to_block_url()?; } @@ -172,7 +172,7 @@ impl StacksMessageCodec for UrlString { })?; // must be a valid block URL, or empty string - if url.len() > 0 { + if !url.is_empty() { let _ = url.parse_to_block_url()?; } Ok(url) @@ -207,7 +207,7 @@ impl StacksString { // This is 0x20 through 0x7e, inclusive, as well as '\t' and '\n' // TODO: DRY up with vm::representations for c in s.as_bytes().iter() { - if (*c < 0x20 && *c != ('\t' as u8) && *c != ('\n' as u8)) || (*c > 0x7e) { + if (*c < 0x20 && *c != b'\t' && *c != b'\n') || *c > 0x7e { return false; } } @@ -254,7 +254,7 @@ impl UrlString { ))); } - if url.username().len() > 0 || url.password().is_some() { + if !url.username().is_empty() || url.password().is_some() { return Err(codec_error::DeserializeError( "Invalid URL: must not contain a username/password".to_string(), )); diff --git a/stx-genesis/src/lib.rs b/stx-genesis/src/lib.rs index 883eb8302b..27eba59e16 100644 --- a/stx-genesis/src/lib.rs +++ b/stx-genesis/src/lib.rs @@ -212,10 +212,10 @@ mod tests { #[test] fn test_names_read() { for name in GenesisData::new(false).read_names() { - assert!(name.owner.len() > 0); + assert!(!name.owner.is_empty()); } for name in GenesisData::new(true).read_names() { - assert!(name.owner.len() > 0); + assert!(!name.owner.is_empty()); } } diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 2f71838adb..da1668cdd2 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -162,6 +162,7 @@ pub struct MinedNakamotoBlockEvent { pub block_size: u64, pub cost: ExecutionCost, pub miner_signature: MessageSignature, + pub miner_signature_hash: Sha512Trunc256Sum, pub signer_signature_hash: Sha512Trunc256Sum, pub tx_events: 
Vec, pub signer_bitvec: String, @@ -952,9 +953,14 @@ impl ProposalCallbackReceiver for ProposalCallbackHandler { } impl MemPoolEventDispatcher for EventDispatcher { - fn mempool_txs_dropped(&self, txids: Vec, reason: MemPoolDropReason) { + fn mempool_txs_dropped( + &self, + txids: Vec, + new_txid: Option, + reason: MemPoolDropReason, + ) { if !txids.is_empty() { - self.process_dropped_mempool_txs(txids, reason) + self.process_dropped_mempool_txs(txids, new_txid, reason) } } @@ -1528,6 +1534,7 @@ impl EventDispatcher { cost: consumed.clone(), tx_events, miner_signature: block.header.miner_signature, + miner_signature_hash: block.header.miner_signature_hash(), signer_signature_hash: block.header.signer_signature_hash(), signer_signature: block.header.signer_signature.clone(), signer_bitvec, @@ -1582,7 +1589,12 @@ impl EventDispatcher { } } - pub fn process_dropped_mempool_txs(&self, txs: Vec, reason: MemPoolDropReason) { + pub fn process_dropped_mempool_txs( + &self, + txs: Vec, + new_txid: Option, + reason: MemPoolDropReason, + ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); @@ -1595,10 +1607,22 @@ impl EventDispatcher { .map(|tx| serde_json::Value::String(format!("0x{tx}"))) .collect(); - let payload = json!({ - "dropped_txids": serde_json::Value::Array(dropped_txids), - "reason": reason.to_string(), - }); + let payload = match new_txid { + Some(id) => { + json!({ + "dropped_txids": serde_json::Value::Array(dropped_txids), + "reason": reason.to_string(), + "new_txid": format!("0x{}", &id), + }) + } + None => { + json!({ + "dropped_txids": serde_json::Value::Array(dropped_txids), + "reason": reason.to_string(), + "new_txid": null, + }) + } + }; for observer in interested_observers.iter() { observer.send_dropped_mempool_txs(&payload); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55e9c7d996..1b84b9c0cd 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -9401,7 +9401,7 @@ fn v3_blockbyheight_api_endpoint() { assert!(block_data.status().is_success()); let block_bytes_vec = block_data.bytes().unwrap().to_vec(); - assert!(block_bytes_vec.len() > 0); + assert!(!block_bytes_vec.is_empty()); // does the block id of the returned blob match?
let block_id = NakamotoBlockHeader::consensus_deserialize(&mut block_bytes_vec.as_slice()) @@ -9866,7 +9866,7 @@ fn test_shadow_recovery() { // fix node let shadow_blocks = shadow_chainstate_repair(&mut chainstate, &mut sortdb).unwrap(); - assert!(shadow_blocks.len() > 0); + assert!(!shadow_blocks.is_empty()); wait_for(30, || { let Some(info) = get_chain_info_opt(&naka_conf) else { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 482cfd1fc4..a3ce78eb24 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -578,7 +578,7 @@ pub mod test_observer { PROPOSAL_RESPONSES.lock().unwrap().clear(); } - /// Parse the StacksTransactions from a block (does not include burn ops) + /// Parse the StacksTransactions from a block (does not include burn ops or phantom txs) /// panics on any failures to parse pub fn parse_transactions(block: &serde_json::Value) -> Vec { block .unwrap() .iter() .filter_map(|tx_json| { + // Filter out burn ops if let Some(burnchain_op_val) = tx_json.get("burnchain_op") { if !burnchain_op_val.is_null() { return None; } } + // Filter out phantom txs let tx_hex = tx_json.get("raw_tx").unwrap().as_str().unwrap(); let tx_bytes = hex_bytes(&tx_hex[2..]).unwrap(); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if tx.is_phantom() { + return None; + } Some(tx) }) .collect() } diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 432b990667..6365579dfd 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -591,24 +591,21 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - if accepted.signer_signature_hash == *signer_signature_hash - && expected_signers.iter().any(|pk| { - pk.verify( - accepted.signer_signature_hash.bits(), - &accepted.signature, - ) - .expect("Failed to verify signature") - }) - { - Some(accepted.signature) - } else { - None - } + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message + { + if accepted.signer_signature_hash == *signer_signature_hash + && expected_signers.iter().any(|pk| { + pk.verify( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to verify signature") + }) + { + return Some(accepted.signature); } - _ => None, } + None }) .collect::>(); Ok(signatures.len() > expected_signers.len() * 7 / 10) @@ -672,11 +669,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockAccepted { - let block_response = self.get_latest_block_response(slot_id); - match block_response { - BlockResponse::Accepted(accepted) => accepted, - _ => panic!("Latest block response from slot #{slot_id} isn't a block acceptance"), - } + self.get_latest_block_response(slot_id) + .as_block_accepted() + .unwrap_or_else(|| panic!("Latest block response from slot #{slot_id} isn't a block acceptance")) + .clone() } /// Get /v2/info from the node diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5200883667..044e5f0cbc 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -42,7 +42,8 @@ use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; use
stacks::net::api::postblock_proposal::{ - ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, TEST_VALIDATE_STALL, + BlockValidateResponse, ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, + TEST_VALIDATE_STALL, }; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; @@ -62,7 +63,7 @@ use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, - TEST_SKIP_BLOCK_BROADCAST, + TEST_SKIP_BLOCK_BROADCAST, TEST_STALL_BLOCK_VALIDATION_SUBMISSION, }; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -5795,15 +5796,13 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - non_ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + return non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }); } + None }) .collect::>(); Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) @@ -6015,15 +6014,13 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - non_ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + return non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }); } + None }) .collect::>(); Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) @@ -6257,16 +6254,12 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - if block.header.signer_signature_hash() == accepted.signer_signature_hash { - Some(accepted.signature) - } else { - None - } + if let SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) = message { + if block.header.signer_signature_hash() == accepted.signer_signature_hash { + return Some(accepted.signature); } - _ => None, } + None }) .collect::>(); Ok(signatures.len() >= num_signers * 7 / 10) @@ -9528,3 +9521,534 @@ fn global_acceptance_depends_on_block_announcement() { ); assert_ne!(sister_block, proposed_block); } + +/// Test a scenario where: +/// Two miners boot to Nakamoto. +/// Sortition occurs. Miner 1 wins. +/// Miner 1 proposes a block N +/// Signers accept and the stacks tip advances to N +/// Sortition occurs. Miner 2 wins. 
+/// Miner 2 proposes block N+1 +/// Sortition occurs. Miner 1 wins. +/// Miner 1 proposes block N+1' +/// N+1 passes signers initial checks and is submitted to the node for validation. +/// N+1' arrives at the signers and passes initial checks, but BEFORE N+1' can be submitted for validation: +/// N+1 finishes being processed at the node and sits in the signers' queue. +/// Signers THEN submit N+1' for node validation. +/// Signers process N+1 validation response ok, followed immediately by the N+1' validation response ok. +/// Signers broadcast N+1 acceptance +/// Signers broadcast N+1' rejection +/// Miner 2 proposes a new N+2 block built upon N+1 +/// Asserts: +/// - N+1 is signed and broadcast +/// - N+1' is rejected as a sortition view mismatch +/// - The tip advances to N+1 (Signed by Miner 1) +/// - The tip advances to N+2 (Signed by Miner 2) +#[test] +#[ignore] +fn no_reorg_due_to_successive_block_validation_ok() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 1; + let sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + // Let's make sure we never time out, since we need to stall some things to force our scenario + signer_config.block_proposal_validation_timeout = Duration::from_secs(u64::MAX); + signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(u64::MAX); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(u64::MAX); + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); +
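+ // Partition the event observers: observers on even ports (and the default test observer) stay on node 1; the rest are moved to node 2's listener list below.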
config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_mined_blocks: blocks_mined2, + naka_rejected_blocks: rl2_rejections, + naka_proposed_blocks: rl2_proposals, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
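+ // (Miner 2's commits are re-enabled below, once Miner 1 has proposed N+1.)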
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for bootstrapped node to catch up to the miner"); + + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + + info!("------------------------- Pause Miner 1's Block Commits -------------------------"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Nakamoto Block N (Globally Accepted) -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let info_before = get_chain_info(&conf); + let mined_before = test_observer::get_mined_nakamoto_blocks().len(); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + Ok(get_burn_height() > starting_burn_height + && signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > stacks_height_before + && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && get_chain_info(&conf).stacks_tip_height > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().len() > mined_before) + }, + ) + .expect("Timed out waiting for Miner 1 to Mine Block N"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = blocks.last().unwrap().clone(); + let block_n_signature_hash = block_n.signer_signature_hash; + + let info_after = get_chain_info(&conf); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + assert_eq!(block_n.signer_signature_hash, block_n_signature_hash); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1 + ); + + // ensure we have a successful sortition that miner 1 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + debug!("Miner 1 mined block N: {block_n_signature_hash}"); + + info!("------------------------- Pause Block Validation Response of N+1 -------------------------"); +
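+ // Stall the node's block validation response so that N+1, once submitted, stays pending while N+1' is set up.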
TEST_VALIDATE_STALL.lock().unwrap().replace(true); + let proposals_before_2 = rl2_proposals.load(Ordering::SeqCst); + let rejections_before_2 = rl2_rejections.load(Ordering::SeqCst); + let blocks_before = test_observer::get_blocks().len(); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + + // Force miner 1 to submit a block + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + let mut block_n_1 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.header.signer_signature_hash() != block_n_signature_hash + && proposal + .block + .header + .recover_miner_pk() + .map(|pk| pk == mining_pk_1) + .unwrap() + && proposal.block.header.chain_length == block_n.stacks_height + 1 + { + block_n_1 = Some(proposal.block.clone()); + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for Miner 1 to propose N+1"); + let block_n_1 = block_n_1.expect("Failed to find N+1 proposal"); + let block_n_1_signature_hash = block_n_1.header.signer_signature_hash(); + + assert_eq!( + block_n_1.header.parent_block_id.to_string(), + block_n.block_id + ); + debug!("Miner 1 proposed block N+1: {block_n_1_signature_hash}"); + + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); + + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for Miner 2 to submit its block commit"); + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + + info!("------------------------- Pause Block Validation Submission of N+1'-------------------------"); + TEST_STALL_BLOCK_VALIDATION_SUBMISSION.set(true); + + info!("------------------------- Start Miner 2's Tenure-------------------------"); + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || { + Ok(get_burn_height() > burn_height_before + && rl2_proposals.load(Ordering::SeqCst) > proposals_before_2 + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }, + ) + .expect("Timed out waiting for burn block height to advance and Miner 2 to propose a block"); + + let mut block_n_1_prime = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposal) = message { + if proposal + .block + .header + .recover_miner_pk() + .map(|pk| pk == mining_pk_2) + .unwrap() + { + block_n_1_prime = Some(proposal.block.clone()); + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for Miner 2 to propose N+1'"); + + let block_n_1_prime = block_n_1_prime.expect("Failed to find N+1' proposal"); + let block_n_1_prime_signature_hash = 
block_n_1_prime.header.signer_signature_hash(); + + debug!("Miner 2 proposed N+1': {block_n_1_prime_signature_hash}"); + + // ensure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + // Make sure that the tip is still at block N + assert_eq!(tip.canonical_stacks_tip_height, block_n.stacks_height); + assert_eq!( + tip.canonical_stacks_tip_hash.to_string(), + block_n.block_hash + ); + + // Just a precaution to make sure no stacks blocks have been processed between now and our original pause + assert_eq!(rejections_before_2, rl2_rejections.load(Ordering::SeqCst)); + assert_eq!( + blocks_processed_before_1, + blocks_mined1.load(Ordering::SeqCst) + ); + assert_eq!( + blocks_processed_before_2, + blocks_mined2.load(Ordering::SeqCst) + ); + assert_eq!(blocks_before, test_observer::get_blocks().len()); + + info!("------------------------- Unpause Block Validation Response of N+1 -------------------------"); + + TEST_VALIDATE_STALL.lock().unwrap().replace(false); + + // Verify that the node accepted the proposed N+1, sending back a validate ok response + wait_for(30, || { + for proposal in test_observer::get_proposal_responses() { + if let BlockValidateResponse::Ok(response) = proposal { + if response.signer_signature_hash == block_n_1_signature_hash { + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for validation response for N+1"); + + debug!( + "Node finished processing proposal validation request for N+1: {block_n_1_signature_hash}" + ); + + // This is awful, but we can't guarantee the signers have reached the submission stall, and we need the event order to be as expected. + sleep_ms(5_000); + + info!("------------------------- Unpause Block Validation Submission and Response for N+1' -------------------------"); + TEST_STALL_BLOCK_VALIDATION_SUBMISSION.set(false); + + info!("------------------------- Confirm N+1 is Accepted ------------------------"); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) = message + { + if signer_signature_hash == block_n_1_signature_hash { + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for N+1 acceptance."); + + debug!("Miner 1 mined block N+1: {block_n_1_signature_hash}"); + + info!("------------------------- Confirm N+1' is Rejected ------------------------"); + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + .. + })) = message + { + if signer_signature_hash == block_n_1_prime_signature_hash { + return Ok(true); + } + } else if let SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. + })) = message + { + assert!( + signer_signature_hash != block_n_1_prime_signature_hash, + "N+1' was accepted after N+1 was accepted. This should not be possible."
+ ); + } + } + Ok(false) + }) + .expect("Timed out waiting for N+1' rejection."); + + info!("------------------------- Confirm N+2 Accepted ------------------------"); + + let mut block_n_2 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.header.chain_length == block_n_1.header.chain_length + 1 + && proposal + .block + .header + .recover_miner_pk() + .map(|pk| pk == mining_pk_2) + .unwrap() + { + block_n_2 = Some(proposal.block.clone()); + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for Miner 2 to propose N+2"); + let block_n_2 = block_n_2.expect("Failed to find N+2 proposal"); + + wait_for(30, || { + Ok(get_chain_info(&conf).stacks_tip_height >= block_n_2.header.chain_length) + }) + .expect("Timed out waiting for the stacks tip height to advance"); + + info!("------------------------- Confirm Stacks Chain is As Expected ------------------------"); + let info_after = get_chain_info(&conf); + assert_eq!(info_after.stacks_tip_height, block_n_2.header.chain_length); + assert_eq!(info_after.stacks_tip_height, starting_peer_height + 3); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_2.header.block_hash().to_string() + ); + assert_ne!( + info_after.stacks_tip_consensus_hash, + block_n_1.header.consensus_hash + ); + assert_eq!( + info_after.stacks_tip_consensus_hash, + block_n_2.header.consensus_hash + ); + assert_eq!( + block_n_2.header.parent_block_id, + block_n_1.header.block_id() + ); + assert_eq!( + block_n_1.header.parent_block_id.to_string(), + block_n.block_id + ); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +}
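The ordering in this test is forced by two test-only stall points: TEST_VALIDATE_STALL, which holds the node's block validation response, and TEST_STALL_BLOCK_VALIDATION_SUBMISSION, which holds the signers' submission of a proposal for validation. Below is a minimal sketch of that stall-flag pattern using only the Rust standard library; the flag name and helper are illustrative assumptions, not the crate's actual implementation.

use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;

// Hypothetical stall flag: the test flips it, and the code under test spins on it.
static STALL_VALIDATION_SUBMISSION: AtomicBool = AtomicBool::new(false);

// Called by the code under test just before it would submit a block for
// validation; returns only once the test has cleared the flag.
fn wait_if_stalled() {
    while STALL_VALIDATION_SUBMISSION.load(Ordering::SeqCst) {
        thread::sleep(Duration::from_millis(100));
    }
}

fn main() {
    // Test side: raise the stall, let the worker run up against it, finish
    // other setup, then release the stall to dictate when submission happens.
    STALL_VALIDATION_SUBMISSION.store(true, Ordering::SeqCst);
    let worker = thread::spawn(|| {
        wait_if_stalled();
        println!("submitting block for validation");
    });
    thread::sleep(Duration::from_millis(300)); // stand-in for arranging N+1's validation response
    STALL_VALIDATION_SUBMISSION.store(false, Ordering::SeqCst);
    worker.join().unwrap();
}

Note that raising or clearing such a flag says nothing about whether the other thread has actually reached the stall point, which is exactly why the test above falls back on sleep_ms(5_000) to wait out that race rather than synchronizing on it.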