From 2f2cb53f66291d8ee46b2837f97bf5b97df82bfa Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:31:06 -0500 Subject: [PATCH 1/6] chore: Apply Clippy lint `redundant_pattern_matching` --- stackslib/src/burnchains/db.rs | 3 ++- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/chainstate/stacks/boot/contract_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- stackslib/src/chainstate/stacks/db/mod.rs | 5 +---- stackslib/src/chainstate/stacks/index/file.rs | 2 +- stackslib/src/chainstate/stacks/index/test/marf.rs | 2 +- stackslib/src/clarity_cli.rs | 8 ++++---- stackslib/src/clarity_vm/database/mod.rs | 6 ++++-- stackslib/src/main.rs | 2 +- stackslib/src/net/tests/inv/nakamoto.rs | 2 +- 11 files changed, 18 insertions(+), 18 deletions(-) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 2571c532ed..47af6b3ae6 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1193,9 +1193,10 @@ impl BurnchainDB { let ops: Vec = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); for op in ops { - if let Some(_) = indexer + if indexer .find_burnchain_header_height(&op.burn_header_hash()) .expect("FATAL: burnchain DB query error") + .is_some() { // this is the op on the canonical fork return Some(op); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 7584af67d3..c15c48e3fd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2476,7 +2476,7 @@ impl NakamotoChainState { ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block - if let Some(_) = Self::get_block_header(headers_conn, &block.header.block_id())? 
{ + if Self::get_block_header(headers_conn, &block.header.block_id())?.is_some() { debug!("Already have block {}", &block.header.block_id()); return Ok(false); } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 2fb95a5ace..11cbc1fd46 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -486,7 +486,7 @@ impl BurnStateDB for TestSimBurnStateDB { height: u32, sortition_id: &SortitionId, ) -> Option<(Vec, u128)> { - if let Some(_) = self.get_burn_header_hash(height, sortition_id) { + if self.get_burn_header_hash(height, sortition_id).is_some() { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 30b38c10cc..d3a4dc8a5a 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5147,7 +5147,7 @@ impl StacksChainState { ) { Ok(miner_rewards_opt) => miner_rewards_opt, Err(e) => { - if let Some(_) = miner_id_opt { + if miner_id_opt.is_some() { return Err(e); } else { let msg = format!("Failed to load miner rewards: {:?}", &e); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6853ec0ee9..c09b2fcbab 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1839,10 +1839,7 @@ impl StacksChainState { let nakamoto_staging_blocks_conn = StacksChainState::open_nakamoto_staging_blocks(&nakamoto_staging_blocks_path, true)?; - let init_required = match fs::metadata(&clarity_state_index_marf) { - Ok(_) => false, - Err(_) => true, - }; + let init_required = fs::metadata(&clarity_state_index_marf).is_err(); let state_index = StacksChainState::open_db(mainnet, chain_id, &header_index_root)?; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 5a7da69e52..3940cb594e 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -213,7 +213,7 @@ impl TrieFile { let mut set_sqlite_tmpdir = false; let mut old_tmpdir_opt = None; if let Some(parent_path) = Path::new(db_path).parent() { - if let Err(_) = env::var("SQLITE_TMPDIR") { + if env::var("SQLITE_TMPDIR").is_err() { debug!( "Sqlite will store temporary migration state in '{}'", parent_path.display() diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 63b2b58968..4f2b06a480 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -2190,7 +2190,7 @@ fn test_marf_begin_from_sentinel_twice() { #[test] fn test_marf_unconfirmed() { let marf_path = "/tmp/test_marf_unconfirmed"; - if let Ok(_) = std::fs::metadata(marf_path) { + if std::fs::metadata(marf_path).is_ok() { std::fs::remove_file(marf_path).unwrap(); } let marf_opts = MARFOpenOpts::default(); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 9862281b6c..a580e90ee9 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -645,7 +645,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if 
get_cli_block_height(&conn, id_bhh).is_some() { let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); Some(BurnchainHeaderHash(hash_bytes.0)) } else { @@ -660,7 +660,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { let hash_bytes = Hash160::from_data(&id_bhh.0); Some(ConsensusHash(hash_bytes.0)) } else { @@ -674,7 +674,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -690,7 +690,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 6f770f5927..e03149dba4 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -737,13 +737,15 @@ fn get_first_block_in_tenure( } } None => { - if let Some(_) = get_stacks_header_column_from_table( + if get_stacks_header_column_from_table( conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), false, - ) { + ) + .is_some() + { return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 2e63d0d128..90f6dfeecd 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1523,7 +1523,7 @@ check if the associated microblocks can be downloaded while next_arrival < stacks_blocks_arrival_order.len() && known_stacks_blocks.contains(&stacks_block_id) { - if let Some(_) = stacks_blocks_available.get(&stacks_block_id) { + if stacks_blocks_available.get(&stacks_block_id).is_some() { // load up the block let stacks_block_opt = StacksChainState::load_block( &old_chainstate.blocks_path, diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index cb09236ccb..c313ede598 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -146,7 +146,7 @@ pub fn peer_get_nakamoto_invs<'a>( loop { peer.step_with_ibd(false).unwrap(); - if let Ok(..) 
= shutdown_recv.try_recv() { + if shutdown_recv.try_recv().is_ok() { break; } } From da64ceca3f2562e07ce6484e508dbd5c2de0650f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:47:38 -0500 Subject: [PATCH 2/6] chore: Apply Clippy lint `single_match` --- stackslib/src/burnchains/bitcoin/indexer.rs | 21 +- stackslib/src/burnchains/tests/mod.rs | 9 +- stackslib/src/chainstate/stacks/block.rs | 57 ++-- .../src/chainstate/stacks/boot/pox_2_tests.rs | 29 +- .../src/chainstate/stacks/boot/pox_3_tests.rs | 15 +- .../src/chainstate/stacks/boot/pox_4_tests.rs | 15 +- stackslib/src/chainstate/stacks/db/blocks.rs | 30 +-- stackslib/src/chainstate/stacks/db/mod.rs | 21 +- .../src/chainstate/stacks/index/cache.rs | 14 +- stackslib/src/chainstate/stacks/index/file.rs | 14 +- stackslib/src/chainstate/stacks/index/marf.rs | 5 +- .../src/chainstate/stacks/index/proofs.rs | 12 +- .../src/chainstate/stacks/index/storage.rs | 5 +- .../src/chainstate/stacks/index/test/marf.rs | 16 +- stackslib/src/chainstate/stacks/miner.rs | 39 ++- stackslib/src/chainstate/stacks/mod.rs | 7 +- stackslib/src/chainstate/stacks/tests/mod.rs | 21 +- .../src/chainstate/stacks/transaction.rs | 48 ++-- stackslib/src/core/mempool.rs | 7 +- stackslib/src/net/atlas/mod.rs | 69 +++-- stackslib/src/net/chat.rs | 29 +- stackslib/src/net/connection.rs | 36 ++- stackslib/src/net/dns.rs | 33 +-- stackslib/src/net/download/epoch2x.rs | 28 +- stackslib/src/net/http/request.rs | 34 +-- stackslib/src/net/httpcore.rs | 29 +- stackslib/src/net/inv/epoch2x.rs | 41 ++- stackslib/src/net/mod.rs | 28 +- stackslib/src/net/neighbors/mod.rs | 7 +- stackslib/src/net/p2p.rs | 67 ++--- stackslib/src/net/prune.rs | 37 +-- stackslib/src/net/relay.rs | 79 +++--- stackslib/src/net/server.rs | 87 +++--- stackslib/src/net/tests/convergence.rs | 85 +++--- stackslib/src/net/tests/download/epoch2x.rs | 88 ++---- stackslib/src/net/tests/inv/epoch2x.rs | 198 ++++++-------- stackslib/src/net/tests/inv/nakamoto.rs | 22 +- stackslib/src/net/tests/neighbors.rs | 255 ++++++------------ stackslib/src/net/tests/relay/epoch2x.rs | 25 +- stackslib/src/net/unsolicited.rs | 23 +- stackslib/src/util_lib/mod.rs | 11 +- 41 files changed, 663 insertions(+), 1033 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 129a4b5a91..509eb61e79 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -282,11 +282,8 @@ impl BitcoinIndexer { btc_error::ConnectionError })?; - match self.runtime.sock.take() { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = self.runtime.sock.take() { + let _ = s.shutdown(Shutdown::Both); } self.runtime.sock = Some(s); @@ -294,11 +291,8 @@ impl BitcoinIndexer { } Err(_e) => { let s = self.runtime.sock.take(); - match s { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = s { + let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) } @@ -932,11 +926,8 @@ impl BitcoinIndexer { impl Drop for BitcoinIndexer { fn drop(&mut self) { - match self.runtime.sock { - Some(ref mut s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(ref mut s) = self.runtime.sock { + let _ = s.shutdown(Shutdown::Both); } } } diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 23232ac3b4..91ae93bb3f 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ 
-580,12 +580,9 @@ impl TestBurnchainBlock { assert_eq!(parent_snapshot.block_height + 1, self.block_height); for i in 0..self.txs.len() { - match self.txs[i] { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = self.txs[i] { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); } } } diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index a335e21894..1c231b8efc 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -353,16 +353,13 @@ impl StacksMessageCodec for StacksBlock { // must be only one coinbase let mut coinbase_count = 0; for tx in txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - coinbase_count += 1; - if coinbase_count > 1 { - return Err(codec_error::DeserializeError( - "Invalid block: multiple coinbases found".to_string(), - )); - } + if let TransactionPayload::Coinbase(..) = tx.payload { + coinbase_count += 1; + if coinbase_count > 1 { + return Err(codec_error::DeserializeError( + "Invalid block: multiple coinbases found".to_string(), + )); } - _ => {} } } @@ -518,26 +515,23 @@ impl StacksBlock { let mut found_coinbase = false; let mut coinbase_index = 0; for (i, tx) in txs.iter().enumerate() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - if !check_present { - warn!("Found unexpected coinbase tx {}", tx.txid()); - return false; - } - - if found_coinbase { - warn!("Found duplicate coinbase tx {}", tx.txid()); - return false; - } - - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { - warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); - return false; - } - found_coinbase = true; - coinbase_index = i; + if let TransactionPayload::Coinbase(..) = tx.payload { + if !check_present { + warn!("Found unexpected coinbase tx {}", tx.txid()); + return false; + } + + if found_coinbase { + warn!("Found duplicate coinbase tx {}", tx.txid()); + return false; } - _ => {} + + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { + warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); + return false; + } + found_coinbase = true; + coinbase_index = i; } } @@ -1150,11 +1144,8 @@ mod test { let mut txs_anchored = vec![]; for tx in all_txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 3313e80c7f..4256fba3b9 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1371,23 +1371,20 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == charlie_address { - assert!( - r.execution_cost != ExecutionCost::ZERO, - "Execution cost is not zero!" 
- ); - charlie_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + assert!( + r.execution_cost != ExecutionCost::ZERO, + "Execution cost is not zero!" + ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 5c52297969..d42095b923 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -930,16 +930,13 @@ fn pox_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 840e7a2c54..5005dd8781 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -9184,16 +9184,13 @@ fn missed_slots_no_unlock() { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index d3a4dc8a5a..6c3c745a45 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -11206,15 +11206,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = @@ -11889,15 +11886,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), 
Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index c09b2fcbab..1d7c97b676 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2746,11 +2746,8 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances @@ -2866,11 +2863,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = @@ -2956,11 +2950,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index d5ba5ae5f6..2d5cd556b8 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -258,12 +258,11 @@ impl TrieCache { TrieCache::Everything(ref mut state) => { state.store_node_and_hash(block_id, trieptr, node, hash); } - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node_and_hash(block_id, trieptr, TrieNodeType::Node256(data), hash); } - _ => {} - }, + } } } @@ -273,12 +272,11 @@ impl TrieCache { match self { TrieCache::Noop(_) => {} TrieCache::Everything(ref mut state) => state.store_node(block_id, trieptr, node), - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node(block_id, trieptr, TrieNodeType::Node256(data)) } - _ => {} - }, + } } } diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 3940cb594e..52f571aa1f 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -194,11 +194,8 @@ impl TrieFile { .map(|stat| Some(stat.len())) .unwrap_or(None); - match (size_before_opt, size_after_opt) { - (Some(sz_before), Some(sz_after)) => { - debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); - } - _ => {} + if let (Some(sz_before), Some(sz_after)) = (size_before_opt, size_after_opt) { + debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); } Ok(()) @@ -461,11 +458,8 @@ impl TrieFile { self.write_all(buf)?; self.flush()?; - match self { - TrieFile::Disk(ref mut data) => { - data.fd.sync_data()?; - } - _ => {} + 
if let TrieFile::Disk(ref mut data) = self { + data.fd.sync_data()?; } Ok(offset) } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index de1488d057..cfb5a97594 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1291,9 +1291,8 @@ impl MARF { // used in testing in order to short-circuit block-height lookups // when the trie struct is tested outside of marf.rs usage if height == 0 { - match storage.test_genesis_block { - Some(ref s) => return Ok(Some(s.clone())), - _ => {} + if let Some(ref s) = storage.test_genesis_block { + return Ok(Some(s.clone())); } } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 37ff420437..e7ba01a6bf 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -1213,12 +1213,12 @@ impl TrieMerkleProof { }; // next proof item should be part of a segment proof - match proof[i] { - TrieMerkleProofType::Shunt(_) => { - test_debug!("Malformed proof -- exepcted segment proof following first shunt proof head at {}", i); - return false; - } - _ => {} + if let TrieMerkleProofType::Shunt(_) = proof[i] { + test_debug!( + "Malformed proof -- exepcted segment proof following first shunt proof head at {}", + i + ); + return false; } while i < proof.len() { diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 79b391ce42..fb9637c799 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -1887,9 +1887,8 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // blow away db trie_sql::clear_tables(self.sqlite_tx())?; - match self.data.uncommitted_writes { - Some((_, ref mut trie_storage)) => trie_storage.format()?, - None => {} + if let Some((_, ref mut trie_storage)) = self.data.uncommitted_writes { + trie_storage.format()? 
}; self.data.set_block(T::sentinel(), None); diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 4f2b06a480..7102527ba8 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1282,11 +1282,8 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); let f = TrieFileStorage::open(&path, marf_opts).unwrap(); @@ -1567,12 +1564,9 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - match fs::metadata(&path) { - Err(_) => { - eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); - return; - } - Ok(_) => {} + if let Err(_) = fs::metadata(&path) { + eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); + return; }; let marf_opts = MARFOpenOpts::default(); let mut f_store = TrieFileStorage::new_memory(marf_opts).unwrap(); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 43fd6b3c18..aca3f9d84c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1147,24 +1147,20 @@ impl<'a> StacksMicroblockBuilder<'a> { TransactionResult::Skipped(TransactionSkipped { error, .. }) | TransactionResult::ProcessingError(TransactionError { error, .. }) => { test_debug!("Exclude tx {} from microblock", tx.txid()); - match &error { - Error::BlockTooBigError => { - // done mining -- our execution budget is exceeded. - // Make the block from the transactions we did manage to get - test_debug!("Block budget exceeded on tx {}", &tx.txid()); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - test_debug!("Switch to mining stx-transfers only"); - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - } else if block_limit_hit - == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - test_debug!( - "Stop mining microblock block due to limit exceeded" - ); - break; - } + if let Error::BlockTooBigError = &error { + // done mining -- our execution budget is exceeded. 
+ // Make the block from the transactions we did manage to get + test_debug!("Block budget exceeded on tx {}", &tx.txid()); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + test_debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + test_debug!( + "Stop mining microblock block due to limit exceeded" + ); + break; } - _ => {} } continue; } @@ -1198,12 +1194,9 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.considered.replace(considered); self.runtime.num_mined = num_txs; - match result { - Err(e) => { - warn!("Error producing microblock: {}", e); - return Err(e); - } - _ => {} + if let Err(e) = result { + warn!("Error producing microblock: {}", e); + return Err(e); } return self.make_next_microblock(txs_included, miner_key, tx_events, None); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 23990fe199..546ab6bd08 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1590,11 +1590,8 @@ pub mod test { } for tx in all_txs.into_iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); if txs_anchored.len() >= num_txs { diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 54dcea1c7e..d119dacd8e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,11 +338,8 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - match fs::metadata(&chainstate_path(new_test_name)) { - Ok(_) => { - fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&chainstate_path(new_test_name)) { + fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } copy_dir( @@ -525,17 +522,14 @@ impl TestStacksNode { miner: &TestMiner, ) -> Option { for commit_op in miner.block_commits.iter().rev() { - match SortitionDB::get_block_snapshot_for_winning_stacks_block( + if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() { - Some(sn) => { - return Some(sn); - } - None => {} + return Some(sn); } } return None; @@ -1424,11 +1418,8 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index c0fa7f1727..0308a8124b 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1130,17 +1130,14 @@ impl StacksTransactionSigner { } pub fn sign_sponsor(&mut self, privk: &StacksPrivateKey) -> Result<(), net_error> { - match self.tx.auth { - TransactionAuth::Sponsored(_, ref sponsor_condition) => { - if self.check_oversign - && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() - { - return Err(net_error::SigningError( - "Sponsor would have too many signatures".to_string(), - )); - } + if let 
TransactionAuth::Sponsored(_, ref sponsor_condition) = self.tx.auth { + if self.check_oversign + && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() + { + return Err(net_error::SigningError( + "Sponsor would have too many signatures".to_string(), + )); } - _ => {} } let next_sighash = self.tx.sign_next_sponsor(&self.sighash, privk)?; @@ -1933,24 +1930,21 @@ mod test { // test_debug!("mutate byte {}", &i); let mut cursor = io::Cursor::new(&tx_bytes); let mut reader = LogReader::from_reader(&mut cursor); - match StacksTransaction::consensus_deserialize(&mut reader) { - Ok(corrupt_tx) => { - let mut corrupt_tx_bytes = vec![]; - corrupt_tx - .consensus_serialize(&mut corrupt_tx_bytes) - .unwrap(); - if corrupt_tx_bytes.len() < tx_bytes.len() { - // didn't parse fully; the block-parsing logic would reject this block. - tx_bytes[i] = next_byte as u8; - continue; - } - if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { - eprintln!("corrupt tx: {:#?}", &corrupt_tx); - eprintln!("signed tx: {:#?}", &signed_tx); - assert!(false); - } + if let Ok(corrupt_tx) = StacksTransaction::consensus_deserialize(&mut reader) { + let mut corrupt_tx_bytes = vec![]; + corrupt_tx + .consensus_serialize(&mut corrupt_tx_bytes) + .unwrap(); + if corrupt_tx_bytes.len() < tx_bytes.len() { + // didn't parse fully; the block-parsing logic would reject this block. + tx_bytes[i] = next_byte as u8; + continue; + } + if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { + eprintln!("corrupt tx: {:#?}", &corrupt_tx); + eprintln!("signed tx: {:#?}", &signed_tx); + assert!(false); } - Err(_) => {} } // restore tx_bytes[i] = next_byte as u8; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 589b624abe..17848fa2d2 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1092,11 +1092,8 @@ impl NonceCache { }; // In-memory cache - match self.cache.get_mut(&address) { - Some(nonce) => { - *nonce = value; - } - None => (), + if let Some(nonce) = self.cache.get_mut(&address) { + *nonce = value; } success diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index c382aa618d..49d1036a0b 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -195,45 +195,42 @@ impl AttachmentInstance { ) -> Option { if let Value::Tuple(ref attachment) = value { if let Ok(Value::Tuple(ref attachment_data)) = attachment.get("attachment") { - match ( + if let ( + Ok(Value::Sequence(SequenceData::Buffer(content_hash))), + Ok(Value::UInt(attachment_index)), + ) = ( attachment_data.get("hash"), attachment_data.get("attachment-index"), ) { - ( - Ok(Value::Sequence(SequenceData::Buffer(content_hash))), - Ok(Value::UInt(attachment_index)), - ) => { - let content_hash = if content_hash.data.is_empty() { - Hash160::empty() - } else { - match Hash160::from_bytes(&content_hash.data[..]) { - Some(content_hash) => content_hash, - _ => return None, - } - }; - let metadata = match attachment_data.get("metadata") { - Ok(metadata) => { - let mut serialized = vec![]; - metadata - .consensus_serialize(&mut serialized) - .expect("FATAL: invalid metadata"); - to_hex(&serialized[..]) - } - _ => String::new(), - }; - let instance = AttachmentInstance { - index_block_hash, - content_hash, - attachment_index: *attachment_index as u32, - stacks_block_height, - metadata, - contract_id: contract_id.clone(), - tx_id, - canonical_stacks_tip_height, - }; - return Some(instance); - } - _ => {} + let content_hash = if 
content_hash.data.is_empty() { + Hash160::empty() + } else { + match Hash160::from_bytes(&content_hash.data[..]) { + Some(content_hash) => content_hash, + _ => return None, + } + }; + let metadata = match attachment_data.get("metadata") { + Ok(metadata) => { + let mut serialized = vec![]; + metadata + .consensus_serialize(&mut serialized) + .expect("FATAL: invalid metadata"); + to_hex(&serialized[..]) + } + _ => String::new(), + }; + let instance = AttachmentInstance { + index_block_hash, + content_hash, + attachment_index: *attachment_index as u32, + stacks_block_height, + metadata, + contract_id: contract_id.clone(), + tx_id, + canonical_stacks_tip_height, + }; + return Some(instance); } } } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 2dea34245b..0ce27038cd 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -515,13 +515,12 @@ impl Neighbor { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer use std::env; - match env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) { - Ok(asn_str) => { - neighbor.asn = asn_str.parse().unwrap(); - neighbor.org = neighbor.asn; - test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); - } - Err(_) => {} + if let Ok(asn_str) = + env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) + { + neighbor.asn = asn_str.parse().unwrap(); + neighbor.org = neighbor.asn; + test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); }; } @@ -544,13 +543,10 @@ impl Neighbor { let asn_opt = PeerDB::asn_lookup(conn, &addr.addrbytes).map_err(net_error::DBError)?; - match asn_opt { - Some(a) => { - if a != 0 { - peer.asn = a; - } + if let Some(a) = asn_opt { + if a != 0 { + peer.asn = a; } - None => {} }; } Ok(Some(peer)) @@ -3110,11 +3106,8 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&test_path) { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 1d0eabdd14..0fe48a678b 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -926,19 +926,16 @@ impl ConnectionInbox
<P>
{ let bytes_consumed = if let Some(ref mut preamble) = preamble_opt { let (message_opt, bytes_consumed) = self.consume_payload(protocol, preamble, &buf[offset..])?; - match message_opt { - Some(message) => { - // queue up - test_debug!( - "Consumed message '{}' (request {}) in {} bytes", - message.get_message_name(), - message.request_id(), - bytes_consumed - ); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!( + "Consumed message '{}' (request {}) in {} bytes", + message.get_message_name(), + message.request_id(), + bytes_consumed + ); + self.inbox.push_back(message); + consumed_message = true; }; bytes_consumed @@ -982,14 +979,11 @@ impl ConnectionInbox
<P>
{ if let Some(ref mut preamble) = preamble_opt { let (message_opt, _bytes_consumed) = self.consume_payload(protocol, preamble, &[])?; - match message_opt { - Some(message) => { - // queue up - test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); + self.inbox.push_back(message); + consumed_message = true; } } self.preamble = preamble_opt; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 6529001d7d..77c401d5e2 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -377,13 +377,10 @@ mod test { let mut resolved_addrs = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("www.google.com", 80).unwrap() { - Some(addrs) => { - test_debug!("addrs: {:?}", &addrs); - resolved_addrs = Some(addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup("www.google.com", 80).unwrap() { + test_debug!("addrs: {:?}", &addrs); + resolved_addrs = Some(addrs); + break; } sleep_ms(100); } @@ -423,13 +420,10 @@ mod test { if resolved_addrs.contains_key(&name.to_string()) { continue; } - match client.poll_lookup(name, 80).unwrap() { - Some(addrs) => { - test_debug!("name {} addrs: {:?}", name, &addrs); - resolved_addrs.insert(name.to_string(), addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup(name, 80).unwrap() { + test_debug!("name {} addrs: {:?}", name, &addrs); + resolved_addrs.insert(name.to_string(), addrs); + break; } } @@ -452,13 +446,10 @@ mod test { let mut resolved_error = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("asdfjkl;", 80).unwrap() { - Some(resp) => { - test_debug!("addrs: {:?}", &resp); - resolved_error = Some(resp); - break; - } - None => {} + if let Some(resp) = client.poll_lookup("asdfjkl;", 80).unwrap() { + test_debug!("addrs: {:?}", &resp); + resolved_error = Some(resp); + break; } sleep_ms(100); } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index 6d0bb63d5a..06f4e146fa 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -1045,9 +1045,8 @@ impl PeerNetwork { /// Pass a hint to the downloader to re-scan pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) { - match self.block_downloader { - Some(ref mut dl) => dl.hint_download_rescan(target_height, ibd), - None => {} + if let Some(ref mut dl) = self.block_downloader { + dl.hint_download_rescan(target_height, ibd) } } @@ -1978,11 +1977,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2016,11 +2014,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, 
&downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2480,9 +2477,8 @@ impl PeerNetwork { if done { // reset state if we're done - match self.block_downloader { - Some(ref mut downloader) => downloader.reset(), - None => {} + if let Some(ref mut downloader) = self.block_downloader { + downloader.reset() } } diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index e2d0fd16f3..13daa56cab 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -273,29 +273,23 @@ impl StacksMessageCodec for HttpRequestPreamble { .map_err(CodecError::WriteError)?; // content-type - match self.content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(c.to_string().as_str().as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(ref c) = self.content_type { + fd.write_all("Content-Type: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(c.to_string().as_str().as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // content-length - match self.content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(l) = self.content_length { + fd.write_all("Content-Length: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(format!("{}", l).as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // keep-alive diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index a38f35c005..919d7ffa1c 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1232,25 +1232,22 @@ impl StacksHttp { /// This method will set up this state machine to consume the message associated with this /// premable, if the response is chunked. 
fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() && !self.allow_arbitrary_response { - return Err(NetError::DeserializeError( - "Unexpected HTTP response: no active request handler".to_string(), - )); + if let StacksHttpPreamble::Response(ref http_response_preamble) = preamble { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); } - if http_response_preamble.is_chunked() { - // we can only receive one response at a time - if self.reply.is_some() { - test_debug!("Have pending reply already"); - return Err(NetError::InProgress); - } - self.set_pending(http_response_preamble); - } + self.set_pending(http_response_preamble); } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 9b9e7b3682..449d2e26e7 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1534,15 +1534,12 @@ impl PeerNetwork { } // does the peer agree with our PoX view up to this reward cycle? - match stats.inv.pox_inv_cmp(&self.pox_id) { - Some((disagreed, _, _)) => { - if disagreed < target_block_reward_cycle { - // can't proceed - debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); - return Ok(0); - } + if let Some((disagreed, _, _)) = stats.inv.pox_inv_cmp(&self.pox_id) { + if disagreed < target_block_reward_cycle { + // can't proceed + debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); + return Ok(0); } - None => {} } let target_block_height = self @@ -2523,13 +2520,10 @@ impl PeerNetwork { let mut cur_neighbors = HashSet::new(); for (nk, event_id) in self.events.iter() { // only outbound authenticated peers - match self.peers.get(event_id) { - Some(convo) => { - if convo.is_outbound() && convo.is_authenticated() { - cur_neighbors.insert(nk.clone()); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.is_outbound() && convo.is_authenticated() { + cur_neighbors.insert(nk.clone()); } - None => {} } } @@ -2543,17 +2537,14 @@ impl PeerNetwork { /// Set a hint that we learned something new, and need to sync invs again pub fn hint_sync_invs(&mut self, target_height: u64) { - match self.inv_state { - Some(ref mut inv_state) => { - debug!( - "Awaken inv sync to re-scan peer block inventories at height {}", - target_height - ); - inv_state.hint_learned_data = true; - inv_state.hint_do_rescan = true; - inv_state.hint_learned_data_height = target_height; - } - None => {} + if let Some(ref mut inv_state) = self.inv_state { + debug!( + "Awaken inv sync to re-scan peer block inventories at height {}", + target_height + ); + inv_state.hint_learned_data = true; + inv_state.hint_do_rescan = true; + inv_state.hint_learned_data_height = target_height; } } diff --git a/stackslib/src/net/mod.rs 
b/stackslib/src/net/mod.rs index cfefa2c5fe..a2461631a6 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2376,11 +2376,8 @@ pub mod test { if self.closed { return Ok(0); } - match self.read_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.read_error { + return Err(io::Error::from((*e).clone())); } let sz = self.c.read(buf)?; @@ -2403,11 +2400,8 @@ pub mod test { if self.closed { return Err(io::Error::from(ErrorKind::Other)); // EBADF } - match self.write_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.write_error { + return Err(io::Error::from((*e).clone())); } self.c.write(buf) } @@ -2799,11 +2793,8 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(&config); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&test_path) { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); @@ -3559,11 +3550,8 @@ pub mod test { ch: &ConsensusHash, ) { for op in blockstack_ops.iter_mut() { - match op { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - data.consensus_hash = (*ch).clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); } } } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index cc3fd73db8..f0d3cf18b7 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -388,11 +388,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier table size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier table size: {}", &self.local_peer, count); }; debug!("{:?}: Walk finished ===================", &self.local_peer); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 78c8982106..3180c3a3dd 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1145,13 +1145,10 @@ impl PeerNetwork { ) -> u64 { let mut ret = 0; for (_, socket) in sockets.iter() { - match socket.peer_addr() { - Ok(addr) => { - if addr.ip() == ipaddr.ip() { - ret += 1; - } + if let Ok(addr) = socket.peer_addr() { + if addr.ip() == ipaddr.ip() { + ret += 1; } - Err(_) => {} }; } ret @@ -1378,12 +1375,9 @@ impl PeerNetwork { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { info!("Request to ban {:?}", neighbor_key); - match self.events.get(neighbor_key) { - Some(event_id) => { - debug!("Will ban {:?} (event {})", neighbor_key, event_id); - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor_key) { + debug!("Will ban {:?} (event {})", neighbor_key, event_id); + self.bans.insert(*event_id); } } Ok(()) @@ -1466,28 +1460,25 @@ impl PeerNetwork { // receive all in-bound requests for i in 0..self.handles.len() { - match self.handles.get(i) { - Some(ref handle) => { - loop { - // drain all inbound requests - let inbound_request_res = handle.chan_in.try_recv(); - match inbound_request_res { - Ok(inbound_request) => { - messages.push((i, inbound_request)); - } - Err(TryRecvError::Empty) => { - // nothing to do - break; - } - Err(TryRecvError::Disconnected) => 
{ - // dead; remove - to_remove.push(i); - break; - } + if let Some(ref handle) = self.handles.get(i) { + loop { + // drain all inbound requests + let inbound_request_res = handle.chan_in.try_recv(); + match inbound_request_res { + Ok(inbound_request) => { + messages.push((i, inbound_request)); + } + Err(TryRecvError::Empty) => { + // nothing to do + break; + } + Err(TryRecvError::Disconnected) => { + // dead; remove + to_remove.push(i); + break; } } } - None => {} } } @@ -1885,11 +1876,8 @@ impl PeerNetwork { /// Deregister a socket from our p2p network instance. fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) { - match self.network { - Some(ref mut network) => { - let _ = network.deregister(event_id, &socket); - } - None => {} + if let Some(ref mut network) = self.network { + let _ = network.deregister(event_id, &socket); } } @@ -1969,11 +1957,8 @@ impl PeerNetwork { /// Deregister and ban a neighbor pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) { debug!("Disconnect from and ban {:?}", neighbor); - match self.events.get(neighbor) { - Some(event_id) => { - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor) { + self.bans.insert(*event_id); } self.relayer_stats.process_neighbor_ban(neighbor); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index f178ea719a..c58e1b210a 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -322,18 +322,15 @@ impl PeerNetwork { if preserve.contains(event_id) { continue; } - match self.peers.get(&event_id) { - Some(ref convo) => { - if !convo.stats.outbound { - let stats = convo.stats.clone(); - if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { - entry.push((*event_id, nk.clone(), stats)); - } else { - ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); - } + if let Some(ref convo) = self.peers.get(&event_id) { + if !convo.stats.outbound { + let stats = convo.stats.clone(); + if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { + entry.push((*event_id, nk.clone(), stats)); + } else { + ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); } } - None => {} } } @@ -378,15 +375,12 @@ impl PeerNetwork { let mut outbound: Vec = vec![]; for (nk, event_id) in self.events.iter() { - match self.peers.get(event_id) { - Some(convo) => { - if convo.stats.outbound { - outbound.push(format!("{:?}", &nk)); - } else { - inbound.push(format!("{:?}", &nk)); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.stats.outbound { + outbound.push(format!("{:?}", &nk)); + } else { + inbound.push(format!("{:?}", &nk)); } - None => {} } } (inbound, outbound) @@ -464,11 +458,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier size: {}", &self.local_peer, count); }; } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4569585b79..cadfb75f1e 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1826,52 +1826,49 @@ impl Relayer { &tx.txid(), &ast_rules ); - match tx.payload { - TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) => { - let clarity_version = - clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); - - if ast_rules == ASTRules::PrecheckSize 
{ - let origin = tx.get_origin(); - let issuer_principal = { - let addr = if mainnet { - origin.address_mainnet() - } else { - origin.address_testnet() - }; - addr.to_account_principal() - }; - let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { - data + if let TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) = + tx.payload + { + let clarity_version = + clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); + + if ast_rules == ASTRules::PrecheckSize { + let origin = tx.get_origin(); + let issuer_principal = { + let addr = if mainnet { + origin.address_mainnet() } else { - // not possible - panic!("Transaction had a contract principal origin"); + origin.address_testnet() }; + addr.to_account_principal() + }; + let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { + data + } else { + // not possible + panic!("Transaction had a contract principal origin"); + }; - let contract_id = QualifiedContractIdentifier::new( - issuer_principal, - smart_contract.name.clone(), - ); - let contract_code_str = smart_contract.code_body.to_string(); - - // make sure that the AST isn't unreasonably big - let ast_res = - ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); - match ast_res { - Ok(_) => {} - Err(parse_error) => match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - // don't include this block - info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); - return Err(Error::ClarityError(parse_error.into())); - } - _ => {} - }, - } + let contract_id = + QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); + let contract_code_str = smart_contract.code_body.to_string(); + + // make sure that the AST isn't unreasonably big + let ast_res = + ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); + match ast_res { + Ok(_) => {} + Err(parse_error) => match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + // don't include this block + info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); + return Err(Error::ClarityError(parse_error.into())); + } + _ => {} + }, } } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 78f0f6fbb5..2459f64c00 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -429,56 +429,52 @@ impl HttpPeer { // get incoming bytes and update the state of this conversation. let mut convo_dead = false; let recv_res = convo.recv(client_sock); - match recv_res { - Err(e) => { - match e { - net_error::PermanentlyDrained => { - // socket got closed, but we might still have pending unsolicited messages - debug!( - "Remote HTTP peer disconnected event {} (socket {:?})", - event_id, &client_sock - ); - convo_dead = true; - } - net_error::InvalidMessage => { - // got sent bad data. If this was an inbound conversation, send it a HTTP - // 400 and close the socket. 
- debug!("Got a bad HTTP message on socket {:?}", &client_sock); - match convo.reply_error(StacksHttpResponse::new_empty_error( - &HttpBadRequest::new( - "Received an HTTP message that the node could not decode" - .to_string(), - ), - )) { - Ok(_) => { - // prime the socket - if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - // convo_dead = true; - } - } - Err(e) => { + if let Err(e) = recv_res { + match e { + net_error::PermanentlyDrained => { + // socket got closed, but we might still have pending unsolicited messages + debug!( + "Remote HTTP peer disconnected event {} (socket {:?})", + event_id, &client_sock + ); + convo_dead = true; + } + net_error::InvalidMessage => { + // got sent bad data. If this was an inbound conversation, send it a HTTP + // 400 and close the socket. + debug!("Got a bad HTTP message on socket {:?}", &client_sock); + match convo.reply_error(StacksHttpResponse::new_empty_error( + &HttpBadRequest::new( + "Received an HTTP message that the node could not decode".to_string(), + ), + )) { + Ok(_) => { + // prime the socket + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { debug!( - "Failed to reply HTTP 400 to socket {:?}: {:?}", + "Failed to flush HTTP 400 to socket {:?}: {:?}", &client_sock, &e ); - convo_dead = true; + // convo_dead = true; } } + Err(e) => { + debug!( + "Failed to reply HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; + } } - _ => { - debug!( - "Failed to receive HTTP data on event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + } + _ => { + debug!( + "Failed to receive HTTP data on event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } - Ok(_) => {} } // react to inbound messages -- do we need to send something out, or fulfill requests @@ -730,11 +726,8 @@ mod test { peer.step().unwrap(); // asked to yield? 
- match http_rx.try_recv() { - Ok(_) => { - break; - } - Err(_) => {} + if let Ok(_) = http_rx.try_recv() { + break; } } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index be35c4e1f1..a607298d74 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -218,7 +218,7 @@ fn test_walk_ring_15_org_biased() { let peers = test_walk_ring(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -226,11 +226,8 @@ fn test_walk_ring_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -398,7 +395,7 @@ fn test_walk_line_15_org_biased() { let peers = test_walk_line(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -406,11 +403,8 @@ fn test_walk_line_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -634,7 +628,7 @@ fn test_walk_star_15_org_biased() { let peers = test_walk_star(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -642,11 +636,8 @@ fn test_walk_star_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -849,14 +840,11 @@ fn dump_peers(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); } - None => {} } } @@ -882,16 +870,13 @@ fn dump_peer_histograms(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); } - None => {} } } for inbound in inbound_neighbor_index.iter() { @@ -1001,32 +986,26 @@ fn run_topology_test_ex( debug!("Step peer {:?}", &nk); // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } + if let Some(ref peer_list) = initial_allowed.get(&nk) { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); } } - None => {} }; // denied peers are never connected - match initial_denied.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { 
- error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } + if let Some(ref peer_list) = initial_denied.get(&nk) { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); } } - None => {} }; // all ports are unique in the p2p socket table diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5c13a12a50..e21ce19c35 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -171,20 +171,14 @@ fn test_get_block_availability() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -566,12 +560,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -847,12 +838,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -938,12 +926,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1012,12 +997,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1094,12 +1076,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + 
assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1173,12 +1152,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1260,11 +1236,8 @@ pub fn test_get_blocks_and_microblocks_ban_url() { |_| {}, |peer| { let mut blocked = 0; - match peer.network.block_downloader { - Some(ref dl) => { - blocked = dl.blocked_urls.len(); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + blocked = dl.blocked_urls.len(); } if blocked >= 1 { // NOTE: this is the success criterion @@ -1481,12 +1454,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 44a4bf3967..b2136bd1f0 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1390,22 +1390,16 @@ fn test_sync_inv_2_peers_plain() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -1553,46 +1547,38 @@ fn test_sync_inv_2_peers_stale() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(ref peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { - if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - for i in 0..first_stacks_block_height { - assert!(!peer_2_inv.inv.has_ith_block(i)); - assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); - } - peer_2_check = true; + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(ref peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { + if 
peer_2_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + for i in 0..first_stacks_block_height { + assert!(!peer_2_inv.inv.has_ith_block(i)); + assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); } + peer_2_check = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { - if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - peer_1_check = true; - } + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { + if peer_1_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + peer_1_check = true; } } - None => {} } round += 1; @@ -1703,54 +1689,48 @@ fn test_sync_inv_2_peers_unstable() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_1_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_1_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { - peer_1_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { - peer_1_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_1_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_1_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { + peer_1_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { + peer_1_block_cycle = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_2_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_2_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { - peer_2_pox_cycle = true; - } - if 
stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { - peer_2_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_2_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_2_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { + peer_2_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { + peer_2_block_cycle = true; } } - None => {} } round += 1; @@ -1917,42 +1897,30 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let _ = peer_2.step(); // peer 1 should see that peer 2 has all blocks for reward cycles 5 through 9 - match peer_1.network.inv_state { - Some(ref inv) => { - inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); - peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); + peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); }; // peer 2 should see that peer 1 has all blocks up to where we stopped feeding them to // it - match peer_2.network.inv_state { - Some(ref inv) => { - inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); - peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); + peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index c313ede598..94f1eb8124 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -1089,22 +1089,16 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .unwrap_or(0); // nothing should break - match peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match other_peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - 
assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = other_peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1e3fa76cb..8c56b48b0d 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -68,20 +68,14 @@ fn test_step_walk_1_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -184,22 +178,16 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; if let Some(s) = peer_1 @@ -306,20 +294,14 @@ fn test_step_walk_1_neighbor_denied() { walk_1_retries = peer_1.network.walk_retries; walk_2_retries = peer_2.network.walk_retries; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -400,20 +382,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { walk_1_retries = peer_1.network.walk_attempts; walk_2_retries = peer_2.network.walk_attempts; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 
0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -463,20 +439,14 @@ fn test_step_walk_1_neighbor_heartbeat_ping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -573,23 +543,17 @@ fn test_step_walk_1_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 2 never gets added to peer 1's frontier - assert!(!w.frontier.contains_key(&neighbor_2.addr)); - } - None => {} + // peer 2 never gets added to peer 1's frontier + assert!(!w.frontier.contains_key(&neighbor_2.addr)); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -657,23 +621,17 @@ fn test_step_walk_1_neighbor_behind() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 1 never gets added to peer 2's frontier - assert!(!w.frontier.contains_key(&neighbor_1.addr)); - } - None => {} + // peer 1 never gets added to peer 2's frontier + assert!(!w.frontier.contains_key(&neighbor_1.addr)); }; i += 1; @@ -789,20 +747,14 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + 
assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -942,20 +894,14 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; steps += 1; @@ -1091,20 +1037,14 @@ fn test_step_walk_2_neighbors_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -1371,28 +1311,19 @@ fn test_step_walk_3_neighbors_inbound() { ); test_debug!("========"); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_3.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_3.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; for (i, peer) in [&peer_1, &peer_2, &peer_3].iter().enumerate() { @@ -1542,20 +1473,14 @@ fn test_step_walk_2_neighbors_rekey() { let _ = peer_1.step(); let _ = peer_2.step(); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; } @@ -1649,20 +1574,14 @@ fn test_step_walk_2_neighbors_different_networks() { walk_2_count ); - match peer_1.network.walk { - 
Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index ddf4e92598..8fdbfb846d 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1702,23 +1702,17 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let mut peer_0_to_1 = false; let mut peer_1_to_0 = false; for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } + if let Some(convo) = peers[0].network.peers.get(event_id) { + if *nk == peer_1_nk { + peer_0_to_1 = true; } - None => {} } } for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } + if let Some(convo) = peers[1].network.peers.get(event_id) { + if *nk == peer_0_nk { + peer_1_to_0 = true; } - None => {} } } @@ -3732,17 +3726,14 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( + if let Err(e) = node.chainstate.will_admit_mempool_tx( &sortdb.index_handle(&tip.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, versioned_contract_len as u64, ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} + panic!("will_admit_mempool_tx {:?}", &e); }; peer.sortdb = Some(sortdb); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index e7f1c256a4..922332bedd 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -481,21 +481,18 @@ impl PeerNetwork { if need_block { // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); + if let Some(ref mut downloader) = self.block_downloader { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); - } - self.have_data_to_download = true; + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } - None => {} + self.have_data_to_download = true; } } } diff --git 
a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 87031676db..af9a4d98a7 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -32,13 +32,10 @@ pub mod test { let mut done = false; while get_epoch_time_secs() <= deadline { sleep_ms(1000); - match rx.try_recv() { - Ok(success) => { - assert!(success); - done = true; - break; - } - Err(_) => {} + if let Ok(success) = rx.try_recv() { + assert!(success); + done = true; + break; } } From 34e34ef5f5ec489d5a3040bc866c5b49b9c3fc10 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:56:01 -0500 Subject: [PATCH 3/6] chore: Apply Clippy lint `redundant_pattern_matching` again --- stackslib/src/chainstate/stacks/db/mod.rs | 6 +++--- stackslib/src/chainstate/stacks/index/test/marf.rs | 4 ++-- stackslib/src/chainstate/stacks/tests/mod.rs | 4 ++-- stackslib/src/net/chat.rs | 2 +- stackslib/src/net/mod.rs | 2 +- stackslib/src/net/server.rs | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 1d7c97b676..5821f47394 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2746,7 +2746,7 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; @@ -2863,7 +2863,7 @@ pub mod test { }; let path = chainstate_path(function_name!()); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; @@ -2950,7 +2950,7 @@ pub mod test { }; let path = chainstate_path(function_name!()); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 7102527ba8..a721b2dce4 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1282,7 +1282,7 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); @@ -1564,7 +1564,7 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - if let Err(_) = fs::metadata(&path) { + if fs::metadata(&path).is_err() { eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); return; }; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index d119dacd8e..29207dce6a 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,7 +338,7 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - if let Ok(_) = fs::metadata(&chainstate_path(new_test_name)) { + if fs::metadata(&chainstate_path(new_test_name)).is_ok() { fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } @@ -1418,7 +1418,7 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = 
chainstate_path(test_name); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 0ce27038cd..8fcc7cdf41 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3106,7 +3106,7 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - if let Ok(_) = fs::metadata(&test_path) { + if fs::metadata(&test_path).is_ok() { fs::remove_dir_all(&test_path).unwrap(); }; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index a2461631a6..0959d2ff35 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2793,7 +2793,7 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(&config); - if let Ok(_) = fs::metadata(&test_path) { + if fs::metadata(&test_path).is_ok() { fs::remove_dir_all(&test_path).unwrap(); }; diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 2459f64c00..05d831ca7a 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -726,7 +726,7 @@ mod test { peer.step().unwrap(); // asked to yield? - if let Ok(_) = http_rx.try_recv() { + if http_rx.try_recv().is_ok() { break; } } From 7e2d60e3f9e6759918e55cb490e60eb9b2e732e8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 12:09:50 -0500 Subject: [PATCH 4/6] chore: Address PR comments from Aaron --- stackslib/src/burnchains/bitcoin/indexer.rs | 21 +++++++++------------ stackslib/src/burnchains/tests/mod.rs | 4 ++-- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 2a19074aef..899c96390c 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -264,34 +264,31 @@ impl BitcoinIndexer { match net::TcpStream::connect((self.config.peer_host.as_str(), self.config.peer_port)) { Ok(s) => { // Disable Nagle algorithm - s.set_nodelay(true).map_err(|_e| { - test_debug!("Failed to set TCP_NODELAY: {:?}", &_e); + s.set_nodelay(true).map_err(|e| { + test_debug!("Failed to set TCP_NODELAY: {e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|_e| { - test_debug!("Failed to set TCP read timeout: {:?}", &_e); + .map_err(|e| { + test_debug!("Failed to set TCP read timeout: {e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|_e| { - test_debug!("Failed to set TCP write timeout: {:?}", &_e); + .map_err(|e| { + test_debug!("Failed to set TCP write timeout: {e:?}"); btc_error::ConnectionError })?; - if let Some(s) = self.runtime.sock.take() { - let _ = s.shutdown(Shutdown::Both); + if let Some(s_old) = self.runtime.sock.replace(s) { + let _ = s_old.shutdown(Shutdown::Both); } - - self.runtime.sock = Some(s); Ok(()) } Err(_e) => { - let s = self.runtime.sock.take(); - if let Some(s) = s { + if let Some(s) = self.runtime.sock.take() { let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 2f6d3112b7..c716f9f4e3 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -579,8 +579,8 @@ impl TestBurnchainBlock { 
pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) { assert_eq!(parent_snapshot.block_height + 1, self.block_height); - for i in 0..self.txs.len() { - if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = self.txs[i] { + for tx in self.txs.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = tx { assert_eq!(data.block_height, self.block_height); data.consensus_hash = parent_snapshot.consensus_hash.clone(); } From 82313d350276c5bc12e54c049a5421c70d8bc82e Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 15:17:55 -0500 Subject: [PATCH 5/6] chore: Apply PR comment from Aaron --- stackslib/src/chainstate/stacks/tests/mod.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 77ebc89ff5..80b1d17a62 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -521,18 +521,14 @@ impl TestStacksNode { fork_tip: &BlockSnapshot, miner: &TestMiner, ) -> Option { - for commit_op in miner.block_commits.iter().rev() { - if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( + miner.block_commits.iter().rev().find_map(|commit_op| { + SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() - { - return Some(sn); - } - } - return None; + }) } pub fn get_miner_balance(clarity_tx: &mut ClarityTx, addr: &StacksAddress) -> u128 { From c9cbd23dea29d065d1a05cc542a90f093af851bb Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 20:45:44 -0500 Subject: [PATCH 6/6] fix: Undo `_e` => `e` variable rename --- stackslib/src/burnchains/bitcoin/indexer.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 899c96390c..69ba63c240 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -264,21 +264,21 @@ impl BitcoinIndexer { match net::TcpStream::connect((self.config.peer_host.as_str(), self.config.peer_port)) { Ok(s) => { // Disable Nagle algorithm - s.set_nodelay(true).map_err(|e| { - test_debug!("Failed to set TCP_NODELAY: {e:?}"); + s.set_nodelay(true).map_err(|_e| { + test_debug!("Failed to set TCP_NODELAY: {_e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|e| { - test_debug!("Failed to set TCP read timeout: {e:?}"); + .map_err(|_e| { + test_debug!("Failed to set TCP read timeout: {_e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|e| { - test_debug!("Failed to set TCP write timeout: {e:?}"); + .map_err(|_e| { + test_debug!("Failed to set TCP write timeout: {_e:?}"); btc_error::ConnectionError })?;