Skip to content

Commit

Permalink
spellcheck: fix issues in tools (#12854)
Browse files Browse the repository at this point in the history
Fix all spellchecker errors in `nearcore/tools`.
  • Loading branch information
Trisfald authored Jan 31, 2025
1 parent 72999fa commit 33b4907
Show file tree
Hide file tree
Showing 30 changed files with 112 additions and 83 deletions.
23 changes: 23 additions & 0 deletions cspell.json
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
"addrs",
"adversenet",
"AFAIU",
"AFAICT",
"akhi",
"Aleksandr",
"allocs",
Expand All @@ -29,6 +30,7 @@
"archivals",
"autofail",
"backpressure",
"backtest",
"Banasik",
"BASEPOINT",
"behaviour",
Expand All @@ -43,13 +45,15 @@
"bufbuild",
"bytesize",
"calimero",
"camino",
"chacha",
"Checkpointing",
"chrono",
"Chudas",
"Clippy",
"cloneable",
"clusterfuzz",
"cmdline",
"codegen",
"colddb",
"Condvar",
Expand All @@ -72,6 +76,7 @@
"demuxed",
"Deque",
"desynchronized",
"devnet",
"DEVNOTE",
"Diop",
"disktrie",
Expand All @@ -83,6 +88,8 @@
"dontcare",
"doomslug",
"doomslugs",
"ellipsifies",
"ellipsify",
"fastforward",
"finalizable",
"flamegraph",
Expand All @@ -94,6 +101,7 @@
"Gbit",
"gced",
"Ggas",
"Giga",
"gprusak",
"hashlib",
"icount",
Expand All @@ -106,15 +114,18 @@
"itoa",
"jakmeier",
"jbajic",
"journalctl",
"Justfile",
"kaiching",
"Kbps",
"keccak",
"keypair",
"keypairs",
"Kgas",
"kickout",
"Kickouts",
"kkuuue",
"kmerge",
"libc",
"libcore",
"libfuzzer",
Expand Down Expand Up @@ -145,6 +156,7 @@
"Millinear",
"millis",
"mixeddb",
"Mgas",
"moar",
"mocknet",
"multiexp",
Expand Down Expand Up @@ -178,6 +190,7 @@
"ords",
"pagodaplatform",
"pathlib",
"patternfly",
"peekable",
"peermanager",
"perc",
Expand All @@ -189,6 +202,7 @@
"pkill",
"pmap",
"postprocesses",
"posvyatokum",
"Prefetcher",
"prefetchers",
"prefunded",
Expand All @@ -202,6 +216,7 @@
"PYTHONPATH",
"QUIC",
"RAII",
"RCPTS",
"readwrite",
"rebalance",
"rebalances",
Expand All @@ -212,6 +227,7 @@
"refcounting",
"reindexing",
"replayability",
"replayable",
"replaydb",
"repr",
"reqwest",
Expand All @@ -225,6 +241,7 @@
"restake",
"restaked",
"restaker",
"restakes",
"Restaking",
"retryable",
"ripemd",
Expand Down Expand Up @@ -297,6 +314,7 @@
"unittests",
"unlabel",
"unorphaned",
"unrequested",
"unstake",
"unstaked",
"unstakes",
Expand All @@ -316,14 +334,19 @@
"wasmer",
"wasms",
"Wasmtime",
"webfonts",
"webrtc",
"wmem",
"Xarrow",
"xarrows",
"xoraddr",
"xorshift",
"Xwrapper",
"yansi",
"yapf",
"yocto",
"yoctonear",
"zadd",
"zstd",
"Zulip",
"deser"
Expand Down
2 changes: 2 additions & 0 deletions tools/amend-genesis/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -587,6 +587,7 @@ mod test {
let genesis_config = GenesisConfig {
protocol_version: PROTOCOL_VERSION,
genesis_time: from_timestamp(Clock::real().now_utc().unix_timestamp_nanos() as u64),
// cspell:words rusttestnet
chain_id: "rusttestnet".to_string(),
genesis_height: 0,
num_block_producer_seats: near_chain_configs::NUM_BLOCK_PRODUCER_SEATS,
Expand Down Expand Up @@ -694,6 +695,7 @@ mod test {
}
}

// cspell:words SQDK Tsena Hvcnutu
static TEST_CASES: &[TestCase] = &[
// first one adds one validator (foo2), bumps up another's balance (foo0), and adds an extra account (extra-account.near)
TestCase {
Expand Down
2 changes: 1 addition & 1 deletion tools/cold-store/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ for archival storage.
Run that binary for at least `gc_num_epochs_to_keep`.
Epochs are larger on testnet/mainnet, so just give it a few days.
You can use `sudo systemctl start neard` to run `/home/ubuntu/neard`
and `jornalctl -u neard` to check logs.
and `journalctl -u neard` to check logs.
Be careful with what binary is at `/home/ubuntu/neard`.
**TODO** some kind of system to maintain a bunch of local binaries.

Expand Down
4 changes: 2 additions & 2 deletions tools/cold-store/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ enum SubCommand {
/// You can provide maximum depth and/or maximum number of vertices to traverse for each root.
/// Trie is traversed using DFS with randomly shuffled kids for every node.
CheckStateRoot(CheckStateRootCmd),
/// Modifies cold db from config to be considered not initialised.
/// Modifies cold db from config to be considered not initialized.
/// Doesn't actually delete any data, except for HEAD and COLD_HEAD in BlockMisc.
ResetCold(ResetColdCmd),
}
Expand Down Expand Up @@ -361,7 +361,7 @@ impl PrepareHotCmd {

// TODO may be worth doing some simple sanity check that the rpc store
// and the cold store contain the same chain. Keep in mind that the
// responsibility of ensuring that the rpc backupd can be trusted lies
// responsibility of ensuring that the rpc backup can be trusted lies
// with the node owner still. We don't want to do a full check here
// as it would take too long.

Expand Down
2 changes: 1 addition & 1 deletion tools/congestion-model/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ fn run_model(
let mut max_queues = ShardQueueLengths::default();

// Set the start time to a half hour ago to make it visible by default in
// grafana. Each round is 1 virtual second so hald an hour is good for
// grafana. Each round is 1 virtual second so half an hour is good for
// looking at a maximum of 1800 rounds, beyond that you'll need to customize
// the grafana time range.
let start_time = Utc::now() - Duration::from_secs(1 * 60 * 60);
Expand Down
2 changes: 1 addition & 1 deletion tools/congestion-model/src/strategy/global_tx_stop.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ impl crate::CongestionStrategy for GlobalTxStopShard {
}
}

// stop accepting transacions when any shard is congested
// stop accepting transactions when any shard is congested
if !any_shard_congested {
while ctx.gas_burnt() < TX_GAS_LIMIT {
if let Some(tx) = ctx.incoming_transactions().pop_front() {
Expand Down
8 changes: 4 additions & 4 deletions tools/database/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,16 @@

A set of tools useful when working with the underlying database.

## Analyse data size distribution
## Analyze data size distribution

The analyse database script provides an efficient way to assess the size distribution
The analyze database script provides an efficient way to assess the size distribution
of keys and values within RocksDB.

### Usage

To run the script, use the following example:
```bash
cargo run --bin neard -- --home /home/ubuntu/.near database analyse-data-size-distribution --column State --top_k 50
cargo run --bin neard -- --home /home/ubuntu/.near database analyze-data-size-distribution --column State --top_k 50
```
The arguments are as follows:

Expand Down Expand Up @@ -78,7 +78,7 @@ cargo run --bin neard -- database compact-database

Makes a copy of a DB (hot store only) at a specified location. If the
destination is within the same filesystem, the copy will be made instantly and
take no additional disk space due to hardlinking all the files.
take no additional disk space due to hard-linking all the files.

Example usage:
```bash
Expand Down
1 change: 1 addition & 0 deletions tools/database/src/analyze_contract_sizes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ use std::sync::Arc;
pub(crate) struct AnalyzeContractSizesCommand {
/// Show top N contracts by size.
#[arg(short, long, default_value_t = 50)]
// cspell:words topn
topn: usize,

/// Compress contract code before calculating size.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ use strum::IntoEnumIterator;
use crate::utils::{open_rocksdb, resolve_column};

#[derive(Parser)]
pub(crate) struct AnalyseDataSizeDistributionCommand {
/// If specified only this column will be analysed
pub(crate) struct AnalyzeDataSizeDistributionCommand {
/// If specified only this column will be analyzed
#[arg(short, long)]
column: Option<String>,

Expand Down Expand Up @@ -189,7 +189,7 @@ fn get_column_families(input_col: &Option<String>) -> anyhow::Result<Vec<DBCol>>
}
}

impl AnalyseDataSizeDistributionCommand {
impl AnalyzeDataSizeDistributionCommand {
pub(crate) fn run(&self, home: &PathBuf) -> anyhow::Result<()> {
let db = open_rocksdb(home, near_store::Mode::ReadOnly)?;
let column_families = get_column_families(&self.column)?;
Expand Down
24 changes: 12 additions & 12 deletions tools/database/src/analyze_delayed_receipt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,15 @@ use nearcore::open_storage;
/// Analyze delayed receipts in a piece of history of the blockchain to understand congestion of each shard
#[derive(Parser)]
pub(crate) struct AnalyzeDelayedReceiptCommand {
/// Analyse the last N blocks in the blockchain
/// Analyze the last N blocks in the blockchain
#[arg(long)]
last_blocks: Option<u64>,

/// Analyse blocks from the given block height, inclusive
/// Analyze blocks from the given block height, inclusive
#[arg(long)]
from_block_height: Option<BlockHeight>,

/// Analyse blocks up to the given block height, inclusive
/// Analyze blocks up to the given block height, inclusive
#[arg(long)]
to_block_height: Option<BlockHeight>,
}
Expand Down Expand Up @@ -65,7 +65,7 @@ impl AnalyzeDelayedReceiptCommand {
FlatStorageManager::new(store.flat_store()),
StateSnapshotConfig::default(),
);
// Create an iterator over the blocks that should be analysed
// Create an iterator over the blocks that should be analyzed
let blocks_iter_opt = make_block_iterator_from_command_args(
CommandArgs {
last_blocks: self.last_blocks,
Expand All @@ -84,16 +84,16 @@ impl AnalyzeDelayedReceiptCommand {
};

let mut blocks_count: usize = 0;
let mut first_analysed_block: Option<(BlockHeight, CryptoHash)> = None;
let mut last_analysed_block: Option<(BlockHeight, CryptoHash)> = None;
let mut first_analyzed_block: Option<(BlockHeight, CryptoHash)> = None;
let mut last_analyzed_block: Option<(BlockHeight, CryptoHash)> = None;
let mut shard_id_to_congested = HashMap::new();

for block in blocks_iter {
blocks_count += 1;
if first_analysed_block.is_none() {
first_analysed_block = Some((block.header().height(), *block.hash()));
if first_analyzed_block.is_none() {
first_analyzed_block = Some((block.header().height(), *block.hash()));
}
last_analysed_block = Some((block.header().height(), *block.hash()));
last_analyzed_block = Some((block.header().height(), *block.hash()));
let shard_layout = epoch_manager.get_shard_layout(block.header().epoch_id()).unwrap();

for chunk_header in block.chunks().iter_deprecated() {
Expand All @@ -115,11 +115,11 @@ impl AnalyzeDelayedReceiptCommand {
}
}

println!("Analysed {} blocks between:", blocks_count);
if let Some((block_height, block_hash)) = first_analysed_block {
println!("Analyzed {} blocks between:", blocks_count);
if let Some((block_height, block_hash)) = first_analyzed_block {
println!("Block: height = {block_height}, hash = {block_hash}");
}
if let Some((block_height, block_hash)) = last_analysed_block {
if let Some((block_height, block_hash)) = last_analyzed_block {
println!("Block: height = {block_height}, hash = {block_hash}");
}

Expand Down
Loading

0 comments on commit 33b4907

Please sign in to comment.