Skip to content

Commit

Permalink
Less cloning and pattern simplifications (#3216)
Browse files Browse the repository at this point in the history
* Less cloning and pattern simplifications

* Revert inclusive range and remove unnecessary Error:From
  • Loading branch information
quentinlesceller authored Feb 5, 2020
1 parent a41965e commit c4e6971
Show file tree
Hide file tree
Showing 34 changed files with 153 additions and 263 deletions.
14 changes: 7 additions & 7 deletions chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ impl Chain {
pow_verifier,
verifier_cache,
archive_mode,
genesis: genesis.header.clone(),
genesis: genesis.header,
};

// DB migrations to be run prior to the chain being used.
Expand Down Expand Up @@ -308,7 +308,7 @@ impl Chain {
// but not yet committed the batch.
// A node shutdown at this point can be catastrophic...
// We prevent this via the stop_lock (see above).
if let Ok(_) = maybe_new_head {
if maybe_new_head.is_ok() {
ctx.batch.commit()?;
}

Expand All @@ -334,7 +334,7 @@ impl Chain {
added: Instant::now(),
};

&self.orphans.add(orphan);
self.orphans.add(orphan);

debug!(
"process_block: orphan: {:?}, # orphans {}{}",
Expand Down Expand Up @@ -364,7 +364,7 @@ impl Chain {
b.header.height,
e
);
Err(ErrorKind::Other(format!("{:?}", e).to_owned()).into())
Err(ErrorKind::Other(format!("{:?}", e)).into())
}
},
}
Expand Down Expand Up @@ -807,7 +807,7 @@ impl Chain {
while let Ok(header) = current {
// break out of the while loop when we find a header common
// between the header chain and the current body chain
if let Ok(_) = self.is_on_current_chain(&header) {
if self.is_on_current_chain(&header).is_ok() {
oldest_height = header.height;
oldest_hash = header.hash();
break;
Expand Down Expand Up @@ -992,7 +992,7 @@ impl Chain {

// Move sandbox to overwrite
txhashset.release_backend_files();
txhashset::txhashset_replace(sandbox_dir.clone(), PathBuf::from(self.db_root.clone()))?;
txhashset::txhashset_replace(sandbox_dir, PathBuf::from(self.db_root.clone()))?;

// Re-open on db root dir
txhashset = txhashset::TxHashSet::open(
Expand Down Expand Up @@ -1434,7 +1434,7 @@ impl Chain {
if chain_header.hash() == header.hash() {
Ok(())
} else {
Err(ErrorKind::Other(format!("not on current chain")).into())
Err(ErrorKind::Other("not on current chain".to_string()).into())
}
}

Expand Down
12 changes: 6 additions & 6 deletions chain/src/pipe.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result
if !header.pow.is_primary() && !header.pow.is_secondary() {
return Err(ErrorKind::LowEdgebits.into());
}
if !(ctx.pow_verifier)(header).is_ok() {
if (ctx.pow_verifier)(header).is_err() {
error!(
"pipe: error validating header with cuckoo edge_bits {}",
header.pow.edge_bits(),
Expand Down Expand Up @@ -385,7 +385,7 @@ fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error
let prev = ctx.batch.get_previous_header(&block.header)?;
block
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
.map_err(|e| ErrorKind::InvalidBlockProof(e))?;
.map_err(ErrorKind::InvalidBlockProof)?;
Ok(())
}

Expand Down Expand Up @@ -489,7 +489,7 @@ pub fn rewind_and_apply_header_fork(
) -> Result<(), Error> {
let mut fork_hashes = vec![];
let mut current = header.clone();
while current.height > 0 && !ext.is_on_current_chain(&current, batch).is_ok() {
while current.height > 0 && ext.is_on_current_chain(&current, batch).is_err() {
fork_hashes.push(current.hash());
current = batch.get_previous_header(&current)?;
}
Expand Down Expand Up @@ -530,9 +530,9 @@ pub fn rewind_and_apply_fork(
// Rewind the txhashset extension back to common ancestor based on header MMR.
let mut current = batch.head_header()?;
while current.height > 0
&& !header_extension
&& header_extension
.is_on_current_chain(&current, batch)
.is_ok()
.is_err()
{
current = batch.get_previous_header(&current)?;
}
Expand All @@ -552,7 +552,7 @@ pub fn rewind_and_apply_fork(
for h in fork_hashes {
let fb = batch
.get_block(&h)
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?;
.map_err(|e| ErrorKind::StoreErr(e, "getting forked blocks".to_string()))?;

// Re-verify coinbase maturity along this fork.
verify_coinbase_maturity(&fb, ext, batch)?;
Expand Down
2 changes: 1 addition & 1 deletion chain/src/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ use grin_store as store;
use grin_store::{option_to_not_found, to_key, Error, SerIterator};
use std::sync::Arc;

const STORE_SUBPATH: &'static str = "chain";
const STORE_SUBPATH: &str = "chain";

const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
const BLOCK_PREFIX: u8 = 'b' as u8;
Expand Down
2 changes: 1 addition & 1 deletion chain/src/txhashset/bitmap_accumulator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ impl BitmapAccumulator {
let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1);
let rewind_pos = chunk_pos.saturating_sub(1);
pmmr.rewind(rewind_pos, &Bitmap::create())
.map_err(|e| ErrorKind::Other(e))?;
.map_err(ErrorKind::Other)?;
Ok(())
}

Expand Down
61 changes: 29 additions & 32 deletions chain/src/txhashset/txhashset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,13 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;

const TXHASHSET_SUBDIR: &'static str = "txhashset";
const TXHASHSET_SUBDIR: &str = "txhashset";

const OUTPUT_SUBDIR: &'static str = "output";
const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";
const OUTPUT_SUBDIR: &str = "output";
const RANGE_PROOF_SUBDIR: &str = "rangeproof";
const KERNEL_SUBDIR: &str = "kernel";

const TXHASHSET_ZIP: &'static str = "txhashset_snapshot";
const TXHASHSET_ZIP: &str = "txhashset_snapshot";

/// Convenience wrapper around a single prunable MMR backend.
pub struct PMMRHandle<T: PMMRable> {
Expand All @@ -65,9 +65,9 @@ impl<T: PMMRable> PMMRHandle<T> {
) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(root_dir).join(sub_dir).join(file_name);
fs::create_dir_all(path.clone())?;
let path_str = path.to_str().ok_or(Error::from(ErrorKind::Other(
"invalid file path".to_owned(),
)))?;
let path_str = path
.to_str()
.ok_or_else(|| ErrorKind::Other("invalid file path".to_owned()))?;
let backend = PMMRBackend::new(path_str.to_string(), prunable, version, header)?;
let last_pos = backend.unpruned_size();
Ok(PMMRHandle { backend, last_pos })
Expand All @@ -82,22 +82,22 @@ impl PMMRHandle<BlockHeader> {
if let Some(entry) = header_pmmr.get_data(pos) {
Ok(entry.hash())
} else {
Err(ErrorKind::Other(format!("get header hash by height")).into())
Err(ErrorKind::Other("get header hash by height".to_string()).into())
}
}

/// Get the header hash for the head of the header chain based on current MMR state.
/// Find the last leaf pos based on MMR size and return its header hash.
pub fn head_hash(&self) -> Result<Hash, Error> {
if self.last_pos == 0 {
return Err(ErrorKind::Other(format!("MMR empty, no head")).into());
return Err(ErrorKind::Other("MMR empty, no head".to_string()).into());
}
let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos);
let leaf_pos = pmmr::bintree_rightmost(self.last_pos);
if let Some(entry) = header_pmmr.get_data(leaf_pos) {
Ok(entry.hash())
} else {
Err(ErrorKind::Other(format!("failed to find head hash")).into())
Err(ErrorKind::Other("failed to find head hash".to_string()).into())
}
}
}
Expand Down Expand Up @@ -200,7 +200,7 @@ impl TxHashSet {
commit_index,
})
} else {
Err(ErrorKind::TxHashSetErr(format!("failed to open kernel PMMR")).into())
Err(ErrorKind::TxHashSetErr("failed to open kernel PMMR".to_string()).into())
}
}

Expand Down Expand Up @@ -236,14 +236,14 @@ impl TxHashSet {
height: block_height,
})
} else {
Err(ErrorKind::TxHashSetErr(format!("txhashset hash mismatch")).into())
Err(ErrorKind::TxHashSetErr("txhashset hash mismatch".to_string()).into())
}
} else {
Err(ErrorKind::OutputNotFound.into())
}
}
Err(grin_store::Error::NotFoundErr(_)) => Err(ErrorKind::OutputNotFound.into()),
Err(e) => Err(ErrorKind::StoreErr(e, format!("txhashset unspent check")).into()),
Err(e) => Err(ErrorKind::StoreErr(e, "txhashset unspent check".to_string()).into()),
}
}

Expand Down Expand Up @@ -739,7 +739,7 @@ impl<'a> HeaderExtension<'a> {
if let Some(hash) = self.get_header_hash(pos) {
Ok(batch.get_block_header(&hash)?)
} else {
Err(ErrorKind::Other(format!("get header by height")).into())
Err(ErrorKind::Other("get header by height".to_string()).into())
}
}

Expand All @@ -751,13 +751,13 @@ impl<'a> HeaderExtension<'a> {
batch: &Batch<'_>,
) -> Result<(), Error> {
if header.height > self.head.height {
return Err(ErrorKind::Other(format!("not on current chain, out beyond")).into());
return Err(ErrorKind::Other("not on current chain, out beyond".to_string()).into());
}
let chain_header = self.get_header_by_height(header.height, batch)?;
if chain_header.hash() == header.hash() {
Ok(())
} else {
Err(ErrorKind::Other(format!("not on current chain")).into())
Err(ErrorKind::Other("not on current chain".to_string()).into())
}
}

Expand Down Expand Up @@ -966,7 +966,7 @@ impl<'a> Extension<'a> {
if let Some(hash) = self.output_pmmr.get_hash(pos) {
if hash != input.hash_with_index(pos - 1) {
return Err(
ErrorKind::TxHashSetErr(format!("output pmmr hash mismatch")).into(),
ErrorKind::TxHashSetErr("output pmmr hash mismatch".to_string()).into(),
);
}
}
Expand All @@ -978,7 +978,7 @@ impl<'a> Extension<'a> {
Ok(true) => {
self.rproof_pmmr
.prune(pos)
.map_err(|e| ErrorKind::TxHashSetErr(e))?;
.map_err(ErrorKind::TxHashSetErr)?;
Ok(pos)
}
Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()),
Expand Down Expand Up @@ -1016,13 +1016,13 @@ impl<'a> Extension<'a> {
{
if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
return Err(
ErrorKind::Other(format!("output vs rproof MMRs different sizes")).into(),
ErrorKind::Other("output vs rproof MMRs different sizes".to_string()).into(),
);
}

if output_pos != rproof_pos {
return Err(
ErrorKind::Other(format!("output vs rproof MMRs different pos")).into(),
ErrorKind::Other("output vs rproof MMRs different pos".to_string()).into(),
);
}
}
Expand Down Expand Up @@ -1067,10 +1067,10 @@ impl<'a> Extension<'a> {
let header = batch.get_block_header(&self.head.last_block_h)?;
self.output_pmmr
.snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?;
.map_err(ErrorKind::Other)?;
self.rproof_pmmr
.snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?;
.map_err(ErrorKind::Other)?;
Ok(())
}

Expand Down Expand Up @@ -1244,7 +1244,7 @@ impl<'a> Extension<'a> {

if self.head.height == 0 {
let zero_commit = secp_static::commit_to_zero_value();
return Ok((zero_commit.clone(), zero_commit.clone()));
return Ok((zero_commit, zero_commit));
}

// The real magicking happens here. Sum of kernel excesses should equal
Expand Down Expand Up @@ -1312,7 +1312,7 @@ impl<'a> Extension<'a> {
let kernel = self
.kernel_pmmr
.get_data(n)
.ok_or::<Error>(ErrorKind::TxKernelNotFound.into())?;
.ok_or_else(|| ErrorKind::TxKernelNotFound)?;
tx_kernels.push(kernel);
}

Expand Down Expand Up @@ -1379,7 +1379,7 @@ impl<'a> Extension<'a> {
}

// remaining part which not full of 1000 range proofs
if proofs.len() > 0 {
if !proofs.is_empty() {
Output::batch_verify_proofs(&commits, &proofs)?;
commits.clear();
proofs.clear();
Expand Down Expand Up @@ -1509,7 +1509,7 @@ pub fn zip_write(
header: &BlockHeader,
) -> Result<(), Error> {
debug!("zip_write on path: {:?}", root_dir);
let txhashset_path = root_dir.clone().join(TXHASHSET_SUBDIR);
let txhashset_path = root_dir.join(TXHASHSET_SUBDIR);
fs::create_dir_all(&txhashset_path)?;

// Explicit list of files to extract from our zip archive.
Expand All @@ -1531,12 +1531,9 @@ pub fn txhashset_replace(from: PathBuf, to: PathBuf) -> Result<(), Error> {
clean_txhashset_folder(&to);

// rename the 'from' folder as the 'to' folder
if let Err(e) = fs::rename(
from.clone().join(TXHASHSET_SUBDIR),
to.clone().join(TXHASHSET_SUBDIR),
) {
if let Err(e) = fs::rename(from.join(TXHASHSET_SUBDIR), to.join(TXHASHSET_SUBDIR)) {
error!("hashset_replace fail on {}. err: {}", TXHASHSET_SUBDIR, e);
Err(ErrorKind::TxHashSetErr(format!("txhashset replacing fail")).into())
Err(ErrorKind::TxHashSetErr("txhashset replacing fail".to_string()).into())
} else {
Ok(())
}
Expand Down
4 changes: 2 additions & 2 deletions chain/src/txhashset/utxo_view.rs
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ impl<'a> UTXOView<'a> {

// Find the "cutoff" pos in the output MMR based on the
// header from 1,000 blocks ago.
let cutoff_height = height.checked_sub(global::coinbase_maturity()).unwrap_or(0);
let cutoff_height = height.saturating_sub(global::coinbase_maturity());
let cutoff_header = self.get_header_by_height(cutoff_height, batch)?;
let cutoff_pos = cutoff_header.output_mmr_size;

Expand Down Expand Up @@ -168,7 +168,7 @@ impl<'a> UTXOView<'a> {
let header = batch.get_block_header(&hash)?;
Ok(header)
} else {
Err(ErrorKind::Other(format!("get header by height")).into())
Err(ErrorKind::Other("get header by height".to_string()).into())
}
}
}
2 changes: 1 addition & 1 deletion core/src/consensus.rs
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ pub fn header_version(height: u64) -> HeaderVersion {
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
return height < 3 * HARD_FORK_INTERVAL && version == header_version(height);
height < 3 * HARD_FORK_INTERVAL && version == header_version(height)
}

/// Number of blocks used to calculate difficulty adjustments
Expand Down
2 changes: 1 addition & 1 deletion core/src/core/id.rs
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ impl<H: Hashed> ShortIdentifiable for H {
}

/// Short id for identifying inputs/outputs/kernels
#[derive(Clone, Serialize, Deserialize, Hash)]
#[derive(Clone, Serialize, Deserialize)]
pub struct ShortId([u8; 6]);

impl DefaultHashable for ShortId {}
Expand Down
2 changes: 1 addition & 1 deletion core/src/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ where
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderInfo::from_ts_diff(last_ts, last_diff.clone()));
last_n.push(HeaderInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
Expand Down
2 changes: 1 addition & 1 deletion core/src/libtx/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ where

// Store the kernel offset (k2) on the tx.
// Commitments will sum correctly when accounting for the offset.
tx.offset = k2.clone();
tx.offset = k2;

// Set the kernel on the tx.
let tx = tx.replace_kernel(kern);
Expand Down
Loading

0 comments on commit c4e6971

Please sign in to comment.