diff --git a/Cargo.toml b/Cargo.toml
index 6af8b4e42..ffb135a98 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,14 +35,14 @@ byteorder = "1.3.4"
 cpu-time = "1.0.0"
 criterion = "0.3.2"
 dialoguer = "0.10.0"
-fdlimit = "0.2.0"
+fdlimit = "0.3"
 ff = "0.13.0"
 fil_logger = "0.1.6"
 generic-array = "0.14.4"
 gperftools = "0.2"
 hex = "0.4.2"
 humansize = "1.1.0"
-itertools = "0.10.3"
+itertools = "0.13"
 lazy_static = "1.2"
 log = "0.4.7"
 memmap2 = "0.5.6"
@@ -58,5 +58,5 @@ serde_json = "1.0"
 sha2 = "0.10.2"
 structopt = "0.3.12"
 tempfile = "3"
-thiserror = "1.0.6"
+thiserror = "2"
 typenum = "1.11.2"
\ No newline at end of file
diff --git a/FUNDING.json b/FUNDING.json
index e96118887..9471446da 100644
--- a/FUNDING.json
+++ b/FUNDING.json
@@ -2,6 +2,9 @@
   "drips": {
     "ethereum": {
       "ownedBy": "0xDDa061De7284C07B02bf26E12874171eDB95D987"
+    },
+    "filecoin": {
+      "ownedBy": "0xDDa061De7284C07B02bf26E12874171eDB95D987"
     }
   }
 }
diff --git a/build.rs b/build.rs
index 522e223b9..4bf21fe81 100644
--- a/build.rs
+++ b/build.rs
@@ -1,3 +1,5 @@
+println!("cargo::rustc-check-cfg=cfg(nightly)");
+
 fn is_compiled_for_64_bit_arch() -> bool {
     cfg!(target_pointer_width = "64")
 }
diff --git a/fil-proofs-param/Cargo.toml b/fil-proofs-param/Cargo.toml
index 22e2387b3..c0560c4da 100644
--- a/fil-proofs-param/Cargo.toml
+++ b/fil-proofs-param/Cargo.toml
@@ -29,7 +29,7 @@ lazy_static.workspace = true
 log.workspace = true
 pbr = "1.0"
 rand.workspace = true
-reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "native-tls-vendored"] }
+reqwest = { version = "0.12", default-features = false, features = ["blocking", "native-tls-vendored"] }
 serde_json.workspace = true
 structopt.workspace = true
 tar = "0.4.26"
diff --git a/fil-proofs-param/src/bin/paramfetch.rs b/fil-proofs-param/src/bin/paramfetch.rs
index e6b447073..3bb3576a4 100644
--- a/fil-proofs-param/src/bin/paramfetch.rs
+++ b/fil-proofs-param/src/bin/paramfetch.rs
@@ -51,9 +51,8 @@ struct FetchProgress<R> {
 
 impl<R: Read> Read for FetchProgress<R> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        self.reader.read(buf).map(|n| {
-            self.progress_bar.add(n as u64);
-            n
+        self.reader.read(buf).inspect(|n| {
+            self.progress_bar.add(*n as u64);
         })
     }
 }
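The `FetchProgress` change just above swaps `map` for `Result::inspect`, stable since Rust 1.76: the closure receives a shared reference to the success value (hence `*n`) and the `Result` passes through unchanged, so the byte count no longer needs to be re-returned by hand. A minimal, self-contained sketch of the same pattern, with a hypothetical `CountingReader` standing in for the progress bar:

    use std::io::{self, Read};

    // Hypothetical wrapper type, for illustration only.
    struct CountingReader<R> {
        inner: R,
        total: u64,
    }

    impl<R: Read> Read for CountingReader<R> {
        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
            // inspect() hands the closure a &usize and returns the Result as-is.
            self.inner.read(buf).inspect(|n| self.total += *n as u64)
        }
    }

    fn main() -> io::Result<()> {
        let mut reader = CountingReader { inner: &b"hello"[..], total: 0 };
        let mut buf = [0u8; 8];
        let n = reader.read(&mut buf)?;
        assert_eq!(n as u64, reader.total);
        Ok(())
    }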
diff --git a/fil-proofs-param/tests/paramfetch/session.rs b/fil-proofs-param/tests/paramfetch/session.rs
index 9397a9cd9..aa595dd21 100644
--- a/fil-proofs-param/tests/paramfetch/session.rs
+++ b/fil-proofs-param/tests/paramfetch/session.rs
@@ -77,7 +77,7 @@ impl ParamFetchSessionBuilder {
                 s.push_str(&wl.join(","));
                 s
             })
-            .unwrap_or_else(|| "".to_string());
+            .unwrap_or_default();
 
         let json_argument = if self.manifest.is_some() {
             format!("--json={:?}", self.manifest.expect("missing manifest"))
diff --git a/fil-proofs-tooling/src/bin/benchy/porep.rs b/fil-proofs-tooling/src/bin/benchy/porep.rs
index 6658b5998..697cf178b 100644
--- a/fil-proofs-tooling/src/bin/benchy/porep.rs
+++ b/fil-proofs-tooling/src/bin/benchy/porep.rs
@@ -94,7 +94,7 @@ fn run_pre_commit_phases<Tree: 'static + MerkleTreeTrait>(
         OpenOptions::new().read(true).write(true).open(&staged_file_path)
     } else {
         info!("*** Creating staged file");
-        OpenOptions::new().read(true).write(true).create(true).open(&staged_file_path)
+        OpenOptions::new().read(true).write(true).create(true).truncate(true).open(&staged_file_path)
     }?;
 
     let sealed_file_path = cache_dir.join(SEALED_FILE);
@@ -103,7 +103,7 @@ fn run_pre_commit_phases<Tree: 'static + MerkleTreeTrait>(
         OpenOptions::new().read(true).write(true).open(&sealed_file_path)
     } else {
         info!("*** Creating sealed file");
-        OpenOptions::new().read(true).write(true).create(true).open(&sealed_file_path)
+        OpenOptions::new().read(true).write(true).create(true).truncate(true).open(&sealed_file_path)
     }?;
 
     let sector_size_unpadded_bytes_amount =
@@ -120,7 +120,7 @@ fn run_pre_commit_phases<Tree: 'static + MerkleTreeTrait>(
         .collect();
 
     info!("*** Created piece file");
-    let mut piece_file = OpenOptions::new().read(true).write(true).create(true).open(&piece_file_path)?;
+    let mut piece_file = OpenOptions::new().read(true).write(true).create(true).truncate(true).open(&piece_file_path)?;
     piece_file.write_all(&piece_bytes)?;
     piece_file.sync_all()?;
     piece_file.rewind()?;
diff --git a/fil-proofs-tooling/src/bin/benchy/window_post.rs b/fil-proofs-tooling/src/bin/benchy/window_post.rs
index cdda6f8f4..59949ef7f 100644
--- a/fil-proofs-tooling/src/bin/benchy/window_post.rs
+++ b/fil-proofs-tooling/src/bin/benchy/window_post.rs
@@ -101,7 +101,7 @@ fn run_pre_commit_phases<Tree: 'static + MerkleTreeTrait>(
         OpenOptions::new().read(true).write(true).open(&staged_file_path)
     } else {
         info!("*** Creating staged file");
-        OpenOptions::new().read(true).write(true).create(true).open(&staged_file_path)
+        OpenOptions::new().read(true).write(true).create(true).truncate(true).open(&staged_file_path)
     }?;
 
     let sealed_file_path = cache_dir.join(SEALED_FILE);
@@ -110,7 +110,7 @@ fn run_pre_commit_phases<Tree: 'static + MerkleTreeTrait>(
         OpenOptions::new().read(true).write(true).open(&sealed_file_path)
     } else {
         info!("*** Creating sealed file");
-        OpenOptions::new().read(true).write(true).create(true).open(&sealed_file_path)
+        OpenOptions::new().read(true).write(true).create(true).truncate(true).open(&sealed_file_path)
     }?;
 
     let sector_size_unpadded_bytes_amount =
@@ -128,7 +128,7 @@ fn run_pre_commit_phases<Tree: 'static + MerkleTreeTrait>(
         .collect();
 
     info!("*** Created piece file");
-    let mut piece_file = OpenOptions::new().read(true).write(true).create(true).open(&piece_file_path)?;
+    let mut piece_file = OpenOptions::new().read(true).write(true).create(true).truncate(true).open(&piece_file_path)?;
     piece_file.write_all(&piece_bytes)?;
     piece_file.sync_all()?;
     piece_file.rewind()?;
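The `create(true)` to `create(true).truncate(true)` edits in these two benchy binaries (and the matching ones later in the patch) satisfy clippy's `suspicious_open_options` lint: `create(true)` alone reuses an existing file without clearing it, so stale bytes beyond the newly written region survive. A self-contained sketch of the failure mode the lint guards against, using a throwaway temp file:

    use std::fs::OpenOptions;
    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let path = std::env::temp_dir().join("truncate_demo.bin");
        std::fs::write(&path, b"old old old")?;

        // Without truncate(true), this open would keep all eleven old bytes
        // and writing "new" would leave "new old old" on disk.
        let mut f = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(true)
            .open(&path)?;
        f.write_all(b"new")?;

        assert_eq!(std::fs::read(&path)?, b"new");
        std::fs::remove_file(&path)?;
        Ok(())
    }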
diff --git a/fil-proofs-tooling/src/bin/benchy/winning_post.rs b/fil-proofs-tooling/src/bin/benchy/winning_post.rs
index fd2a713b9..f0dcb92a5 100644
--- a/fil-proofs-tooling/src/bin/benchy/winning_post.rs
+++ b/fil-proofs-tooling/src/bin/benchy/winning_post.rs
@@ -63,8 +63,8 @@ pub fn run_fallback_post_bench<Tree: 'static + MerkleTreeTrait>(
         create_replica::<Tree>(sector_size, fake_replica, api_version, api_features);
 
     // Store the replica's private and publicly facing info for proving and verifying respectively.
-    let pub_replica_info = vec![(sector_id, replica_output.public_replica_info.clone())];
-    let priv_replica_info = vec![(sector_id, replica_output.private_replica_info.clone())];
+    let pub_replica_info = [(sector_id, replica_output.public_replica_info.clone())];
+    let priv_replica_info = [(sector_id, replica_output.private_replica_info.clone())];
 
     let post_config = PoStConfig {
         sector_size: sector_size.into(),
diff --git a/fil-proofs-tooling/src/bin/fdlimit/main.rs b/fil-proofs-tooling/src/bin/fdlimit/main.rs
index 551b3a288..0e278c54b 100644
--- a/fil-proofs-tooling/src/bin/fdlimit/main.rs
+++ b/fil-proofs-tooling/src/bin/fdlimit/main.rs
@@ -1,6 +1,15 @@
 fn main() {
     fil_logger::init();
 
-    let res = fdlimit::raise_fd_limit().expect("failed to raise fd limit");
-    println!("File descriptor limit was raised to {}", res);
+    match fdlimit::raise_fd_limit() {
+        Ok(fdlimit::Outcome::LimitRaised { from, to }) => {
+            println!("File descriptor limit was raised from {from} to {to}");
+        }
+        Ok(fdlimit::Outcome::Unsupported) => {
+            panic!("failed to raise fd limit: unsupported")
+        }
+        Err(e) => {
+            panic!("failed to raise fd limit: {}", e)
+        }
+    }
 }
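fdlimit 0.3 changed `raise_fd_limit` from returning `Option<u64>` to `Result<Outcome, Error>`, where `Outcome::LimitRaised { from, to }` reports the old and new soft limits and `Outcome::Unsupported` is returned on platforms without raisable limits; the binary above and `proof.rs` at the end of this patch are both updated to match on the new type. A minimal sketch of a non-panicking caller (the message strings are illustrative):

    fn main() {
        // fdlimit 0.3: Result<Outcome, Error> instead of the old Option<u64>.
        match fdlimit::raise_fd_limit() {
            Ok(fdlimit::Outcome::LimitRaised { from, to }) => {
                println!("fd limit raised from {from} to {to}");
            }
            Ok(fdlimit::Outcome::Unsupported) => {
                eprintln!("raising the fd limit is unsupported on this platform");
            }
            Err(e) => eprintln!("could not raise fd limit: {e}"),
        }
    }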
diff --git a/fil-proofs-tooling/src/bin/gpu-cpu-test/main.rs b/fil-proofs-tooling/src/bin/gpu-cpu-test/main.rs
index 8d2e63446..0242a43b8 100644
--- a/fil-proofs-tooling/src/bin/gpu-cpu-test/main.rs
+++ b/fil-proofs-tooling/src/bin/gpu-cpu-test/main.rs
@@ -104,7 +104,7 @@ fn thread_fun(
 ) -> RunInfo {
     let timing = Instant::now();
     let mut iteration = 0;
-    while iteration < std::u8::MAX {
+    while iteration < u8::MAX {
         info!("iter {}", iteration);
 
         // This is the higher priority proof, get it on the GPU even if there is one running
diff --git a/fil-proofs-tooling/src/shared.rs b/fil-proofs-tooling/src/shared.rs
index 2124f0940..ca6c76aef 100644
--- a/fil-proofs-tooling/src/shared.rs
+++ b/fil-proofs-tooling/src/shared.rs
@@ -298,7 +298,7 @@ pub fn create_replicas<Tree: 'static + MerkleTreeTrait>(
     let priv_infos = sealed_files
         .iter()
         .zip(seal_pre_commit_outputs.return_value.iter())
-        .zip(cache_dirs.into_iter())
+        .zip(cache_dirs)
         .map(|((sealed_file, seal_pre_commit_output), cache_dir)| {
             PrivateReplicaInfo::new(
                 sealed_file.to_path_buf(),
diff --git a/filecoin-proofs/src/api/mod.rs b/filecoin-proofs/src/api/mod.rs
index 7c29145a6..0789f46fd 100644
--- a/filecoin-proofs/src/api/mod.rs
+++ b/filecoin-proofs/src/api/mod.rs
@@ -367,7 +367,7 @@ where
 /// # Arguments
 ///
 /// * `source` - a readable source of unprocessed piece bytes. The piece's commitment will be
-/// generated for the bytes read from the source plus any added padding.
+///   generated for the bytes read from the source plus any added padding.
 /// * `piece_size` - the number of unpadded user-bytes which can be read from source before EOF.
 pub fn generate_piece_commitment<T: Read>(
     source: T,
diff --git a/filecoin-proofs/src/chunk_iter.rs b/filecoin-proofs/src/chunk_iter.rs
index bc0038f48..3329fc5f3 100644
--- a/filecoin-proofs/src/chunk_iter.rs
+++ b/filecoin-proofs/src/chunk_iter.rs
@@ -38,7 +38,7 @@ impl<R: Read> Iterator for ChunkIterator<R> {
         match self.reader.read_many(&mut buffer) {
             Ok(bytes_read) if bytes_read == self.chunk_size => Some(Ok(buffer)),
             // A position of 0 indicates end of file.
-            Ok(bytes_read) if bytes_read == 0 => None,
+            Ok(0) => None,
             Ok(bytes_read) => Some(Ok(buffer[..bytes_read].to_vec())),
             Err(error) => Some(Err(error)),
         }
diff --git a/filecoin-proofs/src/types/private_replica_info.rs b/filecoin-proofs/src/types/private_replica_info.rs
index ea3b5553a..920d9977b 100644
--- a/filecoin-proofs/src/types/private_replica_info.rs
+++ b/filecoin-proofs/src/types/private_replica_info.rs
@@ -78,7 +78,7 @@ impl<Tree: MerkleTreeTrait> Ord for PrivateReplicaInfo<Tree> {
 
 impl<Tree: MerkleTreeTrait> PartialOrd for PrivateReplicaInfo<Tree> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.comm_r.as_ref().partial_cmp(other.comm_r.as_ref())
+        Some(self.cmp(other))
     }
 }
 
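`PrivateReplicaInfo` implements `Ord`, and clippy's `non_canonical_partial_ord_impl` lint flags a hand-written `partial_cmp` next to it, because two independent implementations can silently disagree about the ordering. Delegating with `Some(self.cmp(other))` makes `Ord` the single source of truth. The shape of the fix on a stand-in type:

    use std::cmp::Ordering;

    // Stand-in for illustration; the real type orders by commitment bytes.
    #[derive(PartialEq, Eq)]
    struct Info {
        comm_r: [u8; 32],
    }

    impl Ord for Info {
        fn cmp(&self, other: &Self) -> Ordering {
            self.comm_r.cmp(&other.comm_r)
        }
    }

    impl PartialOrd for Info {
        // Canonical form: derive the answer from Ord so the two never diverge.
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

    fn main() {
        let a = Info { comm_r: [0; 32] };
        let b = Info { comm_r: [1; 32] };
        assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
    }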
diff --git a/filecoin-proofs/tests/api.rs b/filecoin-proofs/tests/api.rs
index 2c8069047..ad9495577 100644
--- a/filecoin-proofs/tests/api.rs
+++ b/filecoin-proofs/tests/api.rs
@@ -653,7 +653,7 @@ fn test_seal_lifecycle_64gib_porep_id_v1_2_ni_top_8_8_2_api_v1_2() -> Result<()>
 
 #[cfg(feature = "big-tests")]
 #[test]
-fn test_seal_lifecycle_upgrade_64gib_top_8_8_2_v1_1() -> Result<()> {
+fn test_seal_lifecycle_upgrade_64gib_top_8_8_2_v1_2() -> Result<()> {
     let porep_config = PoRepConfig::new_groth16(
         SECTOR_SIZE_64_GIB,
         ARBITRARY_POREP_ID_V1_2_0,
@@ -1454,7 +1454,7 @@ fn winning_post<Tree: 'static + MerkleTreeTrait>(
     assert_eq!(challenged_sectors.len(), sector_count);
     assert_eq!(challenged_sectors[0], 0); // with a sector_count of 1, the only valid index is 0
 
-    let pub_replicas = vec![(sector_id, PublicReplicaInfo::new(comm_r)?)];
+    let pub_replicas = [(sector_id, PublicReplicaInfo::new(comm_r)?)];
     let private_replica_info =
         PrivateReplicaInfo::new(replica.path().into(), comm_r, cache_dir.path().into())?;
 
@@ -1462,7 +1462,7 @@ fn winning_post<Tree: 'static + MerkleTreeTrait>(
     // The following methods of proof generation are functionally equivalent:
     // 1)
     //
-    let priv_replicas = vec![(sector_id, private_replica_info.clone())];
+    let priv_replicas = [(sector_id, private_replica_info.clone())];
     let proof =
         generate_winning_post::<Tree>(&config, &randomness, &priv_replicas[..], prover_id)?;
     let valid =
@@ -2629,6 +2629,7 @@ fn create_seal_for_upgrade<R: Rng, Tree: 'static + MerkleTreeTrait<Hasher = Tree
         .read(true)
         .write(true)
         .create(true)
+        .truncate(true)
         .open(new_sealed_sector_file.path())
         .with_context(|| format!("could not open path={:?}", new_sealed_sector_file.path()))?;
     f_sealed_sector.set_len(new_replica_target_len)?;
@@ -2734,6 +2735,7 @@ fn create_seal_for_upgrade<R: Rng, Tree: 'static + MerkleTreeTrait<Hasher = Tree
         .read(true)
         .write(true)
         .create(true)
+        .truncate(true)
         .open(decoded_sector_file.path())
         .with_context(|| format!("could not open path={:?}", decoded_sector_file.path()))?;
     f_decoded_sector.set_len(decoded_sector_target_len)?;
@@ -2780,6 +2782,7 @@ fn create_seal_for_upgrade<R: Rng, Tree: 'static + MerkleTreeTrait<Hasher = Tree
         .read(true)
         .write(true)
         .create(true)
+        .truncate(true)
         .open(remove_encoded_file.path())
         .with_context(|| format!("could not open path={:?}", remove_encoded_file.path()))?;
     f_remove_encoded.set_len(remove_encoded_target_len)?;
@@ -2895,6 +2898,7 @@ fn create_seal_for_upgrade_aggregation<
         .read(true)
         .write(true)
         .create(true)
+        .truncate(true)
         .open(new_sealed_sector_file.path())
         .with_context(|| format!("could not open path={:?}", new_sealed_sector_file.path()))?;
     f_sealed_sector.set_len(new_replica_target_len)?;
diff --git a/filecoin-proofs/tests/pieces.rs b/filecoin-proofs/tests/pieces.rs
index 48e5a70ed..8c010a069 100644
--- a/filecoin-proofs/tests/pieces.rs
+++ b/filecoin-proofs/tests/pieces.rs
@@ -266,14 +266,14 @@ fn test_verify_padded_pieces() {
         hash(&layer1[10], &layer1[11]), // 4
     ];
 
-    let layer3 = vec![
+    let layer3 = [
        hash(&layer2[0], &layer2[1]), // 8
        hash(&layer2[2], &layer2[3]), // 8
        layer2[4],                    // 8
        hash(&layer2[5], &layer2[6]), // 8
     ];
 
-    let layer4 = vec![
+    let layer4 = [
        hash(&layer3[0], &layer3[1]), // 16
        hash(&layer3[2], &layer3[3]), // 16
     ];
diff --git a/fr32/src/convert.rs b/fr32/src/convert.rs
index 65288ce3f..f057e2229 100644
--- a/fr32/src/convert.rs
+++ b/fr32/src/convert.rs
@@ -12,7 +12,7 @@ pub enum Error {
 /// Invariants:
 /// - Value of each 32-byte chunks MUST represent valid Frs.
 /// - Total length must be a multiple of 32.
-/// That is to say: each 32-byte chunk taken alone must be a valid Fr32.
+///   That is to say: each 32-byte chunk taken alone must be a valid Fr32.
 pub type Fr32Vec = Vec<u8>;
 
 /// Array whose little-endian value represents an Fr.
diff --git a/fr32/src/padding.rs b/fr32/src/padding.rs
index 21023b1b3..861ec53f8 100644
--- a/fr32/src/padding.rs
+++ b/fr32/src/padding.rs
@@ -563,6 +563,7 @@ need to handle the potential bit-level misalignments:
 // offset and num_bytes are based on the unpadded data, so
 // if [0, 1, ..., 255] was the original unpadded data, offset 3 and len 4 would return
 // [3, 4, 5, 6].
+#[allow(clippy::multiple_bound_locations)]
 pub fn write_unpadded<W: ?Sized>(
     source: &[u8],
     target: &mut W,
@@ -630,6 +631,7 @@ The reader will generally operate with bit precision, even if the padded
 layout is byte-aligned (no extra bits) the data inside it isn't (since we
 pad at the bit-level).
 **/
+#[allow(clippy::multiple_bound_locations)]
 fn write_unpadded_aux<W: ?Sized>(
     padding_map: &PaddingMap,
     source: &[u8],
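The two `#[allow(clippy::multiple_bound_locations)]` attributes above, like the one on `pick` further down, address a lint added in recent Rust releases that fires when one generic parameter carries bounds in both the angle-bracket list and a `where` clause (here, presumably `W: ?Sized` inline with the writer bound in `where`). Upstream opted to allow the lint rather than reshuffle the bounds. A hypothetical function showing the flagged pattern:

    use std::io::Write;

    // `W` is bounded in two places, which is exactly what the lint flags;
    // the allow keeps the split as-is instead of merging the bounds.
    #[allow(clippy::multiple_bound_locations)]
    fn write_all_chunks<W: ?Sized>(target: &mut W, chunks: &[&[u8]]) -> std::io::Result<()>
    where
        W: Write,
    {
        for chunk in chunks {
            target.write_all(chunk)?;
        }
        Ok(())
    }

    fn main() -> std::io::Result<()> {
        let mut buf: Vec<u8> = Vec::new();
        write_all_chunks(&mut buf, &[b"ab", b"cd"])?;
        assert_eq!(buf, b"abcd");
        Ok(())
    }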
diff --git a/rust-toolchain b/rust-toolchain
index 832e9afb6..dbd41264a 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-1.70.0
+1.81.0
diff --git a/storage-proofs-core/Cargo.toml b/storage-proofs-core/Cargo.toml
index f0dfa4058..a8e0ff942 100644
--- a/storage-proofs-core/Cargo.toml
+++ b/storage-proofs-core/Cargo.toml
@@ -22,7 +22,7 @@ blake2b_simd.workspace = true
 blstrs.workspace = true
 byteorder.workspace = true
 cbc = { version = "0.1.2", features = ["std"] }
-config = { version = "0.12.0", default-features = false, features = ["toml"] }
+config = { version = "0.14", default-features = false, features = ["toml"] }
 cpu-time = { workspace = true, optional = true }
 ff.workspace = true
 fs2 = "0.4"
diff --git a/storage-proofs-core/src/gadgets/insertion.rs b/storage-proofs-core/src/gadgets/insertion.rs
index 287fb4f6a..61b75e691 100644
--- a/storage-proofs-core/src/gadgets/insertion.rs
+++ b/storage-proofs-core/src/gadgets/insertion.rs
@@ -316,6 +316,7 @@ pub fn select<Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
 }
 
 /// Takes two allocated numbers (`a`, `b`) and returns `a` if the condition is true, and `b` otherwise.
+#[allow(clippy::multiple_bound_locations)]
 pub fn pick<Scalar: PrimeField, CS: ConstraintSystem<Scalar>>(
     mut cs: CS,
     condition: &Boolean,
diff --git a/storage-proofs-core/src/parameter_cache.rs b/storage-proofs-core/src/parameter_cache.rs
index ee8be9650..9a383fd00 100644
--- a/storage-proofs-core/src/parameter_cache.rs
+++ b/storage-proofs-core/src/parameter_cache.rs
@@ -249,7 +249,7 @@ where
     let param_identifier = pub_params.identifier();
     info!("parameter set identifier for cache: {}", param_identifier);
     let mut hasher = Sha256::default();
-    hasher.update(&param_identifier.into_bytes());
+    hasher.update(param_identifier.into_bytes());
     let circuit_hash = hasher.finalize();
     format!(
         "{}-{:02x}",
diff --git a/storage-proofs-core/src/test_helper.rs b/storage-proofs-core/src/test_helper.rs
index d4b3abbc6..61cba68ef 100644
--- a/storage-proofs-core/src/test_helper.rs
+++ b/storage-proofs-core/src/test_helper.rs
@@ -9,6 +9,7 @@ pub fn setup_replica(data: &[u8], replica_path: &Path) -> MmapMut {
         .read(true)
         .write(true)
         .create(true)
+        .truncate(true)
         .open(replica_path)
         .expect("Failed to create replica");
     f.write_all(data).expect("Failed to write data to replica");
diff --git a/storage-proofs-porep/Cargo.toml b/storage-proofs-porep/Cargo.toml
index 5ea6d0818..f67edb7eb 100644
--- a/storage-proofs-porep/Cargo.toml
+++ b/storage-proofs-porep/Cargo.toml
@@ -84,6 +84,7 @@ multicore-sdr = ["hwloc"]
 # This feature enables a fixed number of discarded rows for TreeR. The `FIL_PROOFS_ROWS_TO_DISCARD`
 # setting is ignored, no `TemporaryAux` file will be written.
 fixed-rows-to-discard = ["storage-proofs-core/fixed-rows-to-discard"]
+cpu-profile = []
 
 [[bench]]
 name = "encode"
diff --git a/storage-proofs-porep/build.rs b/storage-proofs-porep/build.rs
index dc582b88e..a31ccf97c 100644
--- a/storage-proofs-porep/build.rs
+++ b/storage-proofs-porep/build.rs
@@ -8,4 +8,6 @@ fn cfg_if_nightly() {
 }
 
 #[rustversion::not(nightly)]
-fn cfg_if_nightly() {}
+fn cfg_if_nightly() {
+    println!("cargo::rustc-check-cfg=cfg(nightly)");
+}
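Rust 1.80 started warning (`unexpected_cfgs`) about `cfg` names the compiler has not been told about, so custom flags like `nightly` that a build script sets now also need to be declared with a `cargo::rustc-check-cfg` directive (the `cargo::` double-colon form requires a recent Cargo). That is what the added line in the non-nightly branch above does, and what the root `build.rs` hunk near the top of this patch emits as well (note that `println!` only compiles inside a function body, e.g. the build script's `main`). A stand-alone sketch of the idiom, with a hypothetical env-var trigger:

    // Illustrative build.rs, not this crate's actual build script.
    fn main() {
        // Declare cfg(nightly) as an expected configuration name so that
        // #[cfg(nightly)] in the crate no longer trips unexpected_cfgs.
        println!("cargo::rustc-check-cfg=cfg(nightly)");

        // Hypothetical condition: only set the flag when an env var says so.
        if std::env::var("MY_CRATE_NIGHTLY").is_ok() {
            println!("cargo:rustc-cfg=nightly");
        }
    }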
diff --git a/storage-proofs-porep/src/stacked/vanilla/cache.rs b/storage-proofs-porep/src/stacked/vanilla/cache.rs
index 0a32e9465..2fb859167 100644
--- a/storage-proofs-porep/src/stacked/vanilla/cache.rs
+++ b/storage-proofs-porep/src/stacked/vanilla/cache.rs
@@ -1,4 +1,5 @@
 use std::collections::{BTreeMap, HashSet};
+use std::fmt::Write;
 use std::fs::{remove_file, File};
 use std::io;
 use std::path::{Path, PathBuf};
@@ -250,7 +251,10 @@ impl ParentCache {
             drop(data);
 
             let hash = hasher.finalize();
-            digest_hex = hash.iter().map(|x| format!("{:01$x}", x, 2)).collect();
+            digest_hex = hash.iter().fold(String::new(), |mut output, x| {
+                let _ = write!(output, "{:01$x}", x, 2);
+                output
+            });
 
             info!(
                 "[open] parent cache: calculated consistency digest: {:?}",
@@ -343,7 +347,10 @@ impl ParentCache {
         let mut hasher = Sha256::new();
         hasher.update(&data);
         let hash = hasher.finalize();
-        digest_hex = hash.iter().map(|x| format!("{:01$x}", x, 2)).collect();
+        digest_hex = hash.iter().fold(String::new(), |mut output, x| {
+            let _ = write!(output, "{:01$x}", x, 2);
+            output
+        });
         info!(
             "[generate] parent cache: generated consistency digest: {:?}",
             digest_hex
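Both digest loops above replace `map(|x| format!(..)).collect::<String>()` with a `fold` that `write!`s into a single `String`, which is the fix clippy's `format_collect` lint suggests: `format!` allocates a fresh string for every byte, while `write!` appends into one buffer, and writing to a `String` cannot fail, hence the discarded `Result`. The pattern in isolation:

    use std::fmt::Write;

    // Hex-encode a byte slice without one allocation per byte.
    fn to_hex(bytes: &[u8]) -> String {
        bytes.iter().fold(String::new(), |mut output, x| {
            // "{:01$x}" zero-pads the hex value of `x` to width 2.
            let _ = write!(output, "{:01$x}", x, 2);
            output
        })
    }

    fn main() {
        assert_eq!(to_hex(&[0x00, 0x0f, 0xab]), "000fab");
    }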
debug!("binding core in main thread"); - group.get(0).map(|core_index| bind_core(*core_index)) + group.first().map(|core_index| bind_core(*core_index)) }); // NOTE: this means we currently keep 2x sector size around, to improve speed diff --git a/storage-proofs-porep/src/stacked/vanilla/proof.rs b/storage-proofs-porep/src/stacked/vanilla/proof.rs index cd65f0aba..387899917 100644 --- a/storage-proofs-porep/src/stacked/vanilla/proof.rs +++ b/storage-proofs-porep/src/stacked/vanilla/proof.rs @@ -192,12 +192,12 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr t_aux.synth_proofs_path(), partition_count, ) - .map_err(|error| { + .inspect_err(|error| { info!( - "failed to read porep proofs from synthetic proofs file: {:?}", + "failed to read porep proofs from synthetic proofs file: {:?} [{}]", t_aux.synth_proofs_path(), + error, ); - error }) } } @@ -1503,6 +1503,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr ); let mut f = OpenOptions::new() .create(true) + .truncate(true) .write(true) .open(&tree_r_last_path) .expect("failed to open file for tree_r_last"); @@ -1663,10 +1664,13 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr let configs = split_config(tree_c_config.clone(), tree_count)?; match raise_fd_limit() { - Some(res) => { - info!("Building trees [{} descriptors max available]", res); + Ok(fdlimit::Outcome::LimitRaised { from, to }) => { + info!("Building trees [raised file descriptors from {from} to {to}]"); } - None => error!("Failed to raise the fd limit"), + Ok(fdlimit::Outcome::Unsupported) => { + error!("Failed to raise the fd limit: unsupported"); + } + Err(e) => error!("Failed to raise the fd limit: {e}"), }; let tree_c_root = match num_layers { @@ -1912,6 +1916,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr ); let mut f = OpenOptions::new() .create(true) + .truncate(true) .write(true) .open(&tree_r_last_path) .expect("failed to open file for tree_r_last");