Commit

Merge branch 'main' into obj-piece-list

teor2345 authored Nov 26, 2024
2 parents baff7b7 + 49d9638 commit 81101f9
Showing 34 changed files with 657 additions and 524 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/domain-genesis-storage-snapshot-build.yml
@@ -18,6 +18,9 @@ jobs:
       packages: write
 
     steps:
+      - name: Checkout
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+
       - name: Build node image
         id: build
        uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0
6 changes: 0 additions & 6 deletions .github/workflows/rust.yml
@@ -4,13 +4,7 @@ on:
   push:
     branches:
       - main
-    paths-ignore:
-      - "**.md"
-      - ".github/CODEOWNERS"
   pull_request:
-    paths-ignore:
-      - "**.md"
-      - ".github/CODEOWNERS"
   workflow_dispatch:
   merge_group:
 
4 changes: 3 additions & 1 deletion .github/workflows/snapshot-build.yml
@@ -33,6 +33,9 @@ jobs:
           - image: node
             base-artifact: subspace-node
             upload-executables: true
+          - image: gateway
+            base-artifact: subspace-gateway
+            upload-executables: false
           - image: bootstrap-node
             base-artifact: subspace-bootstrap-node
             upload-executables: false
@@ -104,7 +107,6 @@ jobs:
           cd executables
           IMAGE="${{ fromJSON(steps.meta.outputs.json).tags[0] }}"
           ARTIFACT="${{ matrix.build.base-artifact }}"
           docker run --rm --platform linux/amd64 --entrypoint /bin/cat $IMAGE /$ARTIFACT > $ARTIFACT-ubuntu-x86_64-skylake-${{ github.ref_name }}
           # TODO: Pull is a workaround for https://github.com/moby/moby/issues/48197#issuecomment-2472265028
           docker pull --platform linux/amd64/v2 $IMAGE
43 changes: 15 additions & 28 deletions Cargo.lock

Some generated files are not rendered by default.

19 changes: 15 additions & 4 deletions crates/sc-consensus-subspace/src/archiver.rs
@@ -630,18 +630,29 @@ where
         best_block_to_archive = best_block_number;
     }
 
-    // If the user chooses an object mapping start block we don't have the data for, we can't
+    // If the user chooses an object mapping start block we don't have data or state for, we can't
     // create mappings for it, so the node must exit with an error.
     let best_block_to_archive_hash = client
         .hash(best_block_to_archive.into())?
         .expect("just checked above; qed");
-    if client.block(best_block_to_archive_hash)?.is_none() {
+    let Some(best_block_data) = client.block(best_block_to_archive_hash)? else {
         let error = format!(
-            "Missing data for mapping block {best_block_to_archive} hash {best_block_to_archive_hash},\
+            "Missing data for mapping block {best_block_to_archive} hash {best_block_to_archive_hash}, \
             try a higher block number, or wipe your node and restart with `--sync full`"
         );
         return Err(sp_blockchain::Error::Application(error.into()));
-    }
+    };
+
+    // Similarly, state can be pruned, even if the data is present.
+    client
+        .runtime_api()
+        .extract_block_object_mapping(*best_block_data.block.header().parent_hash(), best_block_data.block.clone())
+        .map_err(|error| {
+            sp_blockchain::Error::Application(
+                format!("Missing state for mapping block {best_block_to_archive} hash {best_block_to_archive_hash}: {error}, \
+                try a higher block number, or wipe your node and restart with `--sync full`").into(),
+            )
+        })?;
 
     let maybe_last_archived_block = find_last_archived_block(
         client,
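The hunk above encodes a two-stage availability check: block data and block state are pruned independently, so a node may still hold a block body whose parent state, needed by the extract_block_object_mapping runtime call, is already gone. Below is a minimal runnable sketch of the same shape; the stubbed Client and its method signatures are illustrative stand-ins for the real Substrate client and runtime API, not the actual interfaces.

struct Block {
    parent_hash: [u8; 32],
}

// Stand-in for the Substrate client; the real archiver uses `client.block()`
// and the `extract_block_object_mapping` runtime call instead.
struct Client;

impl Client {
    fn block_by_number(&self, _number: u64) -> Option<Block> {
        Some(Block { parent_hash: [0; 32] })
    }

    fn extract_block_object_mapping(&self, _parent: [u8; 32], _block: &Block) -> Result<(), String> {
        Ok(())
    }
}

fn check_mapping_block(client: &Client, number: u64) -> Result<(), String> {
    // Stage 1: the block body itself can be pruned (e.g. after a fast sync),
    // in which case no mapping can ever be produced for `number`.
    let Some(block) = client.block_by_number(number) else {
        return Err(format!("Missing data for mapping block {number}, try a higher block number"));
    };
    // Stage 2: even with the body present, the parent state may be pruned;
    // probing with the same call the archiver will later make surfaces that early.
    client
        .extract_block_object_mapping(block.parent_hash, &block)
        .map_err(|error| format!("Missing state for mapping block {number}: {error}"))?;
    Ok(())
}

fn main() {
    assert!(check_mapping_block(&Client, 630).is_ok());
}

Probing at startup with the very call that will be used later turns what would be a mid-archiving failure into an immediate, actionable error.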
@@ -3,7 +3,7 @@ use crate::commands::cluster::farmer::FARMER_IDENTIFICATION_BROADCAST_INTERVAL;
 use crate::commands::shared::derive_libp2p_keypair;
 use crate::commands::shared::network::{configure_network, NetworkArgs};
 use anyhow::anyhow;
-use async_lock::RwLock as AsyncRwLock;
+use async_lock::{RwLock as AsyncRwLock, Semaphore};
 use backoff::ExponentialBackoff;
 use clap::{Parser, ValueHint};
 use futures::stream::FuturesUnordered;
@@ -38,6 +38,8 @@ const PIECE_GETTER_MAX_RETRIES: u16 = 7;
 const GET_PIECE_INITIAL_INTERVAL: Duration = Duration::from_secs(5);
 /// Defines max duration between get_piece calls.
 const GET_PIECE_MAX_INTERVAL: Duration = Duration::from_secs(40);
+/// Multiplier on top of outgoing connections number for piece downloading purposes
+const PIECE_PROVIDER_MULTIPLIER: usize = 10;
 
 /// Arguments for controller
 #[derive(Debug, Parser)]
@@ -137,6 +139,7 @@ pub(super) async fn controller(
         .await
         .map_err(|error| anyhow!("Failed to create caching proxy node client: {error}"))?;
 
+    let out_connections = network_args.out_connections;
     let (node, mut node_runner) = {
         if network_args.bootstrap_nodes.is_empty() {
             network_args
@@ -161,6 +164,7 @@
     let piece_provider = PieceProvider::new(
         node.clone(),
         SegmentCommitmentPieceValidator::new(node.clone(), node_client.clone(), kzg.clone()),
+        Semaphore::new(out_connections as usize * PIECE_PROVIDER_MULTIPLIER),
     );
 
     let piece_getter = FarmerPieceGetter::new(
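This change, mirrored in the standalone farmer further down, has PieceProvider::new take a Semaphore sized at out_connections * PIECE_PROVIDER_MULTIPLIER, so the number of concurrent piece downloads scales with the node's configured outgoing connections. A self-contained sketch of the pattern using async_lock::Semaphore; fetch_piece and fetch_all are made-up stand-ins for the real networked piece requests:

use async_lock::Semaphore;
use futures::future::join_all;

const PIECE_PROVIDER_MULTIPLIER: usize = 10;

async fn fetch_piece(index: u64) -> Vec<u8> {
    // Real code would issue a network request here.
    vec![index as u8]
}

async fn fetch_all(indices: Vec<u64>, out_connections: usize) -> Vec<Vec<u8>> {
    // At most `out_connections * PIECE_PROVIDER_MULTIPLIER` downloads run at
    // once; the rest wait on the semaphore instead of flooding the network.
    let semaphore = Semaphore::new(out_connections * PIECE_PROVIDER_MULTIPLIER);
    join_all(indices.into_iter().map(|index| {
        let semaphore = &semaphore;
        async move {
            let _permit = semaphore.acquire().await;
            fetch_piece(index).await
        }
    }))
    .await
}

fn main() {
    let pieces = futures::executor::block_on(fetch_all((0..100).collect(), 3));
    assert_eq!(pieces.len(), 100);
}

Tying the limit to out_connections keeps download parallelism proportional to the networking capacity the operator actually configured, rather than to a fixed global constant.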
@@ -2,7 +2,7 @@
 use crate::commands::shared::DiskFarm;
 use anyhow::anyhow;
-use async_lock::Mutex as AsyncMutex;
+use async_lock::{Mutex as AsyncMutex, Semaphore};
 use backoff::ExponentialBackoff;
 use bytesize::ByteSize;
 use clap::Parser;
@@ -36,7 +36,7 @@ use subspace_farmer::utils::{
 use subspace_farmer_components::reading::ReadSectorRecordChunksMode;
 use subspace_kzg::Kzg;
 use subspace_proof_of_space::Table;
-use tokio::sync::{Barrier, Semaphore};
+use tokio::sync::Barrier;
 use tracing::{error, info, info_span, warn, Instrument};
 
 const FARM_ERROR_PRINT_INTERVAL: Duration = Duration::from_secs(30);
@@ -1,6 +1,6 @@
 use crate::commands::shared::PlottingThreadPriority;
 use anyhow::anyhow;
-use async_lock::Mutex as AsyncMutex;
+use async_lock::{Mutex as AsyncMutex, Semaphore};
 use clap::Parser;
 use prometheus_client::registry::Registry;
 use std::future::Future;
@@ -28,7 +28,6 @@ use subspace_farmer::utils::{
 use subspace_farmer_components::PieceGetter;
 use subspace_kzg::Kzg;
 use subspace_proof_of_space::Table;
-use tokio::sync::Semaphore;
 use tracing::info;
 
 const PLOTTING_RETRY_INTERVAL: Duration = Duration::from_secs(5);
@@ -2,7 +2,7 @@ use crate::commands::shared::network::{configure_network, NetworkArgs};
 use crate::commands::shared::{derive_libp2p_keypair, DiskFarm, PlottingThreadPriority};
 use crate::utils::shutdown_signal;
 use anyhow::anyhow;
-use async_lock::{Mutex as AsyncMutex, RwLock as AsyncRwLock};
+use async_lock::{Mutex as AsyncMutex, RwLock as AsyncRwLock, Semaphore};
 use backoff::ExponentialBackoff;
 use bytesize::ByteSize;
 use clap::{Parser, ValueHint};
@@ -54,7 +54,7 @@ use subspace_kzg::Kzg;
 use subspace_metrics::{start_prometheus_metrics_server, RegistryAdapter};
 use subspace_networking::utils::piece_provider::PieceProvider;
 use subspace_proof_of_space::Table;
-use tokio::sync::{Barrier, Semaphore};
+use tokio::sync::Barrier;
 use tracing::{error, info, info_span, warn, Instrument};
 
 /// Get piece retry attempts number.
@@ -68,6 +68,8 @@ const GET_PIECE_MAX_INTERVAL: Duration = Duration::from_secs(40);
 const MAX_SPACE_PLEDGED_FOR_PLOT_CACHE_ON_WINDOWS: u64 = 7 * 1024 * 1024 * 1024 * 1024;
 const FARM_ERROR_PRINT_INTERVAL: Duration = Duration::from_secs(30);
 const PLOTTING_RETRY_INTERVAL: Duration = Duration::from_secs(5);
+/// Multiplier on top of outgoing connections number for piece downloading purposes
+const PIECE_PROVIDER_MULTIPLIER: usize = 10;
 
 type FarmIndex = u8;
 
@@ -431,6 +433,7 @@
         .await
         .map_err(|error| anyhow!("Failed to create caching proxy node client: {error}"))?;
 
+    let out_connections = network_args.out_connections;
     let (node, mut node_runner) = {
         if network_args.bootstrap_nodes.is_empty() {
             network_args
@@ -460,6 +463,7 @@
     let piece_provider = PieceProvider::new(
         node.clone(),
         SegmentCommitmentPieceValidator::new(node.clone(), node_client.clone(), kzg.clone()),
+        Semaphore::new(out_connections as usize * PIECE_PROVIDER_MULTIPLIER),
     );
 
     let piece_getter = FarmerPieceGetter::new(
2 changes: 1 addition & 1 deletion crates/subspace-farmer/src/farmer_cache.rs
@@ -654,7 +654,7 @@ where
         downloading_pieces_stream
             // This allows to schedule new batch while previous batches partially completed, but
             // avoids excessive memory usage like when all futures are created upfront
-            .buffer_unordered(SYNC_CONCURRENT_BATCHES * 2)
+            .buffer_unordered(SYNC_CONCURRENT_BATCHES * 10)
             // Simply drain everything
             .for_each(|()| async {})
             .await;
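The tuning above leans on buffer_unordered's bounded concurrency: it polls at most N futures at a time and pulls the next one off the stream only as slots free up, so raising the multiplier from 2 to 10 increases parallelism without the memory cost of materializing every future upfront. A small simulated sketch of that behavior; the value SYNC_CONCURRENT_BATCHES = 4 is an assumption, not taken from this diff:

use futures::stream::{self, StreamExt};

const SYNC_CONCURRENT_BATCHES: usize = 4;

async fn download_batch(batch: usize) {
    // Stand-in for downloading one batch of pieces.
    println!("finished batch {batch}");
}

fn main() {
    futures::executor::block_on(async {
        stream::iter(0..100usize)
            .map(download_batch)
            // At most `SYNC_CONCURRENT_BATCHES * 10` batches are in flight;
            // later futures are only created as earlier ones complete.
            .buffer_unordered(SYNC_CONCURRENT_BATCHES * 10)
            // Drain the stream of `()` results.
            .for_each(|()| async {})
            .await;
    });
}

The trailing for_each simply drains the completed results, which is all the cache sync loop needs.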