From 26906b68436a462cddd4e8500d532916bd87bedd Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 2 Aug 2023 22:38:29 -0400 Subject: [PATCH 01/55] Initial benchmark skeleton --- Cargo.lock | 7 + benches/Cargo.toml | 14 +- benches/benches/import.rs | 314 +++++++++++++++++++++++++++++ crates/services/sync/Cargo.toml | 4 + crates/services/sync/src/import.rs | 14 +- crates/services/sync/src/lib.rs | 2 +- crates/services/sync/src/ports.rs | 6 +- 7 files changed, 349 insertions(+), 12 deletions(-) create mode 100644 benches/benches/import.rs diff --git a/Cargo.lock b/Cargo.lock index 551ac676807..674f2e2f285 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1457,6 +1457,7 @@ dependencies = [ "ciborium", "clap 3.2.25", "criterion-plot", + "futures", "itertools", "lazy_static", "num-traits", @@ -1468,6 +1469,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -2663,18 +2665,23 @@ dependencies = [ name = "fuel-core-benches" version = "0.0.0" dependencies = [ + "anyhow", + "async-trait", "clap 4.3.19", "criterion", "ctrlc", "ethnum", "fuel-core", + "fuel-core-services", "fuel-core-storage", + "fuel-core-sync", "fuel-core-types", "rand 0.8.5", "serde", "serde_json", "serde_yaml", "tikv-jemallocator", + "tokio", ] [[package]] diff --git a/benches/Cargo.toml b/benches/Cargo.toml index f783ee896a9..587536783f7 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -6,18 +6,23 @@ publish = false version = "0.0.0" [dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } clap = { workspace = true, features = ["derive"] } -criterion = { version = "0.4", features = ["html_reports"] } +criterion = { version = "0.4", features = ["async_tokio", "html_reports"] } ctrlc = "3.2.3" ethnum = "1.3" fuel-core = { path = "../crates/fuel-core", default-features = false, features = ["metrics", "rocksdb-production"] } +fuel-core-services = { path = "./../crates/services" } fuel-core-storage = { path = "./../crates/storage" 
} +fuel-core-sync = { path = "./../crates/services/sync", features = ["benchmarking"]} fuel-core-types = { path = "./../crates/types", features = ["test-helpers"] } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } serde_yaml = "0.9.13" tikv-jemallocator = { workspace = true } +tokio = { workspace = true } [[bench]] harness = false @@ -26,3 +31,10 @@ name = "state" [[bench]] harness = false name = "vm" + +[[bench]] +harness = false +name = "import" + +[features] +default = ["fuel-core/rocksdb"] diff --git a/benches/benches/import.rs b/benches/benches/import.rs new file mode 100644 index 00000000000..4890f6807c6 --- /dev/null +++ b/benches/benches/import.rs @@ -0,0 +1,314 @@ +use criterion::{ + async_executor::AsyncExecutor, + criterion_group, + criterion_main, + measurement::WallTime, + BenchmarkGroup, + Criterion, +}; +use fuel_core_services::{ + stream::BoxStream, + SharedMutex, + StateWatcher, +}; +use tokio::runtime::Runtime; + +use fuel_core_storage::InterpreterStorage; +use fuel_core_sync::{ + import::{ + Config, + Import, + }, + ports::{ + BlockImporterPort, + ConsensusPort, + MockBlockImporterPort, + MockConsensusPort, + MockPeerToPeerPort, + PeerToPeerPort, + }, + state::State, +}; +use fuel_core_types::{ + blockchain::{ + consensus::{ + Consensus, + Sealed, + }, + header::{ + BlockHeader, + GeneratedConsensusFields, + }, + primitives::{ + BlockHeight, + BlockId, + DaBlockHeight, + }, + SealedBlock, + SealedBlockHeader, + }, + fuel_tx::{ + Bytes32, + Transaction, + }, + fuel_types::ContractId, + services::p2p::SourcePeer, +}; +use rand::{ + rngs::StdRng, + thread_rng, + Rng, + SeedableRng, +}; +use std::{ + iter, + sync::{ + Arc, + Mutex, + }, + time::Duration, +}; +use tokio::sync::Notify; + +#[derive(Default)] +struct Input { + headers: Duration, + consensus: Duration, + transactions: Duration, + executes: Duration, +} + +#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Clone)] 
+struct Count { + headers: usize, + transactions: usize, + consensus: usize, + executes: usize, + blocks: usize, +} + +#[derive(Debug, Default, PartialEq, Eq)] +struct Counts { + now: Count, + max: Count, +} + +pub(crate) fn empty_header(h: BlockHeight) -> SourcePeer { + let mut header = BlockHeader::default(); + header.consensus.height = h; + let transaction_tree = + fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); + header.application.generated.transactions_root = transaction_tree.root().into(); + + let consensus = Consensus::default(); + let sealed = Sealed { + entity: header, + consensus, + }; + SourcePeer { + peer_id: vec![].into(), + data: sealed, + } +} + +type SharedCounts = SharedMutex; + +struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2], SharedCounts); +struct PressureBlockImporterPort(MockBlockImporterPort, Duration, SharedCounts); + +struct PressureConsensusPort(MockConsensusPort, Duration, SharedCounts); + +#[async_trait::async_trait] +impl PeerToPeerPort for PressurePeerToPeerPort { + fn height_stream(&self) -> BoxStream { + self.0.height_stream() + } + async fn get_sealed_block_header( + &self, + height: BlockHeight, + ) -> anyhow::Result>> { + tokio::time::sleep(self.1[0]).await; + self.2.apply(|c| c.inc_headers()); + self.2.apply(|c| { + println!( + "Getting headers:\nHeaders: {} - Transactions: {}\n", + &c.now.headers, &c.now.transactions + ); + }); + self.2.apply(|c| { + c.inc_blocks(); + }); + self.0.get_sealed_block_header(height).await + } + async fn get_transactions( + &self, + block_id: SourcePeer, + ) -> anyhow::Result>> { + tokio::time::sleep(self.1[1]).await; + self.2.apply(|c| c.inc_transactions()); + self.2.apply(|c| c.dec_headers()); + self.2.apply(|c| { + println!( + "Getting transactions:\nHeaders: {} - Transactions: {}\n", + &c.now.headers, &c.now.transactions + ); + }); + self.0.get_transactions(block_id).await + } +} + +#[async_trait::async_trait] +impl BlockImporterPort for 
PressureBlockImporterPort { + fn committed_height_stream(&self) -> BoxStream { + self.0.committed_height_stream() + } + + async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { + let timeout = self.1; + tokio::task::spawn_blocking(move || { + std::thread::sleep(timeout); + }) + .await + .unwrap(); + self.2.apply(|c| { + c.dec_transactions(); + c.inc_executes() + }); + self.2.apply(|c| { + println!( + "Executing block:\nHeaders: {} - Transactions: {}\n", + &c.now.headers, &c.now.transactions + ); + }); + self.2.apply(|c| { + c.dec_executes(); + c.dec_blocks(); + }); + self.0.execute_and_commit(block).await + } +} + +#[async_trait::async_trait] +impl ConsensusPort for PressureConsensusPort { + fn check_sealed_header(&self, header: &SealedBlockHeader) -> anyhow::Result { + self.0.check_sealed_header(header) + } + + async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { + self.2.apply(|c| c.inc_consensus()); + tokio::time::sleep(self.1).await; + self.2.apply(|c| c.dec_consensus()); + self.0.await_da_height(da_height).await + } +} + +impl PressurePeerToPeerPort { + fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { + let mut mock = MockPeerToPeerPort::default(); + mock.expect_get_sealed_block_header() + .returning(|h| Ok(Some(empty_header(h)))); + mock.expect_get_transactions() + .returning(|_| Ok(Some(vec![]))); + Self(mock, delays, counts) + } +} + +impl PressureBlockImporterPort { + fn new(counts: SharedCounts, delays: Duration) -> Self { + let mut mock = MockBlockImporterPort::default(); + mock.expect_execute_and_commit().returning(move |_| Ok(())); + Self(mock, delays, counts) + } +} + +impl PressureConsensusPort { + fn new(counts: SharedCounts, delays: Duration) -> Self { + let mut mock = MockConsensusPort::default(); + mock.expect_await_da_height().returning(|_| Ok(())); + mock.expect_check_sealed_header().returning(|_| Ok(true)); + Self(mock, delays, counts) + } +} + +impl Counts { + fn 
inc_headers(&mut self) { + self.now.headers += 1; + self.max.headers = self.max.headers.max(self.now.headers); + } + fn dec_headers(&mut self) { + self.now.headers -= 1; + } + fn inc_transactions(&mut self) { + self.now.transactions += 1; + self.max.transactions = self.max.transactions.max(self.now.transactions); + } + fn dec_transactions(&mut self) { + self.now.transactions -= 1; + } + fn inc_consensus(&mut self) { + self.now.consensus += 1; + self.max.consensus = self.max.consensus.max(self.now.consensus); + } + fn dec_consensus(&mut self) { + self.now.consensus -= 1; + } + fn inc_executes(&mut self) { + self.now.executes += 1; + self.max.executes = self.max.executes.max(self.now.executes); + } + fn dec_executes(&mut self) { + self.now.executes -= 1; + } + fn inc_blocks(&mut self) { + self.now.blocks += 1; + self.max.blocks = self.max.blocks.max(self.now.blocks); + } + fn dec_blocks(&mut self) { + self.now.blocks -= 1; + } +} + +async fn test() { + let input = Input { + headers: Duration::from_millis(10), + transactions: Duration::from_millis(10), + executes: Duration::from_millis(100), + ..Default::default() + }; + let state = State::new(None, 50); + let params = Config { + max_get_header_requests: 10, + max_get_txns_requests: 10, + }; + + let counts = SharedCounts::new(Default::default()); + let state = SharedMutex::new(state); + + let p2p = Arc::new(PressurePeerToPeerPort::new( + counts.clone(), + [input.headers, input.transactions], + )); + let executor = Arc::new(PressureBlockImporterPort::new( + counts.clone(), + input.executes, + )); + let consensus = Arc::new(PressureConsensusPort::new(counts.clone(), input.consensus)); + let notify = Arc::new(Notify::new()); + + let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let mut watcher = shutdown.into(); + + let import = Import::new(state, notify, params, p2p, executor, consensus); + + import.notify.notify_one(); + import.import(&mut watcher).await.unwrap(); +} + +fn 
import_one(c: &mut Criterion) { + let rt = Runtime::new().unwrap(); + let mut group = c.benchmark_group("import"); + group.bench_function("import one", |b| b.to_async(&rt).iter(|| test())); +} + +criterion_group!(benches, import_one); +criterion_main!(benches); diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index a6cd607fb50..1b41e15436f 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -15,6 +15,7 @@ async-trait = { workspace = true } fuel-core-services = { workspace = true } fuel-core-types = { workspace = true } futures = { workspace = true } +mockall = { workspace = true, optional = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } @@ -23,3 +24,6 @@ fuel-core-trace = { path = "../../trace" } fuel-core-types = { path = "../../types", features = ["test-helpers"] } mockall = { workspace = true } test-case = { workspace = true } + +[features] +benchmarking = ["dep:mockall"] diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 0f7b2d4001e..bc3a656d1d6 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -73,11 +73,12 @@ impl Default for Config { } } -pub(crate) struct Import { +/// Import +pub struct Import { /// Shared state between import and sync tasks. state: SharedMutex, /// Notify import when sync has new work. - notify: Arc, + pub notify: Arc, /// Configuration parameters. params: Config, /// Network port. 
@@ -89,7 +90,8 @@ pub(crate) struct Import { } impl Import { - pub(crate) fn new( + /// New Import + pub fn new( state: SharedMutex, notify: Arc, params: Config, @@ -114,10 +116,8 @@ where C: ConsensusPort + Send + Sync + 'static, { #[tracing::instrument(skip_all)] - pub(crate) async fn import( - &self, - shutdown: &mut StateWatcher, - ) -> anyhow::Result { + /// Import + pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { self.import_inner(shutdown).await?; Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) diff --git a/crates/services/sync/src/lib.rs b/crates/services/sync/src/lib.rs index 8ff9cab3ca4..9087e9aadc4 100644 --- a/crates/services/sync/src/lib.rs +++ b/crates/services/sync/src/lib.rs @@ -7,7 +7,7 @@ pub mod import; pub mod ports; pub mod service; -mod state; +pub mod state; pub mod sync; mod tracing_helpers; diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index f4c0a4e9688..3a1404e8c93 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -15,7 +15,7 @@ use fuel_core_types::{ services::p2p::SourcePeer, }; -#[cfg_attr(test, mockall::automock)] +#[cfg_attr(any(test, feature = "benchmarking"), mockall::automock)] #[async_trait::async_trait] /// Port for communication with the network. pub trait PeerToPeerPort { @@ -39,7 +39,7 @@ pub trait PeerToPeerPort { ) -> anyhow::Result>>; } -#[cfg_attr(test, mockall::automock)] +#[cfg_attr(any(test, feature = "benchmarking"), mockall::automock)] #[async_trait::async_trait] /// Port for communication with the consensus service. pub trait ConsensusPort { @@ -49,7 +49,7 @@ pub trait ConsensusPort { async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()>; } -#[cfg_attr(test, mockall::automock)] +#[cfg_attr(any(test, feature = "benchmarking"), mockall::automock)] #[async_trait::async_trait] /// Port for communication with the block importer. 
pub trait BlockImporterPort { From 9ace98afc2e298588dc6153b463963ca21bc3061 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 2 Aug 2023 23:03:18 -0400 Subject: [PATCH 02/55] Update --- benches/benches/import.rs | 30 +++++++----------------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 4890f6807c6..6cb36477dc9 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -40,7 +40,6 @@ use fuel_core_types::{ GeneratedConsensusFields, }, primitives::{ - BlockHeight, BlockId, DaBlockHeight, }, @@ -51,7 +50,10 @@ use fuel_core_types::{ Bytes32, Transaction, }, - fuel_types::ContractId, + fuel_types::{ + BlockHeight, + ContractId, + }, services::p2p::SourcePeer, }; use rand::{ @@ -129,12 +131,6 @@ impl PeerToPeerPort for PressurePeerToPeerPort { ) -> anyhow::Result>> { tokio::time::sleep(self.1[0]).await; self.2.apply(|c| c.inc_headers()); - self.2.apply(|c| { - println!( - "Getting headers:\nHeaders: {} - Transactions: {}\n", - &c.now.headers, &c.now.transactions - ); - }); self.2.apply(|c| { c.inc_blocks(); }); @@ -147,12 +143,6 @@ impl PeerToPeerPort for PressurePeerToPeerPort { tokio::time::sleep(self.1[1]).await; self.2.apply(|c| c.inc_transactions()); self.2.apply(|c| c.dec_headers()); - self.2.apply(|c| { - println!( - "Getting transactions:\nHeaders: {} - Transactions: {}\n", - &c.now.headers, &c.now.transactions - ); - }); self.0.get_transactions(block_id).await } } @@ -174,12 +164,6 @@ impl BlockImporterPort for PressureBlockImporterPort { c.dec_transactions(); c.inc_executes() }); - self.2.apply(|c| { - println!( - "Executing block:\nHeaders: {} - Transactions: {}\n", - &c.now.headers, &c.now.transactions - ); - }); self.2.apply(|c| { c.dec_executes(); c.dec_blocks(); @@ -270,9 +254,9 @@ impl Counts { async fn test() { let input = Input { - headers: Duration::from_millis(10), - transactions: Duration::from_millis(10), - executes: Duration::from_millis(100), 
+ headers: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), ..Default::default() }; let state = State::new(None, 50); From 8baf6521a3767c1ce126dc63ae94ae097e3bcc5c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 4 Aug 2023 14:13:40 -0400 Subject: [PATCH 03/55] Simplify --- benches/benches/import.rs | 112 +++++++------------------------------- 1 file changed, 19 insertions(+), 93 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 6cb36477dc9..35da1f7c77e 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -80,21 +80,6 @@ struct Input { executes: Duration, } -#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Clone)] -struct Count { - headers: usize, - transactions: usize, - consensus: usize, - executes: usize, - blocks: usize, -} - -#[derive(Debug, Default, PartialEq, Eq)] -struct Counts { - now: Count, - max: Count, -} - pub(crate) fn empty_header(h: BlockHeight) -> SourcePeer { let mut header = BlockHeader::default(); header.consensus.height = h; @@ -113,36 +98,29 @@ pub(crate) fn empty_header(h: BlockHeight) -> SourcePeer { } } -type SharedCounts = SharedMutex; - -struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2], SharedCounts); -struct PressureBlockImporterPort(MockBlockImporterPort, Duration, SharedCounts); - -struct PressureConsensusPort(MockConsensusPort, Duration, SharedCounts); +struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2]); +struct PressureBlockImporterPort(MockBlockImporterPort, Duration); +struct PressureConsensusPort(MockConsensusPort, Duration); #[async_trait::async_trait] impl PeerToPeerPort for PressurePeerToPeerPort { fn height_stream(&self) -> BoxStream { self.0.height_stream() } + async fn get_sealed_block_header( &self, height: BlockHeight, ) -> anyhow::Result>> { tokio::time::sleep(self.1[0]).await; - self.2.apply(|c| c.inc_headers()); - self.2.apply(|c| { - c.inc_blocks(); - }); 
self.0.get_sealed_block_header(height).await } + async fn get_transactions( &self, block_id: SourcePeer, ) -> anyhow::Result>> { tokio::time::sleep(self.1[1]).await; - self.2.apply(|c| c.inc_transactions()); - self.2.apply(|c| c.dec_headers()); self.0.get_transactions(block_id).await } } @@ -160,14 +138,6 @@ impl BlockImporterPort for PressureBlockImporterPort { }) .await .unwrap(); - self.2.apply(|c| { - c.dec_transactions(); - c.inc_executes() - }); - self.2.apply(|c| { - c.dec_executes(); - c.dec_blocks(); - }); self.0.execute_and_commit(block).await } } @@ -179,76 +149,36 @@ impl ConsensusPort for PressureConsensusPort { } async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { - self.2.apply(|c| c.inc_consensus()); tokio::time::sleep(self.1).await; - self.2.apply(|c| c.dec_consensus()); self.0.await_da_height(da_height).await } } impl PressurePeerToPeerPort { - fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { + fn new(delays: [Duration; 2]) -> Self { let mut mock = MockPeerToPeerPort::default(); mock.expect_get_sealed_block_header() .returning(|h| Ok(Some(empty_header(h)))); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); - Self(mock, delays, counts) + Self(mock, delays) } } impl PressureBlockImporterPort { - fn new(counts: SharedCounts, delays: Duration) -> Self { + fn new(delays: Duration) -> Self { let mut mock = MockBlockImporterPort::default(); mock.expect_execute_and_commit().returning(move |_| Ok(())); - Self(mock, delays, counts) + Self(mock, delays) } } impl PressureConsensusPort { - fn new(counts: SharedCounts, delays: Duration) -> Self { + fn new(delays: Duration) -> Self { let mut mock = MockConsensusPort::default(); mock.expect_await_da_height().returning(|_| Ok(())); mock.expect_check_sealed_header().returning(|_| Ok(true)); - Self(mock, delays, counts) - } -} - -impl Counts { - fn inc_headers(&mut self) { - self.now.headers += 1; - self.max.headers = self.max.headers.max(self.now.headers); 
- } - fn dec_headers(&mut self) { - self.now.headers -= 1; - } - fn inc_transactions(&mut self) { - self.now.transactions += 1; - self.max.transactions = self.max.transactions.max(self.now.transactions); - } - fn dec_transactions(&mut self) { - self.now.transactions -= 1; - } - fn inc_consensus(&mut self) { - self.now.consensus += 1; - self.max.consensus = self.max.consensus.max(self.now.consensus); - } - fn dec_consensus(&mut self) { - self.now.consensus -= 1; - } - fn inc_executes(&mut self) { - self.now.executes += 1; - self.max.executes = self.max.executes.max(self.now.executes); - } - fn dec_executes(&mut self) { - self.now.executes -= 1; - } - fn inc_blocks(&mut self) { - self.now.blocks += 1; - self.max.blocks = self.max.blocks.max(self.now.blocks); - } - fn dec_blocks(&mut self) { - self.now.blocks -= 1; + Self(mock, delays) } } @@ -259,24 +189,20 @@ async fn test() { executes: Duration::from_millis(10), ..Default::default() }; - let state = State::new(None, 50); let params = Config { max_get_header_requests: 10, max_get_txns_requests: 10, }; - - let counts = SharedCounts::new(Default::default()); + let state = State::new(None, 50); let state = SharedMutex::new(state); - let p2p = Arc::new(PressurePeerToPeerPort::new( - counts.clone(), - [input.headers, input.transactions], - )); - let executor = Arc::new(PressureBlockImporterPort::new( - counts.clone(), - input.executes, - )); - let consensus = Arc::new(PressureConsensusPort::new(counts.clone(), input.consensus)); + let p2p = Arc::new(PressurePeerToPeerPort::new([ + input.headers, + input.transactions, + ])); + + let executor = Arc::new(PressureBlockImporterPort::new(input.executes)); + let consensus = Arc::new(PressureConsensusPort::new(input.consensus)); let notify = Arc::new(Notify::new()); let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); From 5f3fb57ec84b091938c1a85d26bebad923163d9f Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 4 Aug 2023 14:41:27 -0400 
Subject: [PATCH 04/55] benches import lib --- benches/benches/import.rs | 170 ++---------------- benches/src/import.rs | 7 + .../import/pressure_block_importer_port.rs | 45 +++++ benches/src/import/pressure_consensus_port.rs | 40 +++++ .../src/import/pressure_peer_to_peer_port.rs | 82 +++++++++ benches/src/lib.rs | 2 + 6 files changed, 187 insertions(+), 159 deletions(-) create mode 100644 benches/src/import.rs create mode 100644 benches/src/import/pressure_block_importer_port.rs create mode 100644 benches/src/import/pressure_consensus_port.rs create mode 100644 benches/src/import/pressure_peer_to_peer_port.rs diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 35da1f7c77e..61750421aef 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -6,71 +6,27 @@ use criterion::{ BenchmarkGroup, Criterion, }; -use fuel_core_services::{ - stream::BoxStream, - SharedMutex, - StateWatcher, +use fuel_core_benches::import::{ + PressureBlockImporterPort, + PressureConsensusPort, + PressurePeerToPeerPort, }; -use tokio::runtime::Runtime; - -use fuel_core_storage::InterpreterStorage; +use fuel_core_services::SharedMutex; use fuel_core_sync::{ import::{ Config, Import, }, - ports::{ - BlockImporterPort, - ConsensusPort, - MockBlockImporterPort, - MockConsensusPort, - MockPeerToPeerPort, - PeerToPeerPort, - }, state::State, }; -use fuel_core_types::{ - blockchain::{ - consensus::{ - Consensus, - Sealed, - }, - header::{ - BlockHeader, - GeneratedConsensusFields, - }, - primitives::{ - BlockId, - DaBlockHeight, - }, - SealedBlock, - SealedBlockHeader, - }, - fuel_tx::{ - Bytes32, - Transaction, - }, - fuel_types::{ - BlockHeight, - ContractId, - }, - services::p2p::SourcePeer, -}; -use rand::{ - rngs::StdRng, - thread_rng, - Rng, - SeedableRng, -}; use std::{ - iter, - sync::{ - Arc, - Mutex, - }, + sync::Arc, time::Duration, }; -use tokio::sync::Notify; +use tokio::{ + runtime::Runtime, + sync::Notify, +}; #[derive(Default)] struct Input { @@ 
-80,108 +36,6 @@ struct Input { executes: Duration, } -pub(crate) fn empty_header(h: BlockHeight) -> SourcePeer { - let mut header = BlockHeader::default(); - header.consensus.height = h; - let transaction_tree = - fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); - header.application.generated.transactions_root = transaction_tree.root().into(); - - let consensus = Consensus::default(); - let sealed = Sealed { - entity: header, - consensus, - }; - SourcePeer { - peer_id: vec![].into(), - data: sealed, - } -} - -struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2]); -struct PressureBlockImporterPort(MockBlockImporterPort, Duration); -struct PressureConsensusPort(MockConsensusPort, Duration); - -#[async_trait::async_trait] -impl PeerToPeerPort for PressurePeerToPeerPort { - fn height_stream(&self) -> BoxStream { - self.0.height_stream() - } - - async fn get_sealed_block_header( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - tokio::time::sleep(self.1[0]).await; - self.0.get_sealed_block_header(height).await - } - - async fn get_transactions( - &self, - block_id: SourcePeer, - ) -> anyhow::Result>> { - tokio::time::sleep(self.1[1]).await; - self.0.get_transactions(block_id).await - } -} - -#[async_trait::async_trait] -impl BlockImporterPort for PressureBlockImporterPort { - fn committed_height_stream(&self) -> BoxStream { - self.0.committed_height_stream() - } - - async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { - let timeout = self.1; - tokio::task::spawn_blocking(move || { - std::thread::sleep(timeout); - }) - .await - .unwrap(); - self.0.execute_and_commit(block).await - } -} - -#[async_trait::async_trait] -impl ConsensusPort for PressureConsensusPort { - fn check_sealed_header(&self, header: &SealedBlockHeader) -> anyhow::Result { - self.0.check_sealed_header(header) - } - - async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { - tokio::time::sleep(self.1).await; - 
self.0.await_da_height(da_height).await - } -} - -impl PressurePeerToPeerPort { - fn new(delays: [Duration; 2]) -> Self { - let mut mock = MockPeerToPeerPort::default(); - mock.expect_get_sealed_block_header() - .returning(|h| Ok(Some(empty_header(h)))); - mock.expect_get_transactions() - .returning(|_| Ok(Some(vec![]))); - Self(mock, delays) - } -} - -impl PressureBlockImporterPort { - fn new(delays: Duration) -> Self { - let mut mock = MockBlockImporterPort::default(); - mock.expect_execute_and_commit().returning(move |_| Ok(())); - Self(mock, delays) - } -} - -impl PressureConsensusPort { - fn new(delays: Duration) -> Self { - let mut mock = MockConsensusPort::default(); - mock.expect_await_da_height().returning(|_| Ok(())); - mock.expect_check_sealed_header().returning(|_| Ok(true)); - Self(mock, delays) - } -} - async fn test() { let input = Input { headers: Duration::from_millis(5), @@ -200,16 +54,14 @@ async fn test() { input.headers, input.transactions, ])); - let executor = Arc::new(PressureBlockImporterPort::new(input.executes)); let consensus = Arc::new(PressureConsensusPort::new(input.consensus)); - let notify = Arc::new(Notify::new()); + let notify = Arc::new(Notify::new()); let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = shutdown.into(); let import = Import::new(state, notify, params, p2p, executor, consensus); - import.notify.notify_one(); import.import(&mut watcher).await.unwrap(); } diff --git a/benches/src/import.rs b/benches/src/import.rs new file mode 100644 index 00000000000..7481639537d --- /dev/null +++ b/benches/src/import.rs @@ -0,0 +1,7 @@ +mod pressure_block_importer_port; +mod pressure_consensus_port; +mod pressure_peer_to_peer_port; + +pub use pressure_block_importer_port::PressureBlockImporterPort; +pub use pressure_consensus_port::PressureConsensusPort; +pub use pressure_peer_to_peer_port::PressurePeerToPeerPort; diff --git a/benches/src/import/pressure_block_importer_port.rs 
b/benches/src/import/pressure_block_importer_port.rs new file mode 100644 index 00000000000..92439f3a97f --- /dev/null +++ b/benches/src/import/pressure_block_importer_port.rs @@ -0,0 +1,45 @@ +use fuel_core_services::stream::BoxStream; +use fuel_core_sync::ports::{ + BlockImporterPort, + MockBlockImporterPort, +}; +use fuel_core_types::{ + blockchain::SealedBlock, + fuel_types::BlockHeight, +}; +use std::time::Duration; + +pub struct PressureBlockImporterPort(MockBlockImporterPort, Duration); + +impl PressureBlockImporterPort { + pub fn new(delays: Duration) -> Self { + let mut mock = MockBlockImporterPort::default(); + mock.expect_execute_and_commit().returning(move |_| Ok(())); + Self(mock, delays) + } + + fn service(&self) -> &impl BlockImporterPort { + &self.0 + } + + fn duration(&self) -> Duration { + self.1 + } +} + +#[async_trait::async_trait] +impl BlockImporterPort for PressureBlockImporterPort { + fn committed_height_stream(&self) -> BoxStream { + self.service().committed_height_stream() + } + + async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { + let timeout = self.duration(); + tokio::task::spawn_blocking(move || { + std::thread::sleep(timeout); + }) + .await + .unwrap(); + self.service().execute_and_commit(block).await + } +} diff --git a/benches/src/import/pressure_consensus_port.rs b/benches/src/import/pressure_consensus_port.rs new file mode 100644 index 00000000000..796dff9e644 --- /dev/null +++ b/benches/src/import/pressure_consensus_port.rs @@ -0,0 +1,40 @@ +use fuel_core_sync::ports::{ + ConsensusPort, + MockConsensusPort, +}; +use fuel_core_types::blockchain::{ + primitives::DaBlockHeight, + SealedBlockHeader, +}; +use std::time::Duration; + +pub struct PressureConsensusPort(MockConsensusPort, Duration); + +impl PressureConsensusPort { + pub fn new(delays: Duration) -> Self { + let mut mock = MockConsensusPort::default(); + mock.expect_await_da_height().returning(|_| Ok(())); + 
mock.expect_check_sealed_header().returning(|_| Ok(true)); + Self(mock, delays) + } + + fn service(&self) -> &impl ConsensusPort { + &self.0 + } + + fn duration(&self) -> Duration { + self.1 + } +} + +#[async_trait::async_trait] +impl ConsensusPort for PressureConsensusPort { + fn check_sealed_header(&self, header: &SealedBlockHeader) -> anyhow::Result { + self.service().check_sealed_header(header) + } + + async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { + tokio::time::sleep(self.duration()).await; + self.service().await_da_height(da_height).await + } +} diff --git a/benches/src/import/pressure_peer_to_peer_port.rs b/benches/src/import/pressure_peer_to_peer_port.rs new file mode 100644 index 00000000000..4d78e1875f9 --- /dev/null +++ b/benches/src/import/pressure_peer_to_peer_port.rs @@ -0,0 +1,82 @@ +use fuel_core_services::stream::BoxStream; +use fuel_core_sync::ports::{ + MockPeerToPeerPort, + PeerToPeerPort, +}; +use fuel_core_types::{ + blockchain::{ + consensus::{ + Consensus, + Sealed, + }, + header::BlockHeader, + primitives::BlockId, + SealedBlockHeader, + }, + fuel_tx::Transaction, + fuel_types::BlockHeight, + services::p2p::SourcePeer, +}; +use std::time::Duration; + +fn empty_header(h: BlockHeight) -> SourcePeer { + let mut header = BlockHeader::default(); + header.consensus.height = h; + let transaction_tree = + fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); + header.application.generated.transactions_root = transaction_tree.root().into(); + + let consensus = Consensus::default(); + let sealed = Sealed { + entity: header, + consensus, + }; + SourcePeer { + peer_id: vec![].into(), + data: sealed, + } +} + +pub struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2]); + +impl PressurePeerToPeerPort { + pub fn new(delays: [Duration; 2]) -> Self { + let mut mock = MockPeerToPeerPort::default(); + mock.expect_get_sealed_block_header() + .returning(|h| Ok(Some(empty_header(h)))); + 
mock.expect_get_transactions() + .returning(|_| Ok(Some(vec![]))); + Self(mock, delays) + } + + fn service(&self) -> &impl PeerToPeerPort { + &self.0 + } + + fn duration(&self, index: usize) -> Duration { + self.1[index] + } +} + +#[async_trait::async_trait] +impl PeerToPeerPort for PressurePeerToPeerPort { + fn height_stream(&self) -> BoxStream { + self.service().height_stream() + } + + async fn get_sealed_block_header( + &self, + height: BlockHeight, + ) -> anyhow::Result>> { + tokio::time::sleep(self.duration(0)).await; + self.service().get_sealed_block_header(height).await + } + + async fn get_transactions( + &self, + block_id: SourcePeer, + ) -> anyhow::Result>> { + tokio::time::sleep(self.duration(1)).await; + self.service().get_transactions(block_id).await + } +} diff --git a/benches/src/lib.rs b/benches/src/lib.rs index b3fad0753df..d4a48329aba 100644 --- a/benches/src/lib.rs +++ b/benches/src/lib.rs @@ -1,3 +1,5 @@ +pub mod import; + use fuel_core::database::vm_database::VmDatabase; pub use fuel_core::database::Database; use fuel_core_types::{ From 3b349ef4f318dfdbc762ffd4775846a189edbd52 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 4 Aug 2023 14:48:28 -0400 Subject: [PATCH 05/55] Update import.rs --- benches/benches/import.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 61750421aef..64431720faa 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -1,9 +1,6 @@ use criterion::{ - async_executor::AsyncExecutor, criterion_group, criterion_main, - measurement::WallTime, - BenchmarkGroup, Criterion, }; use fuel_core_benches::import::{ @@ -43,13 +40,14 @@ async fn test() { executes: Duration::from_millis(10), ..Default::default() }; + + let state = State::new(None, 50); + let state = SharedMutex::new(state); + let notify = Arc::new(Notify::new()); let params = Config { max_get_header_requests: 10, max_get_txns_requests: 10, }; - let 
state = State::new(None, 50); - let state = SharedMutex::new(state); - let p2p = Arc::new(PressurePeerToPeerPort::new([ input.headers, input.transactions, @@ -57,7 +55,6 @@ async fn test() { let executor = Arc::new(PressureBlockImporterPort::new(input.executes)); let consensus = Arc::new(PressureConsensusPort::new(input.consensus)); - let notify = Arc::new(Notify::new()); let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = shutdown.into(); @@ -69,7 +66,7 @@ async fn test() { fn import_one(c: &mut Criterion) { let rt = Runtime::new().unwrap(); let mut group = c.benchmark_group("import"); - group.bench_function("import one", |b| b.to_async(&rt).iter(|| test())); + group.bench_function("import one", |b| b.to_async(&rt).iter(test)); } criterion_group!(benches, import_one); From 1982867f27c84d1c6124c3a7e9c4d2fc4ef22bef Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 4 Aug 2023 17:58:20 -0400 Subject: [PATCH 06/55] Update --- benches/benches/import.rs | 84 ++++++++++++++----- .../src/import/pressure_peer_to_peer_port.rs | 6 +- crates/services/sync/src/import.rs | 8 +- 3 files changed, 72 insertions(+), 26 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 64431720faa..bf9a7a9771f 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -1,4 +1,5 @@ use criterion::{ + black_box, criterion_group, criterion_main, Criterion, @@ -8,7 +9,10 @@ use fuel_core_benches::import::{ PressureConsensusPort, PressurePeerToPeerPort, }; -use fuel_core_services::SharedMutex; +use fuel_core_services::{ + SharedMutex, + StateWatcher, +}; use fuel_core_sync::{ import::{ Config, @@ -22,31 +26,37 @@ use std::{ }; use tokio::{ runtime::Runtime, - sync::Notify, + sync::{ + watch::Sender, + Notify, + }, }; +type PressureImport = + Import; + #[derive(Default)] -struct Input { +struct Durations { headers: Duration, consensus: Duration, transactions: Duration, executes: Duration, } 
-async fn test() { - let input = Input { - headers: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), - ..Default::default() - }; - - let state = State::new(None, 50); - let state = SharedMutex::new(state); - let notify = Arc::new(Notify::new()); +fn create_import( + shared_state: SharedMutex, + input: Durations, + max_get_header_requests: usize, + max_get_txns_requests: usize, +) -> ( + PressureImport, + Sender, + StateWatcher, +) { + let shared_notify = Arc::new(Notify::new()); let params = Config { - max_get_header_requests: 10, - max_get_txns_requests: 10, + max_get_header_requests, + max_get_txns_requests, }; let p2p = Arc::new(PressurePeerToPeerPort::new([ input.headers, @@ -55,18 +65,48 @@ async fn test() { let executor = Arc::new(PressureBlockImporterPort::new(input.executes)); let consensus = Arc::new(PressureConsensusPort::new(input.consensus)); - let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); - let mut watcher = shutdown.into(); + let (tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let watcher = shutdown.into(); + let import = Import::new( + shared_state, + shared_notify, + params, + p2p, + executor, + consensus, + ); + (import, tx, watcher) +} - let import = Import::new(state, notify, params, p2p, executor, consensus); - import.notify.notify_one(); - import.import(&mut watcher).await.unwrap(); +async fn test(import: &PressureImport, shutdown: &mut StateWatcher) { + import.import(shutdown).await.unwrap(); } fn import_one(c: &mut Criterion) { let rt = Runtime::new().unwrap(); let mut group = c.benchmark_group("import"); - group.bench_function("import one", |b| b.to_async(&rt).iter(test)); + group.bench_function("import one", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 50); + let shared_state = 
SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 10, 10); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(test(&import, &mut shutdown).await); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); } criterion_group!(benches, import_one); diff --git a/benches/src/import/pressure_peer_to_peer_port.rs b/benches/src/import/pressure_peer_to_peer_port.rs index 4d78e1875f9..2ca7d2f7c9a 100644 --- a/benches/src/import/pressure_peer_to_peer_port.rs +++ b/benches/src/import/pressure_peer_to_peer_port.rs @@ -68,7 +68,8 @@ impl PeerToPeerPort for PressurePeerToPeerPort { &self, height: BlockHeight, ) -> anyhow::Result>> { - tokio::time::sleep(self.duration(0)).await; + let timeout = self.duration(0); + tokio::time::sleep(timeout).await; self.service().get_sealed_block_header(height).await } @@ -76,7 +77,8 @@ impl PeerToPeerPort for PressurePeerToPeerPort { &self, block_id: SourcePeer, ) -> anyhow::Result>> { - tokio::time::sleep(self.duration(1)).await; + let timeout = self.duration(1); + tokio::time::sleep(timeout).await; self.service().get_transactions(block_id).await } } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index bc3a656d1d6..4b511a2e61a 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -78,7 +78,7 @@ pub struct Import { /// Shared state between import and sync tasks. state: SharedMutex, /// Notify import when sync has new work. - pub notify: Arc, + notify: Arc, /// Configuration parameters. params: Config, /// Network port. 
@@ -108,6 +108,11 @@ impl Import { consensus, } } + + /// Notify one + pub fn notify_one(&self) { + self.notify.notify_one() + } } impl Import where @@ -228,7 +233,6 @@ where Ok(b) => b, Err(e) => return Err(e), }; - execute_and_commit(executor.as_ref(), &state, block).await } } From 18453ba0917c4f7c99e89a18d42bd76af05476c0 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 4 Aug 2023 20:50:50 -0400 Subject: [PATCH 07/55] test v3 --- benches/benches/import.rs | 192 +++++++++++++++++++++++++- crates/services/sync/src/import.rs | 211 ++++++++++++++++++++++++++++- 2 files changed, 396 insertions(+), 7 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index bf9a7a9771f..97c70d24815 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -85,23 +85,207 @@ async fn test(import: &PressureImport, shutdown: &mut StateWatcher) { fn import_one(c: &mut Criterion) { let rt = Runtime::new().unwrap(); let mut group = c.benchmark_group("import"); - group.bench_function("import one", |b| { + group.bench_function("import v1 - 500 * 5/5/5/15 - 10/10", |b| { b.to_async(&rt).iter_custom(|iters| async move { let mut elapsed_time = Duration::default(); for _ in 0..iters { - let state = State::new(None, 50); + let state = State::new(None, 500); let shared_state = SharedMutex::new(state); let input = Durations { headers: Duration::from_millis(5), consensus: Duration::from_millis(5), transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), + executes: Duration::from_millis(15), }; let (import, _tx, mut shutdown) = create_import(shared_state, input, 10, 10); import.notify_one(); let start = std::time::Instant::now(); - black_box(test(&import, &mut shutdown).await); + black_box(import.import(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v1 - 500 * 5/5/5/15 - 100/100", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut 
elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 100, 100); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v1 - 500 * 5/5/5/15 - 500/500", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 500, 500); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v2 - 500 * 5/5/5/15 - 10", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 0, 10); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import_v2(&mut shutdown).await.unwrap()); + 
elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v2 - 500 * 5/5/5/15 - 100", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 0, 100); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import_v2(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v2 - 500 * 5/5/5/15 - 500", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 0, 500); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import_v2(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v3 - 500 * 5/5/5/15 - 10", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, 
mut shutdown) = + create_import(shared_state, input, 0, 10); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import_v3(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v3 - 500 * 5/5/5/15 - 100", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 0, 100); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import_v3(&mut shutdown).await.unwrap()); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + + group.bench_function("import v3 - 500 * 5/5/5/15 - 500", |b| { + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, 500); + let shared_state = SharedMutex::new(state); + let input = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(15), + }; + let (import, _tx, mut shutdown) = + create_import(shared_state, input, 0, 500); + import.notify_one(); + let start = std::time::Instant::now(); + black_box(import.import_v3(&mut shutdown).await.unwrap()); elapsed_time += start.elapsed(); } elapsed_time diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 4b511a2e61a..9309e1b9680 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -7,6 +7,7 @@ use std::{ sync::Arc, }; +use anyhow::anyhow; use fuel_core_services::{ SharedMutex, 
StateWatcher, @@ -27,6 +28,7 @@ use futures::{ self, StreamExt, }, + FutureExt, Stream, }; use std::future::Future; @@ -123,16 +125,39 @@ where #[tracing::instrument(skip_all)] /// Import pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown).await?; + self.import_inner(shutdown, 1).await?; Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } - async fn import_inner(&self, shutdown: &StateWatcher) -> anyhow::Result<()> { + /// Import + pub async fn import_v2(&self, shutdown: &mut StateWatcher) -> anyhow::Result { + self.import_inner(shutdown, 2).await?; + + Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) + } + + /// Import + pub async fn import_v3(&self, shutdown: &mut StateWatcher) -> anyhow::Result { + self.import_inner(shutdown, 3).await?; + + Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) + } + + async fn import_inner( + &self, + shutdown: &StateWatcher, + version: u32, + ) -> anyhow::Result<()> { // If there is a range to process, launch the stream. if let Some(range) = self.state.apply(|s| s.process_range()) { // Launch the stream to import the range. - let (count, result) = self.launch_stream(range.clone(), shutdown).await; + let (count, result) = match version { + 1 => self.launch_stream(range.clone(), shutdown).await, + 2 => self.launch_stream_v2(range.clone(), shutdown).await, + 3 => self.launch_stream_v3(range.clone(), shutdown).await, + _ => panic!("INVALID"), + }; // Get the size of the range. let range_len = range.size_hint().0 as u32; @@ -254,6 +279,186 @@ where .in_current_span() .await } + + async fn launch_stream_v2( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, + ) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. 
+ } = &self; + get_header_range(range.clone(), p2p.clone()) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |result| { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + tokio::spawn(async move { + let header = match result.await { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; + + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) + } + + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + + get_transactions_on_block(p2p.as_ref(), block_id, header).await + }) + .then(|task| async { task.map_err(|e| anyhow!(e))? }) + } + }) + .buffered(params.max_get_txns_requests) + .into_scan_none_or_err() + .scan_none_or_err() + .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); + } + }) + .then({ + let state = state.clone(); + let executor = executor.clone(); + move |block| { + { + let state = state.clone(); + let executor = executor.clone(); + async move { + let block = match block { + Ok(b) => b, + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await + } + } + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + } + }) + .into_scan_err() + .scan_err() + .fold((0usize, Ok(())), |(count, err), result| async move { + match result { + Ok(_) => (count + 1, err), + Err(e) => (count, Err(e)), + } + }) + .in_current_span() + .await + } + + async fn launch_stream_v3( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, + ) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + 
.. + } = &self; + get_header_range(range.clone(), p2p.clone()) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |result| { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + async move { + let header = match result.await { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; + + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) + } + + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + get_transactions_on_block(p2p.as_ref(), block_id, header).await + } + } + }) + .buffered(params.max_get_txns_requests) + .into_scan_none_or_err() + .scan_none_or_err() + .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); + } + }) + .then({ + let state = state.clone(); + let executor = executor.clone(); + move |block| { + { + let state = state.clone(); + let executor = executor.clone(); + async move { + let block = match block { + Ok(b) => b, + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await + } + } + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + } + }) + .into_scan_err() + .scan_err() + .fold((0usize, Ok(())), |(count, err), result| async move { + match result { + Ok(_) => (count + 1, err), + Err(e) => (count, Err(e)), + } + }) + .in_current_span() + .await + } } /// Waits for a notify or shutdown signal. 
From 4d375670657743004e5dd98e0e8a6d4114fdb165 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 7 Aug 2023 11:30:24 -0400 Subject: [PATCH 08/55] Version tests --- benches/benches/import.rs | 295 ++++++++++++-------------------------- 1 file changed, 89 insertions(+), 206 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 97c70d24815..3e38ab6fa28 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -2,6 +2,8 @@ use criterion::{ black_box, criterion_group, criterion_main, + measurement::WallTime, + BenchmarkGroup, Criterion, }; use fuel_core_benches::import::{ @@ -21,6 +23,10 @@ use fuel_core_sync::{ state::State, }; use std::{ + fmt::{ + Display, + Formatter, + }, sync::Arc, time::Duration, }; @@ -35,7 +41,7 @@ use tokio::{ type PressureImport = Import; -#[derive(Default)] +#[derive(Default, Clone, Copy)] struct Durations { headers: Duration, consensus: Duration, @@ -78,220 +84,97 @@ fn create_import( (import, tx, watcher) } -async fn test(import: &PressureImport, shutdown: &mut StateWatcher) { - import.import(shutdown).await.unwrap(); +#[derive(Clone, Copy)] +enum Version { + V1, + V2, + V3, } -fn import_one(c: &mut Criterion) { - let rt = Runtime::new().unwrap(); - let mut group = c.benchmark_group("import"); - group.bench_function("import v1 - 500 * 5/5/5/15 - 10/10", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 10, 10); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - 
elapsed_time - }) - }); - - group.bench_function("import v1 - 500 * 5/5/5/15 - 100/100", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 100, 100); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); +impl Display for Version { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Version::V1 => write!(f, "v1"), + Version::V2 => write!(f, "v2"), + Version::V3 => write!(f, "v3"), + } + } +} - group.bench_function("import v1 - 500 * 5/5/5/15 - 500/500", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 500, 500); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); +async fn import_version_switch( + import: PressureImport, + version: Version, + shutdown: &mut StateWatcher, +) { + match version { + Version::V1 => import.import(shutdown).await.unwrap(), + Version::V2 => import.import_v2(shutdown).await.unwrap(), + Version::V3 => 
import.import_v3(shutdown).await.unwrap(), + }; +} - group.bench_function("import v2 - 500 * 5/5/5/15 - 10", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 0, 10); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import_v2(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); +fn bench_imports(c: &mut Criterion) { + let bench_import = |group: &mut BenchmarkGroup, + version: Version, + n: u32, + durations: Durations, + buffer_size: usize| { + let name = format!( + "import {version} - {n} * {d_h}/{d_c}/{d_t}/{d_e} - {sz}", + version = version, + n = n, + d_h = durations.headers.as_millis(), + d_c = durations.consensus.as_millis(), + d_t = durations.transactions.as_millis(), + d_e = durations.executes.as_millis(), + sz = buffer_size + ); + group.bench_function(name, move |b| { + let rt = Runtime::new().unwrap(); + b.to_async(&rt).iter_custom(|iters| async move { + let mut elapsed_time = Duration::default(); + for _ in 0..iters { + let state = State::new(None, n); + let shared_state = SharedMutex::new(state); + let (import, _tx, mut shutdown) = + create_import(shared_state, durations, buffer_size, buffer_size); + import.notify_one(); + let start = std::time::Instant::now(); + black_box( + import_version_switch(import, version, &mut shutdown).await, + ); + elapsed_time += start.elapsed(); + } + elapsed_time + }) + }); + }; - group.bench_function("import v2 - 500 * 5/5/5/15 - 100", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = 
Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 0, 100); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import_v2(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); + let mut group = c.benchmark_group("import"); - group.bench_function("import v2 - 500 * 5/5/5/15 - 500", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 0, 500); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import_v2(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); + let durations = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), + }; + let n = 50; - group.bench_function("import v3 - 500 * 5/5/5/15 - 10", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: 
Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 0, 10); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import_v3(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); + // V1 + bench_import(&mut group, Version::V1, n, durations, 5); + bench_import(&mut group, Version::V1, n, durations, 10); + bench_import(&mut group, Version::V1, n, durations, 50); - group.bench_function("import v3 - 500 * 5/5/5/15 - 100", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 0, 100); - import.notify_one(); - let start = std::time::Instant::now(); - black_box(import.import_v3(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); + // V2 + bench_import(&mut group, Version::V2, n, durations, 5); + bench_import(&mut group, Version::V2, n, durations, 10); + bench_import(&mut group, Version::V2, n, durations, 50); - group.bench_function("import v3 - 500 * 5/5/5/15 - 500", |b| { - b.to_async(&rt).iter_custom(|iters| async move { - let mut elapsed_time = Duration::default(); - for _ in 0..iters { - let state = State::new(None, 500); - let shared_state = SharedMutex::new(state); - let input = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(15), - }; - let (import, _tx, mut shutdown) = - create_import(shared_state, input, 0, 500); - import.notify_one(); - let start = std::time::Instant::now(); 
- black_box(import.import_v3(&mut shutdown).await.unwrap()); - elapsed_time += start.elapsed(); - } - elapsed_time - }) - }); + // V3 + bench_import(&mut group, Version::V3, n, durations, 5); + bench_import(&mut group, Version::V3, n, durations, 10); + bench_import(&mut group, Version::V3, n, durations, 50); } -criterion_group!(benches, import_one); +criterion_group!(benches, bench_imports); criterion_main!(benches); From a35e94abb2cbbfa33a206a59693f6de6fc60bfe4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 11 Aug 2023 04:20:24 -0400 Subject: [PATCH 09/55] v4 --- benches/benches/import.rs | 104 +++--- benches/src/import.rs | 74 +++++ benches/src/import/count.rs | 30 ++ .../import/pressure_block_importer_port.rs | 18 +- benches/src/import/pressure_consensus_port.rs | 13 +- .../src/import/pressure_peer_to_peer_port.rs | 20 +- benches/src/import/test.rs | 108 ++++++ crates/services/sync/src/import.rs | 307 ++++++++++++++---- 8 files changed, 532 insertions(+), 142 deletions(-) create mode 100644 benches/src/import/count.rs create mode 100644 benches/src/import/test.rs diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 3e38ab6fa28..ab6e4876a59 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -7,8 +7,12 @@ use criterion::{ Criterion, }; use fuel_core_benches::import::{ + create_import, + Count, + Durations, PressureBlockImporterPort, PressureConsensusPort, + PressureImport, PressurePeerToPeerPort, }; use fuel_core_services::{ @@ -38,52 +42,6 @@ use tokio::{ }, }; -type PressureImport = - Import; - -#[derive(Default, Clone, Copy)] -struct Durations { - headers: Duration, - consensus: Duration, - transactions: Duration, - executes: Duration, -} - -fn create_import( - shared_state: SharedMutex, - input: Durations, - max_get_header_requests: usize, - max_get_txns_requests: usize, -) -> ( - PressureImport, - Sender, - StateWatcher, -) { - let shared_notify = Arc::new(Notify::new()); - let params = Config { - 
max_get_header_requests, - max_get_txns_requests, - }; - let p2p = Arc::new(PressurePeerToPeerPort::new([ - input.headers, - input.transactions, - ])); - let executor = Arc::new(PressureBlockImporterPort::new(input.executes)); - let consensus = Arc::new(PressureConsensusPort::new(input.consensus)); - - let (tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); - let watcher = shutdown.into(); - let import = Import::new( - shared_state, - shared_notify, - params, - p2p, - executor, - consensus, - ); - (import, tx, watcher) -} - #[derive(Clone, Copy)] enum Version { V1, @@ -113,37 +71,49 @@ async fn import_version_switch( }; } +fn name(version: Version, n: usize, durations: Durations, buffer_size: usize) -> String { + format!( + "import {version} - {n} * {d_h}/{d_c}/{d_t}/{d_e} - {sz}", + version = version, + n = n, + d_h = durations.headers.as_millis(), + d_c = durations.consensus.as_millis(), + d_t = durations.transactions.as_millis(), + d_e = durations.executes.as_millis(), + sz = buffer_size + ) +} + fn bench_imports(c: &mut Criterion) { let bench_import = |group: &mut BenchmarkGroup, version: Version, - n: u32, + n: usize, durations: Durations, buffer_size: usize| { - let name = format!( - "import {version} - {n} * {d_h}/{d_c}/{d_t}/{d_e} - {sz}", - version = version, - n = n, - d_h = durations.headers.as_millis(), - d_c = durations.consensus.as_millis(), - d_t = durations.transactions.as_millis(), - d_e = durations.executes.as_millis(), - sz = buffer_size - ); + let name = name(version, n, durations, buffer_size); group.bench_function(name, move |b| { let rt = Runtime::new().unwrap(); b.to_async(&rt).iter_custom(|iters| async move { let mut elapsed_time = Duration::default(); for _ in 0..iters { - let state = State::new(None, n); + let shared_count = SharedMutex::new(Count::default()); + let state = State::new(None, n as u32); let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = - create_import(shared_state, 
durations, buffer_size, buffer_size); + let (import, _tx, mut shutdown) = create_import( + shared_count.clone(), + shared_state, + durations, + buffer_size, + buffer_size, + ); import.notify_one(); let start = std::time::Instant::now(); black_box( import_version_switch(import, version, &mut shutdown).await, ); elapsed_time += start.elapsed(); + shared_count.apply(|count| assert_eq!(count.transactions, n + 1)); + // shared_count.apply(|count| println!("COUNTS: {:?}", count)); } elapsed_time }) @@ -158,21 +128,21 @@ fn bench_imports(c: &mut Criterion) { transactions: Duration::from_millis(5), executes: Duration::from_millis(10), }; - let n = 50; + let n = 50usize; // V1 - bench_import(&mut group, Version::V1, n, durations, 5); - bench_import(&mut group, Version::V1, n, durations, 10); + // bench_import(&mut group, Version::V1, n, durations, 5); + // bench_import(&mut group, Version::V1, n, durations, 10); bench_import(&mut group, Version::V1, n, durations, 50); // V2 - bench_import(&mut group, Version::V2, n, durations, 5); - bench_import(&mut group, Version::V2, n, durations, 10); + // bench_import(&mut group, Version::V2, n, durations, 5); + // bench_import(&mut group, Version::V2, n, durations, 10); bench_import(&mut group, Version::V2, n, durations, 50); // V3 - bench_import(&mut group, Version::V3, n, durations, 5); - bench_import(&mut group, Version::V3, n, durations, 10); + // bench_import(&mut group, Version::V3, n, durations, 5); + // bench_import(&mut group, Version::V3, n, durations, 10); bench_import(&mut group, Version::V3, n, durations, 50); } diff --git a/benches/src/import.rs b/benches/src/import.rs index 7481639537d..39d90ee2ebc 100644 --- a/benches/src/import.rs +++ b/benches/src/import.rs @@ -1,7 +1,81 @@ +mod count; mod pressure_block_importer_port; mod pressure_consensus_port; mod pressure_peer_to_peer_port; +mod test; +pub use count::Count; +use fuel_core_services::{ + SharedMutex, + StateWatcher, +}; pub use 
pressure_block_importer_port::PressureBlockImporterPort; pub use pressure_consensus_port::PressureConsensusPort; pub use pressure_peer_to_peer_port::PressurePeerToPeerPort; +use std::{ + sync::Arc, + time::Duration, +}; +use tokio::sync::{ + watch::Sender, + Notify, +}; + +use fuel_core_sync::{ + import::Import, + state::State, + Config, +}; + +pub type PressureImport = + Import; + +#[derive(Default, Clone, Copy)] +pub struct Durations { + pub headers: Duration, + pub consensus: Duration, + pub transactions: Duration, + pub executes: Duration, +} + +pub fn create_import( + shared_count: SharedMutex, + shared_state: SharedMutex, + input: Durations, + max_get_header_requests: usize, + max_get_txns_requests: usize, +) -> ( + PressureImport, + Sender, + StateWatcher, +) { + let shared_notify = Arc::new(Notify::new()); + let params = Config { + max_get_header_requests, + max_get_txns_requests, + }; + let p2p = Arc::new(PressurePeerToPeerPort::new( + [input.headers, input.transactions], + shared_count.clone(), + )); + let executor = Arc::new(PressureBlockImporterPort::new( + input.executes, + shared_count.clone(), + )); + let consensus = Arc::new(PressureConsensusPort::new( + input.consensus, + shared_count.clone(), + )); + + let (tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let watcher = shutdown.into(); + let import = Import::new( + shared_state, + shared_notify, + params, + p2p, + executor, + consensus, + ); + (import, tx, watcher) +} diff --git a/benches/src/import/count.rs b/benches/src/import/count.rs new file mode 100644 index 00000000000..df695820d94 --- /dev/null +++ b/benches/src/import/count.rs @@ -0,0 +1,30 @@ +#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct Count { + pub headers: usize, + pub transactions: usize, + pub consensus: usize, + pub executes: usize, + pub blocks: usize, +} + +impl Count { + pub fn inc_headers(&mut self) { + self.headers += 1; + } + + pub fn inc_transactions(&mut 
self) { + self.transactions += 1; + } + + pub fn inc_consensus(&mut self) { + self.consensus += 1; + } + + pub fn inc_executes(&mut self) { + self.executes += 1; + } + + pub fn inc_blocks(&mut self) { + self.blocks += 1; + } +} diff --git a/benches/src/import/pressure_block_importer_port.rs b/benches/src/import/pressure_block_importer_port.rs index 92439f3a97f..81ee4a16ec5 100644 --- a/benches/src/import/pressure_block_importer_port.rs +++ b/benches/src/import/pressure_block_importer_port.rs @@ -1,4 +1,9 @@ -use fuel_core_services::stream::BoxStream; +use crate::import::Count; + +use fuel_core_services::{ + stream::BoxStream, + SharedMutex, +}; use fuel_core_sync::ports::{ BlockImporterPort, MockBlockImporterPort, @@ -9,13 +14,13 @@ use fuel_core_types::{ }; use std::time::Duration; -pub struct PressureBlockImporterPort(MockBlockImporterPort, Duration); +pub struct PressureBlockImporterPort(MockBlockImporterPort, Duration, SharedMutex); impl PressureBlockImporterPort { - pub fn new(delays: Duration) -> Self { + pub fn new(delays: Duration, count: SharedMutex) -> Self { let mut mock = MockBlockImporterPort::default(); mock.expect_execute_and_commit().returning(move |_| Ok(())); - Self(mock, delays) + Self(mock, delays, count) } fn service(&self) -> &impl BlockImporterPort { @@ -25,6 +30,10 @@ impl PressureBlockImporterPort { fn duration(&self) -> Duration { self.1 } + + fn count(&self) -> SharedMutex { + self.2.clone() + } } #[async_trait::async_trait] @@ -40,6 +49,7 @@ impl BlockImporterPort for PressureBlockImporterPort { }) .await .unwrap(); + self.count().apply(|count| count.inc_executes()); self.service().execute_and_commit(block).await } } diff --git a/benches/src/import/pressure_consensus_port.rs b/benches/src/import/pressure_consensus_port.rs index 796dff9e644..5076ce347b3 100644 --- a/benches/src/import/pressure_consensus_port.rs +++ b/benches/src/import/pressure_consensus_port.rs @@ -1,3 +1,5 @@ +use crate::import::Count; +use 
fuel_core_services::SharedMutex; use fuel_core_sync::ports::{ ConsensusPort, MockConsensusPort, @@ -8,14 +10,14 @@ use fuel_core_types::blockchain::{ }; use std::time::Duration; -pub struct PressureConsensusPort(MockConsensusPort, Duration); +pub struct PressureConsensusPort(MockConsensusPort, Duration, SharedMutex); impl PressureConsensusPort { - pub fn new(delays: Duration) -> Self { + pub fn new(delays: Duration, count: SharedMutex) -> Self { let mut mock = MockConsensusPort::default(); mock.expect_await_da_height().returning(|_| Ok(())); mock.expect_check_sealed_header().returning(|_| Ok(true)); - Self(mock, delays) + Self(mock, delays, count) } fn service(&self) -> &impl ConsensusPort { @@ -25,6 +27,10 @@ impl PressureConsensusPort { fn duration(&self) -> Duration { self.1 } + + fn count(&self) -> SharedMutex { + self.2.clone() + } } #[async_trait::async_trait] @@ -34,6 +40,7 @@ impl ConsensusPort for PressureConsensusPort { } async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { + self.count().apply(|count| count.inc_consensus()); tokio::time::sleep(self.duration()).await; self.service().await_da_height(da_height).await } diff --git a/benches/src/import/pressure_peer_to_peer_port.rs b/benches/src/import/pressure_peer_to_peer_port.rs index 2ca7d2f7c9a..65ae5cd1096 100644 --- a/benches/src/import/pressure_peer_to_peer_port.rs +++ b/benches/src/import/pressure_peer_to_peer_port.rs @@ -1,4 +1,9 @@ -use fuel_core_services::stream::BoxStream; +use crate::import::Count; + +use fuel_core_services::{ + stream::BoxStream, + SharedMutex, +}; use fuel_core_sync::ports::{ MockPeerToPeerPort, PeerToPeerPort, @@ -37,16 +42,16 @@ fn empty_header(h: BlockHeight) -> SourcePeer { } } -pub struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2]); +pub struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2], SharedMutex); impl PressurePeerToPeerPort { - pub fn new(delays: [Duration; 2]) -> Self { + pub fn new(delays: [Duration; 2], 
count: SharedMutex) -> Self { let mut mock = MockPeerToPeerPort::default(); mock.expect_get_sealed_block_header() .returning(|h| Ok(Some(empty_header(h)))); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); - Self(mock, delays) + Self(mock, delays, count) } fn service(&self) -> &impl PeerToPeerPort { @@ -56,6 +61,10 @@ impl PressurePeerToPeerPort { fn duration(&self, index: usize) -> Duration { self.1[index] } + + fn count(&self) -> SharedMutex { + self.2.clone() + } } #[async_trait::async_trait] @@ -68,8 +77,10 @@ impl PeerToPeerPort for PressurePeerToPeerPort { &self, height: BlockHeight, ) -> anyhow::Result>> { + self.count().apply(|count| count.inc_headers()); let timeout = self.duration(0); tokio::time::sleep(timeout).await; + self.count().apply(|count| count.inc_blocks()); self.service().get_sealed_block_header(height).await } @@ -79,6 +90,7 @@ impl PeerToPeerPort for PressurePeerToPeerPort { ) -> anyhow::Result>> { let timeout = self.duration(1); tokio::time::sleep(timeout).await; + self.count().apply(|count| count.inc_transactions()); self.service().get_transactions(block_id).await } } diff --git a/benches/src/import/test.rs b/benches/src/import/test.rs new file mode 100644 index 00000000000..0381a48417f --- /dev/null +++ b/benches/src/import/test.rs @@ -0,0 +1,108 @@ +use crate::import::{ + create_import, + Count, + Durations, +}; +use fuel_core_services::SharedMutex; +use fuel_core_sync::state::State; +use std::time::Duration; + +#[tokio::test(flavor = "multi_thread")] +async fn test_v1() { + let durations = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), + }; + let n = 10usize; + let buffer_size = 10; + let state = State::new(None, n as u32); + let shared_count = SharedMutex::new(Count::default()); + let shared_state = SharedMutex::new(state); + let (import, _tx, mut shutdown) = create_import( + shared_count.clone(), + 
shared_state, + durations, + buffer_size, + buffer_size, + ); + import.notify_one(); + import.import(&mut shutdown).await.unwrap(); + println!("{:?}", shared_count); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_v2() { + let durations = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), + }; + let n = 10usize; + let buffer_size = 10; + let state = State::new(None, n as u32); + let shared_count = SharedMutex::new(Count::default()); + let shared_state = SharedMutex::new(state); + let (import, _tx, mut shutdown) = create_import( + shared_count.clone(), + shared_state, + durations, + buffer_size, + buffer_size, + ); + import.notify_one(); + import.import_v2(&mut shutdown).await.unwrap(); + println!("{:?}", shared_count); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_v3() { + let durations = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), + }; + let n = 10usize; + let buffer_size = 10; + let state = State::new(None, n as u32); + let shared_count = SharedMutex::new(Count::default()); + let shared_state = SharedMutex::new(state); + let (import, _tx, mut shutdown) = create_import( + shared_count.clone(), + shared_state, + durations, + buffer_size, + buffer_size, + ); + import.notify_one(); + import.import_v3(&mut shutdown).await.unwrap(); + println!("{:?}", shared_count); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_v4() { + let durations = Durations { + headers: Duration::from_millis(5), + consensus: Duration::from_millis(5), + transactions: Duration::from_millis(5), + executes: Duration::from_millis(10), + }; + let n = 10usize; + let buffer_size = 10; + let state = State::new(None, n as u32); + let shared_count = SharedMutex::new(Count::default()); + let shared_state = 
SharedMutex::new(state); + let (import, _tx, mut shutdown) = create_import( + shared_count.clone(), + shared_state, + durations, + buffer_size, + buffer_size, + ); + import.notify_one(); + import.import_v4(&mut shutdown).await.unwrap(); + println!("{:?}", shared_count); +} diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 9309e1b9680..cae94f99881 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -24,6 +24,7 @@ use fuel_core_types::{ services::p2p::SourcePeer, }; use futures::{ + future::poll_fn, stream::{ self, StreamExt, @@ -31,8 +32,14 @@ use futures::{ FutureExt, Stream, }; -use std::future::Future; -use tokio::sync::Notify; +use std::{ + future::Future, + task::Poll, +}; +use tokio::sync::{ + mpsc, + Notify, +}; use tracing::Instrument; use crate::{ @@ -144,6 +151,13 @@ where Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } + /// Import + pub async fn import_v4(&self, shutdown: &mut StateWatcher) -> anyhow::Result { + self.import_inner(shutdown, 4).await?; + + Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) + } + async fn import_inner( &self, shutdown: &StateWatcher, @@ -156,6 +170,7 @@ where 1 => self.launch_stream(range.clone(), shutdown).await, 2 => self.launch_stream_v2(range.clone(), shutdown).await, 3 => self.launch_stream_v3(range.clone(), shutdown).await, + 4 => self.launch_stream_v4(range.clone(), shutdown).await, _ => panic!("INVALID"), }; @@ -194,7 +209,11 @@ where .. } = &self; // Request up to `max_get_header_requests` headers from the network. - get_header_range_buffered(range.clone(), params, p2p.clone()) + get_header_range(range, p2p.clone()) + .buffered(params.max_get_header_requests) + // Continue the stream unless an error or none occurs. + .into_scan_none_or_err() + .scan_none_or_err() .map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); @@ -384,81 +403,241 @@ where consensus, .. 
} = &self; - get_header_range(range.clone(), p2p.clone()) - .map({ + + let p2p_ = p2p.clone(); + stream::iter(range) + .map(move |height| { + let p2p = p2p_.clone(); + let height: BlockHeight = height.into(); + async move { + let r = + p2p.get_sealed_block_header(height) + .await? + .and_then(|header| { + validate_header_height(height, &header.data) + .then_some(header) + }); + Ok(r) + } + }) + .map(move |result| { let p2p = p2p.clone(); let consensus_port = consensus.clone(); - move |result| { + async move { let p2p = p2p.clone(); let consensus_port = consensus_port.clone(); - async move { - let header = match result.await { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? - { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } + let header = match result.await { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; - consensus_port - .await_da_height(&header.entity.da_height) - .await?; - get_transactions_on_block(p2p.as_ref(), block_id, header).await + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? 
+ { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) } + + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + let block = + get_transactions_on_block(p2p.as_ref(), block_id, header).await?; + Ok(block) } }) .buffered(params.max_get_txns_requests) - .into_scan_none_or_err() - .scan_none_or_err() .take_until({ let mut s = shutdown.clone(); async move { let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); } }) - .then({ + .then(move |block| { let state = state.clone(); let executor = executor.clone(); - move |block| { - { - let state = state.clone(); - let executor = executor.clone(); - async move { - let block = match block { - Ok(b) => b, - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await - } - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() + async move { + let state = state.clone(); + let executor = executor.clone(); + let block = match block { + Ok(Some(b)) => b, + Ok(None) => return Ok(()), + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await?; + Ok(()) } }) - .into_scan_err() - .scan_err() .fold((0usize, Ok(())), |(count, err), result| async move { match result { Ok(_) => (count + 1, err), Err(e) => (count, Err(e)), } }) - .in_current_span() .await } + + async fn launch_stream_v4( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, + ) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. 
+ } = &self; + + let n = *range.end() as usize; + let count = SharedMutex::new(0); + let (header_sender, mut header_receiver) = + mpsc::channel::>( + params.max_get_header_requests, + ); + let (block_sender, mut block_receiver) = + mpsc::channel::(params.max_get_header_requests); + let (execute_sender, mut execute_receiver) = + mpsc::channel::>(params.max_get_header_requests); + let stop = async { + let mut s = shutdown.clone(); + let _ = s.while_started().await; + } + .shared(); + let complete = poll_fn(|_cx| { + let i = count.apply(|count| *count) as usize; + let poll = if i < n { + Poll::Pending + } else { + Poll::Ready(()) + }; + poll + }) + .shared(); + + range + .map(|i| { + let height: BlockHeight = i.into(); + height + }) + .for_each(|height| { + tokio::spawn(download_header(p2p.clone(), height, header_sender.clone())); + }); + + let mut results = vec![]; + loop { + tokio::select! { + header = header_receiver.recv() => { + if let Some(header) = header { + tokio::spawn(download_block(p2p.clone(), consensus.clone(), header, block_sender.clone())); + } + } + + block = block_receiver.recv() => { + if let Some(block) = block { + tokio::spawn(execute_block(executor.clone(), state.clone(), block, execute_sender.clone())); + } + } + + execute = execute_receiver.recv() => { + if let Some(execute) = execute { + results.push(execute); + count.apply(|count| *count += 1); + } + } + + _ = complete.clone() => { break; } + _ = stop.clone() => { break; } + } + } + + let i = count.apply(|count| *count) as usize; + let err = results.into_iter().collect::, _>>().err(); + match err { + Some(err) => (i, Err(err)), + None => (i, Ok(())), + } + } +} + +async fn download_header

( + p2p: Arc

, + block_height: BlockHeight, + sender: mpsc::Sender>, +) -> anyhow::Result<()> +where + P: PeerToPeerPort + Send + Sync + 'static, +{ + let p2p = p2p.clone(); + let block_header = + p2p.get_sealed_block_header(block_height) + .await? + .and_then(|header| { + validate_header_height(block_height, &header.data).then_some(header) + }); + if let Some(block_header) = block_header { + sender.send(block_header).await?; + } + Ok(()) +} + +async fn download_block( + p2p: Arc

, + consensus: Arc, + header: SourcePeer, + sender: mpsc::Sender, +) -> anyhow::Result<()> +where + P: PeerToPeerPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, +{ + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; + + if !consensus + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(()) + } + + consensus.await_da_height(&header.entity.da_height).await?; + let block = get_transactions_on_block(p2p.as_ref(), block_id, header).await?; + if let Some(block) = block { + sender.send(block).await? + } + + Ok(()) +} + +async fn execute_block( + executor: Arc, + state: SharedMutex, + block: SealedBlock, + sender: mpsc::Sender>, +) -> anyhow::Result<()> +where + E: BlockImporterPort + Send + Sync + 'static, +{ + let state = state.clone(); + let result = execute_and_commit(executor.as_ref(), &state, block).await; + sender.send(result).await?; + Ok(()) } /// Waits for a notify or shutdown signal. @@ -479,19 +658,19 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } -/// Returns a stream of headers processing concurrently up to `max_get_header_requests`. -/// The headers are returned in order. -fn get_header_range_buffered( - range: RangeInclusive, - params: &Config, - p2p: Arc, -) -> impl Stream>> { - get_header_range(range, p2p) - .buffered(params.max_get_header_requests) - // Continue the stream unless an error or none occurs. - .into_scan_none_or_err() - .scan_none_or_err() -} +// /// Returns a stream of headers processing concurrently up to `max_get_header_requests`. +// /// The headers are returned in order. 
+// fn get_header_range_buffered( +// range: RangeInclusive, +// params: &Config, +// p2p: Arc, +// ) -> impl Stream>> { +// get_header_range(range, p2p) +// .buffered(params.max_get_header_requests) +// // Continue the stream unless an error or none occurs. +// .into_scan_none_or_err() +// .scan_none_or_err() +// } #[tracing::instrument(skip(p2p))] /// Returns a stream of network requests for headers. From 7587ce29a41239df545b9d8462dc44000ba6c9aa Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 11 Aug 2023 11:51:13 -0400 Subject: [PATCH 10/55] Update import.rs --- crates/services/sync/src/import.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index cae94f99881..7d805dfa897 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -499,7 +499,7 @@ where .. } = &self; - let n = *range.end() as usize; + let end = *range.end() as usize; let count = SharedMutex::new(0); let (header_sender, mut header_receiver) = mpsc::channel::>( @@ -516,7 +516,7 @@ where .shared(); let complete = poll_fn(|_cx| { let i = count.apply(|count| *count) as usize; - let poll = if i < n { + let poll = if i < end + 1 { Poll::Pending } else { Poll::Ready(()) From a5a3b826eb275ce3ef343e03ba080a9323616fd2 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 11 Aug 2023 16:12:27 -0400 Subject: [PATCH 11/55] Update import.rs --- benches/benches/import.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index ab6e4876a59..275a494678f 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -47,6 +47,7 @@ enum Version { V1, V2, V3, + V4, } impl Display for Version { @@ -55,6 +56,7 @@ impl Display for Version { Version::V1 => write!(f, "v1"), Version::V2 => write!(f, "v2"), Version::V3 => write!(f, "v3"), + Version::V4 => write!(f, "v4"), } } } @@ -68,6 +70,7 @@ async fn 
import_version_switch( Version::V1 => import.import(shutdown).await.unwrap(), Version::V2 => import.import_v2(shutdown).await.unwrap(), Version::V3 => import.import_v3(shutdown).await.unwrap(), + Version::V4 => import.import_v4(shutdown).await.unwrap(), }; } @@ -144,6 +147,9 @@ fn bench_imports(c: &mut Criterion) { // bench_import(&mut group, Version::V3, n, durations, 5); // bench_import(&mut group, Version::V3, n, durations, 10); bench_import(&mut group, Version::V3, n, durations, 50); + + // V4 + bench_import(&mut group, Version::V4, n, durations, 50); } criterion_group!(benches, bench_imports); From a9a0acb60682b2f41e73c7afc8a21214f9e54954 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 21 Aug 2023 23:52:12 -0400 Subject: [PATCH 12/55] Merge in progress --- Cargo.lock | 2 + crates/services/sync/src/import.rs | 499 ++++++++++++++--------------- 2 files changed, 251 insertions(+), 250 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b7632e3280f..a7e01f5cef7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2653,6 +2653,8 @@ dependencies = [ name = "fuel-core-benches" version = "0.0.0" dependencies = [ + "anyhow", + "async-trait", "clap", "criterion", "ctrlc", diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 704f11026d6..b83b14558a9 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -35,7 +35,10 @@ use std::{ future::Future, task::Poll, }; -use tokio::sync::{mpsc, Notify}; +use tokio::sync::{ + mpsc, + Notify, +}; use tracing::Instrument; use crate::{ @@ -263,28 +266,28 @@ fn range_chunks( }) } - #[tracing::instrument(skip(self, shutdown))] - /// Launches a stream to import and execute a range of blocks. - /// - /// This stream will process all blocks up to the given range or - /// an error occurs. - /// If an error occurs, the preceding blocks still be processed - /// and the error will be returned. 
- async fn launch_stream( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, - ) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; +#[tracing::instrument(skip(self, shutdown))] +/// Launches a stream to import and execute a range of blocks. +/// +/// This stream will process all blocks up to the given range or +/// an error occurs. +/// If an error occurs, the preceding blocks still be processed +/// and the error will be returned. +async fn launch_stream( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, +) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. + } = &self; - get_headers_buffered(range.clone(), params, p2p.clone()) + get_headers_buffered(range.clone(), params, p2p.clone()) .map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); @@ -340,135 +343,29 @@ fn range_chunks( }) .in_current_span() .await - } - - async fn launch_stream_v2( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, - ) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - get_header_range(range.clone(), p2p.clone()) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |result| { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - let header = match result.await { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? 
- { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } - - consensus_port - .await_da_height(&header.entity.da_height) - .await?; - - get_transactions_on_block(p2p.as_ref(), block_id, header).await - }) - .then(|task| async { task.map_err(|e| anyhow!(e))? }) - } - }) - .buffered(params.max_get_txns_requests) - .into_scan_none_or_err() - .scan_none_or_err() - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); - } - }) - .then({ - let state = state.clone(); - let executor = executor.clone(); - move |block| { - { - let state = state.clone(); - let executor = executor.clone(); - async move { - let block = match block { - Ok(b) => b, - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await - } - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() - } - }) - .into_scan_err() - .scan_err() - .fold((0usize, Ok(())), |(count, err), result| async move { - match result { - Ok(_) => (count + 1, err), - Err(e) => (count, Err(e)), - } - }) - .in_current_span() - .await - } +} - async fn launch_stream_v3( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, - ) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - - let p2p_ = p2p.clone(); - stream::iter(range) - .map(move |height| { - let p2p = p2p_.clone(); - let height: BlockHeight = height.into(); - async move { - let r = - p2p.get_sealed_block_header(height) - .await? - .and_then(|header| { - validate_header_height(height, &header.data) - .then_some(header) - }); - Ok(r) - } - }) - .map(move |result| { +async fn launch_stream_v2( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, +) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. 
+ } = &self; + get_header_range(range.clone(), p2p.clone()) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |result| { let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - async move { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); + let consensus_port = consensus_port.clone(); + tokio::spawn(async move { let header = match result.await { Ok(Some(h)) => h, Ok(None) => return Ok(None), @@ -492,125 +389,227 @@ fn range_chunks( consensus_port .await_da_height(&header.entity.da_height) .await?; - let block = - get_transactions_on_block(p2p.as_ref(), block_id, header).await?; - Ok(block) - } - }) - .buffered(params.max_get_txns_requests) - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - } - }) - .then(move |block| { - let state = state.clone(); - let executor = executor.clone(); - async move { + + get_transactions_on_block(p2p.as_ref(), block_id, header).await + }) + .then(|task| async { task.map_err(|e| anyhow!(e))? 
}) + } + }) + .buffered(params.max_get_txns_requests) + .into_scan_none_or_err() + .scan_none_or_err() + .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); + } + }) + .then({ + let state = state.clone(); + let executor = executor.clone(); + move |block| { + { let state = state.clone(); let executor = executor.clone(); - let block = match block { - Ok(Some(b)) => b, - Ok(None) => return Ok(()), - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await?; - Ok(()) + async move { + let block = match block { + Ok(b) => b, + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await + } } - }) - .fold((0usize, Ok(())), |(count, err), result| async move { - match result { - Ok(_) => (count + 1, err), - Err(e) => (count, Err(e)), + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + } + }) + .into_scan_err() + .scan_err() + .fold((0usize, Ok(())), |(count, err), result| async move { + match result { + Ok(_) => (count + 1, err), + Err(e) => (count, Err(e)), + } + }) + .in_current_span() + .await +} + +async fn launch_stream_v3( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, +) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. + } = &self; + + let p2p_ = p2p.clone(); + stream::iter(range) + .map(move |height| { + let p2p = p2p_.clone(); + let height: BlockHeight = height.into(); + async move { + let r = p2p + .get_sealed_block_header(height) + .await? 
+ .and_then(|header| { + validate_header_height(height, &header.data).then_some(header) + }); + Ok(r) + } + }) + .map(move |result| { + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + async move { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + let header = match result.await { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; + + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) } - }) - .await - } - async fn launch_stream_v4( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, - ) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - - let end = *range.end() as usize; - let count = SharedMutex::new(0); - let (header_sender, mut header_receiver) = - mpsc::channel::>( - params.max_get_header_requests, - ); - let (block_sender, mut block_receiver) = - mpsc::channel::(params.max_get_header_requests); - let (execute_sender, mut execute_receiver) = - mpsc::channel::>(params.max_get_header_requests); - let stop = async { + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + let block = + get_transactions_on_block(p2p.as_ref(), block_id, header).await?; + Ok(block) + } + }) + .buffered(params.max_get_txns_requests) + .take_until({ let mut s = shutdown.clone(); - let _ = s.while_started().await; - } - .shared(); - let complete = poll_fn(|_cx| { - let i = count.apply(|count| *count) as usize; - let poll = if i < end + 1 { - Poll::Pending - } else { - Poll::Ready(()) - }; - poll + async move { + let _ = s.while_started().await; + } }) - .shared(); + .then(move |block| { + let state = state.clone(); + let executor = 
executor.clone(); + async move { + let state = state.clone(); + let executor = executor.clone(); + let block = match block { + Ok(Some(b)) => b, + Ok(None) => return Ok(()), + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await?; + Ok(()) + } + }) + .fold((0usize, Ok(())), |(count, err), result| async move { + match result { + Ok(_) => (count + 1, err), + Err(e) => (count, Err(e)), + } + }) + .await +} - range - .map(|i| { - let height: BlockHeight = i.into(); - height - }) - .for_each(|height| { - tokio::spawn(download_header(p2p.clone(), height, header_sender.clone())); - }); +async fn launch_stream_v4( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, +) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. + } = &self; + + let end = *range.end() as usize; + let count = SharedMutex::new(0); + let (header_sender, mut header_receiver) = + mpsc::channel::>(params.max_get_header_requests); + let (block_sender, mut block_receiver) = + mpsc::channel::(params.max_get_header_requests); + let (execute_sender, mut execute_receiver) = + mpsc::channel::>(params.max_get_header_requests); + let stop = async { + let mut s = shutdown.clone(); + let _ = s.while_started().await; + } + .shared(); + let complete = poll_fn(|_cx| { + let i = count.apply(|count| *count) as usize; + let poll = if i < end + 1 { + Poll::Pending + } else { + Poll::Ready(()) + }; + poll + }) + .shared(); - let mut results = vec![]; - loop { - tokio::select! { - header = header_receiver.recv() => { - if let Some(header) = header { - tokio::spawn(download_block(p2p.clone(), consensus.clone(), header, block_sender.clone())); - } + range + .map(|i| { + let height: BlockHeight = i.into(); + height + }) + .for_each(|height| { + tokio::spawn(download_header(p2p.clone(), height, header_sender.clone())); + }); + + let mut results = vec![]; + loop { + tokio::select! 
{ + header = header_receiver.recv() => { + if let Some(header) = header { + tokio::spawn(download_block(p2p.clone(), consensus.clone(), header, block_sender.clone())); } + } - block = block_receiver.recv() => { - if let Some(block) = block { - tokio::spawn(execute_block(executor.clone(), state.clone(), block, execute_sender.clone())); - } + block = block_receiver.recv() => { + if let Some(block) = block { + tokio::spawn(execute_block(executor.clone(), state.clone(), block, execute_sender.clone())); } + } - execute = execute_receiver.recv() => { - if let Some(execute) = execute { - results.push(execute); - count.apply(|count| *count += 1); - } + execute = execute_receiver.recv() => { + if let Some(execute) = execute { + results.push(execute); + count.apply(|count| *count += 1); } - - _ = complete.clone() => { break; } - _ = stop.clone() => { break; } } - } - let i = count.apply(|count| *count) as usize; - let err = results.into_iter().collect::, _>>().err(); - match err { - Some(err) => (i, Err(err)), - None => (i, Ok(())), + _ = complete.clone() => { break; } + _ = stop.clone() => { break; } } } + + let i = count.apply(|count| *count) as usize; + let err = results.into_iter().collect::, _>>().err(); + match err { + Some(err) => (i, Err(err)), + None => (i, Ok(())), + } } async fn download_header

( From 353faf28a216e783092080e6d098b17cc6ee10fb Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 22 Aug 2023 13:07:47 -0400 Subject: [PATCH 13/55] Remove deprecated import version --- crates/services/sync/src/import.rs | 246 ----------------------------- 1 file changed, 246 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index b83b14558a9..82f5a0b1709 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -436,252 +436,6 @@ async fn launch_stream_v2( .await } -async fn launch_stream_v3( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, -) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - - let p2p_ = p2p.clone(); - stream::iter(range) - .map(move |height| { - let p2p = p2p_.clone(); - let height: BlockHeight = height.into(); - async move { - let r = p2p - .get_sealed_block_header(height) - .await? - .and_then(|header| { - validate_header_height(height, &header.data).then_some(header) - }); - Ok(r) - } - }) - .map(move |result| { - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - async move { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - let header = match result.await { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? 
- { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } - - consensus_port - .await_da_height(&header.entity.da_height) - .await?; - let block = - get_transactions_on_block(p2p.as_ref(), block_id, header).await?; - Ok(block) - } - }) - .buffered(params.max_get_txns_requests) - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - } - }) - .then(move |block| { - let state = state.clone(); - let executor = executor.clone(); - async move { - let state = state.clone(); - let executor = executor.clone(); - let block = match block { - Ok(Some(b)) => b, - Ok(None) => return Ok(()), - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await?; - Ok(()) - } - }) - .fold((0usize, Ok(())), |(count, err), result| async move { - match result { - Ok(_) => (count + 1, err), - Err(e) => (count, Err(e)), - } - }) - .await -} - -async fn launch_stream_v4( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, -) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. 
- } = &self; - - let end = *range.end() as usize; - let count = SharedMutex::new(0); - let (header_sender, mut header_receiver) = - mpsc::channel::>(params.max_get_header_requests); - let (block_sender, mut block_receiver) = - mpsc::channel::(params.max_get_header_requests); - let (execute_sender, mut execute_receiver) = - mpsc::channel::>(params.max_get_header_requests); - let stop = async { - let mut s = shutdown.clone(); - let _ = s.while_started().await; - } - .shared(); - let complete = poll_fn(|_cx| { - let i = count.apply(|count| *count) as usize; - let poll = if i < end + 1 { - Poll::Pending - } else { - Poll::Ready(()) - }; - poll - }) - .shared(); - - range - .map(|i| { - let height: BlockHeight = i.into(); - height - }) - .for_each(|height| { - tokio::spawn(download_header(p2p.clone(), height, header_sender.clone())); - }); - - let mut results = vec![]; - loop { - tokio::select! { - header = header_receiver.recv() => { - if let Some(header) = header { - tokio::spawn(download_block(p2p.clone(), consensus.clone(), header, block_sender.clone())); - } - } - - block = block_receiver.recv() => { - if let Some(block) = block { - tokio::spawn(execute_block(executor.clone(), state.clone(), block, execute_sender.clone())); - } - } - - execute = execute_receiver.recv() => { - if let Some(execute) = execute { - results.push(execute); - count.apply(|count| *count += 1); - } - } - - _ = complete.clone() => { break; } - _ = stop.clone() => { break; } - } - } - - let i = count.apply(|count| *count) as usize; - let err = results.into_iter().collect::, _>>().err(); - match err { - Some(err) => (i, Err(err)), - None => (i, Ok(())), - } -} - -async fn download_header

( - p2p: Arc

, - block_height: BlockHeight, - sender: mpsc::Sender>, -) -> anyhow::Result<()> -where - P: PeerToPeerPort + Send + Sync + 'static, -{ - let p2p = p2p.clone(); - let block_header = - p2p.get_sealed_block_header(block_height) - .await? - .and_then(|header| { - validate_header_height(block_height, &header.data).then_some(header) - }); - if let Some(block_header) = block_header { - sender.send(block_header).await?; - } - Ok(()) -} - -async fn download_block( - p2p: Arc

, - consensus: Arc, - header: SourcePeer, - sender: mpsc::Sender, -) -> anyhow::Result<()> -where - P: PeerToPeerPort + Send + Sync + 'static, - C: ConsensusPort + Send + Sync + 'static, -{ - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - if !consensus - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? - { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(()) - } - - consensus.await_da_height(&header.entity.da_height).await?; - let block = get_transactions_on_block(p2p.as_ref(), block_id, header).await?; - if let Some(block) = block { - sender.send(block).await? - } - - Ok(()) -} - -async fn execute_block( - executor: Arc, - state: SharedMutex, - block: SealedBlock, - sender: mpsc::Sender>, -) -> anyhow::Result<()> -where - E: BlockImporterPort + Send + Sync + 'static, -{ - let state = state.clone(); - let result = execute_and_commit(executor.as_ref(), &state, block).await; - sender.send(result).await?; - Ok(()) -} - /// Waits for a notify or shutdown signal. /// Returns true if the notify signal was received. async fn wait_for_notify_or_shutdown( From 3dfebe10dfa46709ad5e23cd1acbc01f6823998c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 22 Aug 2023 13:09:23 -0400 Subject: [PATCH 14/55] Rearrange --- crates/services/sync/src/import.rs | 340 ++++++++++++++--------------- 1 file changed, 170 insertions(+), 170 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 82f5a0b1709..6b7e1e09093 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -193,6 +193,176 @@ where Ok(()) } + #[tracing::instrument(skip(self, shutdown))] + /// Launches a stream to import and execute a range of blocks. + /// + /// This stream will process all blocks up to the given range or + /// an error occurs. 
+ /// If an error occurs, the preceding blocks still be processed + /// and the error will be returned. + async fn launch_stream( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, + ) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. + } = &self; + + get_headers_buffered(range.clone(), params, p2p.clone()) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |result| { + Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() + }) + // Request up to `max_get_txns_requests` transactions from the network. + .buffered(params.max_get_txns_requests) + // Continue the stream unless an error or none occurs. + // Note the error will be returned but the stream will close. + .into_scan_none_or_err() + .scan_none_or_err() + // Continue the stream until the shutdown signal is received. + .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); + } + }) + .then({ + let state = state.clone(); + let executor = executor.clone(); + move |block| { + let state = state.clone(); + let executor = executor.clone(); + async move { + // Short circuit on error. + let block = match block { + Ok(b) => b, + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await + } + } + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + }) + // Continue the stream unless an error occurs. + .into_scan_err() + .scan_err() + // Count the number of successfully executed blocks and + // find any errors. + // Fold the stream into a count and any errors. 
+ .fold((0usize, Ok(())), |(count, res), result| async move { + match result { + Ok(_) => (count + 1, res), + Err(e) => (count, Err(e)), + } + }) + .in_current_span() + .await + } + + async fn launch_stream_v2( + &self, + range: RangeInclusive, + shutdown: &StateWatcher, + ) -> (usize, anyhow::Result<()>) { + let Self { + state, + params, + p2p, + executor, + consensus, + .. + } = &self; + get_header_range(range.clone(), p2p.clone()) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |result| { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + tokio::spawn(async move { + let header = match result.await { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; + + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) + } + + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + + get_transactions_on_block(p2p.as_ref(), block_id, header).await + }) + .then(|task| async { task.map_err(|e| anyhow!(e))? 
}) + } + }) + .buffered(params.max_get_txns_requests) + .into_scan_none_or_err() + .scan_none_or_err() + .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); + } + }) + .then({ + let state = state.clone(); + let executor = executor.clone(); + move |block| { + { + let state = state.clone(); + let executor = executor.clone(); + async move { + let block = match block { + Ok(b) => b, + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await + } + } + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + } + }) + .into_scan_err() + .scan_err() + .fold((0usize, Ok(())), |(count, err), result| async move { + match result { + Ok(_) => (count + 1, err), + Err(e) => (count, Err(e)), + } + }) + .in_current_span() + .await + } + async fn get_block_for_header( result: anyhow::Result>, p2p: Arc

, @@ -266,176 +436,6 @@ fn range_chunks( }) } -#[tracing::instrument(skip(self, shutdown))] -/// Launches a stream to import and execute a range of blocks. -/// -/// This stream will process all blocks up to the given range or -/// an error occurs. -/// If an error occurs, the preceding blocks still be processed -/// and the error will be returned. -async fn launch_stream( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, -) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - - get_headers_buffered(range.clone(), params, p2p.clone()) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |result| { - Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) - } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - }) - // Request up to `max_get_txns_requests` transactions from the network. - .buffered(params.max_get_txns_requests) - // Continue the stream unless an error or none occurs. - // Note the error will be returned but the stream will close. - .into_scan_none_or_err() - .scan_none_or_err() - // Continue the stream until the shutdown signal is received. - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); - } - }) - .then({ - let state = state.clone(); - let executor = executor.clone(); - move |block| { - let state = state.clone(); - let executor = executor.clone(); - async move { - // Short circuit on error. - let block = match block { - Ok(b) => b, - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await - } - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() - }) - // Continue the stream unless an error occurs. - .into_scan_err() - .scan_err() - // Count the number of successfully executed blocks and - // find any errors. 
- // Fold the stream into a count and any errors. - .fold((0usize, Ok(())), |(count, res), result| async move { - match result { - Ok(_) => (count + 1, res), - Err(e) => (count, Err(e)), - } - }) - .in_current_span() - .await -} - -async fn launch_stream_v2( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, -) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - get_header_range(range.clone(), p2p.clone()) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |result| { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - let header = match result.await { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? - { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } - - consensus_port - .await_da_height(&header.entity.da_height) - .await?; - - get_transactions_on_block(p2p.as_ref(), block_id, header).await - }) - .then(|task| async { task.map_err(|e| anyhow!(e))? 
}) - } - }) - .buffered(params.max_get_txns_requests) - .into_scan_none_or_err() - .scan_none_or_err() - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); - } - }) - .then({ - let state = state.clone(); - let executor = executor.clone(); - move |block| { - { - let state = state.clone(); - let executor = executor.clone(); - async move { - let block = match block { - Ok(b) => b, - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await - } - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() - } - }) - .into_scan_err() - .scan_err() - .fold((0usize, Ok(())), |(count, err), result| async move { - match result { - Ok(_) => (count + 1, err), - Err(e) => (count, Err(e)), - } - }) - .in_current_span() - .await -} - /// Waits for a notify or shutdown signal. /// Returns true if the notify signal was received. async fn wait_for_notify_or_shutdown( From ce9bd5fed1b9be379caef0804eb58cfc93dcb1da Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 22 Aug 2023 20:08:05 -0400 Subject: [PATCH 15/55] Fix whitespace --- crates/services/sync/src/import.rs | 118 +++++++++++++---------------- 1 file changed, 52 insertions(+), 66 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 6b7e1e09093..62b09056a0e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -146,20 +146,6 @@ where Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } - /// Import - pub async fn import_v3(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown, 3).await?; - - Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) - } - - /// Import - pub async fn import_v4(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown, 4).await?; - - 
Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) - } - async fn import_inner( &self, shutdown: &StateWatcher, @@ -215,61 +201,61 @@ where } = &self; get_headers_buffered(range.clone(), params, p2p.clone()) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |result| { - Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) - } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - }) - // Request up to `max_get_txns_requests` transactions from the network. - .buffered(params.max_get_txns_requests) - // Continue the stream unless an error or none occurs. - // Note the error will be returned but the stream will close. - .into_scan_none_or_err() - .scan_none_or_err() - // Continue the stream until the shutdown signal is received. - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); - } - }) - .then({ + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |result| { + Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() + }) + // Request up to `max_get_txns_requests` transactions from the network. + .buffered(params.max_get_txns_requests) + // Continue the stream unless an error or none occurs. + // Note the error will be returned but the stream will close. + .into_scan_none_or_err() + .scan_none_or_err() + // Continue the stream until the shutdown signal is received. 
+ .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); + } + }) + .then({ + let state = state.clone(); + let executor = executor.clone(); + move |block| { let state = state.clone(); let executor = executor.clone(); - move |block| { - let state = state.clone(); - let executor = executor.clone(); - async move { - // Short circuit on error. - let block = match block { - Ok(b) => b, - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await - } - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() - }) - // Continue the stream unless an error occurs. - .into_scan_err() - .scan_err() - // Count the number of successfully executed blocks and - // find any errors. - // Fold the stream into a count and any errors. - .fold((0usize, Ok(())), |(count, res), result| async move { - match result { - Ok(_) => (count + 1, res), - Err(e) => (count, Err(e)), + async move { + // Short circuit on error. + let block = match block { + Ok(b) => b, + Err(e) => return Err(e), + }; + execute_and_commit(executor.as_ref(), &state, block).await } - }) - .in_current_span() - .await + } + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + }) + // Continue the stream unless an error occurs. + .into_scan_err() + .scan_err() + // Count the number of successfully executed blocks and + // find any errors. + // Fold the stream into a count and any errors. 
+ .fold((0usize, Ok(())), |(count, res), result| async move { + match result { + Ok(_) => (count + 1, res), + Err(e) => (count, Err(e)), + } + }) + .in_current_span() + .await } async fn launch_stream_v2( From 1ae5b9f4133ddb962978500bf38de043a2c9605c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 22 Aug 2023 21:58:44 -0400 Subject: [PATCH 16/55] Remove unused version tests --- benches/Cargo.toml | 2 +- benches/benches/import.rs | 96 ++--------- benches/src/import.rs | 7 +- benches/src/import/count.rs | 4 + .../src/import/pressure_peer_to_peer_port.rs | 27 ++- benches/src/import/test.rs | 108 ------------ crates/services/sync/src/import.rs | 155 +++--------------- 7 files changed, 63 insertions(+), 336 deletions(-) delete mode 100644 benches/src/import/test.rs diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 57040e0616e..d14a425b53c 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -6,7 +6,7 @@ publish = false version = "0.0.0" [dependencies] -anyhow = { worksspace = true } +anyhow = { workspace = true } async-trait = { workspace = true } clap = { workspace = true, features = ["derive"] } criterion = { version = "0.5", features = ["html_reports", "async", "async_tokio"] } diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 275a494678f..b5c66a2bd7e 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -10,74 +10,23 @@ use fuel_core_benches::import::{ create_import, Count, Durations, - PressureBlockImporterPort, - PressureConsensusPort, PressureImport, - PressurePeerToPeerPort, }; use fuel_core_services::{ SharedMutex, StateWatcher, }; -use fuel_core_sync::{ - import::{ - Config, - Import, - }, - state::State, -}; -use std::{ - fmt::{ - Display, - Formatter, - }, - sync::Arc, - time::Duration, -}; -use tokio::{ - runtime::Runtime, - sync::{ - watch::Sender, - Notify, - }, -}; +use fuel_core_sync::state::State; +use std::time::Duration; +use tokio::runtime::Runtime; -#[derive(Clone, Copy)] -enum 
Version { - V1, - V2, - V3, - V4, +async fn execute_import(import: PressureImport, shutdown: &mut StateWatcher) { + import.import(shutdown).await.unwrap(); } -impl Display for Version { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - Version::V1 => write!(f, "v1"), - Version::V2 => write!(f, "v2"), - Version::V3 => write!(f, "v3"), - Version::V4 => write!(f, "v4"), - } - } -} - -async fn import_version_switch( - import: PressureImport, - version: Version, - shutdown: &mut StateWatcher, -) { - match version { - Version::V1 => import.import(shutdown).await.unwrap(), - Version::V2 => import.import_v2(shutdown).await.unwrap(), - Version::V3 => import.import_v3(shutdown).await.unwrap(), - Version::V4 => import.import_v4(shutdown).await.unwrap(), - }; -} - -fn name(version: Version, n: usize, durations: Durations, buffer_size: usize) -> String { +fn name(n: usize, durations: Durations, buffer_size: usize) -> String { format!( - "import {version} - {n} * {d_h}/{d_c}/{d_t}/{d_e} - {sz}", - version = version, + "import {n} * {d_h}/{d_c}/{d_t}/{d_e} - {sz}", n = n, d_h = durations.headers.as_millis(), d_c = durations.consensus.as_millis(), @@ -89,11 +38,11 @@ fn name(version: Version, n: usize, durations: Durations, buffer_size: usize) -> fn bench_imports(c: &mut Criterion) { let bench_import = |group: &mut BenchmarkGroup, - version: Version, n: usize, durations: Durations, + batch_size: u32, buffer_size: usize| { - let name = name(version, n, durations, buffer_size); + let name = name(n, durations, buffer_size); group.bench_function(name, move |b| { let rt = Runtime::new().unwrap(); b.to_async(&rt).iter_custom(|iters| async move { @@ -106,17 +55,14 @@ fn bench_imports(c: &mut Criterion) { shared_count.clone(), shared_state, durations, + batch_size, buffer_size, buffer_size, ); import.notify_one(); let start = std::time::Instant::now(); - black_box( - import_version_switch(import, version, &mut shutdown).await, - ); + 
black_box(execute_import(import, &mut shutdown).await); elapsed_time += start.elapsed(); - shared_count.apply(|count| assert_eq!(count.transactions, n + 1)); - // shared_count.apply(|count| println!("COUNTS: {:?}", count)); } elapsed_time }) @@ -131,25 +77,9 @@ fn bench_imports(c: &mut Criterion) { transactions: Duration::from_millis(5), executes: Duration::from_millis(10), }; + let batch_size = 10; let n = 50usize; - - // V1 - // bench_import(&mut group, Version::V1, n, durations, 5); - // bench_import(&mut group, Version::V1, n, durations, 10); - bench_import(&mut group, Version::V1, n, durations, 50); - - // V2 - // bench_import(&mut group, Version::V2, n, durations, 5); - // bench_import(&mut group, Version::V2, n, durations, 10); - bench_import(&mut group, Version::V2, n, durations, 50); - - // V3 - // bench_import(&mut group, Version::V3, n, durations, 5); - // bench_import(&mut group, Version::V3, n, durations, 10); - bench_import(&mut group, Version::V3, n, durations, 50); - - // V4 - bench_import(&mut group, Version::V4, n, durations, 50); + bench_import(&mut group, n, durations, batch_size, 50); } criterion_group!(benches, bench_imports); diff --git a/benches/src/import.rs b/benches/src/import.rs index 39d90ee2ebc..76b4e37c7d0 100644 --- a/benches/src/import.rs +++ b/benches/src/import.rs @@ -2,7 +2,6 @@ mod count; mod pressure_block_importer_port; mod pressure_consensus_port; mod pressure_peer_to_peer_port; -mod test; pub use count::Count; use fuel_core_services::{ @@ -42,7 +41,8 @@ pub fn create_import( shared_count: SharedMutex, shared_state: SharedMutex, input: Durations, - max_get_header_requests: usize, + header_batch_size: u32, + max_header_batch_requests: usize, max_get_txns_requests: usize, ) -> ( PressureImport, @@ -51,7 +51,8 @@ pub fn create_import( ) { let shared_notify = Arc::new(Notify::new()); let params = Config { - max_get_header_requests, + max_header_batch_requests, + header_batch_size, max_get_txns_requests, }; let p2p = 
Arc::new(PressurePeerToPeerPort::new( diff --git a/benches/src/import/count.rs b/benches/src/import/count.rs index df695820d94..3300a39529a 100644 --- a/benches/src/import/count.rs +++ b/benches/src/import/count.rs @@ -12,6 +12,10 @@ impl Count { self.headers += 1; } + pub fn dec_headers(&mut self) { + self.headers -= 1; + } + pub fn inc_transactions(&mut self) { self.transactions += 1; } diff --git a/benches/src/import/pressure_peer_to_peer_port.rs b/benches/src/import/pressure_peer_to_peer_port.rs index 65ae5cd1096..a37815a8f08 100644 --- a/benches/src/import/pressure_peer_to_peer_port.rs +++ b/benches/src/import/pressure_peer_to_peer_port.rs @@ -1,4 +1,5 @@ use crate::import::Count; +use std::ops::Range; use fuel_core_services::{ stream::BoxStream, @@ -47,8 +48,15 @@ pub struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2], SharedMutex impl PressurePeerToPeerPort { pub fn new(delays: [Duration; 2], count: SharedMutex) -> Self { let mut mock = MockPeerToPeerPort::default(); - mock.expect_get_sealed_block_header() - .returning(|h| Ok(Some(empty_header(h)))); + mock.expect_get_sealed_block_headers().returning(|range| { + Ok(Some( + range + .clone() + .map(BlockHeight::from) + .map(empty_header) + .collect(), + )) + }); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); Self(mock, delays, count) @@ -73,15 +81,20 @@ impl PeerToPeerPort for PressurePeerToPeerPort { self.service().height_stream() } - async fn get_sealed_block_header( + async fn get_sealed_block_headers( &self, - height: BlockHeight, - ) -> anyhow::Result>> { + block_height_range: Range, + ) -> anyhow::Result>>> { self.count().apply(|count| count.inc_headers()); let timeout = self.duration(0); + self.count().apply(|c| c.dec_headers()); tokio::time::sleep(timeout).await; - self.count().apply(|count| count.inc_blocks()); - self.service().get_sealed_block_header(height).await + for _ in block_height_range.clone() { + self.count().apply(|c| c.inc_blocks()); + } + self.service() + 
.get_sealed_block_headers(block_height_range) + .await } async fn get_transactions( diff --git a/benches/src/import/test.rs b/benches/src/import/test.rs deleted file mode 100644 index 0381a48417f..00000000000 --- a/benches/src/import/test.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::import::{ - create_import, - Count, - Durations, -}; -use fuel_core_services::SharedMutex; -use fuel_core_sync::state::State; -use std::time::Duration; - -#[tokio::test(flavor = "multi_thread")] -async fn test_v1() { - let durations = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), - }; - let n = 10usize; - let buffer_size = 10; - let state = State::new(None, n as u32); - let shared_count = SharedMutex::new(Count::default()); - let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = create_import( - shared_count.clone(), - shared_state, - durations, - buffer_size, - buffer_size, - ); - import.notify_one(); - import.import(&mut shutdown).await.unwrap(); - println!("{:?}", shared_count); -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_v2() { - let durations = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), - }; - let n = 10usize; - let buffer_size = 10; - let state = State::new(None, n as u32); - let shared_count = SharedMutex::new(Count::default()); - let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = create_import( - shared_count.clone(), - shared_state, - durations, - buffer_size, - buffer_size, - ); - import.notify_one(); - import.import_v2(&mut shutdown).await.unwrap(); - println!("{:?}", shared_count); -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_v3() { - let durations = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - 
transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), - }; - let n = 10usize; - let buffer_size = 10; - let state = State::new(None, n as u32); - let shared_count = SharedMutex::new(Count::default()); - let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = create_import( - shared_count.clone(), - shared_state, - durations, - buffer_size, - buffer_size, - ); - import.notify_one(); - import.import_v3(&mut shutdown).await.unwrap(); - println!("{:?}", shared_count); -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_v4() { - let durations = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), - }; - let n = 10usize; - let buffer_size = 10; - let state = State::new(None, n as u32); - let shared_count = SharedMutex::new(Count::default()); - let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = create_import( - shared_count.clone(), - shared_state, - durations, - buffer_size, - buffer_size, - ); - import.notify_one(); - import.import_v4(&mut shutdown).await.unwrap(); - println!("{:?}", shared_count); -} diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 62b09056a0e..582880c224a 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -23,22 +23,11 @@ use fuel_core_types::{ services::p2p::SourcePeer, }; use futures::{ - future::poll_fn, - stream::{ - self, - StreamExt, - }, + stream::StreamExt, FutureExt, Stream, }; -use std::{ - future::Future, - task::Poll, -}; -use tokio::sync::{ - mpsc, - Notify, -}; +use tokio::sync::Notify; use tracing::Instrument; use crate::{ @@ -134,33 +123,16 @@ where #[tracing::instrument(skip_all)] /// Import pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown, 1).await?; - - Ok(wait_for_notify_or_shutdown(&self.notify, 
shutdown).await) - } - - /// Import - pub async fn import_v2(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown, 2).await?; + self.import_inner(shutdown).await?; Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } - async fn import_inner( - &self, - shutdown: &StateWatcher, - version: u32, - ) -> anyhow::Result<()> { + async fn import_inner(&self, shutdown: &StateWatcher) -> anyhow::Result<()> { // If there is a range to process, launch the stream. if let Some(range) = self.state.apply(|s| s.process_range()) { // Launch the stream to import the range. - let (count, result) = match version { - 1 => self.launch_stream(range.clone(), shutdown).await, - 2 => self.launch_stream_v2(range.clone(), shutdown).await, - 3 => self.launch_stream_v3(range.clone(), shutdown).await, - 4 => self.launch_stream_v4(range.clone(), shutdown).await, - _ => panic!("INVALID"), - }; + let (count, result) = self.launch_stream(range.clone(), shutdown).await; // Get the size of the range. let range_len = range.size_hint().0 as u32; @@ -205,10 +177,14 @@ where let p2p = p2p.clone(); let consensus_port = consensus.clone(); move |result| { - Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + tokio::spawn(async move { + Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()).await + }).then(|task| async { task.map_err(|e| anyhow!(e))? }) } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() }) // Request up to `max_get_txns_requests` transactions from the network. 
.buffered(params.max_get_txns_requests) @@ -236,11 +212,12 @@ where Ok(b) => b, Err(e) => return Err(e), }; + execute_and_commit(executor.as_ref(), &state, block).await } } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() }) // Continue the stream unless an error occurs. .into_scan_err() @@ -258,104 +235,14 @@ where .await } - async fn launch_stream_v2( - &self, - range: RangeInclusive, - shutdown: &StateWatcher, - ) -> (usize, anyhow::Result<()>) { - let Self { - state, - params, - p2p, - executor, - consensus, - .. - } = &self; - get_header_range(range.clone(), p2p.clone()) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |result| { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - let header = match result.await { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? - { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } - - consensus_port - .await_da_height(&header.entity.da_height) - .await?; - - get_transactions_on_block(p2p.as_ref(), block_id, header).await - }) - .then(|task| async { task.map_err(|e| anyhow!(e))? 
}) - } - }) - .buffered(params.max_get_txns_requests) - .into_scan_none_or_err() - .scan_none_or_err() - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); - } - }) - .then({ - let state = state.clone(); - let executor = executor.clone(); - move |block| { - { - let state = state.clone(); - let executor = executor.clone(); - async move { - let block = match block { - Ok(b) => b, - Err(e) => return Err(e), - }; - execute_and_commit(executor.as_ref(), &state, block).await - } - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() - } - }) - .into_scan_err() - .scan_err() - .fold((0usize, Ok(())), |(count, err), result| async move { - match result { - Ok(_) => (count + 1, err), - Err(e) => (count, Err(e)), - } - }) - .in_current_span() - .await - } - async fn get_block_for_header( - result: anyhow::Result>, + result: anyhow::Result>>, p2p: Arc

, consensus_port: Arc, ) -> anyhow::Result> { let header = match result { - Ok(h) => h, + Ok(Some(h)) => h, + Ok(None) => return Ok(None), Err(e) => return Err(e), }; let SourcePeer { @@ -387,7 +274,7 @@ fn get_headers_buffered( range: RangeInclusive, params: &Config, p2p: Arc

, -) -> impl Stream>> { +) -> impl Stream>>> { let Config { header_batch_size, max_header_batch_requests, @@ -407,8 +294,8 @@ fn get_headers_buffered( }) .buffered(*max_header_batch_requests) .flatten() - .into_scan_none_or_err() - .scan_none_or_err() + // .into_scan_none_or_err() + // .scan_none_or_err() } fn range_chunks( From 5a99b6e72c58236fca8df270bebcb02e6ef4a9d6 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 22 Aug 2023 22:14:42 -0400 Subject: [PATCH 17/55] Minor refactor --- benches/benches/import.rs | 12 ++++++------ benches/src/import.rs | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index b5c66a2bd7e..5591d913260 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -7,7 +7,7 @@ use criterion::{ Criterion, }; use fuel_core_benches::import::{ - create_import, + provision_import_test, Count, Durations, PressureImport, @@ -24,7 +24,7 @@ async fn execute_import(import: PressureImport, shutdown: &mut StateWatcher) { import.import(shutdown).await.unwrap(); } -fn name(n: usize, durations: Durations, buffer_size: usize) -> String { +fn name(n: u32, durations: Durations, buffer_size: usize) -> String { format!( "import {n} * {d_h}/{d_c}/{d_t}/{d_e} - {sz}", n = n, @@ -38,7 +38,7 @@ fn name(n: usize, durations: Durations, buffer_size: usize) -> String { fn bench_imports(c: &mut Criterion) { let bench_import = |group: &mut BenchmarkGroup, - n: usize, + n: u32, durations: Durations, batch_size: u32, buffer_size: usize| { @@ -49,9 +49,9 @@ fn bench_imports(c: &mut Criterion) { let mut elapsed_time = Duration::default(); for _ in 0..iters { let shared_count = SharedMutex::new(Count::default()); - let state = State::new(None, n as u32); + let state = State::new(None, n); let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = create_import( + let (import, _tx, mut shutdown) = provision_import_test( shared_count.clone(), shared_state, 
durations, @@ -78,7 +78,7 @@ fn bench_imports(c: &mut Criterion) { executes: Duration::from_millis(10), }; let batch_size = 10; - let n = 50usize; + let n = 50; bench_import(&mut group, n, durations, batch_size, 50); } diff --git a/benches/src/import.rs b/benches/src/import.rs index 76b4e37c7d0..16572986d21 100644 --- a/benches/src/import.rs +++ b/benches/src/import.rs @@ -37,7 +37,7 @@ pub struct Durations { pub executes: Duration, } -pub fn create_import( +pub fn provision_import_test( shared_count: SharedMutex, shared_state: SharedMutex, input: Durations, From 19b973ad4bdc43a03fede4f1aa4c79243fb5dd24 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 22 Aug 2023 22:19:24 -0400 Subject: [PATCH 18/55] Update import.rs --- crates/services/sync/src/import.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 582880c224a..3f180d0f888 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -236,13 +236,12 @@ where } async fn get_block_for_header( - result: anyhow::Result>>, + result: anyhow::Result>, p2p: Arc

, consensus_port: Arc, ) -> anyhow::Result> { let header = match result { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), + Ok(h) => h, Err(e) => return Err(e), }; let SourcePeer { @@ -274,7 +273,7 @@ fn get_headers_buffered( range: RangeInclusive, params: &Config, p2p: Arc

, -) -> impl Stream>>> { +) -> impl Stream>> { let Config { header_batch_size, max_header_batch_requests, @@ -294,8 +293,8 @@ fn get_headers_buffered( }) .buffered(*max_header_batch_requests) .flatten() - // .into_scan_none_or_err() - // .scan_none_or_err() + .into_scan_none_or_err() + .scan_none_or_err() } fn range_chunks( From 86a5d016ad62547c8652acd528b519004861fdbb Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 00:21:28 -0400 Subject: [PATCH 19/55] Separate test helpers --- benches/benches/import.rs | 9 +- benches/src/import.rs | 44 ++-- benches/src/import/count.rs | 34 --- .../import/pressure_block_importer_port.rs | 55 ----- benches/src/import/pressure_consensus_port.rs | 47 ----- .../src/import/pressure_peer_to_peer_port.rs | 109 ---------- crates/services/sync/src/import.rs | 7 +- .../sync/src/import/back_pressure_tests.rs | 199 +----------------- .../services/sync/src/import/test_helpers.rs | 45 ++++ .../sync/src/import/test_helpers/counts.rs | 56 +++++ .../test_helpers/pressure_block_importer.rs | 40 ++++ .../import/test_helpers/pressure_consensus.rs | 37 ++++ .../test_helpers/pressure_peer_to_peer.rs | 82 ++++++++ crates/services/sync/src/service/tests.rs | 2 +- 14 files changed, 301 insertions(+), 465 deletions(-) delete mode 100644 benches/src/import/count.rs delete mode 100644 benches/src/import/pressure_block_importer_port.rs delete mode 100644 benches/src/import/pressure_consensus_port.rs delete mode 100644 benches/src/import/pressure_peer_to_peer_port.rs create mode 100644 crates/services/sync/src/import/test_helpers.rs create mode 100644 crates/services/sync/src/import/test_helpers/counts.rs create mode 100644 crates/services/sync/src/import/test_helpers/pressure_block_importer.rs create mode 100644 crates/services/sync/src/import/test_helpers/pressure_consensus.rs create mode 100644 crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 
5591d913260..7a99dcfa039 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -8,9 +8,9 @@ use criterion::{ }; use fuel_core_benches::import::{ provision_import_test, - Count, Durations, PressureImport, + SharedCounts, }; use fuel_core_services::{ SharedMutex, @@ -48,7 +48,7 @@ fn bench_imports(c: &mut Criterion) { b.to_async(&rt).iter_custom(|iters| async move { let mut elapsed_time = Duration::default(); for _ in 0..iters { - let shared_count = SharedMutex::new(Count::default()); + let shared_count = SharedCounts::new(Default::default()); let state = State::new(None, n); let shared_state = SharedMutex::new(state); let (import, _tx, mut shutdown) = provision_import_test( @@ -71,6 +71,7 @@ fn bench_imports(c: &mut Criterion) { let mut group = c.benchmark_group("import"); + let n = 50; let durations = Durations { headers: Duration::from_millis(5), consensus: Duration::from_millis(5), @@ -78,8 +79,8 @@ fn bench_imports(c: &mut Criterion) { executes: Duration::from_millis(10), }; let batch_size = 10; - let n = 50; - bench_import(&mut group, n, durations, batch_size, 50); + let buffer_size = 50; + bench_import(&mut group, n, durations, batch_size, buffer_size); } criterion_group!(benches, bench_imports); diff --git a/benches/src/import.rs b/benches/src/import.rs index 16572986d21..3d79206bda5 100644 --- a/benches/src/import.rs +++ b/benches/src/import.rs @@ -1,16 +1,20 @@ -mod count; -mod pressure_block_importer_port; -mod pressure_consensus_port; -mod pressure_peer_to_peer_port; - -pub use count::Count; use fuel_core_services::{ SharedMutex, StateWatcher, }; -pub use pressure_block_importer_port::PressureBlockImporterPort; -pub use pressure_consensus_port::PressureConsensusPort; -pub use pressure_peer_to_peer_port::PressurePeerToPeerPort; +pub use fuel_core_sync::import::test_helpers::SharedCounts; +use fuel_core_sync::{ + import::{ + test_helpers::{ + PressureBlockImporter, + PressureConsensus, + PressurePeerToPeer, + }, + Import, + }, + 
state::State, + Config, +}; use std::{ sync::Arc, time::Duration, @@ -20,14 +24,8 @@ use tokio::sync::{ Notify, }; -use fuel_core_sync::{ - import::Import, - state::State, - Config, -}; - pub type PressureImport = - Import; + Import; #[derive(Default, Clone, Copy)] pub struct Durations { @@ -38,7 +36,7 @@ pub struct Durations { } pub fn provision_import_test( - shared_count: SharedMutex, + shared_count: SharedCounts, shared_state: SharedMutex, input: Durations, header_batch_size: u32, @@ -55,17 +53,17 @@ pub fn provision_import_test( header_batch_size, max_get_txns_requests, }; - let p2p = Arc::new(PressurePeerToPeerPort::new( - [input.headers, input.transactions], + let p2p = Arc::new(PressurePeerToPeer::new( shared_count.clone(), + [input.headers, input.transactions], )); - let executor = Arc::new(PressureBlockImporterPort::new( - input.executes, + let executor = Arc::new(PressureBlockImporter::new( shared_count.clone(), + input.executes, )); - let consensus = Arc::new(PressureConsensusPort::new( - input.consensus, + let consensus = Arc::new(PressureConsensus::new( shared_count.clone(), + input.consensus, )); let (tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); diff --git a/benches/src/import/count.rs b/benches/src/import/count.rs deleted file mode 100644 index 3300a39529a..00000000000 --- a/benches/src/import/count.rs +++ /dev/null @@ -1,34 +0,0 @@ -#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Clone)] -pub struct Count { - pub headers: usize, - pub transactions: usize, - pub consensus: usize, - pub executes: usize, - pub blocks: usize, -} - -impl Count { - pub fn inc_headers(&mut self) { - self.headers += 1; - } - - pub fn dec_headers(&mut self) { - self.headers -= 1; - } - - pub fn inc_transactions(&mut self) { - self.transactions += 1; - } - - pub fn inc_consensus(&mut self) { - self.consensus += 1; - } - - pub fn inc_executes(&mut self) { - self.executes += 1; - } - - pub fn inc_blocks(&mut self) { - self.blocks += 
1; - } -} diff --git a/benches/src/import/pressure_block_importer_port.rs b/benches/src/import/pressure_block_importer_port.rs deleted file mode 100644 index 81ee4a16ec5..00000000000 --- a/benches/src/import/pressure_block_importer_port.rs +++ /dev/null @@ -1,55 +0,0 @@ -use crate::import::Count; - -use fuel_core_services::{ - stream::BoxStream, - SharedMutex, -}; -use fuel_core_sync::ports::{ - BlockImporterPort, - MockBlockImporterPort, -}; -use fuel_core_types::{ - blockchain::SealedBlock, - fuel_types::BlockHeight, -}; -use std::time::Duration; - -pub struct PressureBlockImporterPort(MockBlockImporterPort, Duration, SharedMutex); - -impl PressureBlockImporterPort { - pub fn new(delays: Duration, count: SharedMutex) -> Self { - let mut mock = MockBlockImporterPort::default(); - mock.expect_execute_and_commit().returning(move |_| Ok(())); - Self(mock, delays, count) - } - - fn service(&self) -> &impl BlockImporterPort { - &self.0 - } - - fn duration(&self) -> Duration { - self.1 - } - - fn count(&self) -> SharedMutex { - self.2.clone() - } -} - -#[async_trait::async_trait] -impl BlockImporterPort for PressureBlockImporterPort { - fn committed_height_stream(&self) -> BoxStream { - self.service().committed_height_stream() - } - - async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { - let timeout = self.duration(); - tokio::task::spawn_blocking(move || { - std::thread::sleep(timeout); - }) - .await - .unwrap(); - self.count().apply(|count| count.inc_executes()); - self.service().execute_and_commit(block).await - } -} diff --git a/benches/src/import/pressure_consensus_port.rs b/benches/src/import/pressure_consensus_port.rs deleted file mode 100644 index 5076ce347b3..00000000000 --- a/benches/src/import/pressure_consensus_port.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::import::Count; -use fuel_core_services::SharedMutex; -use fuel_core_sync::ports::{ - ConsensusPort, - MockConsensusPort, -}; -use fuel_core_types::blockchain::{ - 
primitives::DaBlockHeight, - SealedBlockHeader, -}; -use std::time::Duration; - -pub struct PressureConsensusPort(MockConsensusPort, Duration, SharedMutex); - -impl PressureConsensusPort { - pub fn new(delays: Duration, count: SharedMutex) -> Self { - let mut mock = MockConsensusPort::default(); - mock.expect_await_da_height().returning(|_| Ok(())); - mock.expect_check_sealed_header().returning(|_| Ok(true)); - Self(mock, delays, count) - } - - fn service(&self) -> &impl ConsensusPort { - &self.0 - } - - fn duration(&self) -> Duration { - self.1 - } - - fn count(&self) -> SharedMutex { - self.2.clone() - } -} - -#[async_trait::async_trait] -impl ConsensusPort for PressureConsensusPort { - fn check_sealed_header(&self, header: &SealedBlockHeader) -> anyhow::Result { - self.service().check_sealed_header(header) - } - - async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { - self.count().apply(|count| count.inc_consensus()); - tokio::time::sleep(self.duration()).await; - self.service().await_da_height(da_height).await - } -} diff --git a/benches/src/import/pressure_peer_to_peer_port.rs b/benches/src/import/pressure_peer_to_peer_port.rs deleted file mode 100644 index a37815a8f08..00000000000 --- a/benches/src/import/pressure_peer_to_peer_port.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::import::Count; -use std::ops::Range; - -use fuel_core_services::{ - stream::BoxStream, - SharedMutex, -}; -use fuel_core_sync::ports::{ - MockPeerToPeerPort, - PeerToPeerPort, -}; -use fuel_core_types::{ - blockchain::{ - consensus::{ - Consensus, - Sealed, - }, - header::BlockHeader, - primitives::BlockId, - SealedBlockHeader, - }, - fuel_tx::Transaction, - fuel_types::BlockHeight, - services::p2p::SourcePeer, -}; -use std::time::Duration; - -fn empty_header(h: BlockHeight) -> SourcePeer { - let mut header = BlockHeader::default(); - header.consensus.height = h; - let transaction_tree = - fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); 
- header.application.generated.transactions_root = transaction_tree.root().into(); - - let consensus = Consensus::default(); - let sealed = Sealed { - entity: header, - consensus, - }; - SourcePeer { - peer_id: vec![].into(), - data: sealed, - } -} - -pub struct PressurePeerToPeerPort(MockPeerToPeerPort, [Duration; 2], SharedMutex); - -impl PressurePeerToPeerPort { - pub fn new(delays: [Duration; 2], count: SharedMutex) -> Self { - let mut mock = MockPeerToPeerPort::default(); - mock.expect_get_sealed_block_headers().returning(|range| { - Ok(Some( - range - .clone() - .map(BlockHeight::from) - .map(empty_header) - .collect(), - )) - }); - mock.expect_get_transactions() - .returning(|_| Ok(Some(vec![]))); - Self(mock, delays, count) - } - - fn service(&self) -> &impl PeerToPeerPort { - &self.0 - } - - fn duration(&self, index: usize) -> Duration { - self.1[index] - } - - fn count(&self) -> SharedMutex { - self.2.clone() - } -} - -#[async_trait::async_trait] -impl PeerToPeerPort for PressurePeerToPeerPort { - fn height_stream(&self) -> BoxStream { - self.service().height_stream() - } - - async fn get_sealed_block_headers( - &self, - block_height_range: Range, - ) -> anyhow::Result>>> { - self.count().apply(|count| count.inc_headers()); - let timeout = self.duration(0); - self.count().apply(|c| c.dec_headers()); - tokio::time::sleep(timeout).await; - for _ in block_height_range.clone() { - self.count().apply(|c| c.inc_blocks()); - } - self.service() - .get_sealed_block_headers(block_height_range) - .await - } - - async fn get_transactions( - &self, - block_id: SourcePeer, - ) -> anyhow::Result>> { - let timeout = self.duration(1); - tokio::time::sleep(timeout).await; - self.count().apply(|count| count.inc_transactions()); - self.service().get_transactions(block_id).await - } -} diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 3f180d0f888..6e68a705142 100644 --- a/crates/services/sync/src/import.rs +++ 
b/crates/services/sync/src/import.rs @@ -43,8 +43,10 @@ use crate::{ }, }; -#[cfg(test)] -pub(crate) use tests::empty_header; +#[cfg(any(test, feature = "benchmarking"))] +/// Accessories for testing the sync. Available only when compiling under test +/// or benchmarking. +pub mod test_helpers; #[cfg(test)] mod tests; @@ -182,6 +184,7 @@ where tokio::spawn(async move { Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()).await }).then(|task| async { task.map_err(|e| anyhow!(e))? }) + // Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index 423286df914..6dea34a7678 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -1,29 +1,13 @@ -use std::{ - ops::Range, - time::Duration, +use std::time::Duration; + +use super::*; +use crate::import::test_helpers::{ + Count, + PressureBlockImporter, + PressureConsensus, + PressurePeerToPeer, + SharedCounts, }; - -use fuel_core_services::stream::BoxStream; -use fuel_core_types::{ - blockchain::primitives::{ - BlockId, - DaBlockHeight, - }, - fuel_tx::Transaction, -}; - -use crate::ports::{ - BlockImporterPort, - MockBlockImporterPort, - MockConsensusPort, - MockPeerToPeerPort, -}; - -use super::{ - tests::empty_header, - *, -}; -use fuel_core_types::fuel_types::BlockHeight; use test_case::test_case; #[derive(Default)] @@ -141,168 +125,3 @@ async fn test_back_pressure(input: Input, state: State, params: Config) -> Count import.import(&mut watcher).await.unwrap(); counts.apply(|c| c.max.clone()) } - -#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Clone)] -struct Count { - headers: usize, - transactions: usize, - consensus: usize, - executes: usize, - blocks: usize, -} - -#[derive(Debug, Default, 
PartialEq, Eq)] -struct Counts { - now: Count, - max: Count, -} - -type SharedCounts = SharedMutex; - -struct PressurePeerToPeer { - p2p: MockPeerToPeerPort, - durations: [Duration; 2], - counts: SharedCounts, -} - -struct PressureBlockImporter(MockBlockImporterPort, Duration, SharedCounts); - -struct PressureConsensus(MockConsensusPort, Duration, SharedCounts); - -#[async_trait::async_trait] -impl PeerToPeerPort for PressurePeerToPeer { - fn height_stream(&self) -> BoxStream { - self.p2p.height_stream() - } - - async fn get_sealed_block_headers( - &self, - block_height_range: Range, - ) -> anyhow::Result>>> { - self.counts.apply(|c| c.inc_headers()); - tokio::time::sleep(self.durations[0]).await; - self.counts.apply(|c| c.dec_headers()); - for _ in block_height_range.clone() { - self.counts.apply(|c| c.inc_blocks()); - } - self.p2p.get_sealed_block_headers(block_height_range).await - } - - async fn get_transactions( - &self, - block_id: SourcePeer, - ) -> anyhow::Result>> { - self.counts.apply(|c| c.inc_transactions()); - tokio::time::sleep(self.durations[1]).await; - self.counts.apply(|c| c.dec_transactions()); - self.p2p.get_transactions(block_id).await - } -} - -#[async_trait::async_trait] -impl BlockImporterPort for PressureBlockImporter { - fn committed_height_stream(&self) -> BoxStream { - self.0.committed_height_stream() - } - - async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { - self.2.apply(|c| c.inc_executes()); - tokio::time::sleep(self.1).await; - self.2.apply(|c| { - c.dec_executes(); - c.dec_blocks(); - }); - self.0.execute_and_commit(block).await - } -} - -#[async_trait::async_trait] -impl ConsensusPort for PressureConsensus { - fn check_sealed_header(&self, header: &SealedBlockHeader) -> anyhow::Result { - self.0.check_sealed_header(header) - } - - async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { - self.2.apply(|c| c.inc_consensus()); - tokio::time::sleep(self.1).await; - 
self.2.apply(|c| c.dec_consensus()); - self.0.await_da_height(da_height).await - } -} - -impl PressurePeerToPeer { - fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { - let mut mock = MockPeerToPeerPort::default(); - mock.expect_get_sealed_block_headers().returning(|range| { - Ok(Some( - range - .clone() - .map(BlockHeight::from) - .map(empty_header) - .collect(), - )) - }); - mock.expect_get_transactions() - .returning(|_| Ok(Some(vec![]))); - Self { - p2p: mock, - durations: delays, - counts, - } - } -} - -impl PressureBlockImporter { - fn new(counts: SharedCounts, delays: Duration) -> Self { - let mut mock = MockBlockImporterPort::default(); - mock.expect_execute_and_commit().returning(move |_| Ok(())); - Self(mock, delays, counts) - } -} - -impl PressureConsensus { - fn new(counts: SharedCounts, delays: Duration) -> Self { - let mut mock = MockConsensusPort::default(); - mock.expect_await_da_height().returning(|_| Ok(())); - mock.expect_check_sealed_header().returning(|_| Ok(true)); - Self(mock, delays, counts) - } -} - -impl Counts { - fn inc_headers(&mut self) { - self.now.headers += 1; - self.max.headers = self.max.headers.max(self.now.headers); - } - fn dec_headers(&mut self) { - self.now.headers -= 1; - } - fn inc_transactions(&mut self) { - self.now.transactions += 1; - self.max.transactions = self.max.transactions.max(self.now.transactions); - } - fn dec_transactions(&mut self) { - self.now.transactions -= 1; - } - fn inc_consensus(&mut self) { - self.now.consensus += 1; - self.max.consensus = self.max.consensus.max(self.now.consensus); - } - fn dec_consensus(&mut self) { - self.now.consensus -= 1; - } - fn inc_executes(&mut self) { - self.now.executes += 1; - self.max.executes = self.max.executes.max(self.now.executes); - } - fn dec_executes(&mut self) { - self.now.executes -= 1; - } - fn inc_blocks(&mut self) { - self.now.blocks += 1; - self.max.blocks = self.max.blocks.max(self.now.blocks); - } - fn dec_blocks(&mut self) { - 
self.now.blocks -= 1; - } -} diff --git a/crates/services/sync/src/import/test_helpers.rs b/crates/services/sync/src/import/test_helpers.rs new file mode 100644 index 00000000000..939dbd7befb --- /dev/null +++ b/crates/services/sync/src/import/test_helpers.rs @@ -0,0 +1,45 @@ +#![allow(missing_docs)] + +mod counts; +mod pressure_block_importer; +mod pressure_consensus; +mod pressure_peer_to_peer; + +use fuel_core_types::{ + blockchain::{ + consensus::{ + Consensus, + Sealed, + }, + header::BlockHeader, + SealedBlockHeader, + }, + fuel_types::BlockHeight, + services::p2p::SourcePeer, +}; + +pub use counts::{ + Count, + SharedCounts, +}; +pub use pressure_block_importer::PressureBlockImporter; +pub use pressure_consensus::PressureConsensus; +pub use pressure_peer_to_peer::PressurePeerToPeer; + +pub fn empty_header(h: BlockHeight) -> SourcePeer { + let mut header = BlockHeader::default(); + header.consensus.height = h; + let transaction_tree = + fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); + header.application.generated.transactions_root = transaction_tree.root().into(); + + let consensus = Consensus::default(); + let sealed = Sealed { + entity: header, + consensus, + }; + SourcePeer { + peer_id: vec![].into(), + data: sealed, + } +} diff --git a/crates/services/sync/src/import/test_helpers/counts.rs b/crates/services/sync/src/import/test_helpers/counts.rs new file mode 100644 index 00000000000..d98e75ddd30 --- /dev/null +++ b/crates/services/sync/src/import/test_helpers/counts.rs @@ -0,0 +1,56 @@ +use fuel_core_services::SharedMutex; + +#[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct Count { + pub headers: usize, + pub transactions: usize, + pub consensus: usize, + pub executes: usize, + pub blocks: usize, +} + +#[derive(Debug, Default, PartialEq, Eq)] +pub struct Counts { + pub now: Count, + pub max: Count, +} + +pub type SharedCounts = SharedMutex; + +impl Counts { + pub fn inc_headers(&mut self) { + 
self.now.headers += 1; + self.max.headers = self.max.headers.max(self.now.headers); + } + pub fn dec_headers(&mut self) { + self.now.headers -= 1; + } + pub fn inc_transactions(&mut self) { + self.now.transactions += 1; + self.max.transactions = self.max.transactions.max(self.now.transactions); + } + pub fn dec_transactions(&mut self) { + self.now.transactions -= 1; + } + pub fn inc_consensus(&mut self) { + self.now.consensus += 1; + self.max.consensus = self.max.consensus.max(self.now.consensus); + } + pub fn dec_consensus(&mut self) { + self.now.consensus -= 1; + } + pub fn inc_executes(&mut self) { + self.now.executes += 1; + self.max.executes = self.max.executes.max(self.now.executes); + } + pub fn dec_executes(&mut self) { + self.now.executes -= 1; + } + pub fn inc_blocks(&mut self) { + self.now.blocks += 1; + self.max.blocks = self.max.blocks.max(self.now.blocks); + } + pub fn dec_blocks(&mut self) { + self.now.blocks -= 1; + } +} diff --git a/crates/services/sync/src/import/test_helpers/pressure_block_importer.rs b/crates/services/sync/src/import/test_helpers/pressure_block_importer.rs new file mode 100644 index 00000000000..217e3fd1c9f --- /dev/null +++ b/crates/services/sync/src/import/test_helpers/pressure_block_importer.rs @@ -0,0 +1,40 @@ +use crate::{ + import::test_helpers::SharedCounts, + ports::{ + BlockImporterPort, + MockBlockImporterPort, + }, +}; +use fuel_core_services::stream::BoxStream; +use fuel_core_types::{ + blockchain::SealedBlock, + fuel_types::BlockHeight, +}; +use std::time::Duration; + +pub struct PressureBlockImporter(MockBlockImporterPort, Duration, SharedCounts); + +#[async_trait::async_trait] +impl BlockImporterPort for PressureBlockImporter { + fn committed_height_stream(&self) -> BoxStream { + self.0.committed_height_stream() + } + + async fn execute_and_commit(&self, block: SealedBlock) -> anyhow::Result<()> { + self.2.apply(|c| c.inc_executes()); + tokio::time::sleep(self.1).await; + self.2.apply(|c| { + c.dec_executes(); + 
c.dec_blocks(); + }); + self.0.execute_and_commit(block).await + } +} + +impl PressureBlockImporter { + pub fn new(counts: SharedCounts, delays: Duration) -> Self { + let mut mock = MockBlockImporterPort::default(); + mock.expect_execute_and_commit().returning(move |_| Ok(())); + Self(mock, delays, counts) + } +} diff --git a/crates/services/sync/src/import/test_helpers/pressure_consensus.rs b/crates/services/sync/src/import/test_helpers/pressure_consensus.rs new file mode 100644 index 00000000000..441dca03563 --- /dev/null +++ b/crates/services/sync/src/import/test_helpers/pressure_consensus.rs @@ -0,0 +1,37 @@ +use crate::{ + import::test_helpers::counts::SharedCounts, + ports::{ + ConsensusPort, + MockConsensusPort, + }, +}; +use fuel_core_types::blockchain::{ + primitives::DaBlockHeight, + SealedBlockHeader, +}; +use std::time::Duration; + +pub struct PressureConsensus(MockConsensusPort, Duration, SharedCounts); + +#[async_trait::async_trait] +impl ConsensusPort for PressureConsensus { + fn check_sealed_header(&self, header: &SealedBlockHeader) -> anyhow::Result { + self.0.check_sealed_header(header) + } + + async fn await_da_height(&self, da_height: &DaBlockHeight) -> anyhow::Result<()> { + self.2.apply(|c| c.inc_consensus()); + tokio::time::sleep(self.1).await; + self.2.apply(|c| c.dec_consensus()); + self.0.await_da_height(da_height).await + } +} + +impl PressureConsensus { + pub fn new(counts: SharedCounts, delays: Duration) -> Self { + let mut mock = MockConsensusPort::default(); + mock.expect_await_da_height().returning(|_| Ok(())); + mock.expect_check_sealed_header().returning(|_| Ok(true)); + Self(mock, delays, counts) + } +} diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs new file mode 100644 index 00000000000..b0d5b93b946 --- /dev/null +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -0,0 +1,82 @@ +use crate::{ + 
import::test_helpers::{ + empty_header, + SharedCounts, + }, + ports::{ + MockPeerToPeerPort, + PeerToPeerPort, + }, +}; +use fuel_core_services::stream::BoxStream; +use fuel_core_types::{ + blockchain::{ + primitives::BlockId, + SealedBlockHeader, + }, + fuel_tx::Transaction, + fuel_types::BlockHeight, + services::p2p::SourcePeer, +}; +use std::{ + ops::Range, + time::Duration, +}; + +pub struct PressurePeerToPeer { + p2p: MockPeerToPeerPort, + durations: [Duration; 2], + counts: SharedCounts, +} + +#[async_trait::async_trait] +impl PeerToPeerPort for PressurePeerToPeer { + fn height_stream(&self) -> BoxStream { + self.p2p.height_stream() + } + + async fn get_sealed_block_headers( + &self, + block_height_range: Range, + ) -> anyhow::Result>>> { + self.counts.apply(|c| c.inc_headers()); + tokio::time::sleep(self.durations[0]).await; + self.counts.apply(|c| c.dec_headers()); + for _ in block_height_range.clone() { + self.counts.apply(|c| c.inc_blocks()); + } + self.p2p.get_sealed_block_headers(block_height_range).await + } + + async fn get_transactions( + &self, + block_id: SourcePeer, + ) -> anyhow::Result>> { + self.counts.apply(|c| c.inc_transactions()); + tokio::time::sleep(self.durations[1]).await; + self.counts.apply(|c| c.dec_transactions()); + self.p2p.get_transactions(block_id).await + } +} + +impl PressurePeerToPeer { + pub fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { + let mut mock = MockPeerToPeerPort::default(); + mock.expect_get_sealed_block_headers().returning(|range| { + Ok(Some( + range + .clone() + .map(BlockHeight::from) + .map(empty_header) + .collect(), + )) + }); + mock.expect_get_transactions() + .returning(|_| Ok(Some(vec![]))); + Self { + p2p: mock, + durations: delays, + counts, + } + } +} diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 30cca1235bc..04617bb97a5 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ 
-8,7 +8,7 @@ use futures::{ }; use crate::{ - import::empty_header, + import::test_helpers::empty_header, ports::{ MockBlockImporterPort, MockConsensusPort, From 7657b0bdfd2272954130b9213e8b1fb3e94ce436 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 00:41:25 -0400 Subject: [PATCH 20/55] Update import and tests --- crates/services/sync/src/import.rs | 1 - crates/services/sync/src/import/tests.rs | 69 ++++++++++-------------- 2 files changed, 28 insertions(+), 42 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 6e68a705142..2e844428cb7 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -184,7 +184,6 @@ where tokio::spawn(async move { Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()).await }).then(|task| async { task.map_err(|e| anyhow!(e))? }) - // Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 308d58e5347..78fbb9416b0 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -1,16 +1,13 @@ #![allow(non_snake_case)] -use fuel_core_types::blockchain::{ - consensus::Consensus, - header::BlockHeader, +use crate::{ + import::test_helpers::empty_header, + ports::{ + MockBlockImporterPort, + MockConsensusPort, + MockPeerToPeerPort, + }, }; - -use crate::ports::{ - MockBlockImporterPort, - MockConsensusPort, - MockPeerToPeerPort, -}; -use fuel_core_types::fuel_types::BlockHeight; use test_case::test_case; use super::*; @@ -56,13 +53,17 @@ async fn import__signature_fails_on_header_4_only() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|h| Ok(**h.entity.height() != 4)); + consensus_port + 
.expect_await_da_height() + .times(1) + .returning(|_| Ok(())); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([0]), + p2p: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; @@ -175,13 +176,13 @@ async fn import__transactions_not_found() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); p2p.expect_get_transactions() - .times(1) + .times(2) .returning(|_| Ok(None)); let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), executor: DefaultMocks::times([0]), }; @@ -200,7 +201,7 @@ async fn import__transactions_not_found_for_header_4() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); let mut height = 3; - p2p.expect_get_transactions().times(1).returning(move |_| { + p2p.expect_get_transactions().times(2).returning(move |_| { height += 1; if height == 4 { Ok(None) @@ -212,7 +213,7 @@ async fn import__transactions_not_found_for_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), executor: DefaultMocks::times([0]), }; @@ -284,7 +285,7 @@ async fn import__p2p_error_on_4_transactions() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); let mut height = 3; - p2p.expect_get_transactions().times(1).returning(move |_| { + p2p.expect_get_transactions().times(2).returning(move |_| { height += 1; if height == 4 { Err(anyhow::anyhow!("Some network error")) @@ -296,7 +297,7 @@ async fn import__p2p_error_on_4_transactions() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), executor: DefaultMocks::times([0]), }; @@ -344,7 +345,7 @@ async fn import__consensus_error_on_4() { let mut 
consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|h| { if **h.entity.height() == 4 { Err(anyhow::anyhow!("Some consensus error")) @@ -352,11 +353,15 @@ async fn import__consensus_error_on_4() { Ok(true) } }); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([0]), + p2p: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; @@ -417,8 +422,8 @@ async fn import__execution_error_on_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { - consensus_port: DefaultMocks::times([1]), - p2p: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), + p2p: DefaultMocks::times([2]), executor, }; @@ -464,7 +469,7 @@ async fn signature_always_fails() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|_| Ok(false)); let state = State::new(3, 5).into(); @@ -657,21 +662,3 @@ impl DefaultMocks for MockBlockImporterPort { executor } } - -pub(crate) fn empty_header(h: BlockHeight) -> SourcePeer { - let mut header = BlockHeader::default(); - header.consensus.height = h; - let transaction_tree = - fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); - header.application.generated.transactions_root = transaction_tree.root().into(); - - let consensus = Consensus::default(); - let sealed = Sealed { - entity: header, - consensus, - }; - SourcePeer { - peer_id: vec![].into(), - data: sealed, - } -} From 3ef35e396073b149a319d16f2683ef2177fce263 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 00:44:48 -0400 Subject: [PATCH 21/55] Add more benches --- benches/benches/import.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 
7a99dcfa039..e6e06386bf2 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -71,16 +71,31 @@ fn bench_imports(c: &mut Criterion) { let mut group = c.benchmark_group("import"); - let n = 50; + let n = 100; let durations = Durations { headers: Duration::from_millis(5), consensus: Duration::from_millis(5), transactions: Duration::from_millis(5), executes: Duration::from_millis(10), }; - let batch_size = 10; - let buffer_size = 50; - bench_import(&mut group, n, durations, batch_size, buffer_size); + + // Header batch size = 10, header/txn buffer size = 10 + bench_import(&mut group, n, durations, 10, 10); + + // Header batch size = 20, header/txn buffer size = 10 + bench_import(&mut group, n, durations, 20, 10); + + // Header batch size = 50, header/txn buffer size = 10 + bench_import(&mut group, n, durations, 50, 10); + + // Header batch size = 10, header/txn buffer size = 20 + bench_import(&mut group, n, durations, 10, 20); + + // Header batch size = 10, header/txn buffer size = 50 + bench_import(&mut group, n, durations, 10, 50); + + // Header batch size = 50, header/txn buffer size = 50 + bench_import(&mut group, n, durations, 50, 50); } criterion_group!(benches, bench_imports); From a15b0084cc621ce551f8802a823031f5c3f24650 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 00:49:03 -0400 Subject: [PATCH 22/55] Update Cargo.toml --- benches/Cargo.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benches/Cargo.toml b/benches/Cargo.toml index d14a425b53c..89e2c62e91b 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -17,7 +17,7 @@ ethnum = "1.3" fuel-core = { path = "../crates/fuel-core", default-features = false, features = ["metrics", "rocksdb-production"] } fuel-core-services = { path = "./../crates/services" } fuel-core-storage = { path = "./../crates/storage" } -fuel-core-sync = { path = "./../crates/services/sync", features = ["benchmarking"]} +fuel-core-sync = { path = 
"./../crates/services/sync", features = ["benchmarking"] } fuel-core-types = { path = "./../crates/types", features = ["test-helpers"] } p256 = { version = "0.13", default-features = false, features = ["digest", "ecdsa"] } rand = { workspace = true } @@ -29,15 +29,15 @@ tokio = { workspace = true, features = ["full"] } [[bench]] harness = false -name = "state" +name = "import" [[bench]] harness = false -name = "vm" +name = "state" [[bench]] harness = false -name = "import" +name = "vm" [features] default = ["fuel-core/rocksdb"] From 09b00549c4338c264da0d46196d814b1fb5af356 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 00:52:55 -0400 Subject: [PATCH 23/55] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15ffd551b29..b19cb143f54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ Description of the upcoming release here. ### Changed +- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274) Modified sync to use asynchronous task execution for when retrieving block headers. - [#1314](https://github.com/FuelLabs/fuel-core/pull/1314): Removed `types::ConsensusParameters` in favour of `fuel_tx:ConsensusParameters`. - [#1302](https://github.com/FuelLabs/fuel-core/pull/1302): Removed the usage of flake and building of the bridge contract ABI. It simplifies the maintenance and updating of the events, requiring only putting the event definition into the codebase of the relayer. 
From ec8c01dbec4921fca85c59eebde486ffc7cee043 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 01:16:58 -0400 Subject: [PATCH 24/55] remove call to blackbox --- benches/benches/import.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index e6e06386bf2..c1c1a85412f 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -1,5 +1,4 @@ use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, @@ -61,7 +60,7 @@ fn bench_imports(c: &mut Criterion) { ); import.notify_one(); let start = std::time::Instant::now(); - black_box(execute_import(import, &mut shutdown).await); + execute_import(import, &mut shutdown).await; elapsed_time += start.elapsed(); } elapsed_time From 49a645b7c4bd76532b21c6adc2074784cd8ee009 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 14:37:43 -0400 Subject: [PATCH 25/55] Update placeholder comments --- crates/services/sync/src/import.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 2e844428cb7..0b05a597e74 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -75,7 +75,8 @@ impl Default for Config { } } -/// Import +/// The combination of shared state, configuration, and services that define +/// import behavior. pub struct Import { /// Shared state between import and sync tasks. state: SharedMutex, @@ -92,7 +93,8 @@ pub struct Import { } impl Import { - /// New Import + /// Configure an import behavior from a shared state, configuration and + /// services that can be executed by an ImportTask. pub fn new( state: SharedMutex, notify: Arc, @@ -111,7 +113,7 @@ impl Import { } } - /// Notify one + /// Signal other asynchronous tasks that an import event has occurred. 
pub fn notify_one(&self) { self.notify.notify_one() } From 30739e74614c3b00cd16b274a0736e5682630834 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 14:40:15 -0400 Subject: [PATCH 26/55] Update import.rs --- crates/services/sync/src/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 0b05a597e74..891be344389 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -125,7 +125,7 @@ where C: ConsensusPort + Send + Sync + 'static, { #[tracing::instrument(skip_all)] - /// Import + /// Execute imports until a shutdown is requested. pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { self.import_inner(shutdown).await?; From 85d7425017dd851289ad4ee2b2399efa1921e9eb Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 15:22:32 -0400 Subject: [PATCH 27/55] Update CHANGELOG.md Co-authored-by: Brandon Kite --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b19cb143f54..ffbf255a4a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ Description of the upcoming release here. ### Changed -- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274) Modified sync to use asynchronous task execution for when retrieving block headers. +- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274) Modified block synchronization to use asynchronous task execution when retrieving block headers. - [#1314](https://github.com/FuelLabs/fuel-core/pull/1314): Removed `types::ConsensusParameters` in favour of `fuel_tx:ConsensusParameters`. - [#1302](https://github.com/FuelLabs/fuel-core/pull/1302): Removed the usage of flake and building of the bridge contract ABI. It simplifies the maintenance and updating of the events, requiring only putting the event definition into the codebase of the relayer. 
From 652089613a739815afa2ea90d166b99f5a3f6a92 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 18:07:18 -0400 Subject: [PATCH 28/55] Flatten stream of stream of headers --- crates/services/sync/src/import.rs | 134 ++++++++++++++++------------- 1 file changed, 76 insertions(+), 58 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 891be344389..7bdfb11542c 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -3,6 +3,8 @@ //! importing blocks from the network into the local blockchain. use std::{ + future::Future, + iter, ops::RangeInclusive, sync::Arc, }; @@ -176,20 +178,7 @@ where .. } = &self; - get_headers_buffered(range.clone(), params, p2p.clone()) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |result| { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()).await - }).then(|task| async { task.map_err(|e| anyhow!(e))? }) - } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - }) + get_transactions(range.clone(), params, p2p.clone(), consensus.clone()) // Request up to `max_get_txns_requests` transactions from the network. .buffered(params.max_get_txns_requests) // Continue the stream unless an error or none occurs. @@ -238,67 +227,69 @@ where .in_current_span() .await } +} - async fn get_block_for_header( - result: anyhow::Result>, - p2p: Arc

, - consensus_port: Arc, - ) -> anyhow::Result> { - let header = match result { - Ok(h) => h, - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - // Check the consensus is valid on this header. - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? - { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } +type NetworkBlockHeader = SourcePeer; +type BlockHeaderResultOpt = anyhow::Result>; - // Wait for the da to be at least the da height on the header. - consensus_port - .await_da_height(&header.entity.da_height) - .await?; +async fn get_block_for_header< + P: PeerToPeerPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, +>( + result: BlockHeaderResultOpt, + p2p: Arc

, + consensus_port: Arc, +) -> anyhow::Result> { + let header = match result { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; - get_transactions_on_block(p2p.as_ref(), block_id, header).await + // Check the consensus is valid on this header. + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) } + + // Wait for the da to be at least the da height on the header. + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + + get_transactions_on_block(p2p.as_ref(), block_id, header).await } -fn get_headers_buffered( +fn get_headers( range: RangeInclusive, params: &Config, p2p: Arc

, -) -> impl Stream>> { +) -> impl Stream { let Config { - header_batch_size, - max_header_batch_requests, - .. + header_batch_size, .. } = params; - futures::stream::iter(range_chunks(range, *header_batch_size)) - .map(move |range| { + let ranges = range_chunks(range, *header_batch_size); + let p2p_gen = iter::repeat_with(move || p2p.clone()); + let iter = ranges.zip(p2p_gen); + futures::stream::iter(iter) + .then(move |(range, p2p)| async { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), range.end() ); - let p2p = p2p.clone(); - async move { get_headers_batch(range, p2p).await } - .instrument(tracing::debug_span!("get_headers_batch")) - .in_current_span() + get_headers_batch(range, p2p).await }) - .buffered(*max_header_batch_requests) .flatten() - .into_scan_none_or_err() - .scan_none_or_err() } fn range_chunks( @@ -312,6 +303,33 @@ fn range_chunks( }) } +fn get_transactions< + P: PeerToPeerPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, +>( + range: RangeInclusive, + params: &Config, + p2p: Arc

, + consensus: Arc, +) -> impl Stream>>> { + get_headers(range, params, p2p.clone()).map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |batch| { + { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + tokio::spawn(async move { + get_block_for_header(batch, p2p.clone(), consensus_port.clone()).await + }) + .then(|task| async { task.map_err(|e| anyhow!(e))? }) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() + } + }) +} + /// Waits for a notify or shutdown signal. /// Returns true if the notify signal was received. async fn wait_for_notify_or_shutdown( @@ -333,7 +351,7 @@ async fn wait_for_notify_or_shutdown( async fn get_headers_batch( mut range: RangeInclusive, p2p: Arc, -) -> impl Stream>>> { +) -> impl Stream { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), From f98c9ceb54c789179211dbc7ba6d99f4cc7c5d11 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 18:14:20 -0400 Subject: [PATCH 29/55] Revert --- crates/services/sync/src/import.rs | 6 +---- crates/services/sync/src/import/tests.rs | 34 +++++++++--------------- 2 files changed, 14 insertions(+), 26 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 891be344389..d4ef024c034 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -7,7 +7,6 @@ use std::{ sync::Arc, }; -use anyhow::anyhow; use fuel_core_services::{ SharedMutex, StateWatcher, @@ -24,7 +23,6 @@ use fuel_core_types::{ }; use futures::{ stream::StreamExt, - FutureExt, Stream, }; use tokio::sync::Notify; @@ -183,9 +181,7 @@ where move |result| { let p2p = p2p.clone(); let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()).await - }).then(|task| async { task.map_err(|e| anyhow!(e))? 
}) + Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 78fbb9416b0..796dc70094c 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -53,17 +53,13 @@ async fn import__signature_fails_on_header_4_only() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) - .returning(|h| Ok(**h.entity.height() != 4)); - consensus_port - .expect_await_da_height() .times(1) - .returning(|_| Ok(())); + .returning(|h| Ok(**h.entity.height() != 4)); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([1]), + p2p: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; @@ -176,13 +172,13 @@ async fn import__transactions_not_found() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); p2p.expect_get_transactions() - .times(2) + .times(1) .returning(|_| Ok(None)); let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; @@ -201,7 +197,7 @@ async fn import__transactions_not_found_for_header_4() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); let mut height = 3; - p2p.expect_get_transactions().times(2).returning(move |_| { + p2p.expect_get_transactions().times(1).returning(move |_| { height += 1; if height == 4 { Ok(None) @@ -213,7 +209,7 @@ async fn import__transactions_not_found_for_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; @@ -285,7 +281,7 @@ async 
fn import__p2p_error_on_4_transactions() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); let mut height = 3; - p2p.expect_get_transactions().times(2).returning(move |_| { + p2p.expect_get_transactions().times(1).returning(move |_| { height += 1; if height == 4 { Err(anyhow::anyhow!("Some network error")) @@ -297,7 +293,7 @@ async fn import__p2p_error_on_4_transactions() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; @@ -345,7 +341,7 @@ async fn import__consensus_error_on_4() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) + .times(1) .returning(|h| { if **h.entity.height() == 4 { Err(anyhow::anyhow!("Some consensus error")) @@ -353,15 +349,11 @@ async fn import__consensus_error_on_4() { Ok(true) } }); - consensus_port - .expect_await_da_height() - .times(1) - .returning(|_| Ok(())); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([1]), + p2p: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; @@ -422,8 +414,8 @@ async fn import__execution_error_on_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { - consensus_port: DefaultMocks::times([2]), - p2p: DefaultMocks::times([2]), + consensus_port: DefaultMocks::times([1]), + p2p: DefaultMocks::times([1]), executor, }; @@ -469,7 +461,7 @@ async fn signature_always_fails() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) + .times(1) .returning(|_| Ok(false)); let state = State::new(3, 5).into(); From 76ca4070edd04931300f146727984fc5e2a9265d Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 18:16:09 -0400 Subject: [PATCH 30/55] Revert --- crates/services/sync/src/import.rs | 2 -- 1 file changed, 2 
deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index d4ef024c034..19658e220ee 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -179,8 +179,6 @@ where let p2p = p2p.clone(); let consensus_port = consensus.clone(); move |result| { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); Self::get_block_for_header(result, p2p.clone(), consensus_port.clone()) } .instrument(tracing::debug_span!("consensus_and_transactions")) From b89db7d982ef64ad28851a27ae92ba14662a4fe6 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 18:41:40 -0400 Subject: [PATCH 31/55] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ffbf255a4a2..c0101f5407e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Description of the upcoming release here. ### Added +- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274) Added tests to benchmark block synchronization. - [#1309](https://github.com/FuelLabs/fuel-core/pull/1309): Add documentation for running debug builds with CLion and Visual Studio Code. - [#1308](https://github.com/FuelLabs/fuel-core/pull/1308): Add support for loading .env files when compiling with the `env` feature. This allows users to conveniently supply CLI arguments in a secure and IDE-agnostic way. - [#1263](https://github.com/FuelLabs/fuel-core/pull/1263): Add gas benchmarks for `ED19` and `ECR1` instructions. @@ -18,7 +19,6 @@ Description of the upcoming release here. ### Changed -- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274) Modified block synchronization to use asynchronous task execution when retrieving block headers. - [#1314](https://github.com/FuelLabs/fuel-core/pull/1314): Removed `types::ConsensusParameters` in favour of `fuel_tx:ConsensusParameters`. 
- [#1302](https://github.com/FuelLabs/fuel-core/pull/1302): Removed the usage of flake and building of the bridge contract ABI. It simplifies the maintenance and updating of the events, requiring only putting the event definition into the codebase of the relayer. From fb6ce66c69ea4207fbdee66a271da058d072d630 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 18:45:04 -0400 Subject: [PATCH 32/55] Typo --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0101f5407e..f2cf97220f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ Description of the upcoming release here. ### Added -- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274) Added tests to benchmark block synchronization. +- [#1274](https://github.com/FuelLabs/fuel-core/pull/1274): Added tests to benchmark block synchronization. - [#1309](https://github.com/FuelLabs/fuel-core/pull/1309): Add documentation for running debug builds with CLion and Visual Studio Code. - [#1308](https://github.com/FuelLabs/fuel-core/pull/1308): Add support for loading .env files when compiling with the `env` feature. This allows users to conveniently supply CLI arguments in a secure and IDE-agnostic way. - [#1263](https://github.com/FuelLabs/fuel-core/pull/1263): Add gas benchmarks for `ED19` and `ECR1` instructions. 
From eb32da96a0c33a5e3f19a523f9b53d4c43e9c118 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 18:53:34 -0400 Subject: [PATCH 33/55] Fix uses --- crates/services/sync/src/import.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 90900fe9bdf..7bdfb11542c 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -9,6 +9,7 @@ use std::{ sync::Arc, }; +use anyhow::anyhow; use fuel_core_services::{ SharedMutex, StateWatcher, @@ -25,6 +26,7 @@ use fuel_core_types::{ }; use futures::{ stream::StreamExt, + FutureExt, Stream, }; use tokio::sync::Notify; From 9533cc64bba352ae4b5b970c67fa2a17d23c1621 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 19:42:21 -0400 Subject: [PATCH 34/55] Minor refactor --- crates/services/sync/src/import.rs | 111 ++++++++++++++--------------- 1 file changed, 55 insertions(+), 56 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 7bdfb11542c..c68659f8b77 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -229,51 +229,40 @@ where } } -type NetworkBlockHeader = SourcePeer; -type BlockHeaderResultOpt = anyhow::Result>; +type PeerBlockHeader = SourcePeer; -async fn get_block_for_header< +fn get_transactions< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - result: BlockHeaderResultOpt, + range: RangeInclusive, + params: &Config, p2p: Arc

, - consensus_port: Arc, -) -> anyhow::Result> { - let header = match result { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { peer_id, data: id }; - - // Check the consensus is valid on this header. - if !consensus_port - .check_sealed_header(&header) - .trace_err("Failed to check consensus on header")? - { - tracing::warn!("Header {:?} failed consensus check", header); - return Ok(None) - } - - // Wait for the da to be at least the da height on the header. - consensus_port - .await_da_height(&header.entity.da_height) - .await?; - - get_transactions_on_block(p2p.as_ref(), block_id, header).await + consensus: Arc, +) -> impl Stream>>> { + get_headers(range, params, p2p.clone()).map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |batch| { + { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + tokio::spawn(async move { + get_block_for_header(batch, p2p.clone(), consensus_port.clone()).await + }) + .then(|task| async { task.map_err(|e| anyhow!(e))? }) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() + } + }) } fn get_headers( range: RangeInclusive, params: &Config, p2p: Arc

, -) -> impl Stream { +) -> impl Stream>> { let Config { header_batch_size, .. } = params; @@ -303,31 +292,41 @@ fn range_chunks( }) } -fn get_transactions< +async fn get_block_for_header< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - range: RangeInclusive, - params: &Config, + result: anyhow::Result>, p2p: Arc

, - consensus: Arc, -) -> impl Stream>>> { - get_headers(range, params, p2p.clone()).map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |batch| { - { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - get_block_for_header(batch, p2p.clone(), consensus_port.clone()).await - }) - .then(|task| async { task.map_err(|e| anyhow!(e))? }) - } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - } - }) + consensus_port: Arc, +) -> anyhow::Result> { + let header = match result { + Ok(Some(h)) => h, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: header, + } = header; + let id = header.entity.id(); + let block_id = SourcePeer { peer_id, data: id }; + + // Check the consensus is valid on this header. + if !consensus_port + .check_sealed_header(&header) + .trace_err("Failed to check consensus on header")? + { + tracing::warn!("Header {:?} failed consensus check", header); + return Ok(None) + } + + // Wait for the da to be at least the da height on the header. + consensus_port + .await_da_height(&header.entity.da_height) + .await?; + + get_transactions_on_block(p2p.as_ref(), block_id, header).await } /// Waits for a notify or shutdown signal. 
@@ -351,7 +350,7 @@ async fn wait_for_notify_or_shutdown( async fn get_headers_batch( mut range: RangeInclusive, p2p: Arc, -) -> impl Stream { +) -> impl Stream>> { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), From 0136cc886cf6ccd8f26188b246d640d5aa4455ff Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 19:55:07 -0400 Subject: [PATCH 35/55] Minor refactor --- crates/services/sync/src/import.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index c68659f8b77..8fdea2262e9 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -229,8 +229,6 @@ where } } -type PeerBlockHeader = SourcePeer; - fn get_transactions< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, @@ -262,7 +260,7 @@ fn get_headers( range: RangeInclusive, params: &Config, p2p: Arc

, -) -> impl Stream>> { +) -> impl Stream>>> { let Config { header_batch_size, .. } = params; @@ -296,7 +294,7 @@ async fn get_block_for_header< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - result: anyhow::Result>, + result: anyhow::Result>>, p2p: Arc

, consensus_port: Arc, ) -> anyhow::Result> { @@ -350,7 +348,7 @@ async fn wait_for_notify_or_shutdown( async fn get_headers_batch( mut range: RangeInclusive, p2p: Arc, -) -> impl Stream>> { +) -> impl Stream>>> { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), From 769bf6edf64597b249f33689b592a016ce0450a0 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 19:55:13 -0400 Subject: [PATCH 36/55] Update tests --- crates/services/sync/src/import/tests.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 796dc70094c..ded57a9b32d 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -53,7 +53,7 @@ async fn import__signature_fails_on_header_4_only() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|h| Ok(**h.entity.height() != 4)); let state = State::new(3, 5).into(); @@ -172,13 +172,13 @@ async fn import__transactions_not_found() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); p2p.expect_get_transactions() - .times(1) + .times(2) .returning(|_| Ok(None)); let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), executor: DefaultMocks::times([0]), }; @@ -197,7 +197,7 @@ async fn import__transactions_not_found_for_header_4() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); let mut height = 3; - p2p.expect_get_transactions().times(1).returning(move |_| { + p2p.expect_get_transactions().times(2).returning(move |_| { height += 1; if height == 4 { Ok(None) @@ -209,7 +209,7 @@ async fn import__transactions_not_found_for_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks 
{ p2p, - consensus_port: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), executor: DefaultMocks::times([0]), }; @@ -281,7 +281,7 @@ async fn import__p2p_error_on_4_transactions() { .times(1) .returning(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))); let mut height = 3; - p2p.expect_get_transactions().times(1).returning(move |_| { + p2p.expect_get_transactions().times(2).returning(move |_| { height += 1; if height == 4 { Err(anyhow::anyhow!("Some network error")) @@ -293,7 +293,7 @@ async fn import__p2p_error_on_4_transactions() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), executor: DefaultMocks::times([0]), }; @@ -341,7 +341,7 @@ async fn import__consensus_error_on_4() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|h| { if **h.entity.height() == 4 { Err(anyhow::anyhow!("Some consensus error")) @@ -414,8 +414,8 @@ async fn import__execution_error_on_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { - consensus_port: DefaultMocks::times([1]), - p2p: DefaultMocks::times([1]), + consensus_port: DefaultMocks::times([2]), + p2p: DefaultMocks::times([2]), executor, }; @@ -461,7 +461,7 @@ async fn signature_always_fails() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|_| Ok(false)); let state = State::new(3, 5).into(); From 51f74d2a23a1d0a3b16b5008fd34549aaf492954 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 19:58:56 -0400 Subject: [PATCH 37/55] Fix tests --- crates/services/sync/src/import/tests.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index ded57a9b32d..78fbb9416b0 100644 --- 
a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -55,11 +55,15 @@ async fn import__signature_fails_on_header_4_only() { .expect_check_sealed_header() .times(2) .returning(|h| Ok(**h.entity.height() != 4)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([0]), + p2p: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; @@ -349,11 +353,15 @@ async fn import__consensus_error_on_4() { Ok(true) } }); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([0]), + p2p: DefaultMocks::times([1]), executor: DefaultMocks::times([0]), }; From f2a37b45a3b8d4dac298aebe9535840080a808c3 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 20:03:23 -0400 Subject: [PATCH 38/55] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2cf97220f0..b0c934b6676 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ Description of the upcoming release here. ### Changed +- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Modified block synchronization to use asynchronous task execution when retrieving block headers. - [#1314](https://github.com/FuelLabs/fuel-core/pull/1314): Removed `types::ConsensusParameters` in favour of `fuel_tx:ConsensusParameters`. - [#1302](https://github.com/FuelLabs/fuel-core/pull/1302): Removed the usage of flake and building of the bridge contract ABI. It simplifies the maintenance and updating of the events, requiring only putting the event definition into the codebase of the relayer. 
From 0cbcaa1ed067c42bffd994ea926931d0d6f31249 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 23 Aug 2023 22:18:11 -0400 Subject: [PATCH 39/55] Rename get_block_for_header to get_sealed_blocks --- crates/services/sync/src/import.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 8fdea2262e9..0f4671ca155 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -246,7 +246,7 @@ fn get_transactions< let p2p = p2p.clone(); let consensus_port = consensus_port.clone(); tokio::spawn(async move { - get_block_for_header(batch, p2p.clone(), consensus_port.clone()).await + get_sealed_blocks(batch, p2p.clone(), consensus_port.clone()).await }) .then(|task| async { task.map_err(|e| anyhow!(e))? }) } @@ -290,7 +290,7 @@ fn range_chunks( }) } -async fn get_block_for_header< +async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( From 11357e690457ce89637de3012c8844c8997ae684 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Thu, 24 Aug 2023 13:52:51 +0100 Subject: [PATCH 40/55] Update the benchmark --- benches/benches/import.rs | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index c1c1a85412f..3a22be25ba6 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -1,8 +1,6 @@ use criterion::{ criterion_group, criterion_main, - measurement::WallTime, - BenchmarkGroup, Criterion, }; use fuel_core_benches::import::{ @@ -23,26 +21,28 @@ async fn execute_import(import: PressureImport, shutdown: &mut StateWatcher) { import.import(shutdown).await.unwrap(); } -fn name(n: u32, durations: Durations, buffer_size: usize) -> String { +fn name(n: u32, durations: Durations, batch_size: u32, buffer_size: usize) -> String { format!( - "import {n} * {d_h}/{d_c}/{d_t}/{d_e} - 
{sz}", + "import {n} * {d_h}/{d_c}/{d_t}/{d_e} - {bas}/{bus}", n = n, d_h = durations.headers.as_millis(), d_c = durations.consensus.as_millis(), d_t = durations.transactions.as_millis(), d_e = durations.executes.as_millis(), - sz = buffer_size + bas = batch_size, + bus = buffer_size ) } fn bench_imports(c: &mut Criterion) { - let bench_import = |group: &mut BenchmarkGroup, + let bench_import = |c: &mut Criterion, n: u32, durations: Durations, batch_size: u32, buffer_size: usize| { - let name = name(n, durations, buffer_size); - group.bench_function(name, move |b| { + let name = name(n, durations, batch_size, buffer_size); + let mut group = c.benchmark_group(format!("import {}", name)); + group.bench_function("bench", move |b| { let rt = Runtime::new().unwrap(); b.to_async(&rt).iter_custom(|iters| async move { let mut elapsed_time = Duration::default(); @@ -68,33 +68,31 @@ fn bench_imports(c: &mut Criterion) { }); }; - let mut group = c.benchmark_group("import"); - let n = 100; let durations = Durations { headers: Duration::from_millis(5), consensus: Duration::from_millis(5), - transactions: Duration::from_millis(5), - executes: Duration::from_millis(10), + transactions: Duration::from_millis(10), + executes: Duration::from_millis(15), }; // Header batch size = 10, header/txn buffer size = 10 - bench_import(&mut group, n, durations, 10, 10); + bench_import(c, n, durations, 10, 10); // Header batch size = 20, header/txn buffer size = 10 - bench_import(&mut group, n, durations, 20, 10); + bench_import(c, n, durations, 20, 10); // Header batch size = 50, header/txn buffer size = 10 - bench_import(&mut group, n, durations, 20, 10); + bench_import(c, n, durations, 50, 10); // Header batch size = 10, header/txn buffer size = 20 - bench_import(&mut group, n, durations, 10, 20); + bench_import(c, n, durations, 10, 20); // Header batch size = 10, header/txn buffer size = 50 - bench_import(&mut group, n, durations, 10, 50); + bench_import(c, n, durations, 10, 50); // Header 
batch size = 50, header/txn buffer size = 50 - bench_import(&mut group, n, durations, 10, 20); + bench_import(c, n, durations, 50, 50); } criterion_group!(benches, bench_imports); From 6838932febabdb6ce790f9e2215e117e1dab3dc4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 24 Aug 2023 12:54:28 -0400 Subject: [PATCH 41/55] Remove max_header_batch_requests --- benches/src/import.rs | 2 -- bin/fuel-core/src/cli/run/p2p.rs | 4 ---- crates/services/sync/src/import.rs | 3 --- crates/services/sync/src/import/back_pressure_tests.rs | 6 ------ crates/services/sync/src/import/tests.rs | 1 - crates/services/sync/src/service/tests.rs | 1 - deployment/charts/templates/fuel-core-deploy.yaml | 4 ---- deployment/charts/values.yaml | 1 - deployment/scripts/.env | 1 - 9 files changed, 23 deletions(-) diff --git a/benches/src/import.rs b/benches/src/import.rs index 3d79206bda5..c8dff450583 100644 --- a/benches/src/import.rs +++ b/benches/src/import.rs @@ -40,7 +40,6 @@ pub fn provision_import_test( shared_state: SharedMutex, input: Durations, header_batch_size: u32, - max_header_batch_requests: usize, max_get_txns_requests: usize, ) -> ( PressureImport, @@ -49,7 +48,6 @@ pub fn provision_import_test( ) { let shared_notify = Arc::new(Notify::new()); let params = Config { - max_header_batch_requests, header_batch_size, max_get_txns_requests, }; diff --git a/bin/fuel-core/src/cli/run/p2p.rs b/bin/fuel-core/src/cli/run/p2p.rs index ea1fda55c80..e1b31d816c7 100644 --- a/bin/fuel-core/src/cli/run/p2p.rs +++ b/bin/fuel-core/src/cli/run/p2p.rs @@ -185,9 +185,6 @@ pub struct SyncArgs { /// The maximum number of headers to request in a single batch. #[clap(long = "sync-header-batch-size", default_value = "10", env)] pub header_batch_size: u32, - /// The maximum number of header batch requests to have active at one time. 
- #[clap(long = "sync-max-header-batch-requests", default_value = "10", env)] - pub max_header_batch_requests: usize, } #[derive(Clone, Debug)] @@ -220,7 +217,6 @@ impl From for fuel_core::sync::Config { Self { max_get_txns_requests: value.max_get_txns_requests, header_batch_size: value.header_batch_size, - max_header_batch_requests: value.max_header_batch_requests, } } } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 0f4671ca155..91ee3e73f1e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -63,8 +63,6 @@ pub struct Config { pub max_get_txns_requests: usize, /// The maximum number of headers to request in a single batch. pub header_batch_size: u32, - /// The maximum number of header batch requests to have active at one time. - pub max_header_batch_requests: usize, } impl Default for Config { @@ -72,7 +70,6 @@ impl Default for Config { Self { max_get_txns_requests: 10, header_batch_size: 100, - max_header_batch_requests: 10, } } } diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index 6dea34a7678..ed69e65e6d4 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -23,7 +23,6 @@ struct Input { Config{ max_get_txns_requests: 1, header_batch_size: 1, - max_header_batch_requests: 1, } => Count::default() ; "Empty sanity test" )] @@ -36,7 +35,6 @@ struct Input { Config{ max_get_txns_requests: 1, header_batch_size: 1, - max_header_batch_requests: 1, } => is less_or_equal_than Count{ headers: 1, consensus: 1, transactions: 1, executes: 1, blocks: 1 } ; "Single with slow headers" @@ -50,7 +48,6 @@ struct Input { Config{ max_get_txns_requests: 10, header_batch_size: 10, - max_header_batch_requests: 1, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } ; "100 headers with max 10 with 
slow headers" @@ -64,7 +61,6 @@ struct Input { Config{ max_get_txns_requests: 10, header_batch_size: 10, - max_header_batch_requests: 1, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } ; "100 headers with max 10 with slow transactions" @@ -78,7 +74,6 @@ struct Input { Config{ max_get_txns_requests: 10, header_batch_size: 10, - max_header_batch_requests: 1, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } ; "50 headers with max 10 with slow executes" @@ -92,7 +87,6 @@ struct Input { Config{ max_get_txns_requests: 10, header_batch_size: 10, - max_header_batch_requests: 10, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } ; "50 headers with max 10 size and max 10 requests" diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 78fbb9416b0..0d4e8938cda 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -530,7 +530,6 @@ async fn test_import_inner( let params = Config { max_get_txns_requests: 10, header_batch_size: 10, - max_header_batch_requests: 10, }; let p2p = Arc::new(p2p); diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 04617bb97a5..ae7edaaa6af 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -61,7 +61,6 @@ async fn test_new_service() { let params = Config { max_get_txns_requests: 10, header_batch_size: 10, - max_header_batch_requests: 10, }; let s = new_service(4u32.into(), p2p, importer, consensus, params).unwrap(); diff --git a/deployment/charts/templates/fuel-core-deploy.yaml b/deployment/charts/templates/fuel-core-deploy.yaml index 2066b38e383..7c0dbf5b328 100644 --- a/deployment/charts/templates/fuel-core-deploy.yaml +++ b/deployment/charts/templates/fuel-core-deploy.yaml @@ 
-228,10 +228,6 @@ spec: - "--sync-header-batch-size" - "{{ .Values.app.sync_header_batch_size }}" {{- end }} - {{- if .Values.app.sync_max_header_batch_requests }} - - "--sync-max-header-batch-requests" - - "{{ .Values.app.sync_max_header_batch_requests }}" - {{- end }} {{- if .Values.app.reserved_nodes_only_mode }} - "--reserved-nodes-only-mode" {{- end}} diff --git a/deployment/charts/values.yaml b/deployment/charts/values.yaml index 384b6932901..3cf22ef2a53 100644 --- a/deployment/charts/values.yaml +++ b/deployment/charts/values.yaml @@ -23,7 +23,6 @@ app: max_transmit_size: "${fuel_core_max_buffer_size}" sync_max_get_txns: "${fuel_core_sync_max_get_txns}" sync_header_batch_size: "${fuel_core_sync_header_batch_size}" - sync_max_header_batch_requests: "${fuel_core_sync_max_header_batch_requests}" p2p_key: ${fuel_core_p2p_key} allow_private_addresses: ${fuel_core_allow_private_addresses} reserved_nodes_only_mode: ${fuel_core_reserved_only} diff --git a/deployment/scripts/.env b/deployment/scripts/.env index f225ae81bbd..a5c91962642 100644 --- a/deployment/scripts/.env +++ b/deployment/scripts/.env @@ -67,7 +67,6 @@ fuel_core_reserved_nodes="/dns4/test.test.svc.cluster.local/tcp/30333/p2p/16Uiu2 # Sync Environment Variables fuel_core_sync_max_get_txns="10" fuel_core_sync_header_batch_size="10" -fuel_core_sync_max_header_batch_requests="10" # Ingress Environment variables fuel_core_ingress_dns="node.example.com" From 8fdd12d517536c9e24685a720419647fa2ffbce8 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 24 Aug 2023 13:24:46 -0400 Subject: [PATCH 42/55] Update import.rs --- benches/benches/import.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 3a22be25ba6..cb88c474543 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -56,7 +56,6 @@ fn bench_imports(c: &mut Criterion) { durations, batch_size, buffer_size, - buffer_size, ); import.notify_one(); let start = 
std::time::Instant::now(); From 574e9936c25890bfbc194962fdab9676fa7216e7 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 24 Aug 2023 13:48:45 -0400 Subject: [PATCH 43/55] Update naming and CLI arg --- CHANGELOG.md | 2 +- benches/src/import.rs | 4 ++-- bin/fuel-core/src/cli/run/p2p.rs | 6 +++--- crates/services/sync/src/import.rs | 16 ++++++++-------- .../sync/src/import/back_pressure_tests.rs | 12 ++++++------ crates/services/sync/src/import/tests.rs | 2 +- crates/services/sync/src/service/tests.rs | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0c934b6676..c6707cffffa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,7 +19,7 @@ Description of the upcoming release here. ### Changed -- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Modified block synchronization to use asynchronous task execution when retrieving block headers. +- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Modified block synchronization to use asynchronous task execution when retrieving block headers. Removed the `--sync-max-header-batch-requests` CLI argument, and renamed `--sync-max-get-txns` to `--sync-block-stream-buffer-size` to better represent the current behavior in the import. - [#1314](https://github.com/FuelLabs/fuel-core/pull/1314): Removed `types::ConsensusParameters` in favour of `fuel_tx:ConsensusParameters`. - [#1302](https://github.com/FuelLabs/fuel-core/pull/1302): Removed the usage of flake and building of the bridge contract ABI. It simplifies the maintenance and updating of the events, requiring only putting the event definition into the codebase of the relayer. 
diff --git a/benches/src/import.rs b/benches/src/import.rs index c8dff450583..88f26857046 100644 --- a/benches/src/import.rs +++ b/benches/src/import.rs @@ -40,7 +40,7 @@ pub fn provision_import_test( shared_state: SharedMutex, input: Durations, header_batch_size: u32, - max_get_txns_requests: usize, + block_stream_buffer_size: usize, ) -> ( PressureImport, Sender, @@ -49,7 +49,7 @@ pub fn provision_import_test( let shared_notify = Arc::new(Notify::new()); let params = Config { header_batch_size, - max_get_txns_requests, + block_stream_buffer_size, }; let p2p = Arc::new(PressurePeerToPeer::new( shared_count.clone(), diff --git a/bin/fuel-core/src/cli/run/p2p.rs b/bin/fuel-core/src/cli/run/p2p.rs index e1b31d816c7..b48631eaa06 100644 --- a/bin/fuel-core/src/cli/run/p2p.rs +++ b/bin/fuel-core/src/cli/run/p2p.rs @@ -180,8 +180,8 @@ pub struct P2PArgs { #[derive(Debug, Clone, Args)] pub struct SyncArgs { /// The maximum number of get transaction requests to make in a single batch. - #[clap(long = "sync-max-get-txns", default_value = "10", env)] - pub max_get_txns_requests: usize, + #[clap(long = "sync-block-stream-buffer-size", default_value = "10", env)] + pub block_stream_buffer_size: usize, /// The maximum number of headers to request in a single batch. #[clap(long = "sync-header-batch-size", default_value = "10", env)] pub header_batch_size: u32, @@ -215,7 +215,7 @@ impl KeypairArg { impl From for fuel_core::sync::Config { fn from(value: SyncArgs) -> Self { Self { - max_get_txns_requests: value.max_get_txns_requests, + block_stream_buffer_size: value.block_stream_buffer_size, header_batch_size: value.header_batch_size, } } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 91ee3e73f1e..fd9064dff0d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -60,7 +60,7 @@ mod back_pressure_tests; /// Parameters for the import task. 
pub struct Config { /// The maximum number of get transaction requests to make in a single batch. - pub max_get_txns_requests: usize, + pub block_stream_buffer_size: usize, /// The maximum number of headers to request in a single batch. pub header_batch_size: u32, } @@ -68,7 +68,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Self { - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 100, } } @@ -175,9 +175,9 @@ where .. } = &self; - get_transactions(range.clone(), params, p2p.clone(), consensus.clone()) - // Request up to `max_get_txns_requests` transactions from the network. - .buffered(params.max_get_txns_requests) + get_block_stream(range.clone(), params, p2p.clone(), consensus.clone()) + // Request up to `block_stream_buffer_size` transactions from the network. + .buffered(params.block_stream_buffer_size) // Continue the stream unless an error or none occurs. // Note the error will be returned but the stream will close. .into_scan_none_or_err() @@ -226,7 +226,7 @@ where } } -fn get_transactions< +fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( @@ -235,7 +235,7 @@ fn get_transactions< p2p: Arc

, consensus: Arc, ) -> impl Stream>>> { - get_headers(range, params, p2p.clone()).map({ + get_header_stream(range, params, p2p.clone()).map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); move |batch| { @@ -253,7 +253,7 @@ fn get_transactions< }) } -fn get_headers( +fn get_header_stream( range: RangeInclusive, params: &Config, p2p: Arc

, diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index ed69e65e6d4..c58fc97032d 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -21,7 +21,7 @@ struct Input { #[test_case( Input::default(), State::new(None, None), Config{ - max_get_txns_requests: 1, + block_stream_buffer_size: 1, header_batch_size: 1, } => Count::default() ; "Empty sanity test" @@ -33,7 +33,7 @@ struct Input { }, State::new(None, 0), Config{ - max_get_txns_requests: 1, + block_stream_buffer_size: 1, header_batch_size: 1, } => is less_or_equal_than Count{ headers: 1, consensus: 1, transactions: 1, executes: 1, blocks: 1 } @@ -46,7 +46,7 @@ struct Input { }, State::new(None, 100), Config{ - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 10, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } @@ -59,7 +59,7 @@ struct Input { }, State::new(None, 100), Config{ - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 10, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } @@ -72,7 +72,7 @@ struct Input { }, State::new(None, 50), Config{ - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 10, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } @@ -85,7 +85,7 @@ struct Input { }, State::new(None, 50), Config{ - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 10, } => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 21 } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 0d4e8938cda..d81a9bea606 100644 --- a/crates/services/sync/src/import/tests.rs +++ 
b/crates/services/sync/src/import/tests.rs @@ -528,7 +528,7 @@ async fn test_import_inner( executor, } = mocks; let params = Config { - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 10, }; let p2p = Arc::new(p2p); diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index ae7edaaa6af..bd1d59123fa 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -59,7 +59,7 @@ async fn test_new_service() { .returning(|_| Ok(true)); consensus.expect_await_da_height().returning(|_| Ok(())); let params = Config { - max_get_txns_requests: 10, + block_stream_buffer_size: 10, header_batch_size: 10, }; let s = new_service(4u32.into(), p2p, importer, consensus, params).unwrap(); From 2429ee0d7c9c8b003a71dc0d74cf977eda5ec78a Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 24 Aug 2023 13:54:06 -0400 Subject: [PATCH 44/55] Replace max_get_txns with block_stream_size in configs --- deployment/charts/templates/fuel-core-deploy.yaml | 8 ++++---- deployment/charts/values.yaml | 2 +- deployment/scripts/.env | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deployment/charts/templates/fuel-core-deploy.yaml b/deployment/charts/templates/fuel-core-deploy.yaml index 7c0dbf5b328..4b274ebaa09 100644 --- a/deployment/charts/templates/fuel-core-deploy.yaml +++ b/deployment/charts/templates/fuel-core-deploy.yaml @@ -220,11 +220,11 @@ spec: - "--max-transmit-size" - "{{ .Values.app.max_transmit_size }}" {{- end }} - {{- if .Values.app.sync_max_get_txns }} - - "--sync-max-get-txns" - - "{{ .Values.app.sync_max_get_txns }}" + {{- if .Values.app.sync_block_stream_size }} + - "--sync-block-stream-size" + - "{{ .Values.app.sync_block_stream_size }}" {{- end }} - {{- if .Values.app.sync_header_batch_size}} + {{- if .Values.app.sync_header_batch_size }} - "--sync-header-batch-size" - "{{ .Values.app.sync_header_batch_size }}" {{- end }} diff --git 
a/deployment/charts/values.yaml b/deployment/charts/values.yaml index 3cf22ef2a53..5f6dcafa8c6 100644 --- a/deployment/charts/values.yaml +++ b/deployment/charts/values.yaml @@ -21,7 +21,7 @@ app: max_headers_per_request: "${fuel_core_max_headers_per_request}" max_database_cache_size: "${fuel_core_max_database_cache_size}" max_transmit_size: "${fuel_core_max_buffer_size}" - sync_max_get_txns: "${fuel_core_sync_max_get_txns}" + sync_block_stream_size: "${fuel_core_sync_block_stream_size}" sync_header_batch_size: "${fuel_core_sync_header_batch_size}" p2p_key: ${fuel_core_p2p_key} allow_private_addresses: ${fuel_core_allow_private_addresses} diff --git a/deployment/scripts/.env b/deployment/scripts/.env index a5c91962642..d7bb380a84f 100644 --- a/deployment/scripts/.env +++ b/deployment/scripts/.env @@ -65,7 +65,7 @@ fuel_core_reserved_nodes="/dns4/test.test.svc.cluster.local/tcp/30333/p2p/16Uiu2 # fuel_core_bootstrap_nodes="/dns4/test.test.svc.cluster.local/tcp/30333/p2p/16Uiu2HAmEB6RQuDfEZjvosRRundrEddfGqgRq51EReNV9E4pfDw5,/dns4/sentry-3/tcp/30333/16Uiu2HAmEB6RQuDfEZjvosRRundrEddfGqgRq51EReNV9E4pfDw5" # Sync Environment Variables -fuel_core_sync_max_get_txns="10" +fuel_core_sync_block_stream_size="10" fuel_core_sync_header_batch_size="10" # Ingress Environment variables From e35504204b3c87647b1f6d3c732ec2b8a59981c1 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 24 Aug 2023 19:47:38 -0400 Subject: [PATCH 45/55] Update benchmarks --- benches/benches/import.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index cb88c474543..c8402b37e51 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -69,27 +69,27 @@ fn bench_imports(c: &mut Criterion) { let n = 100; let durations = Durations { - headers: Duration::from_millis(5), - consensus: Duration::from_millis(5), + headers: Duration::from_millis(10), + consensus: Duration::from_millis(10), 
transactions: Duration::from_millis(10), - executes: Duration::from_millis(15), + executes: Duration::from_millis(5), }; // Header batch size = 10, header/txn buffer size = 10 bench_import(c, n, durations, 10, 10); - // Header batch size = 20, header/txn buffer size = 10 - bench_import(c, n, durations, 20, 10); - - // Header batch size = 50, header/txn buffer size = 10 - bench_import(c, n, durations, 50, 10); - - // Header batch size = 10, header/txn buffer size = 20 - bench_import(c, n, durations, 10, 20); + // Header batch size = 10, header/txn buffer size = 25 + bench_import(c, n, durations, 10, 25); // Header batch size = 10, header/txn buffer size = 50 bench_import(c, n, durations, 10, 50); + // Header batch size = 25, header/txn buffer size = 10 + bench_import(c, n, durations, 25, 10); + + // Header batch size = 50, header/txn buffer size = 10 + bench_import(c, n, durations, 50, 10); + // Header batch size = 50, header/txn buffer size = 50 bench_import(c, n, durations, 50, 50); } From 1f9c36e2ea06527b332e98b02282e48e9574fb47 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 10:09:13 -0400 Subject: [PATCH 46/55] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6707cffffa..9f98e6a5c3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,7 +19,7 @@ Description of the upcoming release here. ### Changed -- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Modified block synchronization to use asynchronous task execution when retrieving block headers. Removed the `--sync-max-header-batch-requests` CLI argument, and renamed `--sync-max-get-txns` to `--sync-block-stream-buffer-size` to better represent the current behavior in the import. +- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Modified block synchronization to use asynchronous task execution when retrieving block headers. 
- [#1314](https://github.com/FuelLabs/fuel-core/pull/1314): Removed `types::ConsensusParameters` in favour of `fuel_tx:ConsensusParameters`. - [#1302](https://github.com/FuelLabs/fuel-core/pull/1302): Removed the usage of flake and building of the bridge contract ABI. It simplifies the maintenance and updating of the events, requiring only putting the event definition into the codebase of the relayer. @@ -27,6 +27,7 @@ Description of the upcoming release here. - [#1270](https://github.com/FuelLabs/fuel-core/pull/1270): Modify the way block headers are retrieved from peers to be done in batches. #### Breaking +- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Removed the `--sync-max-header-batch-requests` CLI argument, and renamed `--sync-max-get-txns` to `--sync-block-stream-buffer-size` to better represent the current behavior in the import. - [#1279](https://github.com/FuelLabs/fuel-core/pull/1279): Added a new CLI flag to enable the Relayer service `--enable-relayer`, and disabled the Relayer service by default. When supplying the `--enable-relayer` flag, the `--relayer` argument becomes mandatory, and omitting it is an error. Similarly, providing a `--relayer` argument without the `--enable-relayer` flag is an error. Lastly, providing the `--keypair` or `--network` arguments will also produce an error if the `--enable-p2p` flag is not set. - [#1262](https://github.com/FuelLabs/fuel-core/pull/1262): The `ConsensusParameters` aggregates all configuration data related to the consensus. It contains many fields that are segregated by the usage. The API of some functions was affected to use lesser types instead the whole `ConsensusParameters`. It is a huge breaking change requiring repetitively monotonically updating all places that use the `ConsensusParameters`. But during updating, consider that maybe you can use lesser types. Usage of them may simplify signatures of methods and make them more user-friendly and transparent. 
- [#1290](https://github.com/FuelLabs/fuel-core/pull/1290): Standardize CLI args to use `-` instead of `_` From 425bb8cb4fa37263d39a6651c45da20866bfb9ef Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 10:17:18 -0400 Subject: [PATCH 47/55] Remove Nones from header stream --- crates/services/sync/src/import.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index fd9064dff0d..0ff630f2152 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -257,7 +257,7 @@ fn get_header_stream( range: RangeInclusive, params: &Config, p2p: Arc

<P>, -) -> impl Stream<Item = anyhow::Result<Option<SourcePeer<SealedBlockHeader>>>> { +) -> impl Stream<Item = anyhow::Result<SourcePeer<SealedBlockHeader>>> { let Config { header_batch_size, .. } = params; @@ -274,6 +274,8 @@ fn get_header_stream( get_headers_batch(range, p2p).await }) .flatten() + .into_scan_none_or_err() + .scan_none_or_err() } fn range_chunks( @@ -291,13 +293,12 @@ async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - result: anyhow::Result<Option<SourcePeer<SealedBlockHeader>>>, + result: anyhow::Result<SourcePeer<SealedBlockHeader>>, p2p: Arc

<P>, consensus_port: Arc<C>, ) -> anyhow::Result<Option<SealedBlock>> { let header = match result { - Ok(Some(h)) => h, - Ok(None) => return Ok(None), + Ok(h) => h, Err(e) => return Err(e), }; let SourcePeer { From 839725a4d2c8ecfad8db3b59051175c05a17a650 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 18:55:56 -0400 Subject: [PATCH 48/55] Graceful shutdown using mpsc blocking --- crates/services/sync/src/import.rs | 39 +++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 0ff630f2152..b7c2de7fe75 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -6,6 +6,7 @@ use std::{ future::Future, iter, ops::RangeInclusive, + // sync, sync::Arc, }; @@ -175,7 +176,32 @@ where .. } = &self; - get_block_stream(range.clone(), params, p2p.clone(), consensus.clone()) + let shutdown_signal = shutdown.clone(); + let (shutdown_guard, mut shutdown_guard_recv) = + tokio::sync::mpsc::channel::<()>(1); + let block_stream = + get_block_stream(range.clone(), params, p2p.clone(), consensus.clone()); + let guard_stream = futures::stream::repeat_with(move || { + (shutdown_guard.clone(), shutdown_signal.clone()) + }); + let stream = block_stream.zip(guard_stream); + let result = stream + .then(move |(stream_block, (shutdown_guard, shutdown_signal))| async move { + let shutdown_guard = shutdown_guard.clone(); + let shutdown_signal = shutdown_signal.clone(); + tokio::spawn(async move { + // Hold a shutdown sender for the lifetime of the spawned task + let _shutdown_guard = shutdown_guard.clone(); + let mut shutdown_signal = shutdown_signal.clone(); + tokio::select! { + // Stream a single block + block = stream_block => block, + // If a shutdown signal is received during the stream, terminate early and + // return an empty response + _ = shutdown_signal.while_started() => Ok(None) + } + }).then(|task| async { task.map_err(|e| anyhow!(e))?
}) + }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) // Continue the stream unless an error or none occurs. @@ -222,7 +248,11 @@ where } }) .in_current_span() - .await + .await; + + // Wait for any spawned tasks to shutdown + let _ = shutdown_guard_recv.recv().await; + result } } @@ -242,10 +272,7 @@ fn get_block_stream< { let p2p = p2p.clone(); let consensus_port = consensus_port.clone(); - tokio::spawn(async move { - get_sealed_blocks(batch, p2p.clone(), consensus_port.clone()).await - }) - .then(|task| async { task.map_err(|e| anyhow!(e))? }) + get_sealed_blocks(batch, p2p.clone(), consensus_port.clone()) } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() From ab86da726600e5318ef101f1511222adf11f882c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 19:04:55 -0400 Subject: [PATCH 49/55] Clean up --- crates/services/sync/src/import.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index b7c2de7fe75..ad0862b5533 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -6,7 +6,6 @@ use std::{ future::Future, iter, ops::RangeInclusive, - // sync, sync::Arc, }; From 5d9966ea8830c308bc4e5a6cf9c0d73738a83612 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 19:08:41 -0400 Subject: [PATCH 50/55] Update naming --- crates/services/sync/src/import.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index ad0862b5533..a1cff7b434e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -185,7 +185,7 @@ where }); let stream = block_stream.zip(guard_stream); let result = stream - .then(move |(stream_block, (shutdown_guard, shutdown_signal))| async move { + .then(move |(stream_block_batch, 
(shutdown_guard, shutdown_signal))| async move { let shutdown_guard = shutdown_guard.clone(); let shutdown_signal = shutdown_signal.clone(); tokio::spawn(async move { @@ -193,8 +193,8 @@ where let _shutdown_guard = shutdown_guard.clone(); let mut shutdown_signal = shutdown_signal.clone(); tokio::select! { - // Stream a single block - block = stream_block => block, + // Stream a batch of blocks + blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok(None) From 6616d8656fb470f1ad8144b553a8e2dc461fce68 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 19:12:09 -0400 Subject: [PATCH 51/55] Remove unnecessary `move` --- crates/services/sync/src/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index a1cff7b434e..e265fd2b58b 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -185,7 +185,7 @@ where }); let stream = block_stream.zip(guard_stream); let result = stream - .then(move |(stream_block_batch, (shutdown_guard, shutdown_signal))| async move { + .then(|(stream_block_batch, (shutdown_guard, shutdown_signal))| async move { let shutdown_guard = shutdown_guard.clone(); let shutdown_signal = shutdown_signal.clone(); tokio::spawn(async move { From 4c327d994c89afe915887b4413d714212a17771b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 19:13:52 -0400 Subject: [PATCH 52/55] Simplify --- crates/services/sync/src/import.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index e265fd2b58b..423db6dc10b 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -186,8 +186,6 @@ where let stream = block_stream.zip(guard_stream); let result = stream 
.then(|(stream_block_batch, (shutdown_guard, shutdown_signal))| async move { - let shutdown_guard = shutdown_guard.clone(); - let shutdown_signal = shutdown_signal.clone(); tokio::spawn(async move { // Hold a shutdown sender for the lifetime of the spawned task let _shutdown_guard = shutdown_guard.clone(); From 1af52dba5e680557e0c463079ea592ecbab08c58 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 19:26:36 -0400 Subject: [PATCH 53/55] Clippy --- crates/services/sync/src/import.rs | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 423db6dc10b..ad2322cd292 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -154,7 +154,7 @@ where Ok(()) } - #[tracing::instrument(skip(self, shutdown))] + // #[tracing::instrument(skip(self, shutdown))] /// Launches a stream to import and execute a range of blocks. /// /// This stream will process all blocks up to the given range or @@ -185,20 +185,20 @@ where }); let stream = block_stream.zip(guard_stream); let result = stream - .then(|(stream_block_batch, (shutdown_guard, shutdown_signal))| async move { - tokio::spawn(async move { - // Hold a shutdown sender for the lifetime of the spawned task - let _shutdown_guard = shutdown_guard.clone(); - let mut shutdown_signal = shutdown_signal.clone(); - tokio::select! { - // Stream a batch of blocks - blocks = stream_block_batch => blocks, - // If a shutdown signal is received during the stream, terminate early and - // return an empty response - _ = shutdown_signal.while_started() => Ok(None) - } - }).then(|task| async { task.map_err(|e| anyhow!(e))? 
}) - }) + .map(|(stream_block_batch, (shutdown_guard, shutdown_signal))| { + tokio::spawn(async move { + // Hold a shutdown sender for the lifetime of the spawned task + let _shutdown_guard = shutdown_guard.clone(); + let mut shutdown_signal = shutdown_signal.clone(); + tokio::select! { + // Stream a batch of blocks + blocks = stream_block_batch => blocks, + // If a shutdown signal is received during the stream, terminate early and + // return an empty response + _ = shutdown_signal.while_started() => Ok(None) + } + }).then(|task| async { task.map_err(|e| anyhow!(e))? }) + }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) // Continue the stream unless an error or none occurs. From a046d6de874b1a867f401386adc548179edad281 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 25 Aug 2023 20:01:34 -0400 Subject: [PATCH 54/55] Update import.rs --- crates/services/sync/src/import.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index ad2322cd292..dde7321c9a4 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -180,12 +180,10 @@ where tokio::sync::mpsc::channel::<()>(1); let block_stream = get_block_stream(range.clone(), params, p2p.clone(), consensus.clone()); - let guard_stream = futures::stream::repeat_with(move || { - (shutdown_guard.clone(), shutdown_signal.clone()) - }); - let stream = block_stream.zip(guard_stream); - let result = stream - .map(|(stream_block_batch, (shutdown_guard, shutdown_signal))| { + let result = block_stream + .map(move |stream_block_batch| { + let shutdown_guard = shutdown_guard.clone(); + let shutdown_signal = shutdown_signal.clone(); tokio::spawn(async move { // Hold a shutdown sender for the lifetime of the spawned task let _shutdown_guard = shutdown_guard.clone(); From f137f620abed1cded8d1f29a2a633907e62fcc4c Mon Sep 17 
00:00:00 2001 From: Brandon Vrooman Date: Sat, 26 Aug 2023 14:41:50 -0400 Subject: [PATCH 55/55] Update crates/services/sync/src/import.rs Co-authored-by: Green Baneling --- crates/services/sync/src/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index dde7321c9a4..cd9c355a5e8 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -154,7 +154,7 @@ where Ok(()) } - // #[tracing::instrument(skip(self, shutdown))] + #[tracing::instrument(skip(self, shutdown))] /// Launches a stream to import and execute a range of blocks. /// /// This stream will process all blocks up to the given range or