From 01373eb7bb4078bf076ef695baddae2080a5ce80 Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Thu, 6 Mar 2025 17:02:02 -0800 Subject: [PATCH] [testloop] Refactor builder to separate node state and shared state (#13041) The big `setup_client` function in the builder no longer takes the builder as a parameter. The common state has been extracted out of the builder into `SharedState`, while a `NodeState` is built for each individual client. Given a `NodeState` and the `SharedState`, a node can now be initialized independently, which is useful for implementing node restarts and for adding new nodes to the test environment outside the builder. A couple of other refactoring changes are included. Before ``` fn setup_client( &mut self, idx: usize, tempdir: &TempDir, is_archival: bool, network_shared_state: &TestLoopNetworkSharedState, ) -> TestData {} ``` After ``` fn setup_client( identifier: &str, test_loop: &mut TestLoopV2, node_handle: NodeState, shared_state: &SharedState, ) -> TestData {} ``` --- test-loop-tests/src/lib.rs | 3 +- test-loop-tests/src/{ => setup}/builder.rs | 656 ++++++++++-------- test-loop-tests/src/{ => setup}/env.rs | 89 +-- test-loop-tests/src/setup/mod.rs | 3 + test-loop-tests/src/setup/state.rs | 118 ++++ .../src/tests/bandwidth_scheduler.rs | 12 +- .../src/tests/chunk_validator_kickout.rs | 14 +- .../src/tests/congestion_control.rs | 12 +- .../congestion_control_genesis_bootstrap.rs | 11 +- .../contract_distribution_cross_shard.rs | 20 +- .../src/tests/contract_distribution_simple.rs | 13 +- .../src/tests/create_delete_account.rs | 20 +- test-loop-tests/src/tests/epoch_sync.rs | 15 +- .../fix_chunk_producer_stake_threshold.rs | 13 +- .../src/tests/fix_min_stake_ratio.rs | 14 +- .../src/tests/fix_stake_threshold.rs | 13 +- test-loop-tests/src/tests/global_contracts.rs | 15 +- test-loop-tests/src/tests/in_memory_tries.rs | 11 +- .../src/tests/malicious_chunk_producer.rs | 9 +- test-loop-tests/src/tests/max_receipt_size.rs | 110 ++- .../tests/multinode_stateless_validators.rs | 14 +- .../src/tests/multinode_test_loop_example.rs | 14 +- test-loop-tests/src/tests/optimistic_block.rs | 18 +- test-loop-tests/src/tests/protocol_upgrade.rs | 11 +- .../src/tests/reject_outdated_blocks.rs | 13 +- test-loop-tests/src/tests/resharding_v3.rs | 18 +- test-loop-tests/src/tests/restart_node.rs | 10 +- test-loop-tests/src/tests/state_sync.rs | 32 +- test-loop-tests/src/tests/syncing.rs | 15 +- .../tests/view_requests_to_archival_node.rs | 12 +- .../src/utils/contract_distribution.rs | 23 +- test-loop-tests/src/utils/loop_action.rs | 2 +- test-loop-tests/src/utils/mod.rs | 6 +- test-loop-tests/src/utils/network.rs | 2 +- test-loop-tests/src/utils/receipts.rs | 2 +- test-loop-tests/src/utils/resharding.rs | 2 +- test-loop-tests/src/utils/setups.rs | 5 +- test-loop-tests/src/utils/transactions.rs | 28 +- 38 files changed, 772 insertions(+), 626 deletions(-) rename test-loop-tests/src/{ => setup}/builder.rs (62%) rename test-loop-tests/src/{ => setup}/env.rs (65%) create mode 100644 test-loop-tests/src/setup/mod.rs create mode 100644 test-loop-tests/src/setup/state.rs diff --git a/test-loop-tests/src/lib.rs b/test-loop-tests/src/lib.rs index e820f0ede29..3e97657a712 100644 --- a/test-loop-tests/src/lib.rs +++ b/test-loop-tests/src/lib.rs @@ -1,5 +1,4 @@ #![cfg(test)] -mod builder; -mod env; +mod setup; mod tests; mod utils; diff --git a/test-loop-tests/src/builder.rs b/test-loop-tests/src/setup/builder.rs similarity index 62% rename from test-loop-tests/src/builder.rs rename to
test-loop-tests/src/setup/builder.rs index dbbda4aa55d..37cb82ca129 100644 --- a/test-loop-tests/src/builder.rs +++ b/test-loop-tests/src/setup/builder.rs @@ -1,5 +1,7 @@ +use itertools::Itertools; use near_chain_configs::test_genesis::TestGenesisBuilder; use std::collections::{HashMap, HashSet}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use tempfile::TempDir; @@ -41,13 +43,15 @@ use near_vm_runner::logic::ProtocolVersion; use near_vm_runner::{ContractRuntimeCache, FilesystemContractRuntimeCache}; use nearcore::state_sync::StateSyncDumper; -use super::env::{ClientToShardsManagerSender, TestData, TestLoopChunksStorage, TestLoopEnv}; -use super::utils::network::{ +use crate::utils::network::{ block_dropper_by_height, chunk_endorsement_dropper, chunk_endorsement_dropper_by_hash, }; + +use super::env::{ClientToShardsManagerSender, TestLoopChunksStorage, TestLoopEnv}; +use super::state::{NodeState, SharedState, TestData}; use near_chain::resharding::resharding_actor::ReshardingActor; -enum DropConditionKind { +pub enum DropConditionKind { /// Whether test loop should drop all chunks validated by the given account. /// Works if number of nodes is significant enough (at least three?). ChunksValidatedBy(AccountId), @@ -82,8 +86,6 @@ pub(crate) struct TestLoopBuilder { /// Accounts whose clients should be configured as an archival node. /// This should be a subset of the accounts in the `clients` list. archival_clients: HashSet, - /// Will store all chunks produced within the test loop. - chunks_storage: Arc>, /// Conditions under which chunks/endorsements/blocks are dropped. drop_condition_kinds: Vec, /// Number of latest epochs to keep before garbage collecting associated data. @@ -93,7 +95,7 @@ pub(crate) struct TestLoopBuilder { /// Custom function to change the configs before constructing each client. config_modifier: Option>, /// Whether to do the warmup or not. See `skip_warmup` for more details. - warmup: bool, + warmup_pending: Arc, /// Whether all nodes must track all shards. track_all_shards: bool, /// Whether to load mem tries for the tracked shards. @@ -294,12 +296,11 @@ impl TestLoopBuilder { stores_override: None, test_loop_data_dir: None, archival_clients: HashSet::new(), - chunks_storage: Default::default(), drop_condition_kinds: vec![], gc_num_epochs_to_keep: None, runtime_config_store: None, config_modifier: None, - warmup: true, + warmup_pending: Arc::new(AtomicBool::new(true)), track_all_shards: false, load_memtries_for_tracked_shards: true, upgrade_schedule: PROTOCOL_UPGRADE_SCHEDULE.clone(), @@ -421,8 +422,8 @@ impl TestLoopBuilder { /// Note that this can cause unexpected issues, as the chain behaves /// somewhat differently (and correctly so) at genesis. So only skip /// warmup if you are interested in the behavior of starting from genesis. - pub fn skip_warmup(mut self) -> Self { - self.warmup = false; + pub fn skip_warmup(self) -> Self { + self.warmup_pending.store(false, Ordering::Relaxed); self } @@ -450,7 +451,11 @@ impl TestLoopBuilder { /// Build the test loop environment. 
pub(crate) fn build(self) -> TestLoopEnv { - self.ensure_genesis().ensure_clients().build_impl() + self.ensure_genesis() + .ensure_epoch_config_store() + .ensure_clients() + .ensure_tempdir() + .build_impl() } fn ensure_genesis(self) -> Self { @@ -458,6 +463,11 @@ impl TestLoopBuilder { self } + fn ensure_epoch_config_store(self) -> Self { + assert!(self.epoch_config_store.is_some(), "EpochConfigStore must be provided"); + self + } + fn ensure_clients(self) -> Self { assert!(!self.clients.is_empty(), "Clients must be provided to the test loop"); assert!( @@ -467,40 +477,63 @@ impl TestLoopBuilder { self } + fn ensure_tempdir(mut self) -> Self { + self.test_loop_data_dir.get_or_insert_with(|| tempfile::tempdir().unwrap()); + self + } + fn build_impl(mut self) -> TestLoopEnv { - let mut datas = Vec::new(); - let tempdir = - self.test_loop_data_dir.take().unwrap_or_else(|| tempfile::tempdir().unwrap()); - let network_shared_state = TestLoopNetworkSharedState::new(); - for idx in 0..self.clients.len() { - let account = &self.clients[idx]; - let is_archival = self.archival_clients.contains(account); - let data = self.setup_client(idx, &tempdir, is_archival, &network_shared_state); - datas.push(data); - } + let warmup_pending = self.warmup_pending.clone(); + self.test_loop.send_adhoc_event("warmup_pending".into(), move |_| { + assert!( + !warmup_pending.load(Ordering::Relaxed), + "Warmup is pending! Call env.warmup() or builder.skip_warmup()" + ); + }); - let env = TestLoopEnv { test_loop: self.test_loop, datas, tempdir }; - if self.warmup { env.warmup() } else { env } + let node_states = + (0..self.clients.len()).map(|idx| self.setup_node_state(idx)).collect_vec(); + let (mut test_loop, shared_state) = self.setup_shared_state(); + let datas = node_states + .into_iter() + .map(|node_state| { + let account_id = node_state.account_id.clone(); + setup_client(account_id.as_str(), &mut test_loop, node_state, &shared_state) + }) + .collect_vec(); + + TestLoopEnv { test_loop, node_datas: datas, shared_state } } - fn setup_client( - &mut self, - idx: usize, - tempdir: &TempDir, - is_archival: bool, - network_shared_state: &TestLoopNetworkSharedState, - ) -> TestData { - let account_id = self.clients[idx].as_str(); + fn setup_shared_state(self) -> (TestLoopV2, SharedState) { + let shared_state = SharedState { + genesis: self.genesis.unwrap(), + tempdir: self.test_loop_data_dir.unwrap(), + epoch_config_store: self.epoch_config_store.unwrap(), + runtime_config_store: self.runtime_config_store, + network_shared_state: TestLoopNetworkSharedState::new(), + upgrade_schedule: self.upgrade_schedule, + chunks_storage: Default::default(), + drop_condition_kinds: self.drop_condition_kinds, + load_memtries_for_tracked_shards: self.load_memtries_for_tracked_shards, + warmup_pending: self.warmup_pending, + }; + (self.test_loop, shared_state) + } + + fn setup_node_state(&mut self, idx: usize) -> NodeState { + let account_id = self.clients[idx].clone(); + let client_config = self.setup_client_config(idx); + let (store, split_store) = self.setup_store(idx); + NodeState { account_id, client_config, store, split_store } + } - let client_adapter = LateBoundSender::new(); - let network_adapter = LateBoundSender::new(); - let state_snapshot_adapter = LateBoundSender::new(); - let partial_witness_adapter = LateBoundSender::new(); - let sync_jobs_adapter = LateBoundSender::new(); - let resharding_sender = LateBoundSender::new(); + fn setup_client_config(&mut self, idx: usize) -> ClientConfig { + let account_id = 
&self.clients[idx]; + let is_archival = self.archival_clients.contains(account_id); let genesis = self.genesis.as_ref().unwrap(); - let epoch_config_store = self.epoch_config_store.as_ref().unwrap(); + let tempdir = self.test_loop_data_dir.as_ref().unwrap(); let mut client_config = ClientConfig::test(true, 600, 2000, 4, is_archival, true, false); client_config.epoch_length = genesis.config.epoch_length; client_config.max_block_wait_delay = Duration::seconds(6); @@ -536,10 +569,8 @@ impl TestLoopBuilder { // Configure tracked shards. // * single shard tracking for validators // * all shard tracking for non-validators (RPCs and archival) - let is_validator = { - let epoch_config = epoch_config_store.get_config(genesis.config.protocol_version); - idx < epoch_config.num_validators() as usize - }; + let is_validator = + genesis.config.validators.iter().any(|validator| validator.account_id == *account_id); if is_validator && !self.track_all_shards { client_config.tracked_shards = Vec::new(); } else { @@ -550,288 +581,297 @@ impl TestLoopBuilder { config_modifier(&mut client_config, idx); } - let homedir = tempdir.path().join(format!("{}", idx)); - std::fs::create_dir_all(&homedir).expect("Unable to create homedir"); + client_config + } - let store_config = StoreConfig { - path: Some(homedir.clone()), - load_memtries_for_tracked_shards: self.load_memtries_for_tracked_shards, - ..Default::default() + fn setup_store(&mut self, idx: usize) -> (Store, Option) { + let account_id = &self.clients[idx]; + let is_archival = self.archival_clients.contains(account_id); + + let (store, split_store) = if let Some(stores_override) = &self.stores_override { + stores_override[idx].clone() + } else if is_archival { + let (hot_store, split_store) = create_test_split_store(); + (hot_store, Some(split_store)) + } else { + (create_test_store(), None) }; - let (store, split_store): (Store, Option) = - if let Some(stores_override) = &self.stores_override { - stores_override[idx].clone() - } else if is_archival { - let (hot_store, split_store) = create_test_split_store(); - (hot_store, Some(split_store)) - } else { - let hot_store = create_test_store(); - (hot_store, None) - }; - initialize_genesis_state(store.clone(), &genesis, None); - - let sync_jobs_actor = SyncJobsActor::new(client_adapter.as_multi_sender()); - let chain_genesis = ChainGenesis::new(&genesis.config); - let epoch_manager = EpochManager::new_arc_handle_from_epoch_config_store( - store.clone(), + let genesis = self.genesis.as_ref().unwrap(); + initialize_genesis_state(store.clone(), genesis, None); + (store, split_store) + } +} + +fn setup_client( + identifier: &str, + test_loop: &mut TestLoopV2, + node_handle: NodeState, + shared_state: &SharedState, +) -> TestData { + let NodeState { account_id, client_config, store, split_store } = node_handle; + let SharedState { + genesis, + tempdir, + epoch_config_store, + runtime_config_store, + network_shared_state, + upgrade_schedule, + chunks_storage, + drop_condition_kinds, + load_memtries_for_tracked_shards, + .. 
+ } = shared_state; + + let client_adapter = LateBoundSender::new(); + let network_adapter = LateBoundSender::new(); + let state_snapshot_adapter = LateBoundSender::new(); + let partial_witness_adapter = LateBoundSender::new(); + let sync_jobs_adapter = LateBoundSender::new(); + let resharding_sender = LateBoundSender::new(); + + let homedir = tempdir.path().join(format!("{}", identifier)); + std::fs::create_dir_all(&homedir).expect("Unable to create homedir"); + + let store_config = StoreConfig { + path: Some(homedir.clone()), + load_memtries_for_tracked_shards: *load_memtries_for_tracked_shards, + ..Default::default() + }; + + let sync_jobs_actor = SyncJobsActor::new(client_adapter.as_multi_sender()); + let chain_genesis = ChainGenesis::new(&genesis.config); + let epoch_manager = EpochManager::new_arc_handle_from_epoch_config_store( + store.clone(), + &genesis.config, + epoch_config_store.clone(), + ); + let shard_tracker = + ShardTracker::new(TrackedConfig::from_config(&client_config), epoch_manager.clone()); + + let contract_cache = FilesystemContractRuntimeCache::test().expect("filesystem contract cache"); + let runtime_adapter = NightshadeRuntime::test_with_trie_config( + &homedir, + store.clone(), + ContractRuntimeCache::handle(&contract_cache), + &genesis.config, + epoch_manager.clone(), + runtime_config_store.clone(), + TrieConfig::from_store_config(&store_config), + StateSnapshotType::EveryEpoch, + client_config.gc.gc_num_epochs_to_keep, + ); + + let state_snapshot = StateSnapshotActor::new( + runtime_adapter.get_flat_storage_manager(), + network_adapter.as_multi_sender(), + runtime_adapter.get_tries(), + ); + + let delete_snapshot_callback = + get_delete_snapshot_callback(state_snapshot_adapter.as_multi_sender()); + let make_snapshot_callback = get_make_snapshot_callback( + state_snapshot_adapter.as_multi_sender(), + runtime_adapter.get_flat_storage_manager(), + ); + let snapshot_callbacks = SnapshotCallbacks { make_snapshot_callback, delete_snapshot_callback }; + + let validator_signer = MutableConfigValue::new( + Some(Arc::new(create_test_signer(account_id.as_str()))), + "validator_signer", + ); + + let shards_manager_adapter = LateBoundSender::new(); + let client_to_shards_manager_sender = Arc::new(ClientToShardsManagerSender { + sender: shards_manager_adapter.clone(), + chunks_storage: chunks_storage.clone(), + }); + + // Generate a PeerId. It doesn't matter what this is. We're just making it based on + // the account ID, so that it is stable across multiple runs in the same test. + let peer_id = PeerId::new(create_test_signer(account_id.as_str()).public_key()); + + let client = Client::new( + test_loop.clock(), + client_config.clone(), + chain_genesis.clone(), + epoch_manager.clone(), + shard_tracker.clone(), + runtime_adapter.clone(), + network_adapter.as_multi_sender(), + client_to_shards_manager_sender.as_sender(), + validator_signer.clone(), + true, + [0; 32], + Some(snapshot_callbacks), + Arc::new(test_loop.async_computation_spawner(identifier, |_| Duration::milliseconds(80))), + partial_witness_adapter.as_multi_sender(), + resharding_sender.as_multi_sender(), + Arc::new(test_loop.future_spawner(identifier)), + client_adapter.as_multi_sender(), + client_adapter.as_multi_sender(), + upgrade_schedule.clone(), + ) + .unwrap(); + + // If this is an archival node and split storage is initialized, then create view-specific + // versions of EpochManager, ShardTracker and RuntimeAdapter and use them to initialize the + // ViewClientActorInner. 
Otherwise, we use the regular versions created above. + let (view_epoch_manager, view_shard_tracker, view_runtime_adapter) = if let Some(split_store) = + &split_store + { + let view_epoch_manager = EpochManager::new_arc_handle_from_epoch_config_store( + split_store.clone(), &genesis.config, epoch_config_store.clone(), ); - let shard_tracker = + let view_shard_tracker = ShardTracker::new(TrackedConfig::from_config(&client_config), epoch_manager.clone()); - - let contract_cache = - FilesystemContractRuntimeCache::test().expect("filesystem contract cache"); - let runtime_adapter = NightshadeRuntime::test_with_trie_config( + let view_runtime_adapter = NightshadeRuntime::test_with_trie_config( &homedir, - store.clone(), + split_store.clone(), ContractRuntimeCache::handle(&contract_cache), &genesis.config, - epoch_manager.clone(), - self.runtime_config_store.clone(), + view_epoch_manager.clone(), + runtime_config_store.clone(), TrieConfig::from_store_config(&store_config), StateSnapshotType::EveryEpoch, client_config.gc.gc_num_epochs_to_keep, ); - - let state_snapshot = StateSnapshotActor::new( - runtime_adapter.get_flat_storage_manager(), - network_adapter.as_multi_sender(), - runtime_adapter.get_tries(), - ); - - let delete_snapshot_callback = - get_delete_snapshot_callback(state_snapshot_adapter.as_multi_sender()); - let make_snapshot_callback = get_make_snapshot_callback( - state_snapshot_adapter.as_multi_sender(), - runtime_adapter.get_flat_storage_manager(), - ); - let snapshot_callbacks = - SnapshotCallbacks { make_snapshot_callback, delete_snapshot_callback }; - - let validator_signer = MutableConfigValue::new( - Some(Arc::new(create_test_signer(account_id))), - "validator_signer", - ); - - let shards_manager_adapter = LateBoundSender::new(); - let client_to_shards_manager_sender = Arc::new(ClientToShardsManagerSender { - sender: shards_manager_adapter.clone(), - chunks_storage: self.chunks_storage.clone(), - }); - - // Generate a PeerId. It doesn't matter what this is. We're just making it based on - // the account ID, so that it is stable across multiple runs in the same test. - let peer_id = PeerId::new(create_test_signer(account_id).public_key()); - - let client = Client::new( - self.test_loop.clock(), - client_config.clone(), - chain_genesis.clone(), - epoch_manager.clone(), - shard_tracker.clone(), - runtime_adapter.clone(), - network_adapter.as_multi_sender(), - client_to_shards_manager_sender.as_sender(), - validator_signer.clone(), - true, - [0; 32], - Some(snapshot_callbacks), - Arc::new( - self.test_loop - .async_computation_spawner(account_id, |_| Duration::milliseconds(80)), - ), - partial_witness_adapter.as_multi_sender(), - resharding_sender.as_multi_sender(), - Arc::new(self.test_loop.future_spawner(account_id)), - client_adapter.as_multi_sender(), - client_adapter.as_multi_sender(), - self.upgrade_schedule.clone(), - ) - .unwrap(); - - // If this is an archival node and split storage is initialized, then create view-specific - // versions of EpochManager, ShardTracker and RuntimeAdapter and use them to initialize the - // ViewClientActorInner. Otherwise, we use the regular versions created above. 
- let (view_epoch_manager, view_shard_tracker, view_runtime_adapter) = - if let Some(split_store) = &split_store { - let view_epoch_manager = EpochManager::new_arc_handle_from_epoch_config_store( - split_store.clone(), - &genesis.config, - epoch_config_store.clone(), - ); - let view_shard_tracker = ShardTracker::new( - TrackedConfig::from_config(&client_config), - epoch_manager.clone(), - ); - let view_runtime_adapter = NightshadeRuntime::test_with_trie_config( - &homedir, - split_store.clone(), - ContractRuntimeCache::handle(&contract_cache), - &genesis.config, - view_epoch_manager.clone(), - self.runtime_config_store.clone(), - TrieConfig::from_store_config(&store_config), - StateSnapshotType::EveryEpoch, - client_config.gc.gc_num_epochs_to_keep, - ); - (view_epoch_manager, view_shard_tracker, view_runtime_adapter) - } else { - (epoch_manager.clone(), shard_tracker.clone(), runtime_adapter.clone()) - }; - let view_client_actor = ViewClientActorInner::new( - self.test_loop.clock(), - validator_signer.clone(), - chain_genesis.clone(), - view_epoch_manager.clone(), - view_shard_tracker, - view_runtime_adapter, - network_adapter.as_multi_sender(), - client_config.clone(), - near_client::adversarial::Controls::default(), - ) - .unwrap(); - - let shards_manager = ShardsManagerActor::new( - self.test_loop.clock(), - validator_signer.clone(), - epoch_manager.clone(), - view_epoch_manager, - shard_tracker.clone(), - network_adapter.as_sender(), - client_adapter.as_sender(), - store.chunk_store(), - client.chain.head().unwrap(), - client.chain.header_head().unwrap(), - Duration::milliseconds(100), - ); - - let client_actor = ClientActorInner::new( - self.test_loop.clock(), - client, - peer_id.clone(), - network_adapter.as_multi_sender(), - noop().into_sender(), - None, - Default::default(), - None, - sync_jobs_adapter.as_multi_sender(), - ) - .unwrap(); - - let partial_witness_actor = PartialWitnessActor::new( - self.test_loop.clock(), - network_adapter.as_multi_sender(), - client_adapter.as_multi_sender(), - validator_signer.clone(), - epoch_manager.clone(), - runtime_adapter.clone(), - Arc::new( - self.test_loop - .async_computation_spawner(account_id, |_| Duration::milliseconds(80)), - ), - Arc::new( - self.test_loop - .async_computation_spawner(account_id, |_| Duration::milliseconds(80)), - ), - ); - - let mut peer_manager_actor = TestLoopPeerManagerActor::new( - self.test_loop.clock(), - &self.clients[idx], - network_shared_state, - Arc::new(self.test_loop.future_spawner(account_id)), - ); - - let gc_actor = GCActor::new( - runtime_adapter.store().clone(), - &chain_genesis, - runtime_adapter.clone(), + (view_epoch_manager, view_shard_tracker, view_runtime_adapter) + } else { + (epoch_manager.clone(), shard_tracker.clone(), runtime_adapter.clone()) + }; + let view_client_actor = ViewClientActorInner::new( + test_loop.clock(), + validator_signer.clone(), + chain_genesis.clone(), + view_epoch_manager.clone(), + view_shard_tracker, + view_runtime_adapter, + network_adapter.as_multi_sender(), + client_config.clone(), + near_client::adversarial::Controls::default(), + ) + .unwrap(); + + let shards_manager = ShardsManagerActor::new( + test_loop.clock(), + validator_signer.clone(), + epoch_manager.clone(), + view_epoch_manager, + shard_tracker.clone(), + network_adapter.as_sender(), + client_adapter.as_sender(), + store.chunk_store(), + client.chain.head().unwrap(), + client.chain.header_head().unwrap(), + Duration::milliseconds(100), + ); + + let client_actor = ClientActorInner::new( + 
test_loop.clock(), + client, + peer_id.clone(), + network_adapter.as_multi_sender(), + noop().into_sender(), + None, + Default::default(), + None, + sync_jobs_adapter.as_multi_sender(), + ) + .unwrap(); + + let partial_witness_actor = PartialWitnessActor::new( + test_loop.clock(), + network_adapter.as_multi_sender(), + client_adapter.as_multi_sender(), + validator_signer.clone(), + epoch_manager.clone(), + runtime_adapter.clone(), + Arc::new(test_loop.async_computation_spawner(identifier, |_| Duration::milliseconds(80))), + Arc::new(test_loop.async_computation_spawner(identifier, |_| Duration::milliseconds(80))), + ); + + let mut peer_manager_actor = TestLoopPeerManagerActor::new( + test_loop.clock(), + &account_id, + network_shared_state, + Arc::new(test_loop.future_spawner(identifier)), + ); + + let gc_actor = GCActor::new( + runtime_adapter.store().clone(), + &chain_genesis, + runtime_adapter.clone(), + epoch_manager.clone(), + shard_tracker.clone(), + validator_signer.clone(), + client_config.gc.clone(), + client_config.archive, + ); + // We don't send messages to `GCActor` so adapter is not needed. + test_loop.data.register_actor(identifier, gc_actor, None); + + let resharding_actor = ReshardingActor::new(runtime_adapter.store().clone(), &chain_genesis); + + let state_sync_dumper = StateSyncDumper { + clock: test_loop.clock(), + client_config, + chain_genesis, + epoch_manager: epoch_manager.clone(), + shard_tracker, + runtime: runtime_adapter, + validator: validator_signer, + future_spawner: Arc::new(test_loop.future_spawner(identifier)), + handle: None, + }; + let state_sync_dumper_handle = test_loop.data.register_data(state_sync_dumper); + + let client_sender = + test_loop.data.register_actor(identifier, client_actor, Some(client_adapter)); + let view_client_sender = test_loop.data.register_actor(identifier, view_client_actor, None); + let shards_manager_sender = + test_loop.data.register_actor(identifier, shards_manager, Some(shards_manager_adapter)); + let partial_witness_sender = test_loop.data.register_actor( + identifier, + partial_witness_actor, + Some(partial_witness_adapter), + ); + test_loop.data.register_actor(identifier, sync_jobs_actor, Some(sync_jobs_adapter)); + test_loop.data.register_actor(identifier, state_snapshot, Some(state_snapshot_adapter)); + test_loop.data.register_actor(identifier, resharding_actor, Some(resharding_sender)); + + // State sync dumper is not an Actor, handle starting separately. + let state_sync_dumper_handle_clone = state_sync_dumper_handle.clone(); + test_loop.send_adhoc_event("start_state_sync_dumper".to_owned(), move |test_loop_data| { + test_loop_data.get_mut(&state_sync_dumper_handle_clone).start().unwrap(); + }); + + for condition in drop_condition_kinds { + register_drop_condition( + &mut peer_manager_actor, + chunks_storage.clone(), epoch_manager.clone(), - shard_tracker.clone(), - validator_signer.clone(), - client_config.gc.clone(), - client_config.archive, - ); - // We don't send messages to `GCActor` so adapter is not needed. 
- self.test_loop.data.register_actor(account_id, gc_actor, None); - - let resharding_actor = - ReshardingActor::new(runtime_adapter.store().clone(), &chain_genesis); - - let state_sync_dumper = StateSyncDumper { - clock: self.test_loop.clock(), - client_config, - chain_genesis, - epoch_manager: epoch_manager.clone(), - shard_tracker, - runtime: runtime_adapter, - validator: validator_signer, - future_spawner: Arc::new(self.test_loop.future_spawner(account_id)), - handle: None, - }; - let state_sync_dumper_handle = self.test_loop.data.register_data(state_sync_dumper); - - let client_sender = - self.test_loop.data.register_actor(account_id, client_actor, Some(client_adapter)); - let view_client_sender = - self.test_loop.data.register_actor(account_id, view_client_actor, None); - let shards_manager_sender = self.test_loop.data.register_actor( - account_id, - shards_manager, - Some(shards_manager_adapter), - ); - let partial_witness_sender = self.test_loop.data.register_actor( - account_id, - partial_witness_actor, - Some(partial_witness_adapter), - ); - self.test_loop.data.register_actor(account_id, sync_jobs_actor, Some(sync_jobs_adapter)); - self.test_loop.data.register_actor( - account_id, - state_snapshot, - Some(state_snapshot_adapter), - ); - self.test_loop.data.register_actor(account_id, resharding_actor, Some(resharding_sender)); - - // State sync dumper is not an Actor, handle starting separately. - let state_sync_dumper_handle_clone = state_sync_dumper_handle.clone(); - self.test_loop.send_adhoc_event( - "start_state_sync_dumper".to_owned(), - move |test_loop_data| { - test_loop_data.get_mut(&state_sync_dumper_handle_clone).start().unwrap(); - }, - ); - - for condition in &self.drop_condition_kinds { - register_drop_condition( - &mut peer_manager_actor, - self.chunks_storage.clone(), - epoch_manager.clone(), - condition, - ); - } - - let peer_manager_sender = self.test_loop.data.register_actor( - account_id, - peer_manager_actor, - Some(network_adapter), + condition, ); + } - let data = TestData { - account_id: self.clients[idx].clone(), - peer_id, - client_sender, - view_client_sender, - shards_manager_sender, - partial_witness_sender, - peer_manager_sender, - state_sync_dumper_handle, - }; + let peer_manager_sender = + test_loop.data.register_actor(identifier, peer_manager_actor, Some(network_adapter)); + + let data = TestData { + account_id, + peer_id, + client_sender, + view_client_sender, + shards_manager_sender, + partial_witness_sender, + peer_manager_sender, + state_sync_dumper_handle, + }; - // Add the client to the network shared state before returning data - network_shared_state.add_client(&data); - data - } + // Add the client to the network shared state before returning data + network_shared_state.add_client(&data); + data } diff --git a/test-loop-tests/src/env.rs b/test-loop-tests/src/setup/env.rs similarity index 65% rename from test-loop-tests/src/env.rs rename to test-loop-tests/src/setup/env.rs index 6c45c0ad4fa..4ff29faf8cb 100644 --- a/test-loop-tests/src/env.rs +++ b/test-loop-tests/src/setup/env.rs @@ -1,33 +1,23 @@ -use near_async::messaging::{CanSend, IntoMultiSender, IntoSender, LateBoundSender, Sender}; +use near_async::messaging::{CanSend, LateBoundSender}; use near_async::test_loop::TestLoopV2; -use near_async::test_loop::data::{TestLoopData, TestLoopDataHandle}; +use near_async::test_loop::data::TestLoopData; use near_async::test_loop::sender::TestLoopSender; use near_async::time::Duration; use near_chunks::adapter::ShardsManagerRequestFromClient; use 
near_chunks::shards_manager_actor::ShardsManagerActor; -use near_client::client_actor::ClientActorInner; -use near_client::{PartialWitnessActor, ViewClientActorInner}; -use near_jsonrpc::ViewClientSenderForRpc; -use near_network::shards_manager::ShardsManagerRequestFromNetwork; -use near_network::state_witness::PartialWitnessSenderForNetwork; -use near_network::test_loop::{ - ClientSenderForTestLoopNetwork, TestLoopPeerManagerActor, ViewClientSenderForTestLoopNetwork, -}; -use near_primitives::network::PeerId; use near_primitives::sharding::{ChunkHash, ShardChunkHeader}; -use near_primitives::types::AccountId; use near_primitives_core::types::BlockHeight; -use nearcore::state_sync::StateSyncDumper; use std::collections::HashMap; +use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; use tempfile::TempDir; -const NETWORK_DELAY: Duration = Duration::milliseconds(10); +use super::state::{SharedState, TestData}; pub struct TestLoopEnv { pub test_loop: TestLoopV2, - pub datas: Vec, - pub tempdir: TempDir, + pub node_datas: Vec, + pub shared_state: SharedState, } impl TestLoopEnv { @@ -37,7 +27,11 @@ impl TestLoopEnv { /// Needed because for smaller heights blocks may not get all chunks and/or /// approvals. pub fn warmup(self) -> Self { - let Self { mut test_loop, datas, tempdir } = self; + let Self { mut test_loop, node_datas: datas, shared_state } = self; + + // This may happen if you're calling warmup twice or have set skip_warmup in builder. + assert!(shared_state.warmup_pending.load(Ordering::Relaxed), "warmup already done"); + shared_state.warmup_pending.store(false, Ordering::Relaxed); let client_handle = datas[0].client_sender.actor_handle(); let genesis_height = test_loop.data.get(&client_handle).client.chain.genesis().height(); @@ -60,8 +54,7 @@ impl TestLoopEnv { test_loop.send_adhoc_event("assertions".to_owned(), Box::new(event)); } test_loop.run_instant(); - - Self { test_loop, datas, tempdir } + Self { test_loop, node_datas: datas, shared_state } } pub fn kill_node(&mut self, identifier: &str) { @@ -76,12 +69,12 @@ impl TestLoopEnv { /// Returns the test loop data dir, if the caller wishes to reuse it for another test loop. pub fn shutdown_and_drain_remaining_events(mut self, timeout: Duration) -> TempDir { // State sync dumper is not an Actor, handle stopping separately. 
- for node_data in self.datas { + for node_data in self.node_datas { self.test_loop.data.get_mut(&node_data.state_sync_dumper_handle).stop(); } self.test_loop.shutdown_and_drain_remaining_events(timeout); - self.tempdir + self.shared_state.tempdir } } @@ -142,57 +135,3 @@ impl CanSend for ClientToShardsManagerSender { self.sender.send(message); } } - -#[derive(Clone)] -pub struct TestData { - pub account_id: AccountId, - pub peer_id: PeerId, - pub client_sender: TestLoopSender, - pub view_client_sender: TestLoopSender, - pub shards_manager_sender: TestLoopSender, - pub partial_witness_sender: TestLoopSender, - pub peer_manager_sender: TestLoopSender, - pub state_sync_dumper_handle: TestLoopDataHandle, -} - -impl From<&TestData> for AccountId { - fn from(data: &TestData) -> AccountId { - data.account_id.clone() - } -} - -impl From<&TestData> for PeerId { - fn from(data: &TestData) -> PeerId { - data.peer_id.clone() - } -} - -impl From<&TestData> for ClientSenderForTestLoopNetwork { - fn from(data: &TestData) -> ClientSenderForTestLoopNetwork { - data.client_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() - } -} - -impl From<&TestData> for ViewClientSenderForRpc { - fn from(data: &TestData) -> ViewClientSenderForRpc { - data.view_client_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() - } -} - -impl From<&TestData> for ViewClientSenderForTestLoopNetwork { - fn from(data: &TestData) -> ViewClientSenderForTestLoopNetwork { - data.view_client_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() - } -} - -impl From<&TestData> for PartialWitnessSenderForNetwork { - fn from(data: &TestData) -> PartialWitnessSenderForNetwork { - data.partial_witness_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() - } -} - -impl From<&TestData> for Sender { - fn from(data: &TestData) -> Sender { - data.shards_manager_sender.clone().with_delay(NETWORK_DELAY).into_sender() - } -} diff --git a/test-loop-tests/src/setup/mod.rs b/test-loop-tests/src/setup/mod.rs new file mode 100644 index 00000000000..3f31190edc9 --- /dev/null +++ b/test-loop-tests/src/setup/mod.rs @@ -0,0 +1,3 @@ +pub mod builder; +pub mod env; +pub mod state; diff --git a/test-loop-tests/src/setup/state.rs b/test-loop-tests/src/setup/state.rs new file mode 100644 index 00000000000..35ac631d0d6 --- /dev/null +++ b/test-loop-tests/src/setup/state.rs @@ -0,0 +1,118 @@ +use std::sync::atomic::AtomicBool; +use std::sync::{Arc, Mutex}; + +use near_async::messaging::{IntoMultiSender, IntoSender, Sender}; +use near_async::test_loop::data::TestLoopDataHandle; +use near_async::test_loop::sender::TestLoopSender; +use near_async::time::Duration; +use near_chain_configs::{ClientConfig, Genesis}; +use near_chunks::shards_manager_actor::ShardsManagerActor; +use near_client::client_actor::ClientActorInner; +use near_client::{PartialWitnessActor, ViewClientActorInner}; +use near_jsonrpc::ViewClientSenderForRpc; +use near_network::shards_manager::ShardsManagerRequestFromNetwork; +use near_network::state_witness::PartialWitnessSenderForNetwork; +use near_network::test_loop::{ + ClientSenderForTestLoopNetwork, TestLoopNetworkSharedState, TestLoopPeerManagerActor, + ViewClientSenderForTestLoopNetwork, +}; +use near_parameters::RuntimeConfigStore; +use near_primitives::epoch_manager::EpochConfigStore; +use near_primitives::network::PeerId; +use near_primitives::types::AccountId; +use near_primitives::upgrade_schedule::ProtocolUpgradeVotingSchedule; +use near_store::Store; +use nearcore::state_sync::StateSyncDumper; +use 
tempfile::TempDir; + +use super::builder::DropConditionKind; +use super::env::TestLoopChunksStorage; + +const NETWORK_DELAY: Duration = Duration::milliseconds(10); + +/// This is the state associated with the test loop environment. +/// This state is shared across all nodes and none of it belongs to a specific node. +pub struct SharedState { + pub genesis: Genesis, + /// Directory of the current test. This is automatically deleted once tempdir goes out of scope. + pub tempdir: TempDir, + pub epoch_config_store: EpochConfigStore, + pub runtime_config_store: Option<RuntimeConfigStore>, + /// Shared state across all the network actors. It handles the mapping between AccountId, + /// PeerId, and the route back CryptoHash, so that individual network actors can do routing. + pub network_shared_state: TestLoopNetworkSharedState, + pub upgrade_schedule: ProtocolUpgradeVotingSchedule, + /// Stores all chunks ever observed on chain. Used by drop conditions to simulate network drops. + pub chunks_storage: Arc<Mutex<TestLoopChunksStorage>>, + /// List of drop conditions that apply to all nodes in the network. + pub drop_condition_kinds: Vec<DropConditionKind>, + pub load_memtries_for_tracked_shards: bool, + /// Flag to indicate if warmup is pending. This is used to ensure that warmup is only done once. + pub warmup_pending: Arc<AtomicBool>, +} + +/// This is the state associated with each node in the test loop environment before being built. +/// The `setup_client` function will be called for each node to build the node and return a `TestData`. +pub struct NodeState { + pub account_id: AccountId, + pub client_config: ClientConfig, + pub store: Store, + pub split_store: Option<Store>, +} + +/// This is the state associated with each node in the test loop environment after being built. +/// This state is specific to each node and is not shared across nodes. +/// We can access each of the individual actors and senders from this state.
+#[derive(Clone)] +pub struct TestData { + pub account_id: AccountId, + pub peer_id: PeerId, + pub client_sender: TestLoopSender<ClientActorInner>, + pub view_client_sender: TestLoopSender<ViewClientActorInner>, + pub shards_manager_sender: TestLoopSender<ShardsManagerActor>, + pub partial_witness_sender: TestLoopSender<PartialWitnessActor>, + pub peer_manager_sender: TestLoopSender<TestLoopPeerManagerActor>, + pub state_sync_dumper_handle: TestLoopDataHandle<StateSyncDumper>, +} + +impl From<&TestData> for AccountId { + fn from(data: &TestData) -> AccountId { + data.account_id.clone() + } +} + +impl From<&TestData> for PeerId { + fn from(data: &TestData) -> PeerId { + data.peer_id.clone() + } +} + +impl From<&TestData> for ClientSenderForTestLoopNetwork { + fn from(data: &TestData) -> ClientSenderForTestLoopNetwork { + data.client_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() + } +} + +impl From<&TestData> for ViewClientSenderForRpc { + fn from(data: &TestData) -> ViewClientSenderForRpc { + data.view_client_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() + } +} + +impl From<&TestData> for ViewClientSenderForTestLoopNetwork { + fn from(data: &TestData) -> ViewClientSenderForTestLoopNetwork { + data.view_client_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() + } +} + +impl From<&TestData> for PartialWitnessSenderForNetwork { + fn from(data: &TestData) -> PartialWitnessSenderForNetwork { + data.partial_witness_sender.clone().with_delay(NETWORK_DELAY).into_multi_sender() + } +} + +impl From<&TestData> for Sender<ShardsManagerRequestFromNetwork> { + fn from(data: &TestData) -> Sender<ShardsManagerRequestFromNetwork> { + data.shards_manager_sender.clone().with_delay(NETWORK_DELAY).into_sender() + } +} diff --git a/test-loop-tests/src/tests/bandwidth_scheduler.rs b/test-loop-tests/src/tests/bandwidth_scheduler.rs index 4b97126d5e3..1b7a6923524 100644 --- a/test-loop-tests/src/tests/bandwidth_scheduler.rs +++ b/test-loop-tests/src/tests/bandwidth_scheduler.rs @@ -56,8 +56,9 @@ use testlib::bandwidth_scheduler::{ TestScenario, TestScenarioBuilder, TestSummary, }; -use crate::builder::TestLoopBuilder; -use crate::env::{TestData, TestLoopEnv}; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; use crate::utils::transactions::{TransactionRunner, run_txs_parallel}; use crate::utils::{ONE_NEAR, TGAS}; @@ -162,12 +163,13 @@ fn run_bandwidth_scheduler_test(scenario: TestScenario, tx_concurrency: usize) - .build(); let epoch_config_store = TestEpochConfigBuilder::build_store_from_genesis(&genesis); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = TestLoopBuilder::new() + let TestLoopEnv { mut test_loop, node_datas, shared_state } = TestLoopBuilder::new() .genesis(genesis) .epoch_config_store(epoch_config_store) .clients(vec![node_account]) .drop_chunks_by_height(missing_chunks_map) - .build(); + .build() + .warmup(); // Initialize the workload generator.
let mut workload_generator = WorkloadGenerator::init( @@ -219,7 +221,7 @@ fn run_bandwidth_scheduler_test(scenario: TestScenario, tx_concurrency: usize) - let bandwidth_stats = analyze_workload_blocks(first_height.unwrap(), last_height.unwrap(), client); - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); let summary = bandwidth_stats.summarize(&active_links); diff --git a/test-loop-tests/src/tests/chunk_validator_kickout.rs b/test-loop-tests/src/tests/chunk_validator_kickout.rs index 6850adffaeb..c75541b1880 100644 --- a/test-loop-tests/src/tests/chunk_validator_kickout.rs +++ b/test-loop-tests/src/tests/chunk_validator_kickout.rs @@ -1,5 +1,5 @@ -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::validators::get_epoch_all_validators; use itertools::Itertools; @@ -87,8 +87,12 @@ fn run_test_chunk_validator_kickout(accounts: Vec, test_case: TestCas .target_validator_mandates_per_shard(num_validator_mandates_per_shard) .build_store_for_genesis_protocol_version(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); // Run chain until our targeted chunk validator is (not) kicked out. let client_handle = node_datas[0].client_sender.actor_handle(); @@ -136,7 +140,7 @@ fn run_test_chunk_validator_kickout(accounts: Vec, test_case: TestCas Duration::seconds((5 * epoch_length) as i64), ); - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/congestion_control.rs b/test-loop-tests/src/tests/congestion_control.rs index 705834a3769..86bf138d8dc 100644 --- a/test-loop-tests/src/tests/congestion_control.rs +++ b/test-loop-tests/src/tests/congestion_control.rs @@ -10,8 +10,9 @@ use near_o11y::testonly::init_test_logger; use near_primitives::shard_layout::ShardLayout; use near_primitives::types::{AccountId, BlockHeight}; -use crate::builder::TestLoopBuilder; -use crate::env::{TestData, TestLoopEnv}; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; use crate::utils::transactions::{call_contract, check_txs, deploy_contract, make_accounts}; use crate::utils::{ONE_NEAR, TGAS}; @@ -35,7 +36,7 @@ fn slow_test_congestion_control_simple() { accounts.push(contract_id.clone()); let (env, rpc_id) = setup(&accounts); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = env; + let TestLoopEnv { mut test_loop, node_datas, shared_state } = env; // Test @@ -54,7 +55,7 @@ fn slow_test_congestion_control_simple() { // Give the test a chance to finish off remaining events in the event loop, which can // be important for properly shutting down the nodes. 
- TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } @@ -92,7 +93,8 @@ fn setup(accounts: &Vec) -> (TestLoopEnv, AccountId) { .genesis(genesis) .epoch_config_store(epoch_config_store) .clients(clients) - .build(); + .build() + .warmup(); (env, rpc_id.clone()) } diff --git a/test-loop-tests/src/tests/congestion_control_genesis_bootstrap.rs b/test-loop-tests/src/tests/congestion_control_genesis_bootstrap.rs index 88d31f2df64..7f913ae127e 100644 --- a/test-loop-tests/src/tests/congestion_control_genesis_bootstrap.rs +++ b/test-loop-tests/src/tests/congestion_control_genesis_bootstrap.rs @@ -7,8 +7,8 @@ use near_primitives::shard_layout::ShardLayout; use near_primitives::types::AccountId; use near_primitives::version::{PROTOCOL_VERSION, ProtocolFeature}; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; const NUM_SHARDS: usize = 4; @@ -43,11 +43,12 @@ fn test_congestion_control_genesis_bootstrap() { .minimum_validators_per_shard(1) .build_store_for_genesis_protocol_version(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder .genesis(genesis) .epoch_config_store(epoch_config_store) .clients(clients.clone()) - .build(); + .build() + .warmup(); test_loop.run_for(Duration::seconds(5)); @@ -57,7 +58,7 @@ fn test_congestion_control_genesis_bootstrap() { ); } - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/contract_distribution_cross_shard.rs b/test-loop-tests/src/tests/contract_distribution_cross_shard.rs index 33b72fdf1b2..1d68044d801 100644 --- a/test-loop-tests/src/tests/contract_distribution_cross_shard.rs +++ b/test-loop-tests/src/tests/contract_distribution_cross_shard.rs @@ -6,8 +6,8 @@ use near_primitives::shard_layout::ShardLayout; use near_primitives::types::AccountId; use near_vm_runner::ContractCode; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::contract_distribution::{ assert_all_chunk_endorsements_received, clear_compiled_contract_caches, run_until_caches_contain_contract, @@ -100,8 +100,12 @@ fn setup(accounts: &Vec) -> (TestLoopEnv, AccountId) { .minimum_validators_per_shard(2) .build_store_for_genesis_protocol_version(); - let env = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let env = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); (env, rpc_id) } @@ -122,7 +126,7 @@ fn deploy_contracts( ContractCode::new(near_test_contracts::sized_contract((i + 1) * 100).to_vec(), None); let tx = deploy_contract( &mut env.test_loop, - &env.datas, + &env.node_datas, rpc_id, contract_id, contract.code().to_vec(), @@ -133,7 +137,7 @@ fn deploy_contracts( contracts.push(contract); } env.test_loop.run_for(Duration::seconds(2)); - check_txs(&env.test_loop.data, &env.datas, rpc_id, &txs); + check_txs(&env.test_loop.data, &env.node_datas, rpc_id, &txs); contracts } @@ -152,7 +156,7 @@ fn call_contracts( tracing::info!(target: "test", ?rpc_id, ?sender_id, ?contract_id, 
"Calling contract."); let tx = call_contract( &mut env.test_loop, - &env.datas, + &env.node_datas, rpc_id, sender_id, contract_id, @@ -165,5 +169,5 @@ fn call_contracts( } } env.test_loop.run_for(Duration::seconds(2)); - check_txs(&env.test_loop.data, &env.datas, &rpc_id, &txs); + check_txs(&env.test_loop.data, &env.node_datas, &rpc_id, &txs); } diff --git a/test-loop-tests/src/tests/contract_distribution_simple.rs b/test-loop-tests/src/tests/contract_distribution_simple.rs index e062381e04f..3921f0104b5 100644 --- a/test-loop-tests/src/tests/contract_distribution_simple.rs +++ b/test-loop-tests/src/tests/contract_distribution_simple.rs @@ -6,8 +6,8 @@ use near_primitives::shard_layout::ShardLayout; use near_primitives::types::AccountId; use near_vm_runner::ContractCode; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::contract_distribution::{ assert_all_chunk_endorsements_received, clear_compiled_contract_caches, run_until_caches_contain_contract, @@ -238,7 +238,10 @@ fn setup(accounts: &Vec) -> TestLoopEnv { .shuffle_shard_assignment_for_chunk_producers(true) .build_store_for_genesis_protocol_version(); - let env = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); - env + builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup() } diff --git a/test-loop-tests/src/tests/create_delete_account.rs b/test-loop-tests/src/tests/create_delete_account.rs index a60459df58d..adfd1b21676 100644 --- a/test-loop-tests/src/tests/create_delete_account.rs +++ b/test-loop-tests/src/tests/create_delete_account.rs @@ -6,8 +6,8 @@ use near_client::client_actor::ClientActorInner; use near_o11y::testonly::init_test_logger; use near_primitives::types::AccountId; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::{ call_contract, check_txs, do_create_account, do_delete_account, do_deploy_contract, @@ -17,10 +17,10 @@ use crate::utils::transactions::{ /// Write block height to contract storage. fn do_call_contract(env: &mut TestLoopEnv, rpc_id: &AccountId, contract_id: &AccountId) { tracing::info!(target: "test", "Calling contract."); - let nonce = get_next_nonce(&env.test_loop.data, &env.datas, contract_id); + let nonce = get_next_nonce(&env.test_loop.data, &env.node_datas, contract_id); let tx = call_contract( &mut env.test_loop, - &env.datas, + &env.node_datas, rpc_id, contract_id, contract_id, @@ -29,7 +29,7 @@ fn do_call_contract(env: &mut TestLoopEnv, rpc_id: &AccountId, contract_id: &Acc nonce, ); env.test_loop.run_for(Duration::seconds(5)); - check_txs(&env.test_loop.data, &env.datas, rpc_id, &[tx]); + check_txs(&env.test_loop.data, &env.node_datas, rpc_id, &[tx]); } /// Tracks latest block heights and checks that all chunks are produced. @@ -79,8 +79,12 @@ fn test_create_delete_account() { .add_user_accounts_simple(&accounts, 1_000_000 * ONE_NEAR) .build(); let epoch_config_store = TestEpochConfigBuilder::build_store_from_genesis(&genesis); - let mut env = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let mut env = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); // Launch a task to check that all chunks are produced. 
// Needed to make sure that chunks are valid. Currently, if chunk @@ -88,7 +92,7 @@ fn test_create_delete_account() { // last chunk containing problematic tx or receipt is still executed on the // rpc nodes, and result will be considered final based on block // endorsements. - let mut client_sender = env.datas[0].client_sender.clone(); + let mut client_sender = env.node_datas[0].client_sender.clone(); client_sender.run_later("check_chunks", Duration::seconds(0), move |actor, runner| { check_chunks(actor, runner, std::cell::Cell::new(0)); }); diff --git a/test-loop-tests/src/tests/epoch_sync.rs b/test-loop-tests/src/tests/epoch_sync.rs index 07d969e55b4..24f24e51a8d 100644 --- a/test-loop-tests/src/tests/epoch_sync.rs +++ b/test-loop-tests/src/tests/epoch_sync.rs @@ -10,8 +10,8 @@ use near_primitives::types::{AccountId, BlockHeightDelta}; use near_store::{DBCol, Store}; use tempfile::TempDir; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::{BalanceMismatchError, execute_money_transfers}; use near_async::messaging::CanSend; @@ -62,11 +62,12 @@ fn setup_initial_blockchain( .shuffle_shard_assignment_for_chunk_producers(true) .build_store_for_genesis_protocol_version(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder .genesis(genesis.clone()) .epoch_config_store(epoch_config_store.clone()) .clients(clients) - .build(); + .build() + .warmup(); let first_epoch_tracked_shards = { let clients = node_datas @@ -120,7 +121,7 @@ fn setup_initial_blockchain( // Properly shut down the previous TestLoopEnv. // We must preserve the tempdir, since state dumps are stored there, // and are necessary for state sync to work on the new node. - let tempdir = TestLoopEnv { test_loop, datas: node_datas, tempdir } + let tempdir = TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(5)); TestNetworkSetup { tempdir, genesis, epoch_config_store, accounts, stores } @@ -133,7 +134,7 @@ fn bootstrap_node_via_epoch_sync(setup: TestNetworkSetup, source_node: usize) -> let clients = accounts.iter().take(num_existing_clients + 1).cloned().collect_vec(); stores.push(create_test_store()); // new node starts empty. 
- let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = TestLoopBuilder::new() + let TestLoopEnv { mut test_loop, node_datas, shared_state } = TestLoopBuilder::new() .genesis(genesis.clone()) .epoch_config_store(epoch_config_store.clone()) .clients(clients) @@ -265,7 +266,7 @@ fn bootstrap_node_via_epoch_sync(setup: TestNetworkSetup, source_node: usize) -> ); } - let tempdir = TestLoopEnv { test_loop, datas: node_datas, tempdir } + let tempdir = TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(5)); TestNetworkSetup { tempdir, genesis, epoch_config_store, accounts, stores } diff --git a/test-loop-tests/src/tests/fix_chunk_producer_stake_threshold.rs b/test-loop-tests/src/tests/fix_chunk_producer_stake_threshold.rs index 6e5e067633d..d1e5a10103a 100644 --- a/test-loop-tests/src/tests/fix_chunk_producer_stake_threshold.rs +++ b/test-loop-tests/src/tests/fix_chunk_producer_stake_threshold.rs @@ -1,5 +1,5 @@ -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::validators::get_epoch_all_validators; use near_async::test_loop::data::TestLoopData; @@ -58,13 +58,14 @@ fn slow_test_fix_cp_stake_threshold() { .add_user_accounts_simple(&accounts, 1_000_000 * ONE_NEAR) .build(); let epoch_config_store = TestEpochConfigBuilder::build_store_from_genesis(&genesis); - let TestLoopEnv { mut test_loop, datas: node_data, tempdir } = TestLoopBuilder::new() + let TestLoopEnv { mut test_loop, node_datas, shared_state } = TestLoopBuilder::new() .genesis(genesis) .epoch_config_store(epoch_config_store.clone()) .clients(clients) - .build(); + .build() + .warmup(); - let sender = node_data[0].client_sender.clone(); + let sender = node_datas[0].client_sender.clone(); let handle = sender.actor_handle(); let client = &test_loop.data.get(&handle).client; @@ -134,6 +135,6 @@ fn slow_test_fix_cp_stake_threshold() { Duration::seconds(3 * epoch_length as i64), ); - TestLoopEnv { test_loop, datas: node_data, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/fix_min_stake_ratio.rs b/test-loop-tests/src/tests/fix_min_stake_ratio.rs index 57bb1ba35cb..ba2ce8fd6b2 100644 --- a/test-loop-tests/src/tests/fix_min_stake_ratio.rs +++ b/test-loop-tests/src/tests/fix_min_stake_ratio.rs @@ -1,5 +1,5 @@ -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::validators::get_epoch_all_validators; use itertools::Itertools; @@ -85,8 +85,12 @@ fn slow_test_fix_min_stake_ratio() { .add_user_accounts_simple(&accounts, initial_balance) .build(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); let client_sender = node_datas[0].client_sender.clone(); let client_handle = node_datas[0].client_sender.actor_handle(); @@ -153,6 +157,6 @@ fn slow_test_fix_min_stake_ratio() { Duration::seconds((5 * epoch_length) as i64), ); - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, 
node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/fix_stake_threshold.rs b/test-loop-tests/src/tests/fix_stake_threshold.rs index 0489612a95b..28cc96f6ef4 100644 --- a/test-loop-tests/src/tests/fix_stake_threshold.rs +++ b/test-loop-tests/src/tests/fix_stake_threshold.rs @@ -1,7 +1,7 @@ use std::cell::Cell; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::validators::get_epoch_all_validators; use itertools::Itertools; @@ -66,14 +66,15 @@ fn slow_test_fix_validator_stake_threshold_protocol_upgrade() { .add_user_accounts_simple(&accounts, initial_balance) .build(); - let TestLoopEnv { mut test_loop, datas: node_data, tempdir } = test_loop_builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = test_loop_builder .genesis(genesis) .epoch_config_store(epoch_config_store.clone()) .protocol_upgrade_schedule(protocol_upgrade_schedule) .clients(clients) - .build(); + .build() + .warmup(); - let sender = node_data[0].client_sender.clone(); + let sender = node_datas[0].client_sender.clone(); let handle = sender.actor_handle(); let client = &test_loop.data.get(&handle).client; @@ -139,6 +140,6 @@ fn slow_test_fix_validator_stake_threshold_protocol_upgrade() { Duration::seconds(4 * epoch_length as i64), ); - TestLoopEnv { test_loop, datas: node_data, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/global_contracts.rs b/test-loop-tests/src/tests/global_contracts.rs index 4fc25697d2c..5f12d284d0a 100644 --- a/test-loop-tests/src/tests/global_contracts.rs +++ b/test-loop-tests/src/tests/global_contracts.rs @@ -20,8 +20,8 @@ use near_primitives::views::{ }; use near_vm_runner::ContractCode; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::transactions::{self}; use crate::utils::{ONE_NEAR, TGAS}; @@ -193,7 +193,8 @@ impl GlobalContractsTestEnv { .clients(clients) .epoch_config_store(epoch_config_store) .runtime_config_store(runtime_config_store.clone()) - .build(); + .build() + .warmup(); let contract = ContractCode::new(near_test_contracts::rs_contract().to_vec(), None); Self { @@ -327,7 +328,7 @@ impl GlobalContractsTestEnv { self.env.test_loop.run_for(Duration::seconds(2)); let clients: Vec<&Client> = self .env - .datas + .node_datas .iter() .map(|data| &self.env.test_loop.data.get(&data.client_sender.actor_handle()).client) .collect(); @@ -344,7 +345,7 @@ impl GlobalContractsTestEnv { } fn get_tx_block_hash(&self) -> CryptoHash { - transactions::get_shared_block_hash(&self.env.datas, &self.env.test_loop.data) + transactions::get_shared_block_hash(&self.env.node_datas, &self.env.test_loop.data) } fn execute_tx(&mut self, tx: SignedTransaction) -> FinalExecutionOutcomeView { @@ -352,7 +353,7 @@ impl GlobalContractsTestEnv { &mut self.env.test_loop, &self.rpc, tx, - &self.env.datas, + &self.env.node_datas, Duration::seconds(5), ) .unwrap() @@ -363,7 +364,7 @@ impl GlobalContractsTestEnv { &mut self.env.test_loop, &self.rpc, tx, - &self.env.datas, + &self.env.node_datas, Duration::seconds(5), ); } diff --git a/test-loop-tests/src/tests/in_memory_tries.rs b/test-loop-tests/src/tests/in_memory_tries.rs index 12f0757b8cd..f50568c34c4 100644 
--- a/test-loop-tests/src/tests/in_memory_tries.rs +++ b/test-loop-tests/src/tests/in_memory_tries.rs @@ -6,8 +6,8 @@ use near_o11y::testonly::init_test_logger; use near_primitives::shard_layout::ShardLayout; use near_primitives::types::AccountId; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::execute_money_transfers; @@ -46,11 +46,12 @@ fn test_load_memtrie_after_empty_chunks() { .transaction_validity_period(1000) .build(); let epoch_config_store = TestEpochConfigBuilder::build_store_from_genesis(&genesis); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder .genesis(genesis) .epoch_config_store(epoch_config_store) .clients(client_accounts) - .build(); + .build() + .warmup(); execute_money_transfers(&mut test_loop, &node_datas, &accounts).unwrap(); @@ -91,6 +92,6 @@ fn test_load_memtrie_after_empty_chunks() { // Give the test a chance to finish off remaining events in the event loop, which can // be important for properly shutting down the nodes. - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/malicious_chunk_producer.rs b/test-loop-tests/src/tests/malicious_chunk_producer.rs index f960e98e5b0..d4798f454d8 100644 --- a/test-loop-tests/src/tests/malicious_chunk_producer.rs +++ b/test-loop-tests/src/tests/malicious_chunk_producer.rs @@ -1,8 +1,8 @@ #![cfg(feature = "test_features")] // required for adversarial behaviors //! Test behaviors of the network when the chunk producer is malicious or misbehaving. -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::get_anchor_hash; use near_async::messaging::CanSend as _; @@ -37,8 +37,9 @@ fn test_producer_with_expired_transactions() { .genesis(genesis) .epoch_config_store(epoch_config_store) .clients(accounts.clone()) - .build(); - let TestLoopEnv { test_loop, datas: node_datas, .. } = &mut test_loop_env; + .build() + .warmup(); + let TestLoopEnv { test_loop, node_datas, .. } = &mut test_loop_env; // First we're gonna ask the chunk producer to keep producing empty chunks and send some // transactions as well. 
This will keep transactions in the transaction pool for more blocks diff --git a/test-loop-tests/src/tests/max_receipt_size.rs b/test-loop-tests/src/tests/max_receipt_size.rs index 11ef081efe2..8e3b4d97d42 100644 --- a/test-loop-tests/src/tests/max_receipt_size.rs +++ b/test-loop-tests/src/tests/max_receipt_size.rs @@ -1,4 +1,5 @@ use assert_matches::assert_matches; +use near_async::test_loop::TestLoopV2; use near_async::time::Duration; use near_chain::{ReceiptFilter, get_incoming_receipts_for_shard}; use near_o11y::testonly::init_test_logger; @@ -15,7 +16,8 @@ use near_primitives::transaction::SignedTransaction; use near_primitives::types::AccountId; use near_primitives::views::FinalExecutionStatus; -use crate::env::TestLoopEnv; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; use crate::utils::TGAS; use crate::utils::setups::standard_setup_1; use crate::utils::transactions::{execute_tx, get_shared_block_hash, run_tx}; @@ -24,7 +26,7 @@ use crate::utils::transactions::{execute_tx, get_shared_block_hash, run_tx}; #[test] fn slow_test_max_receipt_size() { init_test_logger(); - let mut env: TestLoopEnv = standard_setup_1(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = standard_setup_1(); let account0: AccountId = "account0".parse().unwrap(); let account0_signer = &create_user_test_signer(&account0).into(); @@ -37,10 +39,10 @@ fn slow_test_max_receipt_size() { &account0, vec![0u8; 2_000_000], account0_signer, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); let large_tx_exec_res = - execute_tx(&mut env.test_loop, &rpc_id, large_tx, &env.datas, Duration::seconds(5)); + execute_tx(&mut test_loop, &rpc_id, large_tx, &node_datas, Duration::seconds(5)); assert_matches!(large_tx_exec_res, Err(InvalidTxError::TransactionSizeExceeded { .. })); // Let's test it by running a contract that generates a large receipt. @@ -49,9 +51,9 @@ fn slow_test_max_receipt_size() { &account0, near_test_contracts::rs_contract().into(), &account0_signer, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, deploy_contract_tx, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, deploy_contract_tx, &node_datas, Duration::seconds(5)); // Calling generate_large_receipt({"account_id": "account0", "method_name": "noop", "total_args_size": 3000000}) // will generate a receipt that has ~3_000_000 bytes. It'll be a single receipt with multiple FunctionCall actions. @@ -65,9 +67,9 @@ fn slow_test_max_receipt_size() { "generate_large_receipt".into(), r#"{"account_id": "account0", "method_name": "noop", "total_args_size": 3000000}"#.into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, large_receipt_tx, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, large_receipt_tx, &node_datas, Duration::seconds(5)); // Generating a receipt that is 5 MB should fail, it's above the receipt size limit. 
let too_large_receipt_tx = SignedTransaction::call( @@ -79,13 +81,13 @@ fn slow_test_max_receipt_size() { "generate_large_receipt".into(), r#"{"account_id": "account0", "method_name": "noop", "total_args_size": 5000000}"#.into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); let too_large_receipt_tx_exec_res = execute_tx( - &mut env.test_loop, + &mut test_loop, &rpc_id, too_large_receipt_tx, - &env.datas, + &node_datas, Duration::seconds(5), ) .unwrap(); @@ -121,12 +123,13 @@ fn slow_test_max_receipt_size() { "sum_n".into(), 5_u64.to_le_bytes().to_vec(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - let sum_4_res = run_tx(&mut env.test_loop, &rpc_id, sum_4_tx, &env.datas, Duration::seconds(5)); + let sum_4_res = run_tx(&mut test_loop, &rpc_id, sum_4_tx, &node_datas, Duration::seconds(5)); assert_eq!(sum_4_res, 10u64.to_le_bytes().to_vec()); - env.shutdown_and_drain_remaining_events(Duration::seconds(20)); + TestLoopEnv { test_loop, node_datas, shared_state } + .shutdown_and_drain_remaining_events(Duration::seconds(20)); } // A function call will generate a new receipt. Size of this receipt will be equal to @@ -137,7 +140,7 @@ fn slow_test_max_receipt_size() { #[test] fn test_max_receipt_size_promise_return() { init_test_logger(); - let mut env: TestLoopEnv = standard_setup_1(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = standard_setup_1(); let account: AccountId = "account0".parse().unwrap(); let account_signer = &create_user_test_signer(&account).into(); @@ -149,9 +152,9 @@ fn test_max_receipt_size_promise_return() { &account, near_test_contracts::rs_contract().into(), &account_signer, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, deploy_contract_tx, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, deploy_contract_tx, &node_datas, Duration::seconds(5)); // User calls a contract method // Contract method creates a DAG with two promises: [A -then-> B] @@ -191,9 +194,9 @@ fn test_max_receipt_size_promise_return() { "max_receipt_size_promise_return_method1".into(), format!("{{\"args_size\": {}}}", args_size).into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, large_receipt_tx, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, large_receipt_tx, &node_datas, Duration::seconds(5)); // Make sure that the last promise in the DAG was called let assert_test_completed = SignedTransaction::call( @@ -205,13 +208,14 @@ fn test_max_receipt_size_promise_return() { "assert_test_completed".into(), "".into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, assert_test_completed, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, assert_test_completed, &node_datas, Duration::seconds(5)); - assert_oversized_receipt_occurred(&env); + assert_oversized_receipt_occurred(&test_loop, &node_datas); - env.shutdown_and_drain_remaining_events(Duration::seconds(20)); + TestLoopEnv { test_loop, node_datas, shared_state } + .shutdown_and_drain_remaining_events(Duration::seconds(20)); } /// Return a value that is as large as max_receipt_size. 
The value will be wrapped in a data receipt @@ -222,7 +226,7 @@ fn test_max_receipt_size_promise_return() { #[test] fn test_max_receipt_size_value_return() { init_test_logger(); - let mut env: TestLoopEnv = standard_setup_1(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = standard_setup_1(); let account: AccountId = "account0".parse().unwrap(); let account_signer = &create_user_test_signer(&account).into(); @@ -234,9 +238,9 @@ fn test_max_receipt_size_value_return() { &account, near_test_contracts::rs_contract().into(), &account_signer, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, deploy_contract_tx, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, deploy_contract_tx, &node_datas, Duration::seconds(5)); let max_receipt_size = 4_194_304; @@ -250,9 +254,9 @@ fn test_max_receipt_size_value_return() { "max_receipt_size_value_return_method".into(), format!("{{\"value_size\": {}}}", max_receipt_size).into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, large_receipt_tx, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, large_receipt_tx, &node_datas, Duration::seconds(5)); // Make sure that the last promise in the DAG was called let assert_test_completed = SignedTransaction::call( @@ -264,13 +268,14 @@ fn test_max_receipt_size_value_return() { "assert_test_completed".into(), "".into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, assert_test_completed, &env.datas, Duration::seconds(5)); + run_tx(&mut test_loop, &rpc_id, assert_test_completed, &node_datas, Duration::seconds(5)); - assert_oversized_receipt_occurred(&env); + assert_oversized_receipt_occurred(&test_loop, &node_datas); - env.shutdown_and_drain_remaining_events(Duration::seconds(20)); + TestLoopEnv { test_loop, node_datas, shared_state } + .shutdown_and_drain_remaining_events(Duration::seconds(20)); } /// Yielding produces a new action receipt, resuming produces a new data receipt. 
@@ -278,7 +283,7 @@ fn test_max_receipt_size_value_return() { #[test] fn test_max_receipt_size_yield_resume() { init_test_logger(); - let mut env: TestLoopEnv = standard_setup_1(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = standard_setup_1(); let account: AccountId = "account0".parse().unwrap(); let account_signer = &create_user_test_signer(&account).into(); @@ -290,9 +295,9 @@ fn test_max_receipt_size_yield_resume() { &account, near_test_contracts::rs_contract().into(), &account_signer, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - run_tx(&mut env.test_loop, &rpc_id, deploy_contract_tx, &env.datas, Duration::seconds(50)); + run_tx(&mut test_loop, &rpc_id, deploy_contract_tx, &node_datas, Duration::seconds(50)); let max_receipt_size = 4_194_304; @@ -307,16 +312,11 @@ fn test_max_receipt_size_yield_resume() { "yield_with_large_args".into(), format!("{{\"args_size\": {}}}", max_receipt_size).into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - let yield_receipt_res = execute_tx( - &mut env.test_loop, - &rpc_id, - yield_receipt_tx, - &env.datas, - Duration::seconds(10), - ) - .unwrap(); + let yield_receipt_res = + execute_tx(&mut test_loop, &rpc_id, yield_receipt_tx, &node_datas, Duration::seconds(10)) + .unwrap(); let expected_yield_status = FinalExecutionStatus::Failure(TxExecutionError::ActionError(ActionError { @@ -342,16 +342,11 @@ fn test_max_receipt_size_yield_resume() { "resume_with_large_payload".into(), format!("{{\"payload_size\": {}}}", 2000).into(), 300 * TGAS, - get_shared_block_hash(&env.datas, &env.test_loop.data), + get_shared_block_hash(&node_datas, &test_loop.data), ); - let resume_receipt_res = execute_tx( - &mut env.test_loop, - &rpc_id, - resume_receipt_tx, - &env.datas, - Duration::seconds(5), - ) - .unwrap(); + let resume_receipt_res = + execute_tx(&mut test_loop, &rpc_id, resume_receipt_tx, &node_datas, Duration::seconds(5)) + .unwrap(); let expected_resume_status = FinalExecutionStatus::Failure(TxExecutionError::ActionError(ActionError { @@ -362,13 +357,14 @@ fn test_max_receipt_size_yield_resume() { })); assert_eq!(resume_receipt_res.status, expected_resume_status); - env.shutdown_and_drain_remaining_events(Duration::seconds(20)); + TestLoopEnv { test_loop, node_datas, shared_state } + .shutdown_and_drain_remaining_events(Duration::seconds(20)); } /// Assert that there was an incoming receipt with size above max_receipt_size -fn assert_oversized_receipt_occurred(env: &TestLoopEnv) { - let client_handle = env.datas[0].client_sender.actor_handle(); - let client = &env.test_loop.data.get(&client_handle).client; +fn assert_oversized_receipt_occurred(test_loop: &TestLoopV2, node_datas: &[TestData]) { + let client_handle = node_datas[0].client_sender.actor_handle(); + let client = &test_loop.data.get(&client_handle).client; let chain = &client.chain; let epoch_manager = &*client.epoch_manager; diff --git a/test-loop-tests/src/tests/multinode_stateless_validators.rs b/test-loop-tests/src/tests/multinode_stateless_validators.rs index a23c0ae5ad6..ffba22c51bd 100644 --- a/test-loop-tests/src/tests/multinode_stateless_validators.rs +++ b/test-loop-tests/src/tests/multinode_stateless_validators.rs @@ -10,8 +10,8 @@ use near_primitives::shard_layout::ShardLayout; use near_primitives::types::{AccountId, EpochId, EpochReference}; use near_primitives::views::{CurrentEpochValidatorInfo, 
EpochValidatorInfo}; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::execute_money_transfers; @@ -55,8 +55,12 @@ fn slow_test_stateless_validators_with_multi_test_loop() { let epoch_config_store = TestEpochConfigBuilder::from_genesis(&genesis) .shuffle_shard_assignment_for_chunk_producers(true) .build_store_for_genesis_protocol_version(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); // Capture the initial validator info in the first epoch. let client_handle = node_datas[0].client_sender.actor_handle(); @@ -90,7 +94,7 @@ fn slow_test_stateless_validators_with_multi_test_loop() { // Give the test a chance to finish off remaining events in the event loop, which can // be important for properly shutting down the nodes. - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/multinode_test_loop_example.rs b/test-loop-tests/src/tests/multinode_test_loop_example.rs index fa6c4ef626d..8aad13626a3 100644 --- a/test-loop-tests/src/tests/multinode_test_loop_example.rs +++ b/test-loop-tests/src/tests/multinode_test_loop_example.rs @@ -6,8 +6,8 @@ use near_o11y::testonly::init_test_logger; use near_primitives::shard_layout::ShardLayout; use near_primitives::types::AccountId; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::execute_money_transfers; @@ -37,8 +37,12 @@ fn slow_test_client_with_multi_test_loop() { let epoch_config_store = TestEpochConfigBuilder::from_genesis(&genesis) .shuffle_shard_assignment_for_chunk_producers(true) .build_store_for_genesis_protocol_version(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); let first_epoch_tracked_shards = { let clients = node_datas @@ -70,6 +74,6 @@ fn slow_test_client_with_multi_test_loop() { // Give the test a chance to finish off remaining events in the event loop, which can // be important for properly shutting down the nodes. 
- TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/optimistic_block.rs b/test-loop-tests/src/tests/optimistic_block.rs index bfc37b02aee..8cc76667f31 100644 --- a/test-loop-tests/src/tests/optimistic_block.rs +++ b/test-loop-tests/src/tests/optimistic_block.rs @@ -8,8 +8,8 @@ use near_primitives::shard_layout::ShardLayout; use near_primitives::types::AccountId; use near_primitives::version::{PROTOCOL_VERSION, ProtocolFeature}; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; fn get_builder(num_shards: usize) -> TestLoopBuilder { @@ -42,12 +42,12 @@ fn test_optimistic_block() { return; } let num_shards = 3; - let mut env: TestLoopEnv = get_builder(num_shards).build(); + let mut env: TestLoopEnv = get_builder(num_shards).build().warmup(); env.test_loop.run_for(Duration::seconds(10)); { let chain = - &env.test_loop.data.get(&env.datas[0].client_sender.actor_handle()).client.chain; + &env.test_loop.data.get(&env.node_datas[0].client_sender.actor_handle()).client.chain; // Under normal block processing, there can be only one optimistic // block waiting to be processed. assert!(chain.optimistic_block_chunks.num_blocks() <= 1); @@ -69,7 +69,7 @@ fn test_optimistic_block() { #[cfg(feature = "test_features")] /// Create an invalid optimistic block based on the adversarial type. fn make_invalid_ob(env: &TestLoopEnv, adv_type: OptimisticBlockAdvType) -> OptimisticBlock { - let client = &env.test_loop.data.get(&env.datas[0].client_sender.actor_handle()).client; + let client = &env.test_loop.data.get(&env.node_datas[0].client_sender.actor_handle()).client; let epoch_manager = &client.epoch_manager; let head = client.chain.head().unwrap(); @@ -79,7 +79,8 @@ fn make_invalid_ob(env: &TestLoopEnv, adv_type: OptimisticBlockAdvType) -> Optim let block_producer = epoch_manager.get_block_producer(&epoch_id, height).unwrap(); // Get producer client - let client_data = &env.datas.iter().find(|data| data.account_id == block_producer).unwrap(); + let client_data = + &env.node_datas.iter().find(|data| data.account_id == block_producer).unwrap(); let client = &env.test_loop.data.get(&client_data.client_sender.actor_handle()).client; let chain = &client.chain; @@ -104,9 +105,10 @@ fn test_invalid_optimistic_block() { if !ProtocolFeature::ProduceOptimisticBlock.enabled(PROTOCOL_VERSION) { return; } - let mut env = get_builder(3).build(); + let mut env = get_builder(3).build().warmup(); env.test_loop.run_for(Duration::seconds(10)); - let chain = &env.test_loop.data.get(&env.datas[0].client_sender.actor_handle()).client.chain; + let chain = + &env.test_loop.data.get(&env.node_datas[0].client_sender.actor_handle()).client.chain; let adversarial_behaviour = [ OptimisticBlockAdvType::InvalidVrfValue, OptimisticBlockAdvType::InvalidVrfProof, diff --git a/test-loop-tests/src/tests/protocol_upgrade.rs b/test-loop-tests/src/tests/protocol_upgrade.rs index d2a0d2393e2..4fdcfaecfe9 100644 --- a/test-loop-tests/src/tests/protocol_upgrade.rs +++ b/test-loop-tests/src/tests/protocol_upgrade.rs @@ -17,8 +17,8 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::ops::Deref; use std::sync::Arc; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use 
crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; /// Test upgrading the blockchain to another protocol version. @@ -107,13 +107,14 @@ pub(crate) fn test_protocol_upgrade( // Immediately start voting for the new protocol version let protocol_upgrade_schedule = ProtocolUpgradeVotingSchedule::new_immediate(new_protocol); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder .genesis(genesis) .epoch_config_store(epoch_config_store) .protocol_upgrade_schedule(protocol_upgrade_schedule) .drop_protocol_upgrade_chunks(new_protocol, chunk_ranges_to_drop.clone()) .clients(clients) - .build(); + .build() + .warmup(); let client_handle = node_datas[0].client_sender.actor_handle(); let epoch_ids_with_old_protocol = RefCell::new(BTreeSet::new()); @@ -191,7 +192,7 @@ pub(crate) fn test_protocol_upgrade( } assert_eq!(&*observed_missing_chunks.borrow(), &expected_missing_chunks); - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/reject_outdated_blocks.rs b/test-loop-tests/src/tests/reject_outdated_blocks.rs index e9fc5f8cf67..6522c911685 100644 --- a/test-loop-tests/src/tests/reject_outdated_blocks.rs +++ b/test-loop-tests/src/tests/reject_outdated_blocks.rs @@ -1,5 +1,5 @@ -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use itertools::Itertools; use near_async::test_loop::data::TestLoopData; @@ -79,14 +79,15 @@ fn slow_test_reject_blocks_with_outdated_protocol_version_protocol_upgrade() { .add_user_accounts_simple(&accounts, initial_balance) .build(); - let TestLoopEnv { mut test_loop, datas: node_data, tempdir } = test_loop_builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = test_loop_builder .genesis(genesis) .protocol_upgrade_schedule(protocol_upgrade_schedule) .epoch_config_store(epoch_config_store) .clients(clients) - .build(); + .build() + .warmup(); - let sender = node_data[0].client_sender.clone(); + let sender = node_datas[0].client_sender.clone(); let handle = sender.actor_handle(); let client = &mut test_loop.data.get_mut(&handle).client; @@ -146,6 +147,6 @@ fn slow_test_reject_blocks_with_outdated_protocol_version_protocol_upgrade() { let res = client.process_block_test(old_version_block.clone().into(), Provenance::NONE); assert!(matches!(res, Err(Error::InvalidProtocolVersion))); - TestLoopEnv { test_loop, datas: node_data, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/resharding_v3.rs b/test-loop-tests/src/tests/resharding_v3.rs index a4ba6afb11f..e73f52630f8 100644 --- a/test-loop-tests/src/tests/resharding_v3.rs +++ b/test-loop-tests/src/tests/resharding_v3.rs @@ -12,7 +12,7 @@ use std::cell::Cell; use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; -use crate::builder::TestLoopBuilder; +use crate::setup::builder::TestLoopBuilder; use crate::utils::loop_action::{LoopAction, LoopActionStatus}; use crate::utils::receipts::{ ReceiptKind, check_receipts_presence_after_resharding_block, @@ -441,13 +441,14 @@ fn test_resharding_v3_base(params: TestReshardingParameters) { params.chunk_ranges_to_drop.clone(), ) .gc_num_epochs_to_keep(GC_NUM_EPOCHS_TO_KEEP) - 
.build(); + .build() + .warmup(); let mut test_setup_transactions = vec![]; for contract_id in &params.deploy_test_contract { let deploy_contract_tx = deploy_contract( &mut env.test_loop, - &env.datas, + &env.node_datas, &client_account_id, contract_id, near_test_contracts::rs_contract().into(), @@ -468,10 +469,10 @@ fn test_resharding_v3_base(params: TestReshardingParameters) { } // Wait for the test setup transactions to settle and ensure they all succeeded. env.test_loop.run_for(Duration::seconds(2)); - check_txs(&env.test_loop.data, &env.datas, &client_account_id, &test_setup_transactions); + check_txs(&env.test_loop.data, &env.node_datas, &client_account_id, &test_setup_transactions); let client_handles = - env.datas.iter().map(|data| data.client_sender.actor_handle()).collect_vec(); + env.node_datas.iter().map(|data| data.client_sender.actor_handle()).collect_vec(); #[cfg(feature = "test_features")] { @@ -495,10 +496,9 @@ fn test_resharding_v3_base(params: TestReshardingParameters) { let resharding_block_hash = Cell::new(None); let epoch_height_after_resharding = Cell::new(None); let success_condition = |test_loop_data: &mut TestLoopData| -> bool { - params - .loop_actions - .iter() - .for_each(|action| action.call(&env.datas, test_loop_data, client_account_id.clone())); + params.loop_actions.iter().for_each(|action| { + action.call(&env.node_datas, test_loop_data, client_account_id.clone()) + }); let clients = client_handles.iter().map(|handle| &test_loop_data.get(handle).client).collect_vec(); diff --git a/test-loop-tests/src/tests/restart_node.rs b/test-loop-tests/src/tests/restart_node.rs index d891fc3a289..8dd5d652391 100644 --- a/test-loop-tests/src/tests/restart_node.rs +++ b/test-loop-tests/src/tests/restart_node.rs @@ -4,7 +4,7 @@ use near_chain_configs::test_genesis::{TestEpochConfigBuilder, ValidatorsSpec}; use near_o11y::testonly::init_test_logger; use near_primitives::types::AccountId; -use crate::builder::TestLoopBuilder; +use crate::setup::builder::TestLoopBuilder; use crate::utils::ONE_NEAR; const NUM_CLIENTS: usize = 4; @@ -33,8 +33,12 @@ fn test_restart_node() { .shuffle_shard_assignment_for_chunk_producers(true) .build_store_for_genesis_protocol_version(); - let mut env = - builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + let mut env = builder + .genesis(genesis) + .epoch_config_store(epoch_config_store) + .clients(clients) + .build() + .warmup(); env.test_loop.run_for(Duration::seconds(5)); diff --git a/test-loop-tests/src/tests/state_sync.rs b/test-loop-tests/src/tests/state_sync.rs index 9e0ae3adb1c..4ef3ac29196 100644 --- a/test-loop-tests/src/tests/state_sync.rs +++ b/test-loop-tests/src/tests/state_sync.rs @@ -17,8 +17,9 @@ use near_primitives::types::{ }; use near_primitives::version::{PROTOCOL_VERSION, ProtocolFeature}; -use crate::builder::TestLoopBuilder; -use crate::env::{TestData, TestLoopEnv}; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; use crate::utils::ONE_NEAR; use crate::utils::transactions::{get_anchor_hash, get_smallest_height_head}; @@ -184,7 +185,8 @@ fn setup_initial_blockchain( .epoch_config_store(epoch_config_store) .clients(clients) .drop_chunks_by_height(chunks_produced) - .build(); + .build() + .warmup(); TestState { env, accounts, skip_block_height } } @@ -260,7 +262,7 @@ fn send_txs_between_shards( // what we think we should be.
fn assert_fork_happened(env: &TestLoopEnv, skip_block_height: BlockHeight) { let client_handles = - env.datas.iter().map(|data| data.client_sender.actor_handle()).collect_vec(); + env.node_datas.iter().map(|data| data.client_sender.actor_handle()).collect_vec(); let clients = client_handles.iter().map(|handle| &env.test_loop.data.get(handle).client).collect_vec(); @@ -293,7 +295,7 @@ fn produce_chunks( mut accounts: Option>>, skip_block_height: Option, ) { - let handle = env.datas[0].client_sender.actor_handle(); + let handle = env.node_datas[0].client_sender.actor_handle(); let client = &env.test_loop.data.get(&handle).client; let mut tip = client.chain.head().unwrap(); // TODO: make this more precise. We don't have to wait 20 whole seconds, but the amount we wait will @@ -305,7 +307,7 @@ fn produce_chunks( env.test_loop.run_until( |data| { let clients = env - .datas + .node_datas .iter() .map(|test_data| &data.get(&test_data.client_sender.actor_handle()).client) .collect_vec(); @@ -316,7 +318,7 @@ fn produce_chunks( ); let clients = env - .datas + .node_datas .iter() .map(|test_data| { &env.test_loop.data.get(&test_data.client_sender.actor_handle()).client @@ -333,7 +335,7 @@ fn produce_chunks( break; } if let Some(accounts) = accounts.as_mut() { - send_txs_between_shards(&mut env.test_loop, &env.datas, accounts); + send_txs_between_shards(&mut env.test_loop, &env.node_datas, accounts); } } tip = new_tip; @@ -346,19 +348,19 @@ fn produce_chunks( fn run_test(state: TestState) { let TestState { mut env, mut accounts, skip_block_height } = state; - let handle = env.datas[0].client_sender.actor_handle(); + let handle = env.node_datas[0].client_sender.actor_handle(); let client = &env.test_loop.data.get(&handle).client; let first_epoch_time = client.config.min_block_production_delay * u32::try_from(EPOCH_LENGTH).unwrap_or(u32::MAX) + Duration::seconds(2); if let Some(accounts) = accounts.as_mut() { - send_txs_between_shards(&mut env.test_loop, &env.datas, accounts); + send_txs_between_shards(&mut env.test_loop, &env.node_datas, accounts); } env.test_loop.run_until( |data| { - let handle = env.datas[0].client_sender.actor_handle(); + let handle = env.node_datas[0].client_sender.actor_handle(); let client = &data.get(&handle).client; let tip = client.chain.head().unwrap(); tip.epoch_id != Default::default() @@ -674,7 +676,7 @@ fn slow_test_state_sync_fork_before_sync() { fn await_sync_hash(env: &mut TestLoopEnv) -> CryptoHash { env.test_loop.run_until( |data| { - let handle = env.datas[0].client_sender.actor_handle(); + let handle = env.node_datas[0].client_sender.actor_handle(); let client = &data.get(&handle).client; let tip = client.chain.head().unwrap(); if tip.epoch_id == Default::default() { @@ -684,7 +686,7 @@ fn await_sync_hash(env: &mut TestLoopEnv) -> CryptoHash { }, Duration::seconds(20), ); - let client_handle = env.datas[0].client_sender.actor_handle(); + let client_handle = env.node_datas[0].client_sender.actor_handle(); let client = &env.test_loop.data.get(&client_handle).client; let tip = client.chain.head().unwrap(); client.chain.get_sync_hash(&tip.last_block_hash).unwrap().unwrap() @@ -694,7 +696,7 @@ fn await_sync_hash(env: &mut TestLoopEnv) -> CryptoHash { fn spam_state_sync_header_reqs(env: &mut TestLoopEnv) { let sync_hash = await_sync_hash(env); - let view_client_handle = env.datas[0].view_client_sender.actor_handle(); + let view_client_handle = env.node_datas[0].view_client_sender.actor_handle(); let view_client = env.test_loop.data.get_mut(&view_client_handle); for _ in 
0..30 { @@ -710,7 +712,7 @@ fn spam_state_sync_header_reqs(env: &mut TestLoopEnv) { env.test_loop.run_for(Duration::seconds(40)); let sync_hash = await_sync_hash(env); - let view_client_handle = env.datas[0].view_client_sender.actor_handle(); + let view_client_handle = env.node_datas[0].view_client_sender.actor_handle(); let view_client = env.test_loop.data.get_mut(&view_client_handle); let res = view_client.handle(StateRequestHeader { shard_id, sync_hash }); diff --git a/test-loop-tests/src/tests/syncing.rs b/test-loop-tests/src/tests/syncing.rs index 99b98cfff9e..1819892d0e9 100644 --- a/test-loop-tests/src/tests/syncing.rs +++ b/test-loop-tests/src/tests/syncing.rs @@ -1,5 +1,5 @@ -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; use crate::utils::transactions::execute_money_transfers; use itertools::Itertools; @@ -44,11 +44,12 @@ fn slow_test_sync_from_genesis() { let epoch_config_store = TestEpochConfigBuilder::from_genesis(&genesis) .shuffle_shard_assignment_for_chunk_producers(true) .build_store_for_genesis_protocol_version(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder .genesis(genesis.clone()) .epoch_config_store(epoch_config_store.clone()) .clients(clients) - .build(); + .build() + .warmup(); let first_epoch_tracked_shards = { let clients = node_datas @@ -90,14 +91,14 @@ fn slow_test_sync_from_genesis() { // Properly shut down the previous TestLoopEnv. // We must preserve the tempdir, since state dumps are stored there, // and are necessary for state sync to work on the new node. - let tempdir = TestLoopEnv { test_loop, datas: node_datas, tempdir } + let tempdir = TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); tracing::info!("Starting new TestLoopEnv with new node"); let clients = accounts.iter().take(NUM_CLIENTS + 1).cloned().collect_vec(); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = TestLoopBuilder::new() + let TestLoopEnv { mut test_loop, node_datas, shared_state } = TestLoopBuilder::new() .genesis(genesis.clone()) .epoch_config_store(epoch_config_store) .clients(clients) @@ -142,6 +143,6 @@ fn slow_test_sync_from_genesis() { |test_loop_data| test_loop_data.get(&new_node).client.chain.head().unwrap().height > 10050, Duration::seconds(20), ); - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/tests/view_requests_to_archival_node.rs b/test-loop-tests/src/tests/view_requests_to_archival_node.rs index 2c31ec4f0e4..2eb73eb35d1 100644 --- a/test-loop-tests/src/tests/view_requests_to_archival_node.rs +++ b/test-loop-tests/src/tests/view_requests_to_archival_node.rs @@ -25,8 +25,9 @@ use near_primitives::views::{ StateChangeCauseView, StateChangeKindView, StateChangeValueView, StateChangesRequestView, }; -use crate::builder::TestLoopBuilder; -use crate::env::{TestData, TestLoopEnv}; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; use crate::utils::ONE_NEAR; use crate::utils::transactions::execute_money_transfers; @@ -71,13 +72,14 @@ fn slow_test_view_requests_to_archival_node() { .genesis_height(GENESIS_HEIGHT) .build(); let epoch_config_store = 
TestEpochConfigBuilder::build_store_from_genesis(&genesis); - let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder + let TestLoopEnv { mut test_loop, node_datas, shared_state } = builder .genesis(genesis) .epoch_config_store(epoch_config_store) .clients(all_clients) .archival_clients(archival_clients) .gc_num_epochs_to_keep(GC_NUM_EPOCHS_TO_KEEP) - .build(); + .build() + .warmup(); let non_validator_accounts = accounts.iter().skip(NUM_VALIDATORS).cloned().collect_vec(); execute_money_transfers(&mut test_loop, &node_datas, &non_validator_accounts).unwrap(); @@ -96,7 +98,7 @@ fn slow_test_view_requests_to_archival_node() { let mut view_client_tester = ViewClientTester::new(&mut test_loop, &node_datas); view_client_tester.run_tests(&shard_layout); - TestLoopEnv { test_loop, datas: node_datas, tempdir } + TestLoopEnv { test_loop, node_datas, shared_state } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/test-loop-tests/src/utils/contract_distribution.rs b/test-loop-tests/src/utils/contract_distribution.rs index 526acfe56bc..8237e19bda1 100644 --- a/test-loop-tests/src/utils/contract_distribution.rs +++ b/test-loop-tests/src/utils/contract_distribution.rs @@ -1,4 +1,4 @@ -use crate::env::TestLoopEnv; +use crate::setup::env::TestLoopEnv; use itertools::Itertools; use near_async::{test_loop::data::TestLoopData, time::Duration}; use near_chain::ChainStoreAccess; @@ -10,8 +10,8 @@ use near_vm_runner::get_contract_cache_key; pub(crate) fn run_until_caches_contain_contract(env: &mut TestLoopEnv, code_hash: &CryptoHash) { env.test_loop.run_until( |test_loop_data: &mut TestLoopData| -> bool { - for i in 0..env.datas.len() { - let client_handle = env.datas[i].client_sender.actor_handle(); + for i in 0..env.node_datas.len() { + let client_handle = env.node_datas[i].client_sender.actor_handle(); let client = &test_loop_data.get(&client_handle).client; let runtime_config = client.runtime_adapter.get_runtime_config(PROTOCOL_VERSION).unwrap(); @@ -35,7 +35,7 @@ pub(crate) fn assert_all_chunk_endorsements_received( start_height: u64, end_height: u64, ) { - let client_handle = env.datas[0].client_sender.actor_handle(); + let client_handle = env.node_datas[0].client_sender.actor_handle(); let client = &env.test_loop.data.get(&client_handle).client; let chain_store = client.chain.chain_store(); let epoch_manager = &client.epoch_manager; @@ -70,17 +70,12 @@ pub(crate) fn assert_all_chunk_endorsements_received( } /// Clears the compiled contract caches for all the clients. 
-pub(crate) fn clear_compiled_contract_caches(_env: &mut TestLoopEnv) { +pub(crate) fn clear_compiled_contract_caches(env: &mut TestLoopEnv) { #[cfg(feature = "test_features")] - for i in 0.._env.datas.len() { - let client_handle = _env.datas[i].client_sender.actor_handle(); - let contract_cache_handle = _env - .test_loop - .data - .get(&client_handle) - .client - .runtime_adapter - .compiled_contract_cache(); + for i in 0..env.node_datas.len() { + let client_handle = env.node_datas[i].client_sender.actor_handle(); + let contract_cache_handle = + env.test_loop.data.get(&client_handle).client.runtime_adapter.compiled_contract_cache(); contract_cache_handle.test_only_clear().unwrap(); } } diff --git a/test-loop-tests/src/utils/loop_action.rs b/test-loop-tests/src/utils/loop_action.rs index 68a46a81728..a98b69f9498 100644 --- a/test-loop-tests/src/utils/loop_action.rs +++ b/test-loop-tests/src/utils/loop_action.rs @@ -4,7 +4,7 @@ use std::rc::Rc; use near_async::test_loop::data::TestLoopData; use near_primitives::types::AccountId; -use crate::env::TestData; +use crate::setup::state::TestData; /// Signature of functions callable from inside the inner loop of a testloop test. pub(crate) type LoopActionFn = Box<dyn Fn(&[TestData], &mut TestLoopData, AccountId)>; diff --git a/test-loop-tests/src/utils/mod.rs b/test-loop-tests/src/utils/mod.rs index fcacfb0e16b..c1043498807 100644 --- a/test-loop-tests/src/utils/mod.rs +++ b/test-loop-tests/src/utils/mod.rs @@ -1,8 +1,10 @@ -use super::env::{TestData, TestLoopEnv}; use near_async::test_loop::data::TestLoopData; use near_client::client_actor::ClientActorInner; use near_primitives::types::AccountId; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; + pub(crate) mod contract_distribution; pub(crate) mod loop_action; pub(crate) mod network; @@ -19,7 +21,7 @@ pub(crate) const TGAS: u64 = 1_000_000_000_000; /// Returns the height of the chain head, by querying node at index 0.
pub(crate) fn get_head_height(env: &mut TestLoopEnv) -> u64 { - let client_handle = env.datas[0].client_sender.actor_handle(); + let client_handle = env.node_datas[0].client_sender.actor_handle(); let client = &env.test_loop.data.get(&client_handle).client; client.chain.head().unwrap().height } diff --git a/test-loop-tests/src/utils/network.rs b/test-loop-tests/src/utils/network.rs index 4bef9fa658a..4bd7883f8b4 100644 --- a/test-loop-tests/src/utils/network.rs +++ b/test-loop-tests/src/utils/network.rs @@ -1,4 +1,4 @@ -use crate::env::TestLoopChunksStorage; +use crate::setup::env::TestLoopChunksStorage; use near_epoch_manager::EpochManagerAdapter; use near_network::types::NetworkRequests; use near_primitives::sharding::ShardChunkHeader; diff --git a/test-loop-tests/src/utils/receipts.rs b/test-loop-tests/src/utils/receipts.rs index a537a074585..f7f3a57a11c 100644 --- a/test-loop-tests/src/utils/receipts.rs +++ b/test-loop-tests/src/utils/receipts.rs @@ -1,7 +1,7 @@ use super::loop_action::LoopAction; use super::retrieve_client_actor; use super::sharding::{next_block_has_new_shard_layout, this_block_has_new_shard_layout}; -use crate::env::TestData; +use crate::setup::state::TestData; use crate::utils::sharding::get_memtrie_for_shard; use near_async::test_loop::data::TestLoopData; use near_chain::ChainStoreAccess; diff --git a/test-loop-tests/src/utils/resharding.rs b/test-loop-tests/src/utils/resharding.rs index 62c1bc1d1ac..62b39023407 100644 --- a/test-loop-tests/src/utils/resharding.rs +++ b/test-loop-tests/src/utils/resharding.rs @@ -33,7 +33,7 @@ use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; use super::sharding::{next_epoch_has_new_shard_layout, this_block_has_new_shard_layout}; -use crate::env::TestData; +use crate::setup::state::TestData; use crate::utils::loop_action::LoopAction; use crate::utils::sharding::{get_memtrie_for_shard, next_block_has_new_shard_layout}; use crate::utils::transactions::{ diff --git a/test-loop-tests/src/utils/setups.rs b/test-loop-tests/src/utils/setups.rs index 0a1a3d64f5a..09e2fe6a16f 100644 --- a/test-loop-tests/src/utils/setups.rs +++ b/test-loop-tests/src/utils/setups.rs @@ -9,8 +9,8 @@ use near_primitives::types::AccountId; use near_primitives::upgrade_schedule::ProtocolUpgradeVotingSchedule; use near_vm_runner::logic::ProtocolVersion; -use crate::builder::TestLoopBuilder; -use crate::env::TestLoopEnv; +use crate::setup::builder::TestLoopBuilder; +use crate::setup::env::TestLoopEnv; use crate::utils::ONE_NEAR; /// 2 producers, 2 validators, 1 rpc node, 4 shards, 20 accounts (account{i}) with 10k NEAR each. 
@@ -53,6 +53,7 @@ pub fn standard_setup_1() -> TestLoopEnv { .epoch_config_store(epoch_config_store) .clients(clients) .build() + .warmup() } pub fn derive_new_epoch_config_from_boundary( diff --git a/test-loop-tests/src/utils/transactions.rs b/test-loop-tests/src/utils/transactions.rs index 0312b96fbcb..146f59ec4ac 100644 --- a/test-loop-tests/src/utils/transactions.rs +++ b/test-loop-tests/src/utils/transactions.rs @@ -1,4 +1,5 @@ -use crate::env::{TestData, TestLoopEnv}; +use crate::setup::env::TestLoopEnv; +use crate::setup::state::TestData; use assert_matches::assert_matches; use itertools::Itertools; use near_async::messaging::{AsyncSendError, CanSend, SendAsync}; @@ -169,10 +170,10 @@ pub fn do_create_account( amount: u128, ) { tracing::info!(target: "test", "Creating account."); - let nonce = get_next_nonce(&env.test_loop.data, &env.datas, originator); + let nonce = get_next_nonce(&env.test_loop.data, &env.node_datas, originator); let tx = create_account(env, rpc_id, originator, new_account_id, amount, nonce); env.test_loop.run_for(Duration::seconds(5)); - check_txs(&env.test_loop.data, &env.datas, rpc_id, &[tx]); + check_txs(&env.test_loop.data, &env.node_datas, rpc_id, &[tx]); } pub fn do_delete_account( @@ -182,9 +183,10 @@ pub fn do_delete_account( beneficiary_id: &AccountId, ) { tracing::info!(target: "test", "Deleting account."); - let tx = delete_account(&env.test_loop.data, &env.datas, rpc_id, account_id, beneficiary_id); + let tx = + delete_account(&env.test_loop.data, &env.node_datas, rpc_id, account_id, beneficiary_id); env.test_loop.run_for(Duration::seconds(5)); - check_txs(&env.test_loop.data, &env.datas, rpc_id, &[tx]); + check_txs(&env.test_loop.data, &env.node_datas, rpc_id, &[tx]); } pub fn do_deploy_contract( @@ -194,10 +196,10 @@ pub fn do_deploy_contract( code: Vec<u8>, ) { tracing::info!(target: "test", "Deploying contract."); - let nonce = get_next_nonce(&env.test_loop.data, &env.datas, contract_id); - let tx = deploy_contract(&mut env.test_loop, &env.datas, rpc_id, contract_id, code, nonce); + let nonce = get_next_nonce(&env.test_loop.data, &env.node_datas, contract_id); + let tx = deploy_contract(&mut env.test_loop, &env.node_datas, rpc_id, contract_id, code, nonce); env.test_loop.run_for(Duration::seconds(2)); - check_txs(&env.test_loop.data, &env.datas, rpc_id, &[tx]); + check_txs(&env.test_loop.data, &env.node_datas, rpc_id, &[tx]); } pub fn do_call_contract( @@ -209,10 +211,10 @@ pub fn do_call_contract( args: Vec<u8>, ) { tracing::info!(target: "test", "Calling contract."); - let nonce = get_next_nonce(&env.test_loop.data, &env.datas, contract_id); + let nonce = get_next_nonce(&env.test_loop.data, &env.node_datas, contract_id); let tx = call_contract( &mut env.test_loop, - &env.datas, + &env.node_datas, rpc_id, sender_id, contract_id, @@ -221,7 +223,7 @@ pub fn do_call_contract( nonce, ); env.test_loop.run_for(Duration::seconds(2)); - check_txs(&env.test_loop.data, &env.datas, rpc_id, &[tx]); + check_txs(&env.test_loop.data, &env.node_datas, rpc_id, &[tx]); } pub fn create_account( @@ -232,7 +234,7 @@ pub fn create_account( amount: u128, nonce: u64, ) -> CryptoHash { - let block_hash = get_shared_block_hash(&env.datas, &env.test_loop.data); + let block_hash = get_shared_block_hash(&env.node_datas, &env.test_loop.data); let signer = create_user_test_signer(&originator); let new_signer: Signer = create_user_test_signer(&new_account_id); @@ -247,7 +249,7 @@ pub fn create_account( ); let tx_hash = tx.get_hash(); - submit_tx(&env.datas, rpc_id, tx); + 
submit_tx(&env.node_datas, rpc_id, tx); tracing::debug!(target: "test", ?originator, ?new_account_id, ?tx_hash, "created account"); tx_hash }
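
For reference, a minimal sketch (in the style of the tests above) of how a test reads against the refactored setup API, assuming only the items visible in these hunks: `TestLoopBuilder`, the `TestLoopEnv { test_loop, node_datas, shared_state }` fields, the explicit `.build().warmup()` step, and `shutdown_and_drain_remaining_events`. The function name and the prebuilt `genesis`/`clients` parameters are hypothetical.

```
use near_async::time::Duration;
use near_chain_configs::test_genesis::TestEpochConfigBuilder;
use near_o11y::testonly::init_test_logger;
use near_primitives::types::AccountId;

use crate::setup::builder::TestLoopBuilder;
use crate::setup::env::TestLoopEnv;

// Hypothetical test body; `genesis` and `clients` are assumed to be prepared
// the same way the tests in this patch prepare them.
fn example_test_after_refactor(genesis: near_chain_configs::Genesis, clients: Vec<AccountId>) {
    init_test_logger();
    let epoch_config_store = TestEpochConfigBuilder::build_store_from_genesis(&genesis);

    // The env now exposes `node_datas` and `shared_state` (instead of the old
    // `datas`/`tempdir` fields), and warmup is an explicit step after `build()`.
    let TestLoopEnv { mut test_loop, node_datas, shared_state } = TestLoopBuilder::new()
        .genesis(genesis)
        .epoch_config_store(epoch_config_store)
        .clients(clients)
        .build()
        .warmup();

    // Helpers take the pieces they need (the loop and the per-node `TestData`)
    // rather than a whole `TestLoopEnv`.
    let client_handle = node_datas[0].client_sender.actor_handle();
    test_loop.run_for(Duration::seconds(5));
    let _head = test_loop.data.get(&client_handle).client.chain.head().unwrap();

    // Reassemble the env to shut it down cleanly.
    TestLoopEnv { test_loop, node_datas, shared_state }
        .shutdown_and_drain_remaining_events(Duration::seconds(20));
}
```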