From dddf83f772b5b269d96f7b4fe295afff82135ccc Mon Sep 17 00:00:00 2001
From: Pierre Krieger
Date: Fri, 3 Apr 2020 19:08:46 +0200
Subject: [PATCH] Companion PR to splitting Roles (#960)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Companion PR to splitting Roles

* Fix network tests

* Fix service build

* Even more fixing

* Oops, quick fix

* use is_network_authority in grandpa service config

Co-authored-by: André Silva
---
 cli/src/command.rs               |  4 ++--
 collator/src/lib.rs              | 10 +++++-----
 network/src/legacy/gossip/mod.rs | 14 +++++++-------
 network/src/protocol/mod.rs      | 12 ++++++------
 network/src/protocol/tests.rs    |  6 +++---
 network/test/src/lib.rs          | 27 +++++++++++----------------
 service/src/lib.rs               | 19 +++++++------------
 7 files changed, 41 insertions(+), 51 deletions(-)
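
Context for this companion change (a sketch, not part of the applied diff):
upstream Substrate splits the old `Roles` bitflags into two independent
types, a configuration-level `Role` describing what this node runs as, and
a network-level `ObservedRole` describing what a remote peer appears to be.
The sketch below is reconstructed from the usages in the hunks that follow;
only the variant names appearing in those hunks are confirmed, while the
`Sentry` variant and all payload types are assumptions:

    // The node's own role, read from the service Configuration as
    // `config.role` (previously `config.roles`).
    pub enum Role {
        Full,
        Light,
        // Assumed variant: a sentry node fronting a validator.
        Sentry { /* validators guarded by this sentry (assumed) */ },
        // Destructured below in service/src/lib.rs; replaces the old
        // `config.network.sentry_nodes` (element type assumed).
        Authority { sentry_nodes: Vec<String> },
    }

    // How a remote peer's role is observed on the wire; replaces the
    // `Roles` argument of gossip `new_peer` and of `PeerConnected`.
    pub enum ObservedRole {
        Full,
        Light,
        Authority,
        OurSentry,
        OurGuardedAuthority,
    }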

diff --git a/cli/src/command.rs b/cli/src/command.rs
index ba895ff6c453..ea03290f5b39 100644
--- a/cli/src/command.rs
+++ b/cli/src/command.rs
@@ -151,8 +151,8 @@ where
 		TLightClient
 	>,
 {
-	match config.roles {
-		service::Roles::LIGHT =>
+	match config.role {
+		service::Role::Light =>
 			sc_cli::run_service_until_exit(
 				config,
 				|config| service::new_light::(config),
diff --git a/collator/src/lib.rs b/collator/src/lib.rs
index 5ae55b3331dc..68d88137f0e3 100644
--- a/collator/src/lib.rs
+++ b/collator/src/lib.rs
@@ -64,7 +64,7 @@ use polkadot_primitives::{
 };
 use polkadot_cli::{
 	ProvideRuntimeApi, AbstractService, ParachainHost, IsKusama,
-	service::{self, Roles}
+	service::{self, Role}
 };
 pub use polkadot_cli::{VersionInfo, load_spec, service::Configuration};
 pub use polkadot_validation::SignedStatement;
@@ -344,8 +344,8 @@ where
 	<P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
 {
 	let is_kusama = config.expect_chain_spec().is_kusama();
-	match (is_kusama, config.roles) {
-		(_, Roles::LIGHT) => return Err(
+	match (is_kusama, &config.role) {
+		(_, Role::Light) => return Err(
 			polkadot_service::Error::Other("light nodes are unsupported as collator".into())
 		).into(),
 		(true, _) =>
@@ -389,8 +389,8 @@ pub fn run_collator<P>(
 	P::ParachainContext: Send + 'static,
 	<P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
 {
-	match (config.expect_chain_spec().is_kusama(), config.roles) {
-		(_, Roles::LIGHT) => return Err(
+	match (config.expect_chain_spec().is_kusama(), &config.role) {
+		(_, Role::Light) => return Err(
 			polkadot_cli::Error::Input("light nodes are unsupported as collator".into())
 		).into(),
 		(true, _) =>
diff --git a/network/src/legacy/gossip/mod.rs b/network/src/legacy/gossip/mod.rs
index 0afa604e2af8..033556f04f70 100644
--- a/network/src/legacy/gossip/mod.rs
+++ b/network/src/legacy/gossip/mod.rs
@@ -51,7 +51,7 @@ use sp_runtime::traits::{BlakeTwo256, Hash as HashT};
 
 use sp_blockchain::Error as ClientError;
 
-use sc_network::{config::Roles, PeerId, ReputationChange};
+use sc_network::{ObservedRole, PeerId, ReputationChange};
 use sc_network::NetworkService;
 use sc_network_gossip::{
 	ValidationResult as GossipValidationResult,
@@ -635,7 +635,7 @@ impl MessageValidator {
 }
 
 impl sc_network_gossip::Validator for MessageValidator {
-	fn new_peer(&self, _context: &mut dyn ValidatorContext, who: &PeerId, _roles: Roles) {
+	fn new_peer(&self, _context: &mut dyn ValidatorContext, who: &PeerId, _roles: ObservedRole) {
 		let mut inner = self.inner.write();
 		inner.peers.insert(who.clone(), PeerData::default());
 	}
@@ -833,7 +833,7 @@ mod tests {
 		let peer_a = PeerId::random();
 
 		let mut validator_context = MockValidatorContext::default();
-		validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+		validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 
 		assert!(validator_context.events.is_empty());
 		validator_context.clear();
@@ -911,7 +911,7 @@ mod tests {
 		let peer_a = PeerId::random();
 
 		let mut validator_context = MockValidatorContext::default();
-		validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+		validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 
 		assert!(validator_context.events.is_empty());
 		validator_context.clear();
@@ -953,7 +953,7 @@ mod tests {
 		let peer_a = PeerId::random();
 
 		let mut validator_context = MockValidatorContext::default();
-		validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+		validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 
 		assert!(validator_context.events.is_empty());
 		validator_context.clear();
@@ -1007,7 +1007,7 @@ mod tests {
 		let peer_a = PeerId::random();
 
 		let mut validator_context = MockValidatorContext::default();
-		validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+		validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 
 		assert!(validator_context.events.is_empty());
 		validator_context.clear();
@@ -1099,7 +1099,7 @@ mod tests {
 		let peer_a = PeerId::random();
 
 		let mut validator_context = MockValidatorContext::default();
-		validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+		validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 
 		assert!(validator_context.events.is_empty());
 		validator_context.clear();
diff --git a/network/src/protocol/mod.rs b/network/src/protocol/mod.rs
index f9c7fd8b62d9..ef95bd27d56b 100644
--- a/network/src/protocol/mod.rs
+++ b/network/src/protocol/mod.rs
@@ -41,7 +41,7 @@ use polkadot_validation::{
 	SharedTable, TableRouter, Network as ParachainNetwork, Validated, GenericStatement, Collators,
 	SignedStatement,
 };
-use sc_network::{config::Roles, Event, PeerId};
+use sc_network::{ObservedRole, Event, PeerId};
 use sp_api::ProvideRuntimeApi;
 use sp_runtime::ConsensusEngineId;
 
@@ -72,7 +72,7 @@ mod tests;
 
 // Messages from the service API or network adapter.
 enum ServiceToWorkerMsg {
 	// basic peer messages.
-	PeerConnected(PeerId, Roles),
+	PeerConnected(PeerId, ObservedRole),
 	PeerMessage(PeerId, Vec),
 	PeerDisconnected(PeerId),
@@ -255,11 +255,11 @@ pub fn start(
 			Event::NotificationStreamOpened {
 				remote,
 				engine_id,
-				roles,
+				role,
 			} => {
 				if engine_id != POLKADOT_ENGINE_ID { continue }
 
-				worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, roles)).await
+				worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, role)).await
 			},
 			Event::NotificationStreamClosed {
 				remote,
@@ -496,8 +496,8 @@ impl ProtocolHandler {
 		}
 	}
 
-	fn on_connect(&mut self, peer: PeerId, roles: Roles) {
-		let claimed_validator = roles.contains(Roles::AUTHORITY);
+	fn on_connect(&mut self, peer: PeerId, role: ObservedRole) {
+		let claimed_validator = matches!(role, ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority);
 
 		self.peers.insert(peer.clone(), PeerData {
 			claimed_validator,
diff --git a/network/src/protocol/tests.rs b/network/src/protocol/tests.rs
index 991169561668..9658defc73ea 100644
--- a/network/src/protocol/tests.rs
+++ b/network/src/protocol/tests.rs
@@ -189,7 +189,7 @@ sp_api::mock_impl_runtime_apis! {
 }
 
 impl super::Service {
-	async fn connect_peer(&mut self, peer: PeerId, roles: Roles) {
+	async fn connect_peer(&mut self, peer: PeerId, roles: ObservedRole) {
 		self.sender.send(ServiceToWorkerMsg::PeerConnected(peer, roles)).await.unwrap();
 	}
 
@@ -373,7 +373,7 @@ fn validator_peer_cleaned_up() {
 	pool.spawner().spawn_local(worker_task).unwrap();
 
 	pool.run_until(async move {
-		service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+		service.connect_peer(peer.clone(), ObservedRole::Authority).await;
 		service.peer_message(peer.clone(), Message::Status(Status {
 			version: VERSION,
 			collating_for: None,
@@ -433,7 +433,7 @@ fn validator_key_spillover_cleaned() {
 	pool.spawner().spawn_local(worker_task).unwrap();
 
 	pool.run_until(async move {
-		service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+		service.connect_peer(peer.clone(), ObservedRole::Authority).await;
 		service.peer_message(peer.clone(), Message::Status(Status {
 			version: VERSION,
 			collating_for: None,
diff --git a/network/test/src/lib.rs b/network/test/src/lib.rs
index 39bd80d9d8a3..358d59cf5297 100644
--- a/network/test/src/lib.rs
+++ b/network/test/src/lib.rs
@@ -22,7 +22,7 @@ mod block_import;
 use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}};
 
 use log::trace;
-use sc_network::config::{build_multiaddr, FinalityProofProvider};
+use sc_network::config::{build_multiaddr, FinalityProofProvider, Role};
 use sp_blockchain::{
 	Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo,
 };
@@ -35,7 +35,6 @@ use sc_client_api::{
 };
 use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
 use sc_client::LongestChain;
-use sc_network::config::Roles;
 use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
 use sp_consensus::import_queue::{
 	BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport,
@@ -524,22 +523,21 @@ pub trait TestNetFactory: Sized {
 	/// Create new test network with this many peers.
 	fn new(n: usize) -> Self {
 		trace!(target: "test_network", "Creating test network");
-		let config = Self::default_config();
-		let mut net = Self::from_config(&config);
+		let mut net = Self::from_config(&Default::default());
 		for i in 0..n {
 			trace!(target: "test_network", "Adding peer {}", i);
-			net.add_full_peer(&config);
+			net.add_full_peer();
 		}
 		net
 	}
 
-	fn add_full_peer(&mut self, config: &ProtocolConfig) {
-		self.add_full_peer_with_states(config, None)
+	fn add_full_peer(&mut self,) {
+		self.add_full_peer_with_states(None)
 	}
 
 	/// Add a full peer.
-	fn add_full_peer_with_states(&mut self, config: &ProtocolConfig, keep_blocks: Option) {
+	fn add_full_peer_with_states(&mut self, keep_blocks: Option) {
 		let test_client_builder = match keep_blocks {
 			Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks),
 			None => TestClientBuilder::with_default_backend(),
 		};
@@ -558,7 +556,7 @@ pub trait TestNetFactory: Sized {
 
 		let verifier = self.make_verifier(
 			PeersClient::Full(client.clone(), backend.clone()),
-			config,
+			&Default::default(),
 			&data,
 		);
 		let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
@@ -573,7 +571,7 @@ pub trait TestNetFactory: Sized {
 		let listen_addr = build_multiaddr![Memory(rand::random::<u64>())];
 
 		let network = NetworkWorker::new(sc_network::config::Params {
-			roles: config.roles,
+			role: Role::Full,
 			executor: None,
 			network_config: NetworkConfiguration {
 				listen_addresses: vec![listen_addr.clone()],
@@ -616,10 +614,7 @@ pub trait TestNetFactory: Sized {
 	}
 
 	/// Add a light peer.
-	fn add_light_peer(&mut self, config: &ProtocolConfig) {
-		let mut config = config.clone();
-		config.roles = Roles::LIGHT;
-
+	fn add_light_peer(&mut self) {
 		let (c, backend) = polkadot_test_runtime_client::new_light();
 		let client = Arc::new(c);
 		let (
@@ -632,7 +627,7 @@ pub trait TestNetFactory: Sized {
 
 		let verifier = self.make_verifier(
 			PeersClient::Light(client.clone(), backend.clone()),
-			&config,
+			&Default::default(),
 			&data,
 		);
 		let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
@@ -647,7 +642,7 @@ pub trait TestNetFactory: Sized {
 		let listen_addr = build_multiaddr![Memory(rand::random::<u64>())];
 
 		let network = NetworkWorker::new(sc_network::config::Params {
-			roles: config.roles,
+			role: Role::Full,
 			executor: None,
 			network_config: NetworkConfiguration {
 				listen_addresses: vec![listen_addr.clone()],
diff --git a/service/src/lib.rs b/service/src/lib.rs
index 277da0cb2722..247cdbe1535f 100644
--- a/service/src/lib.rs
+++ b/service/src/lib.rs
@@ -31,7 +31,7 @@ use inherents::InherentDataProviders;
 use sc_executor::native_executor_instance;
 use log::info;
 pub use service::{
-	AbstractService, Roles, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
+	AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
 	TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
 	Configuration, ChainSpec,
 };
@@ -314,7 +314,8 @@ pub fn new_full(
 	use futures::stream::StreamExt;
 
 	let is_collator = collating_for.is_some();
-	let is_authority = config.roles.is_authority() && !is_collator;
+	let role = config.role.clone();
+	let is_authority = role.is_authority() && !is_collator;
 	let force_authoring = config.force_authoring;
 	let max_block_data_size = max_block_data_size;
 	let db_path = if let DatabaseConfig::Path { ref path, .. } = config.expect_database() {
@@ -325,14 +326,8 @@ pub fn new_full(
 	let disable_grandpa = config.disable_grandpa;
 	let name = config.name.clone();
 	let authority_discovery_enabled = authority_discovery_enabled;
-	let sentry_nodes = config.network.sentry_nodes.clone();
 	let slot_duration = slot_duration;
 
-	// sentry nodes announce themselves as authorities to the network
-	// and should run the same protocols authorities do, but it should
-	// never actively participate in any consensus process.
-	let participates_in_consensus = is_authority && !config.sentry_mode;
-
 	let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config, Runtime, Dispatch);
 
 	let backend = builder.backend().clone();
@@ -388,7 +383,7 @@ pub fn new_full(
 		service.spawn_task_handle(),
 	).map_err(|e| format!("Could not spawn network worker: {:?}", e))?;
 
-	if participates_in_consensus {
+	if let Role::Authority { sentry_nodes } = &role {
 		let availability_store = {
 			use std::path::PathBuf;
 
@@ -470,7 +465,7 @@ pub fn new_full(
 			let authority_discovery = authority_discovery::AuthorityDiscovery::new(
 				service.client(),
 				network,
-				sentry_nodes,
+				sentry_nodes.clone(),
 				service.keystore(),
 				dht_event_stream,
 				service.prometheus_registry(),
@@ -481,7 +476,7 @@ pub fn new_full(
 
 	// if the node isn't actively participating in consensus then it doesn't
 	// need a keystore, regardless of which protocol we use below.
-	let keystore = if participates_in_consensus {
+	let keystore = if is_authority {
 		Some(service.keystore())
 	} else {
 		None
@@ -494,7 +489,7 @@ pub fn new_full(
 		name: Some(name),
 		observer_enabled: false,
 		keystore,
-		is_authority,
+		is_authority: role.is_network_authority(),
 	};
 
 	let enable_grandpa = !disable_grandpa;
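
A note on the final hunk (a sketch of the intended semantics, inferred from
this patch rather than quoted from Substrate): GRANDPA's network config now
takes `role.is_network_authority()` instead of the consensus-level
`is_authority`. A sentry node holds no session keys and never authors or
votes, but it must still speak the authority-only networking protocols on
behalf of the validator it fronts, so the two predicates plausibly differ
like this:

    impl Role {
        // True only for an actual validator; in the hunks above this
        // gates the keystore and the block-authoring path.
        pub fn is_authority(&self) -> bool {
            matches!(self, Role::Authority { .. })
        }

        // Also true for sentries (assumed variant), which run the
        // authority network protocols without participating in consensus.
        pub fn is_network_authority(&self) -> bool {
            matches!(self, Role::Authority { .. } | Role::Sentry { .. })
        }
    }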