From 8555f8468159f272b7a035b6ab82752150fc21f9 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 24 Oct 2022 10:50:21 +0200 Subject: [PATCH 01/36] Allow specification of multiple urls for relay chain rpc nodes --- client/cli/src/lib.rs | 9 +++++---- client/relay-chain-minimal-node/src/lib.rs | 2 +- client/relay-chain-rpc-interface/src/rpc_client.rs | 6 +++--- parachain-template/node/src/command.rs | 2 +- parachain-template/node/src/service.rs | 2 +- polkadot-parachain/src/command.rs | 2 +- polkadot-parachain/src/service.rs | 2 +- test/service/src/lib.rs | 10 +++++----- 8 files changed, 18 insertions(+), 17 deletions(-) diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index ae7943f99aa..eb8989ca9f3 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -293,9 +293,10 @@ pub struct RunCmd { /// EXPERIMENTAL: Specify an URL to a relay chain full node to communicate with. #[arg( long, - value_parser = validate_relay_chain_url + value_parser = validate_relay_chain_url, + num_args = 1.. )] - pub relay_chain_rpc_url: Option, + pub relay_chain_rpc_urls: Option>, } impl RunCmd { @@ -310,7 +311,7 @@ impl RunCmd { /// Create [`CollatorOptions`] representing options only relevant to parachain collator nodes pub fn collator_options(&self) -> CollatorOptions { - CollatorOptions { relay_chain_rpc_url: self.relay_chain_rpc_url.clone() } + CollatorOptions { relay_chain_rpc_urls: self.relay_chain_rpc_urls.clone() } } } @@ -318,7 +319,7 @@ impl RunCmd { #[derive(Clone, Debug)] pub struct CollatorOptions { /// Location of relay chain full node - pub relay_chain_rpc_url: Option, + pub relay_chain_rpc_urls: Option>, } /// A non-redundant version of the `RunCmd` that sets the `validator` field when the diff --git a/client/relay-chain-minimal-node/src/lib.rs b/client/relay-chain-minimal-node/src/lib.rs index 60b8a809a9c..17ab947a1eb 100644 --- a/client/relay-chain-minimal-node/src/lib.rs +++ b/client/relay-chain-minimal-node/src/lib.rs @@ -84,7 +84,7 @@ fn build_authority_discovery_service( pub async fn build_minimal_relay_chain_node( polkadot_config: Configuration, task_manager: &mut TaskManager, - relay_chain_url: Url, + relay_chain_url: Vec, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { let client = cumulus_relay_chain_rpc_interface::create_client_and_start_worker( relay_chain_url, diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 3422248735f..817b21e0ea7 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -100,11 +100,11 @@ struct RpcStreamWorker { /// Entry point to create [`RelayChainRpcClient`] and start a worker that distributes notifications. 
pub async fn create_client_and_start_worker( - url: Url, + urls: Vec, task_manager: &mut TaskManager, ) -> RelayChainResult { - tracing::info!(target: LOG_TARGET, url = %url.to_string(), "Initializing RPC Client"); - let ws_client = WsClientBuilder::default().build(url.as_str()).await?; + let first_url = urls.first().map(|s| s.to_owned()).unwrap(); + let ws_client = WsClientBuilder::default().build(first_url.as_str()).await?; let best_head_stream = RelayChainRpcClient::subscribe_new_best_heads(&ws_client).await?; let finalized_head_stream = RelayChainRpcClient::subscribe_finalized_heads(&ws_client).await?; diff --git a/parachain-template/node/src/command.rs b/parachain-template/node/src/command.rs index 14a7e046bd1..3f3c0e51276 100644 --- a/parachain-template/node/src/command.rs +++ b/parachain-template/node/src/command.rs @@ -288,7 +288,7 @@ pub fn run() -> Result<()> { info!("Parachain genesis state: {}", genesis_state); info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - if collator_options.relay_chain_rpc_url.is_some() && cli.relay_chain_args.len() > 0 { + if collator_options.relay_chain_rpc_urls.is_some() && cli.relay_chain_args.len() > 0 { warn!("Detected relay chain node arguments together with --relay-chain-rpc-url. This command starts a minimal Polkadot node that only uses a network-related subset of all relay chain CLI options."); } diff --git a/parachain-template/node/src/service.rs b/parachain-template/node/src/service.rs index eab33374529..759db03b6ef 100644 --- a/parachain-template/node/src/service.rs +++ b/parachain-template/node/src/service.rs @@ -143,7 +143,7 @@ async fn build_relay_chain_interface( collator_options: CollatorOptions, hwbench: Option, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { - match collator_options.relay_chain_rpc_url { + match collator_options.relay_chain_rpc_urls { Some(relay_chain_url) => build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, None => build_inprocess_relay_chain( diff --git a/polkadot-parachain/src/command.rs b/polkadot-parachain/src/command.rs index c436a98dc69..7e7547131ad 100644 --- a/polkadot-parachain/src/command.rs +++ b/polkadot-parachain/src/command.rs @@ -679,7 +679,7 @@ pub fn run() -> Result<()> { info!("Parachain genesis state: {}", genesis_state); info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - if collator_options.relay_chain_rpc_url.is_some() && cli.relaychain_args.len() > 0 { + if collator_options.relay_chain_rpc_urls.is_some() && cli.relaychain_args.len() > 0 { warn!("Detected relay chain node arguments together with --relay-chain-rpc-url. 
This command starts a minimal Polkadot node that only uses a network-related subset of all relay chain CLI options."); } diff --git a/polkadot-parachain/src/service.rs b/polkadot-parachain/src/service.rs index 4c87c20d772..e720f79ae3d 100644 --- a/polkadot-parachain/src/service.rs +++ b/polkadot-parachain/src/service.rs @@ -264,7 +264,7 @@ async fn build_relay_chain_interface( collator_options: CollatorOptions, hwbench: Option, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { - match collator_options.relay_chain_rpc_url { + match collator_options.relay_chain_rpc_urls { Some(relay_chain_url) => build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, None => build_inprocess_relay_chain( diff --git a/test/service/src/lib.rs b/test/service/src/lib.rs index 29f37806ccf..3903fa1f783 100644 --- a/test/service/src/lib.rs +++ b/test/service/src/lib.rs @@ -189,7 +189,7 @@ async fn build_relay_chain_interface( collator_options: CollatorOptions, task_manager: &mut TaskManager, ) -> RelayChainResult> { - if let Some(relay_chain_url) = collator_options.relay_chain_rpc_url { + if let Some(relay_chain_url) = collator_options.relay_chain_rpc_urls { return build_minimal_relay_chain_node(relay_chain_config, task_manager, relay_chain_url) .await .map(|r| r.0) @@ -427,7 +427,7 @@ pub struct TestNodeBuilder { storage_update_func_parachain: Option>, storage_update_func_relay_chain: Option>, consensus: Consensus, - relay_chain_full_node_url: Option, + relay_chain_full_node_url: Option>, } impl TestNodeBuilder { @@ -543,7 +543,7 @@ impl TestNodeBuilder { /// Connect to full node via RPC. pub fn use_external_relay_chain_node_at_url(mut self, network_address: Url) -> Self { - self.relay_chain_full_node_url = Some(network_address); + self.relay_chain_full_node_url = Some(vec![network_address]); self } @@ -552,7 +552,7 @@ impl TestNodeBuilder { let mut localhost_url = Url::parse("ws://localhost").expect("Should be able to parse localhost Url"); localhost_url.set_port(Some(port)).expect("Should be able to set port"); - self.relay_chain_full_node_url = Some(localhost_url); + self.relay_chain_full_node_url = Some(vec![localhost_url]); self } @@ -578,7 +578,7 @@ impl TestNodeBuilder { ); let collator_options = - CollatorOptions { relay_chain_rpc_url: self.relay_chain_full_node_url }; + CollatorOptions { relay_chain_rpc_urls: self.relay_chain_full_node_url }; relay_chain_config.network.node_name = format!("{} (relay chain)", relay_chain_config.network.node_name); From 5a1217952620d6a4b12f6e198806afdc1a5df638 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 24 Oct 2022 17:12:58 +0200 Subject: [PATCH 02/36] Add pooled RPC client basics --- client/relay-chain-rpc-interface/src/lib.rs | 1 + .../src/resilient_ws_client.rs | 75 +++++++++++++++++++ .../src/rpc_client.rs | 14 ++-- .../0006-rpc_collator_builds_blocks.toml | 4 +- 4 files changed, 85 insertions(+), 9 deletions(-) create mode 100644 client/relay-chain-rpc-interface/src/resilient_ws_client.rs diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index 1d35ec4e747..7f3b7f17154 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -34,6 +34,7 @@ use std::pin::Pin; pub use url::Url; +mod resilient_ws_client; mod rpc_client; pub use rpc_client::{create_client_and_start_worker, RelayChainRpcClient}; diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs 
b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs new file mode 100644 index 00000000000..bfe94016af6 --- /dev/null +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -0,0 +1,75 @@ +use jsonrpsee::{ + core::{ + client::{Client as JsonRpcClient, ClientT, Subscription, SubscriptionClientT}, + Error as JsonRpseeError, + }, + types::ParamsSer, +}; + +pub struct PooledClient { + ws_client: JsonRpcClient, +} + +impl PooledClient { + pub fn new(ws_client: JsonRpcClient) -> Self { + tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); + Self { ws_client } + } +} + +#[async_trait::async_trait] +impl ClientT for PooledClient { + async fn notification<'a>( + &self, + method: &'a str, + params: Option>, + ) -> Result<(), jsonrpsee::core::Error> { + self.ws_client.notification(method, params).await + } + + async fn request<'a, R>( + &self, + method: &'a str, + params: Option>, + ) -> Result + where + R: sp_runtime::DeserializeOwned, + { + self.ws_client.request(method, params).await + } + + async fn batch_request<'a, R>( + &self, + batch: Vec<(&'a str, Option>)>, + ) -> Result, jsonrpsee::core::Error> + where + R: sp_runtime::DeserializeOwned + Default + Clone, + { + self.ws_client.batch_request(batch).await + } +} + +#[async_trait::async_trait] +impl SubscriptionClientT for PooledClient { + async fn subscribe<'a, Notif>( + &self, + subscribe_method: &'a str, + params: Option>, + unsubscribe_method: &'a str, + ) -> Result, JsonRpseeError> + where + Notif: sp_runtime::DeserializeOwned, + { + self.ws_client.subscribe(subscribe_method, params, unsubscribe_method).await + } + + async fn subscribe_to_method<'a, Notif>( + &self, + method: &'a str, + ) -> Result, JsonRpseeError> + where + Notif: sp_runtime::DeserializeOwned, + { + self.ws_client.subscribe_to_method(method).await + } +} diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 817b21e0ea7..555586e5cc3 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use crate::resilient_ws_client::PooledClient; use backoff::{future::retry_notify, ExponentialBackoff}; use cumulus_primitives_core::{ relay_chain::{ @@ -54,7 +55,6 @@ use std::sync::Arc; use tokio::sync::mpsc::{ channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, }; - pub use url::Url; const LOG_TARGET: &str = "relay-chain-rpc-client"; @@ -65,7 +65,7 @@ const NOTIFICATION_CHANNEL_SIZE_LIMIT: usize = 20; #[derive(Clone)] pub struct RelayChainRpcClient { /// Websocket client to make calls - ws_client: Arc, + ws_client: Arc, /// Retry strategy that should be used for requests and subscriptions retry_strategy: ExponentialBackoff, @@ -104,7 +104,7 @@ pub async fn create_client_and_start_worker( task_manager: &mut TaskManager, ) -> RelayChainResult { let first_url = urls.first().map(|s| s.to_owned()).unwrap(); - let ws_client = WsClientBuilder::default().build(first_url.as_str()).await?; + let ws_client = PooledClient::new(WsClientBuilder::default().build(first_url.as_str()).await?); let best_head_stream = RelayChainRpcClient::subscribe_new_best_heads(&ws_client).await?; let finalized_head_stream = RelayChainRpcClient::subscribe_finalized_heads(&ws_client).await?; @@ -220,7 +220,7 @@ impl RpcStreamWorker { impl RelayChainRpcClient { /// Initialize new RPC Client. 
async fn new( - ws_client: JsonRpcClient, + ws_client: PooledClient, sender: TokioSender, ) -> RelayChainResult { let client = RelayChainRpcClient { @@ -677,7 +677,7 @@ impl RelayChainRpcClient { } async fn subscribe_imported_heads( - ws_client: &JsonRpcClient, + ws_client: &PooledClient, ) -> Result, RelayChainError> { Ok(ws_client .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") @@ -685,7 +685,7 @@ impl RelayChainRpcClient { } async fn subscribe_finalized_heads( - ws_client: &JsonRpcClient, + ws_client: &PooledClient, ) -> Result, RelayChainError> { Ok(ws_client .subscribe::( @@ -697,7 +697,7 @@ impl RelayChainRpcClient { } async fn subscribe_new_best_heads( - ws_client: &JsonRpcClient, + ws_client: &PooledClient, ) -> Result, RelayChainError> { Ok(ws_client .subscribe::( diff --git a/zombienet_tests/0006-rpc_collator_builds_blocks.toml b/zombienet_tests/0006-rpc_collator_builds_blocks.toml index 9414532682a..ade75983398 100644 --- a/zombienet_tests/0006-rpc_collator_builds_blocks.toml +++ b/zombienet_tests/0006-rpc_collator_builds_blocks.toml @@ -35,7 +35,7 @@ cumulus_based = true validator = true image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-url {{'one'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}}"] + args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-url {{'one'|zombie('wsUri')}}", "--relay-chain-rpc-url {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}}"] # run eve as parachain full node [[parachains.collators]] @@ -43,4 +43,4 @@ cumulus_based = true validator = true image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-url {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'two'|zombie('multiAddress')}}"] + args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-url {{'one'|zombie('wsUri')}}",, "--relay-chain-rpc-url {{'two'|zombie('wsUri')}}" "-- --bootnodes {{'two'|zombie('multiAddress')}}"] From 4cd0f65ed73315ff84fb5a32739784cf4e5fd47b Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 25 Oct 2022 11:00:12 +0200 Subject: [PATCH 03/36] Add list of clients to pooled client --- .../src/resilient_ws_client.rs | 45 +++++++++++++++---- .../src/rpc_client.rs | 2 +- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index bfe94016af6..d692badae94 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -1,19 +1,46 @@ +use cumulus_relay_chain_interface::RelayChainResult; use jsonrpsee::{ core::{ client::{Client as JsonRpcClient, ClientT, Subscription, SubscriptionClientT}, Error as JsonRpseeError, }, types::ParamsSer, + ws_client::WsClientBuilder, }; +use url::Url; pub struct PooledClient { - ws_client: JsonRpcClient, + ws_clients: Vec<(Url, JsonRpcClient)>, + active_index: usize, } impl PooledClient { - pub fn new(ws_client: JsonRpcClient) -> Self { + pub async fn new(url: Url) -> RelayChainResult { tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); - Self { ws_client } + let ws_client = WsClientBuilder::default().build(url.as_str()).await?; + Ok(Self { ws_clients: vec![(url, ws_client)], active_index: 0 }) + } + + pub async fn new_from_urls(urls: Vec) -> 
RelayChainResult { + tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); + let clients: Vec<(Url, Result)> = + futures::future::join_all(urls.into_iter().map(|url| async move { + (url.clone(), WsClientBuilder::default().build(url.as_str()).await) + })) + .await; + let clients = clients.into_iter().filter_map(|element| { + match element.1 { + Ok(client) => Some((element.0, client)), + _ => { + tracing::warn!(target: "pooled-client", url = ?element.0, "Unable to connect to provided relay chain."); + None} + } + }).collect(); + Ok(Self { ws_clients: clients, active_index: 0 }) + } + + fn active_client(&self) -> &JsonRpcClient { + &self.ws_clients.get(self.active_index).unwrap().1 } } @@ -24,7 +51,7 @@ impl ClientT for PooledClient { method: &'a str, params: Option>, ) -> Result<(), jsonrpsee::core::Error> { - self.ws_client.notification(method, params).await + self.active_client().notification(method, params).await } async fn request<'a, R>( @@ -35,7 +62,7 @@ impl ClientT for PooledClient { where R: sp_runtime::DeserializeOwned, { - self.ws_client.request(method, params).await + self.active_client().request(method, params).await } async fn batch_request<'a, R>( @@ -45,7 +72,7 @@ impl ClientT for PooledClient { where R: sp_runtime::DeserializeOwned + Default + Clone, { - self.ws_client.batch_request(batch).await + self.active_client().batch_request(batch).await } } @@ -60,7 +87,9 @@ impl SubscriptionClientT for PooledClient { where Notif: sp_runtime::DeserializeOwned, { - self.ws_client.subscribe(subscribe_method, params, unsubscribe_method).await + self.active_client() + .subscribe(subscribe_method, params, unsubscribe_method) + .await } async fn subscribe_to_method<'a, Notif>( @@ -70,6 +99,6 @@ impl SubscriptionClientT for PooledClient { where Notif: sp_runtime::DeserializeOwned, { - self.ws_client.subscribe_to_method(method).await + self.active_client().subscribe_to_method(method).await } } diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 555586e5cc3..5e7301b2063 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -104,7 +104,7 @@ pub async fn create_client_and_start_worker( task_manager: &mut TaskManager, ) -> RelayChainResult { let first_url = urls.first().map(|s| s.to_owned()).unwrap(); - let ws_client = PooledClient::new(WsClientBuilder::default().build(first_url.as_str()).await?); + let ws_client = PooledClient::new(first_url).await?; let best_head_stream = RelayChainRpcClient::subscribe_new_best_heads(&ws_client).await?; let finalized_head_stream = RelayChainRpcClient::subscribe_finalized_heads(&ws_client).await?; From a1f54e3ee5db3186936c5ff5427e138f866d1614 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 7 Nov 2022 11:06:32 +0100 Subject: [PATCH 04/36] Improve --- .../src/blockchain_rpc_client.rs | 9 +- client/relay-chain-rpc-interface/src/lib.rs | 10 +- .../src/resilient_ws_client.rs | 262 ++++++++++++++---- .../src/rpc_client.rs | 225 +-------------- polkadot-parachain/src/service.rs | 6 +- 5 files changed, 234 insertions(+), 278 deletions(-) diff --git a/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index bf1a3c9a38c..b00affb9197 100644 --- a/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -349,11 +349,12 @@ impl BlockChainRpcClient { for 
address in addresses { match MultiaddrWithPeerId::from_str(&address) { Ok(addr) => result_vec.push(addr), - Err(err) => + Err(err) => { return Err(RelayChainError::GenericError(format!( "Failed to parse a local listen addresses from the RPC node: {}", err - ))), + ))) + }, } } @@ -363,13 +364,13 @@ impl BlockChainRpcClient { pub async fn import_notification_stream( &self, ) -> RelayChainResult + Send>>> { - Ok(self.rpc_client.get_imported_heads_stream().await?.boxed()) + Ok(self.rpc_client.get_imported_heads_stream()?.boxed()) } pub async fn finality_notification_stream( &self, ) -> RelayChainResult + Send>>> { - Ok(self.rpc_client.get_finalized_heads_stream().await?.boxed()) + Ok(self.rpc_client.get_finalized_heads_stream()?.boxed()) } } diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index 7f3b7f17154..272a6a8a722 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -106,7 +106,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn import_notification_stream( &self, ) -> RelayChainResult + Send>>> { - let imported_headers_stream = self.rpc_client.get_imported_heads_stream().await?; + let imported_headers_stream = self.rpc_client.get_imported_heads_stream()?; Ok(imported_headers_stream.boxed()) } @@ -114,7 +114,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn finality_notification_stream( &self, ) -> RelayChainResult + Send>>> { - let imported_headers_stream = self.rpc_client.get_finalized_heads_stream().await?; + let imported_headers_stream = self.rpc_client.get_finalized_heads_stream()?; Ok(imported_headers_stream.boxed()) } @@ -169,10 +169,10 @@ impl RelayChainInterface for RelayChainRpcInterface { /// 3. Wait for the block to be imported via subscription. /// 4. If timeout is reached, we return an error. 
async fn wait_for_block(&self, wait_for_hash: PHash) -> RelayChainResult<()> { - let mut head_stream = self.rpc_client.get_imported_heads_stream().await?; + let mut head_stream = self.rpc_client.get_imported_heads_stream()?; if self.rpc_client.chain_get_header(Some(wait_for_hash)).await?.is_some() { - return Ok(()) + return Ok(()); } let mut timeout = futures_timer::Delay::new(Duration::from_secs(TIMEOUT_IN_SECONDS)).fuse(); @@ -193,7 +193,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn new_best_notification_stream( &self, ) -> RelayChainResult + Send>>> { - let imported_headers_stream = self.rpc_client.get_best_heads_stream().await?; + let imported_headers_stream = self.rpc_client.get_best_heads_stream()?; Ok(imported_headers_stream.boxed()) } } diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index d692badae94..e47f44a8fef 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -1,46 +1,42 @@ -use cumulus_relay_chain_interface::RelayChainResult; +use cumulus_primitives_core::relay_chain::Header as PHeader; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; +use futures::{ + channel::mpsc::{Receiver, Sender}, + channel::oneshot::{Receiver as OneshotReceiver, Sender as OneshotSender}, + StreamExt, +}; use jsonrpsee::{ core::{ - client::{Client as JsonRpcClient, ClientT, Subscription, SubscriptionClientT}, - Error as JsonRpseeError, + client::{Client as JsonRpcClient, ClientT, SubscriptionClientT}, + Error as JsonRpseeError, JsonValue, }, - types::ParamsSer, ws_client::WsClientBuilder, }; +use polkadot_service::TaskManager; +use tokio::sync::mpsc::{ + channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, +}; use url::Url; +const NOTIFICATION_CHANNEL_SIZE_LIMIT: usize = 20; +const LOG_TARGET: &str = "pooled-rpc-client"; + pub struct PooledClient { - ws_clients: Vec<(Url, JsonRpcClient)>, - active_index: usize, + /// Channel to communicate with the RPC worker + to_worker_channel: TokioSender, } impl PooledClient { - pub async fn new(url: Url) -> RelayChainResult { + pub async fn new(urls: Vec, task_manager: &mut TaskManager) -> RelayChainResult { tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); - let ws_client = WsClientBuilder::default().build(url.as_str()).await?; - Ok(Self { ws_clients: vec![(url, ws_client)], active_index: 0 }) - } - pub async fn new_from_urls(urls: Vec) -> RelayChainResult { - tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); - let clients: Vec<(Url, Result)> = - futures::future::join_all(urls.into_iter().map(|url| async move { - (url.clone(), WsClientBuilder::default().build(url.as_str()).await) - })) - .await; - let clients = clients.into_iter().filter_map(|element| { - match element.1 { - Ok(client) => Some((element.0, client)), - _ => { - tracing::warn!(target: "pooled-client", url = ?element.0, "Unable to connect to provided relay chain."); - None} - } - }).collect(); - Ok(Self { ws_clients: clients, active_index: 0 }) - } + let (worker, sender) = RpcStreamWorker::new(urls).await; - fn active_client(&self) -> &JsonRpcClient { - &self.ws_clients.get(self.active_index).unwrap().1 + task_manager + .spawn_essential_handle() + .spawn("relay-chain-rpc-worker", None, worker.run()); + + Ok(Self { to_worker_channel: sender }) } } @@ -51,7 +47,7 @@ impl ClientT for PooledClient { 
method: &'a str, params: Option>, ) -> Result<(), jsonrpsee::core::Error> { - self.active_client().notification(method, params).await + todo!() } async fn request<'a, R>( @@ -62,7 +58,10 @@ impl ClientT for PooledClient { where R: sp_runtime::DeserializeOwned, { - self.active_client().request(method, params).await + let (tx, rx) = futures::channel::oneshot::channel(); + let message = RpcDispatcherMessage::Request("bla".to_string(), None, tx); + self.to_worker_channel.send(message); + rx.await } async fn batch_request<'a, R>( @@ -72,33 +71,190 @@ impl ClientT for PooledClient { where R: sp_runtime::DeserializeOwned + Default + Clone, { - self.active_client().batch_request(batch).await + todo!() } } -#[async_trait::async_trait] -impl SubscriptionClientT for PooledClient { - async fn subscribe<'a, Notif>( - &self, - subscribe_method: &'a str, - params: Option>, - unsubscribe_method: &'a str, - ) -> Result, JsonRpseeError> - where - Notif: sp_runtime::DeserializeOwned, - { - self.active_client() - .subscribe(subscribe_method, params, unsubscribe_method) - .await +impl PooledClient { + /// Get a stream of new best relay chain headers + pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { + let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + self.send_register_message_to_worker(RpcDispatcherMessage::RegisterBestHeadListener(tx))?; + Ok(rx) + } + + /// Get a stream of finalized relay chain headers + pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { + let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + self.send_register_message_to_worker(RpcDispatcherMessage::RegisterFinalizationListener( + tx, + ))?; + Ok(rx) + } + + /// Get a stream of all imported relay chain headers + pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { + let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + self.send_register_message_to_worker(RpcDispatcherMessage::RegisterImportListener(tx))?; + Ok(rx) } - async fn subscribe_to_method<'a, Notif>( + fn send_register_message_to_worker( &self, - method: &'a str, - ) -> Result, JsonRpseeError> - where - Notif: sp_runtime::DeserializeOwned, - { - self.active_client().subscribe_to_method(method).await + message: RpcDispatcherMessage, + ) -> Result<(), RelayChainError> { + self.to_worker_channel + .try_send(message) + .map_err(|e| RelayChainError::WorkerCommunicationError(e.to_string())) + } +} + +/// Worker messages to register new notification listeners +#[derive(Clone, Debug)] +pub enum RpcDispatcherMessage { + RegisterBestHeadListener(Sender), + RegisterImportListener(Sender), + RegisterFinalizationListener(Sender), + Request(String, Option>, OneshotSender), +} + +/// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. +struct RpcStreamWorker { + ws_clients: Vec<(Url, JsonRpcClient)>, + // Communication channel with the RPC client + client_receiver: TokioReceiver, + + // Senders to distribute incoming header notifications to + imported_header_listeners: Vec>, + finalized_header_listeners: Vec>, + best_header_listeners: Vec>, +} + +fn handle_event_distribution( + event: Option>, + senders: &mut Vec>, +) -> Result<(), String> { + match event { + Some(Ok(header)) => { + senders.retain_mut(|e| { + match e.try_send(header.clone()) { + // Receiver has been dropped, remove Sender from list. 
+ Err(error) if error.is_disconnected() => false, + // Channel is full. This should not happen. + // TODO: Improve error handling here + // https://github.com/paritytech/cumulus/issues/1482 + Err(error) => { + tracing::error!(target: LOG_TARGET, ?error, "Event distribution channel has reached its limit. This can lead to missed notifications."); + true + }, + _ => true, + } + }); + Ok(()) + }, + None => Err("RPC Subscription closed.".to_string()), + Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), + } +} + +impl RpcStreamWorker { + /// Create new worker. Returns the worker and a channel to register new listeners. + async fn new(urls: Vec) -> (RpcStreamWorker, TokioSender) { + let clients: Vec<(Url, Result)> = + futures::future::join_all(urls.into_iter().map(|url| async move { + (url.clone(), WsClientBuilder::default().build(url.as_str()).await) + })) + .await; + let clients = clients.into_iter().filter_map(|element| { + match element.1 { + Ok(client) => Some((element.0, client)), + _ => { + tracing::warn!(target: "pooled-client", url = ?element.0, "Unable to connect to provided relay chain."); + None} + } + }).collect(); + + let (tx, rx) = tokio_channel(100); + let worker = RpcStreamWorker { + ws_clients: clients, + client_receiver: rx, + imported_header_listeners: Vec::new(), + finalized_header_listeners: Vec::new(), + best_header_listeners: Vec::new(), + }; + (worker, tx) + } + + /// Run this worker to drive notification streams. + /// The worker does two things: + /// 1. Listen for `NotificationRegisterMessage` and register new listeners for the notification streams + /// 2. Distribute incoming import, best head and finalization notifications to registered listeners. + /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. + pub async fn run(mut self) { + let ws_client = &self.ws_clients.first().unwrap().1; + + let mut import_sub = ws_client + .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") + .await + .expect("should not fail") + .fuse(); + + let mut best_head_sub = ws_client + .subscribe::( + "chain_subscribeNewHeads", + None, + "chain_unsubscribeFinalizedHeads", + ) + .await + .expect("should not fail"); + let mut finalized_sub = ws_client + .subscribe::( + "chain_subscribeFinalizedHeads", + None, + "chain_unsubscribeFinalizedHeads", + ) + .await + .expect("should not fail") + .fuse(); + loop { + tokio::select! { + evt = self.client_receiver.recv() => match evt { + Some(RpcDispatcherMessage::RegisterBestHeadListener(tx)) => { + self.best_header_listeners.push(tx); + }, + Some(RpcDispatcherMessage::RegisterImportListener(tx)) => { + self.imported_header_listeners.push(tx) + }, + Some(RpcDispatcherMessage::RegisterFinalizationListener(tx)) => { + self.finalized_header_listeners.push(tx) + }, + Some(RpcDispatcherMessage::Request(_, _, _)) => { + todo!(); + }, + None => { + tracing::error!(target: LOG_TARGET, "RPC client receiver closed. Stopping RPC Worker."); + return; + } + }, + import_event = import_sub.next() => { + if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { + tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification. 
Stopping RPC Worker."); + return; + } + }, + best_header_event = best_head_sub.next() => { + if let Err(err) = handle_event_distribution(best_header_event, &mut self.best_header_listeners) { + tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification. Stopping RPC Worker."); + return; + } + } + finalized_event = finalized_sub.next() => { + if let Err(err) = handle_event_distribution(finalized_event, &mut self.finalized_header_listeners) { + tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification. Stopping RPC Worker."); + return; + } + } + } + } } } diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 5e7301b2063..a49c4ba8487 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -29,18 +29,11 @@ use cumulus_primitives_core::{ InboundDownwardMessage, ParaId, PersistedValidationData, }; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; -use futures::{ - channel::mpsc::{Receiver, Sender}, - StreamExt, -}; +use futures::channel::mpsc::Receiver; use jsonrpsee::{ - core::{ - client::{Client as JsonRpcClient, ClientT, Subscription, SubscriptionClientT}, - Error as JsonRpseeError, - }, + core::{client::ClientT, Error as JsonRpseeError}, rpc_params, types::ParamsSer, - ws_client::WsClientBuilder, }; use parity_scale_codec::{Decode, Encode}; use polkadot_service::{BlockNumber, TaskManager}; @@ -52,15 +45,10 @@ use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_runtime::DeserializeOwned; use sp_storage::StorageKey; use std::sync::Arc; -use tokio::sync::mpsc::{ - channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, -}; pub use url::Url; const LOG_TARGET: &str = "relay-chain-rpc-client"; -const NOTIFICATION_CHANNEL_SIZE_LIMIT: usize = 20; - /// Client that maps RPC methods and deserializes results #[derive(Clone)] pub struct RelayChainRpcClient { @@ -69,33 +57,6 @@ pub struct RelayChainRpcClient { /// Retry strategy that should be used for requests and subscriptions retry_strategy: ExponentialBackoff, - - /// Channel to communicate with the RPC worker - to_worker_channel: TokioSender, -} - -/// Worker messages to register new notification listeners -#[derive(Clone, Debug)] -pub enum NotificationRegisterMessage { - RegisterBestHeadListener(Sender), - RegisterImportListener(Sender), - RegisterFinalizationListener(Sender), -} - -/// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. -struct RpcStreamWorker { - // Communication channel with the RPC client - client_receiver: TokioReceiver, - - // Senders to distribute incoming header notifications to - imported_header_listeners: Vec>, - finalized_header_listeners: Vec>, - best_header_listeners: Vec>, - - // Incoming notification subscriptions - rpc_imported_header_subscription: Subscription, - rpc_finalized_header_subscription: Subscription, - rpc_best_header_subscription: Subscription, } /// Entry point to create [`RelayChainRpcClient`] and start a worker that distributes notifications. 
@@ -103,128 +64,17 @@ pub async fn create_client_and_start_worker( urls: Vec, task_manager: &mut TaskManager, ) -> RelayChainResult { - let first_url = urls.first().map(|s| s.to_owned()).unwrap(); - let ws_client = PooledClient::new(first_url).await?; - - let best_head_stream = RelayChainRpcClient::subscribe_new_best_heads(&ws_client).await?; - let finalized_head_stream = RelayChainRpcClient::subscribe_finalized_heads(&ws_client).await?; - let imported_head_stream = RelayChainRpcClient::subscribe_imported_heads(&ws_client).await?; - - let (worker, sender) = - RpcStreamWorker::new(imported_head_stream, best_head_stream, finalized_head_stream); - let client = RelayChainRpcClient::new(ws_client, sender).await?; + let ws_client = PooledClient::new(urls, task_manager).await?; - task_manager - .spawn_essential_handle() - .spawn("relay-chain-rpc-worker", None, worker.run()); + let client = RelayChainRpcClient::new(ws_client).await?; Ok(client) } -fn handle_event_distribution( - event: Option>, - senders: &mut Vec>, -) -> Result<(), String> { - match event { - Some(Ok(header)) => { - senders.retain_mut(|e| { - match e.try_send(header.clone()) { - // Receiver has been dropped, remove Sender from list. - Err(error) if error.is_disconnected() => false, - // Channel is full. This should not happen. - // TODO: Improve error handling here - // https://github.com/paritytech/cumulus/issues/1482 - Err(error) => { - tracing::error!(target: LOG_TARGET, ?error, "Event distribution channel has reached its limit. This can lead to missed notifications."); - true - }, - _ => true, - } - }); - Ok(()) - }, - None => Err("RPC Subscription closed.".to_string()), - Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), - } -} - -impl RpcStreamWorker { - /// Create new worker. Returns the worker and a channel to register new listeners. - fn new( - import_sub: Subscription, - best_sub: Subscription, - finalized_sub: Subscription, - ) -> (RpcStreamWorker, TokioSender) { - let (tx, rx) = tokio_channel(100); - let worker = RpcStreamWorker { - client_receiver: rx, - imported_header_listeners: Vec::new(), - finalized_header_listeners: Vec::new(), - best_header_listeners: Vec::new(), - rpc_imported_header_subscription: import_sub, - rpc_best_header_subscription: best_sub, - rpc_finalized_header_subscription: finalized_sub, - }; - (worker, tx) - } - - /// Run this worker to drive notification streams. - /// The worker does two things: - /// 1. Listen for `NotificationRegisterMessage` and register new listeners for the notification streams - /// 2. Distribute incoming import, best head and finalization notifications to registered listeners. - /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. - pub async fn run(mut self) { - let mut import_sub = self.rpc_imported_header_subscription.fuse(); - let mut best_head_sub = self.rpc_best_header_subscription.fuse(); - let mut finalized_sub = self.rpc_finalized_header_subscription.fuse(); - loop { - tokio::select! { - evt = self.client_receiver.recv() => match evt { - Some(NotificationRegisterMessage::RegisterBestHeadListener(tx)) => { - self.best_header_listeners.push(tx); - }, - Some(NotificationRegisterMessage::RegisterImportListener(tx)) => { - self.imported_header_listeners.push(tx) - }, - Some(NotificationRegisterMessage::RegisterFinalizationListener(tx)) => { - self.finalized_header_listeners.push(tx) - }, - None => { - tracing::error!(target: LOG_TARGET, "RPC client receiver closed. 
Stopping RPC Worker."); - return; - } - }, - import_event = import_sub.next() => { - if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification. Stopping RPC Worker."); - return; - } - }, - best_header_event = best_head_sub.next() => { - if let Err(err) = handle_event_distribution(best_header_event, &mut self.best_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification. Stopping RPC Worker."); - return; - } - } - finalized_event = finalized_sub.next() => { - if let Err(err) = handle_event_distribution(finalized_event, &mut self.finalized_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification. Stopping RPC Worker."); - return; - } - } - } - } - } -} - impl RelayChainRpcClient { /// Initialize new RPC Client. - async fn new( - ws_client: PooledClient, - sender: TokioSender, - ) -> RelayChainResult { + async fn new(ws_client: PooledClient) -> RelayChainResult { let client = RelayChainRpcClient { - to_worker_channel: sender, ws_client: Arc::new(ws_client), retry_strategy: ExponentialBackoff::default(), }; @@ -641,70 +491,17 @@ impl RelayChainRpcClient { } /// Get a stream of all imported relay chain headers - pub async fn get_imported_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); - self.send_register_message_to_worker(NotificationRegisterMessage::RegisterImportListener( - tx, - ))?; - Ok(rx) + pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { + self.ws_client.get_imported_heads_stream() } /// Get a stream of new best relay chain headers - pub async fn get_best_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); - self.send_register_message_to_worker( - NotificationRegisterMessage::RegisterBestHeadListener(tx), - )?; - Ok(rx) + pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { + self.ws_client.get_best_heads_stream() } /// Get a stream of finalized relay chain headers - pub async fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); - self.send_register_message_to_worker( - NotificationRegisterMessage::RegisterFinalizationListener(tx), - )?; - Ok(rx) - } - - fn send_register_message_to_worker( - &self, - message: NotificationRegisterMessage, - ) -> Result<(), RelayChainError> { - self.to_worker_channel - .try_send(message) - .map_err(|e| RelayChainError::WorkerCommunicationError(e.to_string())) - } - - async fn subscribe_imported_heads( - ws_client: &PooledClient, - ) -> Result, RelayChainError> { - Ok(ws_client - .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") - .await?) - } - - async fn subscribe_finalized_heads( - ws_client: &PooledClient, - ) -> Result, RelayChainError> { - Ok(ws_client - .subscribe::( - "chain_subscribeFinalizedHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await?) - } - - async fn subscribe_new_best_heads( - ws_client: &PooledClient, - ) -> Result, RelayChainError> { - Ok(ws_client - .subscribe::( - "chain_subscribeNewHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await?) 
+ pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { + self.ws_client.get_finalized_heads_stream() } } diff --git a/polkadot-parachain/src/service.rs b/polkadot-parachain/src/service.rs index e720f79ae3d..d6246cc566d 100644 --- a/polkadot-parachain/src/service.rs +++ b/polkadot-parachain/src/service.rs @@ -265,8 +265,9 @@ async fn build_relay_chain_interface( hwbench: Option, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { match collator_options.relay_chain_rpc_urls { - Some(relay_chain_url) => - build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, + Some(relay_chain_url) => { + build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await + }, None => build_inprocess_relay_chain( polkadot_config, parachain_config, @@ -379,6 +380,7 @@ where sc_service::spawn_tasks(sc_service::SpawnTasksParams { rpc_builder, client: client.clone(), + transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, config: parachain_config, From 77babd239c7c2875648d71aadbc2cb93cbc9ad08 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 9 Nov 2022 13:17:45 +0100 Subject: [PATCH 05/36] Forward requests to dispatcher --- Cargo.lock | 5 +- client/relay-chain-rpc-interface/Cargo.toml | 3 +- .../src/resilient_ws_client.rs | 56 ++++++++----------- .../src/rpc_client.rs | 24 +++++--- .../0006-rpc_collator_builds_blocks.toml | 4 +- 5 files changed, 47 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c4876da8d8..85438fe68b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2058,6 +2058,7 @@ dependencies = [ "polkadot-service", "sc-client-api", "sc-rpc-api", + "serde_json", "sp-api", "sp-authority-discovery", "sp-consensus-babe", @@ -10531,9 +10532,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.85" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa 1.0.1", "ryu", diff --git a/client/relay-chain-rpc-interface/Cargo.toml b/client/relay-chain-rpc-interface/Cargo.toml index dcd070e06ac..456473e7352 100644 --- a/client/relay-chain-rpc-interface/Cargo.toml +++ b/client/relay-chain-rpc-interface/Cargo.toml @@ -20,7 +20,7 @@ sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = " sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -tokio = { version = "1.21.2", features = ["sync"] } +tokio = { version = "1.21.2", features = ["sync", "time"] } futures = "0.3.24" futures-timer = "3.0.2" @@ -30,3 +30,4 @@ tracing = "0.1.37" async-trait = "0.1.58" url = "2.3.1" backoff = { version = "0.4.0", features = ["tokio"] } +serde_json = "1.0.87" diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index e47f44a8fef..40cf2dcb388 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -2,7 +2,8 @@ use cumulus_primitives_core::relay_chain::Header as PHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::{ channel::mpsc::{Receiver, Sender}, 
- channel::oneshot::{Receiver as OneshotReceiver, Sender as OneshotSender}, + channel::oneshot::Sender as OneshotSender, + stream::FuturesUnordered, StreamExt, }; use jsonrpsee::{ @@ -40,42 +41,23 @@ impl PooledClient { } } -#[async_trait::async_trait] -impl ClientT for PooledClient { - async fn notification<'a>( - &self, - method: &'a str, - params: Option>, - ) -> Result<(), jsonrpsee::core::Error> { - todo!() - } +impl PooledClient {} - async fn request<'a, R>( +impl PooledClient { + pub async fn request( &self, - method: &'a str, - params: Option>, + method: &str, + params: Option>, ) -> Result where R: sp_runtime::DeserializeOwned, { let (tx, rx) = futures::channel::oneshot::channel(); - let message = RpcDispatcherMessage::Request("bla".to_string(), None, tx); - self.to_worker_channel.send(message); - rx.await - } - - async fn batch_request<'a, R>( - &self, - batch: Vec<(&'a str, Option>)>, - ) -> Result, jsonrpsee::core::Error> - where - R: sp_runtime::DeserializeOwned + Default + Clone, - { - todo!() + let message = RpcDispatcherMessage::Request(method.to_string(), params, tx); + self.to_worker_channel.send(message).await.expect("worker sending fucked up"); + let value = rx.await.expect("fucked up during deserialization")?; + serde_json::from_value(value).map_err(JsonRpseeError::ParseError) } -} - -impl PooledClient { /// Get a stream of new best relay chain headers pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); @@ -110,12 +92,12 @@ impl PooledClient { } /// Worker messages to register new notification listeners -#[derive(Clone, Debug)] +#[derive(Debug)] pub enum RpcDispatcherMessage { RegisterBestHeadListener(Sender), RegisterImportListener(Sender), RegisterFinalizationListener(Sender), - Request(String, Option>, OneshotSender), + Request(String, Option>, OneshotSender>), } /// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. @@ -192,6 +174,7 @@ impl RpcStreamWorker { /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. pub async fn run(mut self) { let ws_client = &self.ws_clients.first().unwrap().1; + let mut pending_requests = FuturesUnordered::new(); let mut import_sub = ws_client .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") @@ -216,6 +199,7 @@ impl RpcStreamWorker { .await .expect("should not fail") .fuse(); + loop { tokio::select! { evt = self.client_receiver.recv() => match evt { @@ -228,14 +212,20 @@ impl RpcStreamWorker { Some(RpcDispatcherMessage::RegisterFinalizationListener(tx)) => { self.finalized_header_listeners.push(tx) }, - Some(RpcDispatcherMessage::Request(_, _, _)) => { - todo!(); + Some(RpcDispatcherMessage::Request(method, params, response_sender)) => { + pending_requests.push(async move { + let resp = ws_client.request(&method, params.map(|p| p.into())).await; + if let Err(err) = response_sender.send(resp) { + tracing::error!(target: LOG_TARGET, ?err, "Recipient no longer interested in request result"); + } + }); }, None => { tracing::error!(target: LOG_TARGET, "RPC client receiver closed. 
Stopping RPC Worker."); return; } }, + _ = pending_requests.next() => {}, import_event = import_sub.next() => { if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification. Stopping RPC Worker."); diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index a49c4ba8487..6e865f81a3b 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -30,11 +30,7 @@ use cumulus_primitives_core::{ }; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::channel::mpsc::Receiver; -use jsonrpsee::{ - core::{client::ClientT, Error as JsonRpseeError}, - rpc_params, - types::ParamsSer, -}; +use jsonrpsee::core::{Error as JsonRpseeError, JsonValue}; use parity_scale_codec::{Decode, Encode}; use polkadot_service::{BlockNumber, TaskManager}; use sc_client_api::StorageData; @@ -59,6 +55,20 @@ pub struct RelayChainRpcClient { retry_strategy: ExponentialBackoff, } +macro_rules! rpc_params { + ($($param:expr),*) => { + { + let mut __params = vec![]; + $( + __params.push(serde_json::to_value($param).expect("json serialization is infallible; qed.")); + )* + Some(__params) + } + }; + () => { + None + } +} /// Entry point to create [`RelayChainRpcClient`] and start a worker that distributes notifications. pub async fn create_client_and_start_worker( urls: Vec, @@ -114,7 +124,7 @@ impl RelayChainRpcClient { async fn request<'a, R>( &self, method: &'a str, - params: Option>, + params: Option>, ) -> Result where R: DeserializeOwned + std::fmt::Debug, @@ -131,7 +141,7 @@ impl RelayChainRpcClient { async fn request_tracing<'a, R, OR>( &self, method: &'a str, - params: Option>, + params: Option>, trace_error: OR, ) -> Result where diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.toml b/zombienet/tests/0006-rpc_collator_builds_blocks.toml index ade75983398..bfc3de8d988 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.toml +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.toml @@ -35,7 +35,7 @@ cumulus_based = true validator = true image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-url {{'one'|zombie('wsUri')}}", "--relay-chain-rpc-url {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}}"] + args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-urls {{'one'|zombie('wsUri')}} {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}}"] # run eve as parachain full node [[parachains.collators]] @@ -43,4 +43,4 @@ cumulus_based = true validator = true image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-url {{'one'|zombie('wsUri')}}",, "--relay-chain-rpc-url {{'two'|zombie('wsUri')}}" "-- --bootnodes {{'two'|zombie('multiAddress')}}"] + args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-urls {{'one'|zombie('wsUri')}} {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'two'|zombie('multiAddress')}}"] From 49efacccc9b1c16fce1c5c04c60e2248266b6f2f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 10 Nov 2022 16:44:40 +0100 Subject: [PATCH 06/36] Switch clients on error --- .../src/resilient_ws_client.rs | 158 ++++++++++++++---- 1 file changed, 124 insertions(+), 34 
deletions(-) diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index 40cf2dcb388..cf6044a1a2d 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use cumulus_primitives_core::relay_chain::Header as PHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::{ @@ -14,8 +16,9 @@ use jsonrpsee::{ ws_client::WsClientBuilder, }; use polkadot_service::TaskManager; -use tokio::sync::mpsc::{ - channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, +use tokio::sync::{ + mpsc::{channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender}, + RwLock, }; use url::Url; @@ -102,9 +105,10 @@ pub enum RpcDispatcherMessage { /// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. struct RpcStreamWorker { - ws_clients: Vec<(Url, JsonRpcClient)>, + ws_urls: Vec, // Communication channel with the RPC client client_receiver: TokioReceiver, + self_sender: TokioSender, // Senders to distribute incoming header notifications to imported_header_listeners: Vec>, @@ -134,32 +138,63 @@ fn handle_event_distribution( }); Ok(()) }, - None => Err("RPC Subscription closed.".to_string()), + None => { + // TODO skunert We should replace the stream at this point + tracing::error!(target: LOG_TARGET, "The subscription was closed"); + return Err("Subscription closed".to_string()); + }, Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), } } +#[derive(Clone, Debug)] +struct ClientManager { + urls: Vec, + ws_client: Arc, + bla: bool, +} + +impl ClientManager { + pub async fn new(urls: Vec) -> Result { + let first_url = urls.first().unwrap(); + let ws_client = WsClientBuilder::default().build(first_url.clone()).await?; + Ok(Self { urls, ws_client: Arc::new(ws_client), bla: false }) + } + + pub async fn refresh(&mut self) { + tracing::info!( + target: LOG_TARGET, + is_connected = self.ws_client.is_connected(), + "Client 1 disconnected, switching to url 2" + ); + if self.bla { + tracing::info!(target: LOG_TARGET, "Nothing to do, already switched connections"); + return; + } + let new_url = self.urls.get(1).unwrap(); + self.ws_client = Arc::new( + WsClientBuilder::default() + .build(new_url.clone()) + .await + .expect("Second url also not running"), + ); + self.bla = true; + } + + pub async fn get_client(&mut self) -> Result, JsonRpseeError> { + tracing::info!(target: LOG_TARGET, "Client still connected, everything ok"); + return Ok(self.ws_client.clone()); + } +} + impl RpcStreamWorker { /// Create new worker. Returns the worker and a channel to register new listeners. 
async fn new(urls: Vec) -> (RpcStreamWorker, TokioSender) { - let clients: Vec<(Url, Result)> = - futures::future::join_all(urls.into_iter().map(|url| async move { - (url.clone(), WsClientBuilder::default().build(url.as_str()).await) - })) - .await; - let clients = clients.into_iter().filter_map(|element| { - match element.1 { - Ok(client) => Some((element.0, client)), - _ => { - tracing::warn!(target: "pooled-client", url = ?element.0, "Unable to connect to provided relay chain."); - None} - } - }).collect(); - let (tx, rx) = tokio_channel(100); let worker = RpcStreamWorker { - ws_clients: clients, + ws_urls: urls, client_receiver: rx, + self_sender: tx.clone(), imported_header_listeners: Vec::new(), finalized_header_listeners: Vec::new(), best_header_listeners: Vec::new(), @@ -173,16 +208,18 @@ impl RpcStreamWorker { /// 2. Distribute incoming import, best head and finalization notifications to registered listeners. /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. pub async fn run(mut self) { - let ws_client = &self.ws_clients.first().unwrap().1; + let mut client_manager = ClientManager::new(self.ws_urls.clone()).await.expect("jojo"); + + let mut subscription_client = client_manager.get_client().await.expect("jojo"); let mut pending_requests = FuturesUnordered::new(); - let mut import_sub = ws_client + let mut import_sub = subscription_client .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") .await .expect("should not fail") .fuse(); - let mut best_head_sub = ws_client + let mut best_head_sub = subscription_client .subscribe::( "chain_subscribeNewHeads", None, @@ -190,7 +227,8 @@ impl RpcStreamWorker { ) .await .expect("should not fail"); - let mut finalized_sub = ws_client + + let mut finalized_sub = subscription_client .subscribe::( "chain_subscribeFinalizedHeads", None, @@ -200,7 +238,42 @@ impl RpcStreamWorker { .expect("should not fail") .fuse(); + let mut reset_streams = false; loop { + if reset_streams { + tracing::info!(target: LOG_TARGET, "Resetting streams"); + client_manager.refresh().await; + subscription_client = client_manager.get_client().await.expect("jap"); + import_sub = subscription_client + .subscribe::( + "chain_subscribeAllHeads", + None, + "chain_unsubscribeAllHeads", + ) + .await + .expect("should not fail") + .fuse(); + + best_head_sub = subscription_client + .subscribe::( + "chain_subscribeNewHeads", + None, + "chain_unsubscribeFinalizedHeads", + ) + .await + .expect("should not fail"); + + finalized_sub = subscription_client + .subscribe::( + "chain_subscribeFinalizedHeads", + None, + "chain_unsubscribeFinalizedHeads", + ) + .await + .expect("should not fail") + .fuse(); + reset_streams = false; + }; tokio::select! 
{ evt = self.client_receiver.recv() => match evt { Some(RpcDispatcherMessage::RegisterBestHeadListener(tx)) => { @@ -213,11 +286,23 @@ impl RpcStreamWorker { self.finalized_header_listeners.push(tx) }, Some(RpcDispatcherMessage::Request(method, params, response_sender)) => { + let Ok(closure_client) = client_manager.get_client().await else { + tracing::error!("Unable to get the new client, terminating worker"); + return; + }; pending_requests.push(async move { - let resp = ws_client.request(&method, params.map(|p| p.into())).await; + let resp = closure_client.request(&method, params.clone().map(|p| p.into())).await; + + let mut result = Ok(()); + if resp.is_err() { + result = Err(()); + } + tracing::info!(target: LOG_TARGET, ?resp, "Got response"); + if let Err(err) = response_sender.send(resp) { tracing::error!(target: LOG_TARGET, ?err, "Recipient no longer interested in request result"); } + return result; }); }, None => { @@ -225,23 +310,28 @@ impl RpcStreamWorker { return; } }, - _ = pending_requests.next() => {}, - import_event = import_sub.next() => { + should_retry = pending_requests.next() => { + if let Some(Err(())) = should_retry { + tracing::info!(target: LOG_TARGET, "Refreshing client"); + reset_streams = true; + } + }, + import_event = import_sub.next(), if !reset_streams => { if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification. Stopping RPC Worker."); - return; + tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification."); + reset_streams = true; } }, - best_header_event = best_head_sub.next() => { + best_header_event = best_head_sub.next(), if !reset_streams => { if let Err(err) = handle_event_distribution(best_header_event, &mut self.best_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification. Stopping RPC Worker."); - return; + tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification."); + reset_streams = true; } } - finalized_event = finalized_sub.next() => { + finalized_event = finalized_sub.next(), if !reset_streams => { if let Err(err) = handle_event_distribution(finalized_event, &mut self.finalized_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification. 
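The `reset_streams` flag drives the recovery loop in the hunk above: a failed subscription or request only marks the flag, the next loop iteration rebuilds the client and its subscriptions, and the `, if !reset_streams` preconditions keep the broken streams out of the `tokio::select!` in the meantime. A condensed, runnable sketch of that loop shape follows; the names (`subscribe`, `reconnect`, the command channel) are made up for the example and do not come from the patch.

```rust
use futures::StreamExt;
use tokio::sync::mpsc;

/// Stand-in for opening a header subscription against the active RPC server.
fn subscribe(server: u32) -> impl futures::Stream<Item = u32> + Unpin {
    futures::stream::iter(server * 100..server * 100 + 3)
}

#[tokio::main]
async fn main() {
    let (cmd_tx, mut cmd_rx) = mpsc::channel::<&'static str>(8);
    tokio::spawn(async move {
        cmd_tx.send("status").await.ok();
        cmd_tx.send("shutdown").await.ok();
    });

    let mut server = 0u32;
    let mut stream = subscribe(server);
    let mut reconnect = false;

    loop {
        if reconnect {
            // This is where the worker picks the next reachable RPC server and
            // opens all of its header subscriptions again.
            server += 1;
            stream = subscribe(server);
            reconnect = false;
        }

        tokio::select! {
            cmd = cmd_rx.recv() => match cmd {
                Some("shutdown") | None => break,
                Some(other) => println!("handling command: {other}"),
            },
            // The guard keeps a dead stream from being polled once a reconnect is pending.
            header = stream.next(), if !reconnect => match header {
                Some(h) => println!("forwarding header {h} from server {server}"),
                None => reconnect = true, // Subscription ended: switch servers.
            },
        }
    }
}
```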
Stopping RPC Worker."); - return; + tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification."); + reset_streams = true; } } } From 835204dd45eb1d388d7b82403264c9aef806330c Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 14 Nov 2022 09:02:35 +0100 Subject: [PATCH 07/36] Implement rotation logic --- .../src/resilient_ws_client.rs | 89 ++++++++++--------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index cf6044a1a2d..338867cdec5 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -16,9 +16,8 @@ use jsonrpsee::{ ws_client::WsClientBuilder, }; use polkadot_service::TaskManager; -use tokio::sync::{ - mpsc::{channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender}, - RwLock, +use tokio::sync::mpsc::{ + channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, }; use url::Url; @@ -150,40 +149,46 @@ fn handle_event_distribution( #[derive(Clone, Debug)] struct ClientManager { urls: Vec, - ws_client: Arc, - bla: bool, + active_client: (usize, Arc), +} + +async fn find_next_client( + urls: &Vec, + starting_position: usize, +) -> Result<(usize, Arc), ()> { + for (counter, url) in urls.iter().cycle().skip(starting_position).take(urls.len()).enumerate() { + let index = (starting_position + counter) % urls.len(); + tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); + if let Ok(ws_client) = WsClientBuilder::default().build(url.clone()).await { + tracing::info!(target: LOG_TARGET, ?url, "Successfully switched."); + return Ok((index, Arc::new(ws_client))); + }; + } + Err(()) } impl ClientManager { - pub async fn new(urls: Vec) -> Result { - let first_url = urls.first().unwrap(); - let ws_client = WsClientBuilder::default().build(first_url.clone()).await?; - Ok(Self { urls, ws_client: Arc::new(ws_client), bla: false }) + pub async fn new(urls: Vec) -> Result { + if urls.is_empty() { + return Err(()); + } + let active_client = find_next_client(&urls, 1).await?; + Ok(Self { urls, active_client }) } - pub async fn refresh(&mut self) { + pub async fn connect_to_new_rpc_server(&mut self) -> Result<(), ()> { tracing::info!( target: LOG_TARGET, - is_connected = self.ws_client.is_connected(), - "Client 1 disconnected, switching to url 2" - ); - if self.bla { - tracing::info!(target: LOG_TARGET, "Nothing to do, already switched connections"); - return; - } - let new_url = self.urls.get(1).unwrap(); - self.ws_client = Arc::new( - WsClientBuilder::default() - .build(new_url.clone()) - .await - .expect("Second url also not running"), + is_connected = self.active_client.1.is_connected(), + "Trying to find new RPC server." 
); - self.bla = true; + let new_active = find_next_client(&self.urls, self.active_client.0 + 1).await?; + self.active_client = new_active; + Ok(()) } - pub async fn get_client(&mut self) -> Result, JsonRpseeError> { - tracing::info!(target: LOG_TARGET, "Client still connected, everything ok"); - return Ok(self.ws_client.clone()); + pub fn get_client(&mut self) -> Arc { + return self.active_client.1.clone(); } } @@ -210,7 +215,7 @@ impl RpcStreamWorker { pub async fn run(mut self) { let mut client_manager = ClientManager::new(self.ws_urls.clone()).await.expect("jojo"); - let mut subscription_client = client_manager.get_client().await.expect("jojo"); + let mut subscription_client = client_manager.get_client(); let mut pending_requests = FuturesUnordered::new(); let mut import_sub = subscription_client @@ -241,9 +246,13 @@ impl RpcStreamWorker { let mut reset_streams = false; loop { if reset_streams { - tracing::info!(target: LOG_TARGET, "Resetting streams"); - client_manager.refresh().await; - subscription_client = client_manager.get_client().await.expect("jap"); + if let Err(()) = client_manager.connect_to_new_rpc_server().await { + tracing::error!( + target: LOG_TARGET, + "Unable to find valid external RPC server, shutting down." + ); + }; + subscription_client = client_manager.get_client(); import_sub = subscription_client .subscribe::( "chain_subscribeAllHeads", @@ -286,23 +295,18 @@ impl RpcStreamWorker { self.finalized_header_listeners.push(tx) }, Some(RpcDispatcherMessage::Request(method, params, response_sender)) => { - let Ok(closure_client) = client_manager.get_client().await else { - tracing::error!("Unable to get the new client, terminating worker"); - return; - }; + let closure_client = client_manager.get_client(); pending_requests.push(async move { let resp = closure_client.request(&method, params.clone().map(|p| p.into())).await; - let mut result = Ok(()); - if resp.is_err() { - result = Err(()); + if let Err(JsonRpseeError::RestartNeeded(_)) = resp { + return Err(RpcDispatcherMessage::Request(method, params, response_sender)); } - tracing::info!(target: LOG_TARGET, ?resp, "Got response"); if let Err(err) = response_sender.send(resp) { tracing::error!(target: LOG_TARGET, ?err, "Recipient no longer interested in request result"); } - return result; + return Ok(()); }); }, None => { @@ -311,9 +315,10 @@ impl RpcStreamWorker { } }, should_retry = pending_requests.next() => { - if let Some(Err(())) = should_retry { - tracing::info!(target: LOG_TARGET, "Refreshing client"); + if let Some(Err(req)) = should_retry { reset_streams = true; + tracing::info!(target: LOG_TARGET, ?req, "Retrying request"); + self.self_sender.send(req).await.expect("This should work"); } }, import_event = import_sub.next(), if !reset_streams => { From 1821ae17a50b3923d201206d85dc1f6a5a76a90d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 14 Nov 2022 15:28:05 +0100 Subject: [PATCH 08/36] Improve subscription handling --- .../src/collator_overseer.rs | 8 +- .../src/resilient_ws_client.rs | 188 +++++++++++------- .../0006-rpc_collator_builds_blocks.toml | 8 +- 3 files changed, 121 insertions(+), 83 deletions(-) diff --git a/client/relay-chain-minimal-node/src/collator_overseer.rs b/client/relay-chain-minimal-node/src/collator_overseer.rs index 6efb1a9ce2e..b9b599a3a64 100644 --- a/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -252,8 +252,8 @@ async fn forward_collator_events( f = finality.next() => { match f { Some(header) 
=> { - tracing::info!(target: "minimal-polkadot-node", "Received finalized block via RPC: #{} ({})", header.number, header.hash()); - let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; + tracing::info!(target: "minimal-polkadot-node", "Received finalized block via RPC: #{} ({} -> {})", header.number, header.parent_hash, header.hash()); + let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; handle.block_finalized(block_info).await; } None => return Err(RelayChainError::GenericError("Relay chain finality stream ended.".to_string())), @@ -262,8 +262,8 @@ async fn forward_collator_events( i = imports.next() => { match i { Some(header) => { - tracing::info!(target: "minimal-polkadot-node", "Received imported block via RPC: #{} ({})", header.number, header.hash()); - let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; + tracing::info!(target: "minimal-polkadot-node", "Received imported block via RPC: #{} ({} -> {})", header.number, header.parent_hash, header.hash()); + let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; handle.block_imported(block_info).await; } None => return Err(RelayChainError::GenericError("Relay chain import stream ended.".to_string())), diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index 338867cdec5..c62b11f25a7 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Instant}; use cumulus_primitives_core::relay_chain::Header as PHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; @@ -10,7 +10,7 @@ use futures::{ }; use jsonrpsee::{ core::{ - client::{Client as JsonRpcClient, ClientT, SubscriptionClientT}, + client::{Client as JsonRpcClient, ClientT, Subscription, SubscriptionClientT}, Error as JsonRpseeError, JsonValue, }, ws_client::WsClientBuilder, @@ -121,6 +121,12 @@ fn handle_event_distribution( ) -> Result<(), String> { match event { Some(Ok(header)) => { + tracing::info!( + target: LOG_TARGET, + "Received block via RPC: #{} ({})", + header.number, + header.hash() + ); senders.retain_mut(|e| { match e.try_send(header.clone()) { // Receiver has been dropped, remove Sender from list. 
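For context on `handle_event_distribution` in the hunk above: every incoming header is fanned out to all registered listeners over bounded channels, a listener whose receiver was dropped is removed from the list, and a listener whose buffer is full simply misses that one notification. A small stand-alone sketch of that behaviour; the `distribute` helper is illustrative, not part of the patch.

```rust
use futures::channel::mpsc::{channel, Sender};

/// Fan one notification out to every registered listener.
/// Disconnected listeners are dropped from the list; listeners with a full
/// buffer keep their registration but miss this particular value.
fn distribute<T: Clone>(senders: &mut Vec<Sender<T>>, value: T) {
    senders.retain_mut(|sender| match sender.try_send(value.clone()) {
        Ok(()) => true,
        Err(err) if err.is_disconnected() => false, // Receiver dropped.
        Err(_) => true,                             // Channel full: skip, keep listener.
    });
}

fn main() {
    let (alive, mut alive_rx) = channel::<u32>(4);
    let (gone, gone_rx) = channel::<u32>(4);
    drop(gone_rx);

    let mut listeners = vec![alive, gone];
    distribute(&mut listeners, 7);

    assert_eq!(listeners.len(), 1);
    assert_eq!(alive_rx.try_next().unwrap(), Some(7));
}
```

Using `try_send` keeps one slow consumer from stalling header distribution for everyone else.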
@@ -152,6 +158,12 @@ struct ClientManager { active_client: (usize, Arc), } +struct RelayChainSubscriptions { + import_subscription: Subscription, + finalized_subscription: Subscription, + best_subscription: Subscription, +} + async fn find_next_client( urls: &Vec, starting_position: usize, @@ -167,16 +179,38 @@ async fn find_next_client( Err(()) } +async fn create_request( + client: Arc, + method: String, + params: Option>, + response_sender: OneshotSender>, +) -> Result<(), RpcDispatcherMessage> { + let resp = client.request(&method, params.clone().map(|p| p.into())).await; + + if let Err(JsonRpseeError::RestartNeeded(_)) = resp { + return Err(RpcDispatcherMessage::Request(method, params, response_sender)); + } + + if let Err(err) = response_sender.send(resp) { + tracing::error!( + target: LOG_TARGET, + ?err, + "Recipient no longer interested in request result" + ); + } + return Ok(()); +} + impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { return Err(()); } - let active_client = find_next_client(&urls, 1).await?; + let active_client = find_next_client(&urls, 0).await?; Ok(Self { urls, active_client }) } - pub async fn connect_to_new_rpc_server(&mut self) -> Result<(), ()> { + pub async fn connect_to_new_rpc_server(&mut self) -> Result, ()> { tracing::info!( target: LOG_TARGET, is_connected = self.active_client.1.is_connected(), @@ -184,7 +218,7 @@ impl ClientManager { ); let new_active = find_next_client(&self.urls, self.active_client.0 + 1).await?; self.active_client = new_active; - Ok(()) + Ok(self.active_client.1.clone()) } pub fn get_client(&mut self) -> Arc { @@ -213,76 +247,38 @@ impl RpcStreamWorker { /// 2. Distribute incoming import, best head and finalization notifications to registered listeners. /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. pub async fn run(mut self) { - let mut client_manager = ClientManager::new(self.ws_urls.clone()).await.expect("jojo"); + let Ok(mut client_manager) = ClientManager::new(self.ws_urls.clone()).await else { + tracing::error!(target: LOG_TARGET, "No valid RPC url found. Stopping RPC worker."); + return; + }; - let mut subscription_client = client_manager.get_client(); + let active_client = client_manager.get_client(); let mut pending_requests = FuturesUnordered::new(); - let mut import_sub = subscription_client - .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") - .await - .expect("should not fail") - .fuse(); - - let mut best_head_sub = subscription_client - .subscribe::( - "chain_subscribeNewHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await - .expect("should not fail"); - - let mut finalized_sub = subscription_client - .subscribe::( - "chain_subscribeFinalizedHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await - .expect("should not fail") - .fuse(); + let Ok(mut subscriptions) = get_subscriptions(&active_client).await else { + return; + }; let mut reset_streams = false; loop { if reset_streams { - if let Err(()) = client_manager.connect_to_new_rpc_server().await { + let Ok(active_client) = client_manager.connect_to_new_rpc_server().await else { tracing::error!( target: LOG_TARGET, "Unable to find valid external RPC server, shutting down." 
); + return; }; - subscription_client = client_manager.get_client(); - import_sub = subscription_client - .subscribe::( - "chain_subscribeAllHeads", - None, - "chain_unsubscribeAllHeads", - ) - .await - .expect("should not fail") - .fuse(); - - best_head_sub = subscription_client - .subscribe::( - "chain_subscribeNewHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await - .expect("should not fail"); - - finalized_sub = subscription_client - .subscribe::( - "chain_subscribeFinalizedHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await - .expect("should not fail") - .fuse(); + + if let Ok(new_subscriptions) = get_subscriptions(&active_client).await { + subscriptions = new_subscriptions; + } else { + return; + }; + reset_streams = false; }; + tokio::select! { evt = self.client_receiver.recv() => match evt { Some(RpcDispatcherMessage::RegisterBestHeadListener(tx)) => { @@ -295,19 +291,8 @@ impl RpcStreamWorker { self.finalized_header_listeners.push(tx) }, Some(RpcDispatcherMessage::Request(method, params, response_sender)) => { - let closure_client = client_manager.get_client(); - pending_requests.push(async move { - let resp = closure_client.request(&method, params.clone().map(|p| p.into())).await; - - if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)); - } - - if let Err(err) = response_sender.send(resp) { - tracing::error!(target: LOG_TARGET, ?err, "Recipient no longer interested in request result"); - } - return Ok(()); - }); + let client = client_manager.get_client(); + pending_requests.push(create_request(client, method, params, response_sender)); }, None => { tracing::error!(target: LOG_TARGET, "RPC client receiver closed. Stopping RPC Worker."); @@ -321,19 +306,19 @@ impl RpcStreamWorker { self.self_sender.send(req).await.expect("This should work"); } }, - import_event = import_sub.next(), if !reset_streams => { + import_event = subscriptions.import_subscription.next() => { if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification."); reset_streams = true; } }, - best_header_event = best_head_sub.next(), if !reset_streams => { + best_header_event = subscriptions.best_subscription.next() => { if let Err(err) = handle_event_distribution(best_header_event, &mut self.best_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification."); reset_streams = true; } } - finalized_event = finalized_sub.next(), if !reset_streams => { + finalized_event = subscriptions.finalized_subscription.next() => { if let Err(err) = handle_event_distribution(finalized_event, &mut self.finalized_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification."); reset_streams = true; @@ -343,3 +328,52 @@ impl RpcStreamWorker { } } } + +async fn get_imported_header_subscription( + subscription_client: &Arc, +) -> Result, JsonRpseeError> { + subscription_client + .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") + .await + .map_err(|e| { + tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + e + }) +} + +async fn get_best_head_subscription( + subscription_client: &Arc, +) -> Result, JsonRpseeError> { + subscription_client + .subscribe::("chain_subscribeNewHeads", None, "chain_unsubscribeNewHeads") + 
.await + .map_err(|e| { + tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + e + }) +} + +async fn get_finalized_head_subscription( + subscription_client: &Arc, +) -> Result, JsonRpseeError> { + subscription_client + .subscribe::( + "chain_subscribeFinalizedHeads", + None, + "chain_unsubscribeFinalizedHeads", + ) + .await + .map_err(|e| { + tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + e + }) +} + +async fn get_subscriptions( + active_client: &Arc, +) -> Result { + let import_subscription = get_imported_header_subscription(&active_client).await?; + let best_subscription = get_best_head_subscription(&active_client).await?; + let finalized_subscription = get_finalized_head_subscription(&active_client).await?; + Ok(RelayChainSubscriptions { import_subscription, best_subscription, finalized_subscription }) +} diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.toml b/zombienet/tests/0006-rpc_collator_builds_blocks.toml index bfc3de8d988..963b6f5d9e1 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.toml +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.toml @@ -25,6 +25,10 @@ chain = "rococo-local" name = "two" validator = false + [[relaychain.nodes]] + name = "three" + validator = false + [[parachains]] id = 2000 cumulus_based = true @@ -35,7 +39,7 @@ cumulus_based = true validator = true image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-urls {{'one'|zombie('wsUri')}} {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}}"] + args = ["-lparachain=trace,blockchain-rpc-client=debug", "--relay-chain-rpc-urls {{'one'|zombie('wsUri')}} {{'two'|zombie('wsUri')}} {{'three'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}} {{'two'|zombie('multiAddress')}} {{'three'|zombie('multiAddress')}}"] # run eve as parachain full node [[parachains.collators]] @@ -43,4 +47,4 @@ cumulus_based = true validator = true image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain=debug,blockchain-rpc-client=debug", "--relay-chain-rpc-urls {{'one'|zombie('wsUri')}} {{'two'|zombie('wsUri')}}", "-- --bootnodes {{'two'|zombie('multiAddress')}}"] + args = ["-lparachain=trace,blockchain-rpc-client=debug", "--relay-chain-rpc-urls {{'one'|zombie('wsUri')}} {{'two'|zombie('wsUri')}} {{'three'|zombie('wsUri')}}", "-- --bootnodes {{'one'|zombie('multiAddress')}} {{'two'|zombie('multiAddress')}} {{'three'|zombie('multiAddress')}}"] From 364b78c9d753468e971acdedea92acfddf566e4a Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 09:39:07 +0100 Subject: [PATCH 09/36] Error handling cleanup --- client/relay-chain-interface/Cargo.toml | 1 + client/relay-chain-interface/src/lib.rs | 24 +++++++++++++-- .../src/resilient_ws_client.rs | 30 +++++++++++-------- .../src/rpc_client.rs | 6 ++-- 4 files changed, 42 insertions(+), 19 deletions(-) diff --git a/client/relay-chain-interface/Cargo.toml b/client/relay-chain-interface/Cargo.toml index b7d1d14b085..750f279e3f3 100644 --- a/client/relay-chain-interface/Cargo.toml +++ b/client/relay-chain-interface/Cargo.toml @@ -15,6 +15,7 @@ sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "mas sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +tokio = { version = "1.21.2", features = ["sync"] } futures = "0.3.24" 
async-trait = "0.1.58" thiserror = "1.0.37" diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs index 4505ac70973..33aa06c8463 100644 --- a/client/relay-chain-interface/src/lib.rs +++ b/client/relay-chain-interface/src/lib.rs @@ -51,11 +51,11 @@ pub enum RelayChainError { BlockchainError(#[from] sp_blockchain::Error), #[error("State machine error occured: {0}")] StateMachineError(Box), - #[error("Unable to call RPC method '{0}' due to error: {1}")] - RpcCallError(String, JsonRpcError), + #[error("Unable to call RPC method '{0}'")] + RpcCallError(String), #[error("RPC Error: '{0}'")] JsonRpcError(#[from] JsonRpcError), - #[error("Unable to reach RpcStreamWorker: {0}")] + #[error("Unable to communicate with RPC worker: {0}")] WorkerCommunicationError(String), #[error("Scale codec deserialization error: {0}")] DeserializationError(CodecError), @@ -87,6 +87,24 @@ impl From for sp_blockchain::Error { } } +impl From> for RelayChainError { + fn from(e: tokio::sync::mpsc::error::SendError) -> Self { + return RelayChainError::WorkerCommunicationError(format!( + "Unable to send message to RPC worker: {}", + e.to_string() + )); + } +} + +impl From for RelayChainError { + fn from(e: futures::channel::oneshot::Canceled) -> Self { + return RelayChainError::WorkerCommunicationError(format!( + "Unexpected channel close on RPC worker side: {}", + e.to_string() + )); + } +} + /// Trait that provides all necessary methods for interaction between collator and relay chain. #[async_trait] pub trait RelayChainInterface: Send + Sync { diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs index c62b11f25a7..e5e232983c2 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/resilient_ws_client.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Instant}; +use std::sync::Arc; use cumulus_primitives_core::relay_chain::Header as PHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; @@ -50,15 +50,16 @@ impl PooledClient { &self, method: &str, params: Option>, - ) -> Result + ) -> Result where R: sp_runtime::DeserializeOwned, { let (tx, rx) = futures::channel::oneshot::channel(); let message = RpcDispatcherMessage::Request(method.to_string(), params, tx); - self.to_worker_channel.send(message).await.expect("worker sending fucked up"); - let value = rx.await.expect("fucked up during deserialization")?; - serde_json::from_value(value).map_err(JsonRpseeError::ParseError) + self.to_worker_channel.send(message).await?; + let value = rx.await??; + serde_json::from_value(value) + .map_err(|_| RelayChainError::GenericError("Unable to deserialize value".to_string())) } /// Get a stream of new best relay chain headers pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { @@ -144,8 +145,6 @@ fn handle_event_distribution( Ok(()) }, None => { - // TODO skunert We should replace the stream at this point - tracing::error!(target: LOG_TARGET, "The subscription was closed"); return Err("Subscription closed".to_string()); }, Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), @@ -172,7 +171,6 @@ async fn find_next_client( let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); if let Ok(ws_client) = WsClientBuilder::default().build(url.clone()).await { - tracing::info!(target: LOG_TARGET, ?url, "Successfully switched."); return 
Ok((index, Arc::new(ws_client))); }; } @@ -247,13 +245,13 @@ impl RpcStreamWorker { /// 2. Distribute incoming import, best head and finalization notifications to registered listeners. /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. pub async fn run(mut self) { + let mut pending_requests = FuturesUnordered::new(); + let Ok(mut client_manager) = ClientManager::new(self.ws_urls.clone()).await else { tracing::error!(target: LOG_TARGET, "No valid RPC url found. Stopping RPC worker."); return; }; - let active_client = client_manager.get_client(); - let mut pending_requests = FuturesUnordered::new(); let Ok(mut subscriptions) = get_subscriptions(&active_client).await else { return; @@ -273,6 +271,10 @@ impl RpcStreamWorker { if let Ok(new_subscriptions) = get_subscriptions(&active_client).await { subscriptions = new_subscriptions; } else { + tracing::error!( + target: LOG_TARGET, + "Not able to create streams from newly connected RPC server, shutting down." + ); return; }; @@ -299,11 +301,13 @@ impl RpcStreamWorker { return; } }, - should_retry = pending_requests.next() => { + should_retry = pending_requests.next(), if pending_requests.len() > 0 => { if let Some(Err(req)) = should_retry { reset_streams = true; - tracing::info!(target: LOG_TARGET, ?req, "Retrying request"); - self.self_sender.send(req).await.expect("This should work"); + if let Err(e) = self.self_sender.send(req).await { + tracing::error!(target: LOG_TARGET, ?e, "Unable to retry request, channel closed."); + return; + }; } }, import_event = subscriptions.import_subscription.next() => { diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 6e865f81a3b..4c7d4cac5a2 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -146,13 +146,13 @@ impl RelayChainRpcClient { ) -> Result where R: DeserializeOwned + std::fmt::Debug, - OR: Fn(&jsonrpsee::core::Error), + OR: Fn(&RelayChainError), { retry_notify( self.retry_strategy.clone(), || async { self.ws_client.request(method, params.clone()).await.map_err(|err| match err { - JsonRpseeError::Transport(_) => + RelayChainError::JsonRpcError(JsonRpseeError::Transport(_)) => backoff::Error::Transient { err, retry_after: None }, _ => backoff::Error::Permanent(err), }) @@ -162,7 +162,7 @@ impl RelayChainRpcClient { .await .map_err(|err| { trace_error(&err); - RelayChainError::RpcCallError(method.to_string(), err)}) + RelayChainError::RpcCallError(method.to_string())}) } /// Returns information regarding the current epoch. 
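The two `From` conversions added to `RelayChainError` in this commit are what let the frontend's `request` method replace the earlier `expect` calls with plain `?`: a closed worker channel and a dropped oneshot sender both map onto `WorkerCommunicationError`. The following self-contained sketch shows that round trip with stand-in types (`WorkerError`, `Request`) instead of `RelayChainError` and `RpcDispatcherMessage`.

```rust
use futures::channel::oneshot;
use tokio::sync::mpsc;

#[derive(Debug)]
enum WorkerError {
    WorkerCommunication(String),
}

impl From<mpsc::error::SendError<Request>> for WorkerError {
    fn from(e: mpsc::error::SendError<Request>) -> Self {
        WorkerError::WorkerCommunication(format!("worker channel closed: {e}"))
    }
}

impl From<oneshot::Canceled> for WorkerError {
    fn from(e: oneshot::Canceled) -> Self {
        WorkerError::WorkerCommunication(format!("worker dropped the response sender: {e}"))
    }
}

struct Request {
    method: String,
    response: oneshot::Sender<u64>,
}

/// Forward a request to the worker and wait for its answer. Both failure modes
/// (worker gone before send, worker gone before reply) convert into
/// `WorkerError` through the `From` impls above, so `?` suffices.
async fn request(to_worker: &mpsc::Sender<Request>, method: &str) -> Result<u64, WorkerError> {
    let (tx, rx) = oneshot::channel();
    to_worker.send(Request { method: method.to_string(), response: tx }).await?;
    Ok(rx.await?)
}

#[tokio::main]
async fn main() -> Result<(), WorkerError> {
    let (to_worker, mut from_frontend) = mpsc::channel::<Request>(16);
    tokio::spawn(async move {
        while let Some(req) = from_frontend.recv().await {
            let _ = req.response.send(req.method.len() as u64);
        }
    });

    println!("answer: {}", request(&to_worker, "chain_getHeader").await?);
    Ok(())
}
```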
From 3df52c33a51065086f826a5955d6616764111795 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 10:01:55 +0100 Subject: [PATCH 10/36] Remove retry from rpc-client --- client/relay-chain-rpc-interface/Cargo.toml | 1 - .../src/rpc_client.rs | 28 ++++--------------- 2 files changed, 5 insertions(+), 24 deletions(-) diff --git a/client/relay-chain-rpc-interface/Cargo.toml b/client/relay-chain-rpc-interface/Cargo.toml index 456473e7352..8bdd976dc05 100644 --- a/client/relay-chain-rpc-interface/Cargo.toml +++ b/client/relay-chain-rpc-interface/Cargo.toml @@ -29,5 +29,4 @@ jsonrpsee = { version = "0.15.1", features = ["ws-client"] } tracing = "0.1.37" async-trait = "0.1.58" url = "2.3.1" -backoff = { version = "0.4.0", features = ["tokio"] } serde_json = "1.0.87" diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 4c7d4cac5a2..d98f3aba4b7 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -15,7 +15,6 @@ // along with Cumulus. If not, see . use crate::resilient_ws_client::PooledClient; -use backoff::{future::retry_notify, ExponentialBackoff}; use cumulus_primitives_core::{ relay_chain::{ v2::{ @@ -30,7 +29,7 @@ use cumulus_primitives_core::{ }; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::channel::mpsc::Receiver; -use jsonrpsee::core::{Error as JsonRpseeError, JsonValue}; +use jsonrpsee::core::JsonValue; use parity_scale_codec::{Decode, Encode}; use polkadot_service::{BlockNumber, TaskManager}; use sc_client_api::StorageData; @@ -50,9 +49,6 @@ const LOG_TARGET: &str = "relay-chain-rpc-client"; pub struct RelayChainRpcClient { /// Websocket client to make calls ws_client: Arc, - - /// Retry strategy that should be used for requests and subscriptions - retry_strategy: ExponentialBackoff, } macro_rules! rpc_params { @@ -84,10 +80,7 @@ pub async fn create_client_and_start_worker( impl RelayChainRpcClient { /// Initialize new RPC Client. async fn new(ws_client: PooledClient) -> RelayChainResult { - let client = RelayChainRpcClient { - ws_client: Arc::new(ws_client), - retry_strategy: ExponentialBackoff::default(), - }; + let client = RelayChainRpcClient { ws_client: Arc::new(ws_client) }; Ok(client) } @@ -148,21 +141,10 @@ impl RelayChainRpcClient { R: DeserializeOwned + std::fmt::Debug, OR: Fn(&RelayChainError), { - retry_notify( - self.retry_strategy.clone(), - || async { - self.ws_client.request(method, params.clone()).await.map_err(|err| match err { - RelayChainError::JsonRpcError(JsonRpseeError::Transport(_)) => - backoff::Error::Transient { err, retry_after: None }, - _ => backoff::Error::Permanent(err), - }) - }, - |error, dur| tracing::trace!(target: LOG_TARGET, %error, ?dur, "Encountered transport error, retrying."), - ) - .await - .map_err(|err| { + self.ws_client.request(method, params.clone()).await.map_err(|err| { trace_error(&err); - RelayChainError::RpcCallError(method.to_string())}) + RelayChainError::RpcCallError(method.to_string()) + }) } /// Returns information regarding the current epoch. 
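With reconnection handled inside the worker, the backoff-based retry in `RelayChainRpcClient` becomes redundant: the only error worth retrying is a dead websocket connection (`JsonRpseeError::RestartNeeded`), which is exactly the case in which the worker swaps servers and replays the request, while every other error is forwarded to the caller unchanged. That decision can be summarised in a few lines; the enums below are stand-ins for illustration, not the actual jsonrpsee types.

```rust
/// Outcome of a single RPC attempt, as seen by the worker.
enum AttemptError {
    /// The websocket background task is gone; the connection must be rebuilt.
    RestartNeeded(String),
    /// Any other error: wrong params, unknown method, decode failure, ...
    Other(String),
}

enum Disposition<Req> {
    /// Re-queue the original request; it will be retried on the next connection.
    Retry(Req),
    /// Hand the error back to the caller; retrying on another node would not help.
    Forward(String),
}

/// Classify an RPC failure the way the reworked worker does: only a dead
/// connection is worth a retry, every other error is the caller's problem.
fn classify<Req>(req: Req, err: AttemptError) -> Disposition<Req> {
    match err {
        AttemptError::RestartNeeded(_) => Disposition::Retry(req),
        AttemptError::Other(msg) => Disposition::Forward(msg),
    }
}

fn main() {
    let dead = classify("chain_getHeader", AttemptError::RestartNeeded("socket closed".into()));
    assert!(matches!(dead, Disposition::Retry("chain_getHeader")));

    let bad = classify("chain_getHeader", AttemptError::Other("method not found".into()));
    assert!(matches!(bad, Disposition::Forward(_)));
}
```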
From 3274c8d464eada22f33d9d0b48f0144f347aee3d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 10:41:29 +0100 Subject: [PATCH 11/36] Improve naming --- client/relay-chain-rpc-interface/src/lib.rs | 2 +- ...ws_client.rs => reconnecting_ws_client.rs} | 82 ++++++++++++------- .../src/rpc_client.rs | 8 +- 3 files changed, 57 insertions(+), 35 deletions(-) rename client/relay-chain-rpc-interface/src/{resilient_ws_client.rs => reconnecting_ws_client.rs} (87%) diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index 272a6a8a722..c85a142aa64 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -34,7 +34,7 @@ use std::pin::Pin; pub use url::Url; -mod resilient_ws_client; +mod reconnecting_ws_client; mod rpc_client; pub use rpc_client::{create_client_and_start_worker, RelayChainRpcClient}; diff --git a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs similarity index 87% rename from client/relay-chain-rpc-interface/src/resilient_ws_client.rs rename to client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index e5e232983c2..b6227eb2e28 100644 --- a/client/relay-chain-rpc-interface/src/resilient_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -24,16 +24,27 @@ use url::Url; const NOTIFICATION_CHANNEL_SIZE_LIMIT: usize = 20; const LOG_TARGET: &str = "pooled-rpc-client"; -pub struct PooledClient { +/// Messages for communication between [`ReconnectingWsClient`] and [`ReconnectingWebsocketWorker`]. +#[derive(Debug)] +pub enum RpcDispatcherMessage { + RegisterBestHeadListener(Sender), + RegisterImportListener(Sender), + RegisterFinalizationListener(Sender), + Request(String, Option>, OneshotSender>), +} + +/// Frontend for performing websocket requests. +/// Requests and stream requests are forwarded to [`ReconnectingWebsocketWorker`]. +pub struct ReconnectingWsClient { /// Channel to communicate with the RPC worker to_worker_channel: TokioSender, } -impl PooledClient { +impl ReconnectingWsClient { pub async fn new(urls: Vec, task_manager: &mut TaskManager) -> RelayChainResult { tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); - let (worker, sender) = RpcStreamWorker::new(urls).await; + let (worker, sender) = ReconnectingWebsocketWorker::new(urls).await; task_manager .spawn_essential_handle() @@ -43,9 +54,8 @@ impl PooledClient { } } -impl PooledClient {} - -impl PooledClient { +impl ReconnectingWsClient { + /// Perform a request via websocket connection. pub async fn request( &self, method: &str, @@ -94,17 +104,8 @@ impl PooledClient { } } -/// Worker messages to register new notification listeners -#[derive(Debug)] -pub enum RpcDispatcherMessage { - RegisterBestHeadListener(Sender), - RegisterImportListener(Sender), - RegisterFinalizationListener(Sender), - Request(String, Option>, OneshotSender>), -} - /// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. 
-struct RpcStreamWorker { +struct ReconnectingWebsocketWorker { ws_urls: Vec, // Communication channel with the RPC client client_receiver: TokioReceiver, @@ -163,7 +164,7 @@ struct RelayChainSubscriptions { best_subscription: Subscription, } -async fn find_next_client( +async fn connect_next_available_rpc_server( urls: &Vec, starting_position: usize, ) -> Result<(usize, Arc), ()> { @@ -204,7 +205,7 @@ impl ClientManager { if urls.is_empty() { return Err(()); } - let active_client = find_next_client(&urls, 0).await?; + let active_client = connect_next_available_rpc_server(&urls, 0).await?; Ok(Self { urls, active_client }) } @@ -214,7 +215,9 @@ impl ClientManager { is_connected = self.active_client.1.is_connected(), "Trying to find new RPC server." ); - let new_active = find_next_client(&self.urls, self.active_client.0 + 1).await?; + + let new_active = + connect_next_available_rpc_server(&self.urls, self.active_client.0 + 1).await?; self.active_client = new_active; Ok(self.active_client.1.clone()) } @@ -224,11 +227,13 @@ impl ClientManager { } } -impl RpcStreamWorker { +impl ReconnectingWebsocketWorker { /// Create new worker. Returns the worker and a channel to register new listeners. - async fn new(urls: Vec) -> (RpcStreamWorker, TokioSender) { + async fn new( + urls: Vec, + ) -> (ReconnectingWebsocketWorker, TokioSender) { let (tx, rx) = tokio_channel(100); - let worker = RpcStreamWorker { + let worker = ReconnectingWebsocketWorker { ws_urls: urls, client_receiver: rx, self_sender: tx.clone(), @@ -257,9 +262,16 @@ impl RpcStreamWorker { return; }; - let mut reset_streams = false; + let mut should_reconnect = false; loop { - if reset_streams { + if should_reconnect { + let mut requests_to_retry: Vec = Vec::new(); + while pending_requests.len() > 0 { + if let Some(Err(req)) = pending_requests.next().await { + requests_to_retry.push(req); + } + } + let Ok(active_client) = client_manager.connect_to_new_rpc_server().await else { tracing::error!( target: LOG_TARGET, @@ -278,7 +290,17 @@ impl RpcStreamWorker { return; }; - reset_streams = false; + requests_to_retry.into_iter().for_each(|req| { + if let RpcDispatcherMessage::Request(method, params, response_sender) = req { + pending_requests.push(create_request( + active_client.clone(), + method, + params, + response_sender, + )) + } + }); + should_reconnect = false; }; tokio::select! 
{ @@ -293,7 +315,7 @@ impl RpcStreamWorker { self.finalized_header_listeners.push(tx) }, Some(RpcDispatcherMessage::Request(method, params, response_sender)) => { - let client = client_manager.get_client(); + let client = active_client.clone(); pending_requests.push(create_request(client, method, params, response_sender)); }, None => { @@ -303,7 +325,7 @@ impl RpcStreamWorker { }, should_retry = pending_requests.next(), if pending_requests.len() > 0 => { if let Some(Err(req)) = should_retry { - reset_streams = true; + should_reconnect = true; if let Err(e) = self.self_sender.send(req).await { tracing::error!(target: LOG_TARGET, ?e, "Unable to retry request, channel closed."); return; @@ -313,19 +335,19 @@ impl RpcStreamWorker { import_event = subscriptions.import_subscription.next() => { if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification."); - reset_streams = true; + should_reconnect = true; } }, best_header_event = subscriptions.best_subscription.next() => { if let Err(err) = handle_event_distribution(best_header_event, &mut self.best_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification."); - reset_streams = true; + should_reconnect = true; } } finalized_event = subscriptions.finalized_subscription.next() => { if let Err(err) = handle_event_distribution(finalized_event, &mut self.finalized_header_listeners) { tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification."); - reset_streams = true; + should_reconnect = true; } } } diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index d98f3aba4b7..bb13e6aec8c 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::resilient_ws_client::PooledClient; +use crate::reconnecting_ws_client::ReconnectingWsClient; use cumulus_primitives_core::{ relay_chain::{ v2::{ @@ -48,7 +48,7 @@ const LOG_TARGET: &str = "relay-chain-rpc-client"; #[derive(Clone)] pub struct RelayChainRpcClient { /// Websocket client to make calls - ws_client: Arc, + ws_client: Arc, } macro_rules! rpc_params { @@ -70,7 +70,7 @@ pub async fn create_client_and_start_worker( urls: Vec, task_manager: &mut TaskManager, ) -> RelayChainResult { - let ws_client = PooledClient::new(urls, task_manager).await?; + let ws_client = ReconnectingWsClient::new(urls, task_manager).await?; let client = RelayChainRpcClient::new(ws_client).await?; @@ -79,7 +79,7 @@ pub async fn create_client_and_start_worker( impl RelayChainRpcClient { /// Initialize new RPC Client. 
- async fn new(ws_client: PooledClient) -> RelayChainResult { + async fn new(ws_client: ReconnectingWsClient) -> RelayChainResult { let client = RelayChainRpcClient { ws_client: Arc::new(ws_client) }; Ok(client) From 5606d1c7780606551c571fb0e0c897b77f9c4cc8 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 11:31:04 +0100 Subject: [PATCH 12/36] Improve documentation --- .../src/reconnecting_ws_client.rs | 40 +++++++++---------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index b6227eb2e28..96f0ae0e77b 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -245,10 +245,12 @@ impl ReconnectingWebsocketWorker { } /// Run this worker to drive notification streams. - /// The worker does two things: - /// 1. Listen for `NotificationRegisterMessage` and register new listeners for the notification streams - /// 2. Distribute incoming import, best head and finalization notifications to registered listeners. - /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. + /// The worker does the following: + /// - Listen for [`RpcDispatcherMessage`], perform requests and register new listeners for the notification streams + /// - Distribute incoming import, best head and finalization notifications to registered listeners. + /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. + /// - Find a new valid RPC server to connect to in case the websocket connection is terminated. + /// If the worker is not able to connec to an RPC server from the list, the worker shuts down. pub async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); @@ -264,11 +266,18 @@ impl ReconnectingWebsocketWorker { let mut should_reconnect = false; loop { + // This branch is taken if the websocket connection to the current RPC server is closed. if should_reconnect { - let mut requests_to_retry: Vec = Vec::new(); - while pending_requests.len() > 0 { + // At this point, all pending requests will return an error since the + // JsonRpSee background thread is dead. So draining the pending requests should be fast. + while !pending_requests.is_empty() { if let Some(Err(req)) = pending_requests.next().await { - requests_to_retry.push(req); + // Put the failed requests into the queue again, they will be processed + // once we have a new rpc server to connect to. 
+ self.self_sender + .send(req) + .await + .expect("This worker owns the channel; qed"); } } @@ -290,16 +299,6 @@ impl ReconnectingWebsocketWorker { return; }; - requests_to_retry.into_iter().for_each(|req| { - if let RpcDispatcherMessage::Request(method, params, response_sender) = req { - pending_requests.push(create_request( - active_client.clone(), - method, - params, - response_sender, - )) - } - }); should_reconnect = false; }; @@ -323,13 +322,10 @@ impl ReconnectingWebsocketWorker { return; } }, - should_retry = pending_requests.next(), if pending_requests.len() > 0 => { + should_retry = pending_requests.next(), if !pending_requests.is_empty() => { if let Some(Err(req)) = should_retry { + self.self_sender.send(req).await.expect("This worker owns the channel; qed"); should_reconnect = true; - if let Err(e) = self.self_sender.send(req).await { - tracing::error!(target: LOG_TARGET, ?e, "Unable to retry request, channel closed."); - return; - }; } }, import_event = subscriptions.import_subscription.next() => { From 2b518d2c97aa85a312e66fd9b036f4c5dfbda616 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 11:58:14 +0100 Subject: [PATCH 13/36] Improve `ClientManager` abstraction --- Cargo.lock | 16 +- .../src/reconnecting_ws_client.rs | 187 ++++++++---------- 2 files changed, 88 insertions(+), 115 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 680260abb91..2eca2760531 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -389,20 +389,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "futures-core", - "getrandom 0.2.3", - "instant", - "pin-project-lite 0.2.9", - "rand 0.8.5", - "tokio", -] - [[package]] name = "backtrace" version = "0.3.64" @@ -1973,6 +1959,7 @@ dependencies = [ "sp-blockchain", "sp-state-machine", "thiserror", + "tokio", ] [[package]] @@ -2023,7 +2010,6 @@ name = "cumulus-relay-chain-rpc-interface" version = "0.1.0" dependencies = [ "async-trait", - "backoff", "cumulus-primitives-core", "cumulus-relay-chain-interface", "futures", diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 96f0ae0e77b..ed59b55c598 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use cumulus_primitives_core::relay_chain::Header as PHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::{ @@ -16,6 +14,8 @@ use jsonrpsee::{ ws_client::WsClientBuilder, }; use polkadot_service::TaskManager; +use std::future::Future; +use std::sync::Arc; use tokio::sync::mpsc::{ channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, }; @@ -35,12 +35,14 @@ pub enum RpcDispatcherMessage { /// Frontend for performing websocket requests. /// Requests and stream requests are forwarded to [`ReconnectingWebsocketWorker`]. +#[derive(Debug, Clone)] pub struct ReconnectingWsClient { /// Channel to communicate with the RPC worker to_worker_channel: TokioSender, } impl ReconnectingWsClient { + /// Create a new websocket client frontend. 
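The frontend/worker split means the client frontend never touches a websocket itself: every call is a message to the worker, and a notification stream is obtained by sending the worker the sending half of a freshly created bounded channel while the receiving half is returned to the caller. A reduced sketch of that registration pattern, with placeholder names (`WorkerMessage`, `register_listener`) rather than the types from this series:

```rust
use futures::channel::mpsc::{channel, Receiver, Sender};
use futures::StreamExt;
use tokio::sync::mpsc as tokio_mpsc;

/// Messages the frontend forwards to the background worker.
enum WorkerMessage {
    RegisterListener(Sender<u32>),
    Notify(u32),
}

/// Frontend call: open a bounded channel, give the sending half to the worker,
/// hand the receiving half back to the caller as its notification stream.
async fn register_listener(
    to_worker: &tokio_mpsc::Sender<WorkerMessage>,
    buffer: usize,
) -> Result<Receiver<u32>, String> {
    let (tx, rx) = channel(buffer);
    to_worker
        .send(WorkerMessage::RegisterListener(tx))
        .await
        .map_err(|e| format!("worker unreachable: {e}"))?;
    Ok(rx)
}

#[tokio::main]
async fn main() {
    let (to_worker, mut inbox) = tokio_mpsc::channel(16);

    // Minimal worker: remember listeners, forward notifications to them.
    tokio::spawn(async move {
        let mut listeners: Vec<Sender<u32>> = Vec::new();
        while let Some(msg) = inbox.recv().await {
            match msg {
                WorkerMessage::RegisterListener(tx) => listeners.push(tx),
                WorkerMessage::Notify(n) => {
                    for listener in listeners.iter_mut() {
                        let _ = listener.try_send(n);
                    }
                },
            }
        }
    });

    let mut stream = register_listener(&to_worker, 8).await.unwrap();
    to_worker.send(WorkerMessage::Notify(42)).await.unwrap();
    assert_eq!(stream.next().await, Some(42));
}
```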
pub async fn new(urls: Vec, task_manager: &mut TaskManager) -> RelayChainResult { tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); @@ -109,6 +111,8 @@ struct ReconnectingWebsocketWorker { ws_urls: Vec, // Communication channel with the RPC client client_receiver: TokioReceiver, + + // Communication channel with ourself self_sender: TokioSender, // Senders to distribute incoming header notifications to @@ -123,12 +127,6 @@ fn handle_event_distribution( ) -> Result<(), String> { match event { Some(Ok(header)) => { - tracing::info!( - target: LOG_TARGET, - "Received block via RPC: #{} ({})", - header.number, - header.hash() - ); senders.retain_mut(|e| { match e.try_send(header.clone()) { // Receiver has been dropped, remove Sender from list. @@ -164,10 +162,13 @@ struct RelayChainSubscriptions { best_subscription: Subscription, } +/// Try to find a new RPC server to connect to. async fn connect_next_available_rpc_server( urls: &Vec, starting_position: usize, ) -> Result<(usize, Arc), ()> { + tracing::info!(target: LOG_TARGET, "Connecting to RPC server."); + for (counter, url) in urls.iter().cycle().skip(starting_position).take(urls.len()).enumerate() { let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); @@ -178,28 +179,6 @@ async fn connect_next_available_rpc_server( Err(()) } -async fn create_request( - client: Arc, - method: String, - params: Option>, - response_sender: OneshotSender>, -) -> Result<(), RpcDispatcherMessage> { - let resp = client.request(&method, params.clone().map(|p| p.into())).await; - - if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)); - } - - if let Err(err) = response_sender.send(resp) { - tracing::error!( - target: LOG_TARGET, - ?err, - "Recipient no longer interested in request result" - ); - } - return Ok(()); -} - impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { @@ -209,21 +188,82 @@ impl ClientManager { Ok(Self { urls, active_client }) } - pub async fn connect_to_new_rpc_server(&mut self) -> Result, ()> { - tracing::info!( - target: LOG_TARGET, - is_connected = self.active_client.1.is_connected(), - "Trying to find new RPC server." 
- ); - + pub async fn connect_to_new_rpc_server(&mut self) -> Result<(), ()> { let new_active = connect_next_available_rpc_server(&self.urls, self.active_client.0 + 1).await?; self.active_client = new_active; - Ok(self.active_client.1.clone()) + Ok(()) } - pub fn get_client(&mut self) -> Arc { - return self.active_client.1.clone(); + async fn get_subscriptions(&self) -> Result { + let import_subscription = self + .active_client + .1 + .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") + .await + .map_err(|e| { + tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + e + })?; + let best_subscription = self + .active_client + .1 + .subscribe::("chain_subscribeNewHeads", None, "chain_unsubscribeNewHeads") + .await + .map_err(|e| { + tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + e + })?; + let finalized_subscription = self + .active_client + .1 + .subscribe::( + "chain_subscribeFinalizedHeads", + None, + "chain_unsubscribeFinalizedHeads", + ) + .await + .map_err(|e| { + tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + e + })?; + + Ok(RelayChainSubscriptions { + import_subscription, + best_subscription, + finalized_subscription, + }) + } + + /// Create a request future that performs an RPC request and sends the results to the caller. + /// In case of a dead websocket connection, it returns the original request parameters to + /// enable retries. + fn create_request( + &self, + method: String, + params: Option>, + response_sender: OneshotSender>, + ) -> impl Future> { + let future_client = self.active_client.1.clone(); + async move { + let resp = future_client.request(&method, params.clone().map(|p| p.into())).await; + + // We should only return the original request in case + // the websocket connection is dead and requires a restart. + // Other errors should be forwarded to the request caller. + if let Err(JsonRpseeError::RestartNeeded(_)) = resp { + return Err(RpcDispatcherMessage::Request(method, params, response_sender)); + } + + if let Err(err) = response_sender.send(resp) { + tracing::error!( + target: LOG_TARGET, + ?err, + "Recipient no longer interested in request result" + ); + } + return Ok(()); + } } } @@ -251,16 +291,14 @@ impl ReconnectingWebsocketWorker { /// If an error occurs during sending, the receiver has been closed and we remove the sender from the list. /// - Find a new valid RPC server to connect to in case the websocket connection is terminated. /// If the worker is not able to connec to an RPC server from the list, the worker shuts down. - pub async fn run(mut self) { + async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); let Ok(mut client_manager) = ClientManager::new(self.ws_urls.clone()).await else { tracing::error!(target: LOG_TARGET, "No valid RPC url found. Stopping RPC worker."); return; }; - let active_client = client_manager.get_client(); - - let Ok(mut subscriptions) = get_subscriptions(&active_client).await else { + let Ok(mut subscriptions) = client_manager.get_subscriptions().await else { return; }; @@ -269,7 +307,7 @@ impl ReconnectingWebsocketWorker { // This branch is taken if the websocket connection to the current RPC server is closed. if should_reconnect { // At this point, all pending requests will return an error since the - // JsonRpSee background thread is dead. So draining the pending requests should be fast. + // websocket connection is dead. So draining the pending requests should be fast. 
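In the refactor above, `create_request` returns a future that owns an `Arc` of the active client, so in-flight requests can sit in a `FuturesUnordered` independently of the `ClientManager` that created them; when the connection dies they all resolve quickly with their original request, which the worker then replays against the next server. The sketch below illustrates that pattern with made-up names; for self-containment it collects the failed requests into a `Vec`, whereas the patch re-queues them through the worker's own channel.

```rust
use futures::{stream::FuturesUnordered, StreamExt};
use std::{future::Future, sync::Arc};

/// Stand-in for a connected RPC client.
struct Client {
    healthy: bool,
}

/// Build a request future that owns an `Arc` of the client, so it is `'static`
/// and can live in a `FuturesUnordered`. On a dead connection the original
/// request is handed back for a later retry.
fn make_request(
    client: Arc<Client>,
    method: String,
) -> impl Future<Output = Result<(), String>> {
    async move {
        if client.healthy {
            println!("performed {method}");
            Ok(())
        } else {
            Err(method) // Return the request so it can be queued again.
        }
    }
}

#[tokio::main]
async fn main() {
    let dead = Arc::new(Client { healthy: false });
    let mut pending = FuturesUnordered::new();
    pending.push(make_request(dead.clone(), "chain_getHeader".into()));
    pending.push(make_request(dead.clone(), "chain_getBlockHash".into()));

    // Connection lost: drain everything in flight and collect the requests
    // that must be replayed once a new server is found.
    let mut to_retry = Vec::new();
    while let Some(result) = pending.next().await {
        if let Err(request) = result {
            to_retry.push(request);
        }
    }

    // "Reconnect" and replay the collected requests against the new client.
    let fresh = Arc::new(Client { healthy: true });
    for request in to_retry {
        pending.push(make_request(fresh.clone(), request));
    }
    while let Some(result) = pending.next().await {
        assert!(result.is_ok());
    }
}
```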
while !pending_requests.is_empty() { if let Some(Err(req)) = pending_requests.next().await { // Put the failed requests into the queue again, they will be processed @@ -281,7 +319,7 @@ impl ReconnectingWebsocketWorker { } } - let Ok(active_client) = client_manager.connect_to_new_rpc_server().await else { + if let Err(_) = client_manager.connect_to_new_rpc_server().await { tracing::error!( target: LOG_TARGET, "Unable to find valid external RPC server, shutting down." @@ -289,9 +327,7 @@ impl ReconnectingWebsocketWorker { return; }; - if let Ok(new_subscriptions) = get_subscriptions(&active_client).await { - subscriptions = new_subscriptions; - } else { + let Ok(new_subscriptions) = client_manager.get_subscriptions().await else { tracing::error!( target: LOG_TARGET, "Not able to create streams from newly connected RPC server, shutting down." @@ -299,6 +335,7 @@ impl ReconnectingWebsocketWorker { return; }; + subscriptions = new_subscriptions; should_reconnect = false; }; @@ -314,8 +351,7 @@ impl ReconnectingWebsocketWorker { self.finalized_header_listeners.push(tx) }, Some(RpcDispatcherMessage::Request(method, params, response_sender)) => { - let client = active_client.clone(); - pending_requests.push(create_request(client, method, params, response_sender)); + pending_requests.push(client_manager.create_request(method, params, response_sender)); }, None => { tracing::error!(target: LOG_TARGET, "RPC client receiver closed. Stopping RPC Worker."); @@ -350,52 +386,3 @@ impl ReconnectingWebsocketWorker { } } } - -async fn get_imported_header_subscription( - subscription_client: &Arc, -) -> Result, JsonRpseeError> { - subscription_client - .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") - .await - .map_err(|e| { - tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); - e - }) -} - -async fn get_best_head_subscription( - subscription_client: &Arc, -) -> Result, JsonRpseeError> { - subscription_client - .subscribe::("chain_subscribeNewHeads", None, "chain_unsubscribeNewHeads") - .await - .map_err(|e| { - tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); - e - }) -} - -async fn get_finalized_head_subscription( - subscription_client: &Arc, -) -> Result, JsonRpseeError> { - subscription_client - .subscribe::( - "chain_subscribeFinalizedHeads", - None, - "chain_unsubscribeFinalizedHeads", - ) - .await - .map_err(|e| { - tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); - e - }) -} - -async fn get_subscriptions( - active_client: &Arc, -) -> Result { - let import_subscription = get_imported_header_subscription(&active_client).await?; - let best_subscription = get_best_head_subscription(&active_client).await?; - let finalized_subscription = get_finalized_head_subscription(&active_client).await?; - Ok(RelayChainSubscriptions { import_subscription, best_subscription, finalized_subscription }) -} From 79625ddaf46d45ff323ca859baa850e26416588a Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 13:10:11 +0100 Subject: [PATCH 14/36] Adjust zombienet test --- zombienet/tests/0006-rpc_collator_builds_blocks.feature | 3 +++ 1 file changed, 3 insertions(+) diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.feature b/zombienet/tests/0006-rpc_collator_builds_blocks.feature index 558e65f96db..05fa672d8a5 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.feature +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.feature @@ -7,6 +7,7 @@ bob: is up charlie: is up one: is up two: is up 
+three: is up dave: is up eve: is up @@ -15,3 +16,5 @@ alice: parachain 2000 block height is at least 10 within 250 seconds dave: reports block height is at least 12 within 250 seconds eve: reports block height is at least 12 within 250 seconds +one: pause +dave: reports block height is at least 20 within 200 seconds From 5f6fe6b8e9bbce033cc3d14b2d73ea64f100b007 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 13:17:11 +0100 Subject: [PATCH 15/36] Add more comments --- .../relay-chain-rpc-interface/src/reconnecting_ws_client.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index ed59b55c598..2aef423f46a 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -150,7 +150,10 @@ fn handle_event_distribution( } } -#[derive(Clone, Debug)] +/// Manages the active websocket client. +/// Responsible for creating request futures, subscription streams +/// and reconnections. +#[derive(Debug)] struct ClientManager { urls: Vec, active_client: (usize, Arc), From 98341d7d48c0f11b40fd20b910d6d61e7c310159 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 15 Nov 2022 15:53:43 +0100 Subject: [PATCH 16/36] fmt --- client/relay-chain-interface/src/lib.rs | 4 ++-- .../src/blockchain_rpc_client.rs | 5 ++-- client/relay-chain-rpc-interface/src/lib.rs | 2 +- .../src/reconnecting_ws_client.rs | 23 +++++++++---------- polkadot-parachain/src/service.rs | 5 ++-- 5 files changed, 18 insertions(+), 21 deletions(-) diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs index 33aa06c8463..0e7d3d37e86 100644 --- a/client/relay-chain-interface/src/lib.rs +++ b/client/relay-chain-interface/src/lib.rs @@ -92,7 +92,7 @@ impl From> for RelayChainError { return RelayChainError::WorkerCommunicationError(format!( "Unable to send message to RPC worker: {}", e.to_string() - )); + )) } } @@ -101,7 +101,7 @@ impl From for RelayChainError { return RelayChainError::WorkerCommunicationError(format!( "Unexpected channel close on RPC worker side: {}", e.to_string() - )); + )) } } diff --git a/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index b00affb9197..1ed896533a3 100644 --- a/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -349,12 +349,11 @@ impl BlockChainRpcClient { for address in addresses { match MultiaddrWithPeerId::from_str(&address) { Ok(addr) => result_vec.push(addr), - Err(err) => { + Err(err) => return Err(RelayChainError::GenericError(format!( "Failed to parse a local listen addresses from the RPC node: {}", err - ))) - }, + ))), } } diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index c85a142aa64..58e37b4e6a7 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -172,7 +172,7 @@ impl RelayChainInterface for RelayChainRpcInterface { let mut head_stream = self.rpc_client.get_imported_heads_stream()?; if self.rpc_client.chain_get_header(Some(wait_for_hash)).await?.is_some() { - return Ok(()); + return Ok(()) } let mut timeout = futures_timer::Delay::new(Duration::from_secs(TIMEOUT_IN_SECONDS)).fuse(); diff --git 
a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 2aef423f46a..512b9d6c298 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -1,8 +1,10 @@ use cumulus_primitives_core::relay_chain::Header as PHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::{ - channel::mpsc::{Receiver, Sender}, - channel::oneshot::Sender as OneshotSender, + channel::{ + mpsc::{Receiver, Sender}, + oneshot::Sender as OneshotSender, + }, stream::FuturesUnordered, StreamExt, }; @@ -14,8 +16,7 @@ use jsonrpsee::{ ws_client::WsClientBuilder, }; use polkadot_service::TaskManager; -use std::future::Future; -use std::sync::Arc; +use std::{future::Future, sync::Arc}; use tokio::sync::mpsc::{ channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, }; @@ -143,9 +144,7 @@ fn handle_event_distribution( }); Ok(()) }, - None => { - return Err("Subscription closed".to_string()); - }, + None => return Err("Subscription closed".to_string()), Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), } } @@ -176,7 +175,7 @@ async fn connect_next_available_rpc_server( let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); if let Ok(ws_client) = WsClientBuilder::default().build(url.clone()).await { - return Ok((index, Arc::new(ws_client))); + return Ok((index, Arc::new(ws_client))) }; } Err(()) @@ -185,7 +184,7 @@ async fn connect_next_available_rpc_server( impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { - return Err(()); + return Err(()) } let active_client = connect_next_available_rpc_server(&urls, 0).await?; Ok(Self { urls, active_client }) @@ -255,7 +254,7 @@ impl ClientManager { // the websocket connection is dead and requires a restart. // Other errors should be forwarded to the request caller. if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)); + return Err(RpcDispatcherMessage::Request(method, params, response_sender)) } if let Err(err) = response_sender.send(resp) { @@ -265,7 +264,7 @@ impl ClientManager { "Recipient no longer interested in request result" ); } - return Ok(()); + return Ok(()) } } } @@ -327,7 +326,7 @@ impl ReconnectingWebsocketWorker { target: LOG_TARGET, "Unable to find valid external RPC server, shutting down." 
); - return; + return }; let Ok(new_subscriptions) = client_manager.get_subscriptions().await else { diff --git a/polkadot-parachain/src/service.rs b/polkadot-parachain/src/service.rs index 45ed857799b..08c1b0cf930 100644 --- a/polkadot-parachain/src/service.rs +++ b/polkadot-parachain/src/service.rs @@ -265,9 +265,8 @@ async fn build_relay_chain_interface( hwbench: Option, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { match collator_options.relay_chain_rpc_urls { - Some(relay_chain_url) => { - build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await - }, + Some(relay_chain_url) => + build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, None => build_inprocess_relay_chain( polkadot_config, parachain_config, From 8d46204c0b22d37e8acd46ed2e36d2bf398a13b3 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 21 Nov 2022 09:55:23 +0100 Subject: [PATCH 17/36] Apply reviewers comments --- client/cli/src/lib.rs | 8 +- client/relay-chain-interface/src/lib.rs | 8 +- .../src/collator_overseer.rs | 6 +- .../src/reconnecting_ws_client.rs | 68 +++++---- .../src/rpc_client.rs | 9 +- parachain-template/node/src/command.rs | 17 ++- parachain-template/node/src/service.rs | 15 +- polkadot-parachain/src/command.rs | 132 +++++++++++------- polkadot-parachain/src/service.rs | 15 +- test/service/src/lib.rs | 20 +-- 10 files changed, 178 insertions(+), 120 deletions(-) diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index eb8989ca9f3..c3c3c4cb4c3 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -97,7 +97,7 @@ impl PurgeChainCmd { Some('y') | Some('Y') => {}, _ => { println!("Aborted"); - return Ok(()) + return Ok(()); }, } } @@ -294,9 +294,9 @@ pub struct RunCmd { #[arg( long, value_parser = validate_relay_chain_url, - num_args = 1.. + num_args = 0.. 
)] - pub relay_chain_rpc_urls: Option>, + pub relay_chain_rpc_urls: Vec, } impl RunCmd { @@ -319,7 +319,7 @@ impl RunCmd { #[derive(Clone, Debug)] pub struct CollatorOptions { /// Location of relay chain full node - pub relay_chain_rpc_urls: Option>, + pub relay_chain_rpc_urls: Vec, } /// A non-redundant version of the `RunCmd` that sets the `validator` field when the diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs index 0e7d3d37e86..36500e51252 100644 --- a/client/relay-chain-interface/src/lib.rs +++ b/client/relay-chain-interface/src/lib.rs @@ -89,18 +89,18 @@ impl From for sp_blockchain::Error { impl From> for RelayChainError { fn from(e: tokio::sync::mpsc::error::SendError) -> Self { - return RelayChainError::WorkerCommunicationError(format!( + RelayChainError::WorkerCommunicationError(format!( "Unable to send message to RPC worker: {}", - e.to_string() + e )) } } impl From for RelayChainError { fn from(e: futures::channel::oneshot::Canceled) -> Self { - return RelayChainError::WorkerCommunicationError(format!( + RelayChainError::WorkerCommunicationError(format!( "Unexpected channel close on RPC worker side: {}", - e.to_string() + e )) } } diff --git a/client/relay-chain-minimal-node/src/collator_overseer.rs b/client/relay-chain-minimal-node/src/collator_overseer.rs index b9b599a3a64..0280e8cc283 100644 --- a/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -252,7 +252,8 @@ async fn forward_collator_events( f = finality.next() => { match f { Some(header) => { - tracing::info!(target: "minimal-polkadot-node", "Received finalized block via RPC: #{} ({} -> {})", header.number, header.parent_hash, header.hash()); + tracing::info!(target: "minimal-polkadot-node", "Received finalized block via RPC: #{} ({} -> {})", + header.number, header.parent_hash, header.hash()); let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; handle.block_finalized(block_info).await; } @@ -262,7 +263,8 @@ async fn forward_collator_events( i = imports.next() => { match i { Some(header) => { - tracing::info!(target: "minimal-polkadot-node", "Received imported block via RPC: #{} ({} -> {})", header.number, header.parent_hash, header.hash()); + tracing::info!(target: "minimal-polkadot-node", "Received imported block via RPC: #{} ({} -> {})", + header.number, header.parent_hash, header.hash()); let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; handle.block_imported(block_info).await; } diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 512b9d6c298..c93208e027b 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -109,14 +109,14 @@ impl ReconnectingWsClient { /// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. 
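The handle/worker split that these patches converge on can be summarised as: a cheap, cloneable frontend that only pushes `RpcDispatcherMessage`-style requests into a channel, and one background worker that owns the connection state and replies through oneshot channels, which is what lets the worker swap the underlying websocket client on reconnect without any locking on the caller side. The following is a minimal, illustrative sketch of that pattern only, assuming `tokio` with the `rt-multi-thread` and `macros` features; the names and the echo behaviour are placeholders, not the crate's actual types.

use tokio::sync::{mpsc, oneshot};

enum WorkerMessage {
    // A request carries its own reply channel, like `RpcDispatcherMessage::Request`.
    Request(String, oneshot::Sender<String>),
}

#[derive(Clone)]
struct Handle {
    to_worker: mpsc::Sender<WorkerMessage>,
}

impl Handle {
    async fn request(&self, method: String) -> Result<String, String> {
        let (tx, rx) = oneshot::channel();
        self.to_worker
            .send(WorkerMessage::Request(method, tx))
            .await
            .map_err(|e| format!("worker unreachable: {e}"))?;
        rx.await.map_err(|e| format!("worker dropped the request: {e}"))
    }
}

async fn run_worker(mut from_handles: mpsc::Receiver<WorkerMessage>) {
    while let Some(WorkerMessage::Request(method, reply)) = from_handles.recv().await {
        // The real worker dispatches to the active websocket client and
        // reconnects on `RestartNeeded`; here we only echo the method name.
        let _ = reply.send(format!("handled {method}"));
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    tokio::spawn(run_worker(rx));
    let handle = Handle { to_worker: tx };
    println!("{:?}", handle.request("chain_getHeader".into()).await);
}
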
struct ReconnectingWebsocketWorker { - ws_urls: Vec, - // Communication channel with the RPC client + ws_urls: Option>, + /// Communication channel with the RPC client client_receiver: TokioReceiver, - // Communication channel with ourself + /// Communication channel with ourself self_sender: TokioSender, - // Senders to distribute incoming header notifications to + /// Senders to distribute incoming header notifications to imported_header_listeners: Vec>, finalized_header_listeners: Vec>, best_header_listeners: Vec>, @@ -144,7 +144,7 @@ fn handle_event_distribution( }); Ok(()) }, - None => return Err("Subscription closed".to_string()), + None => Err("Subscription closed".to_string()), Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), } } @@ -155,7 +155,8 @@ fn handle_event_distribution( #[derive(Debug)] struct ClientManager { urls: Vec, - active_client: (usize, Arc), + active_client: Arc, + active_index: usize, } struct RelayChainSubscriptions { @@ -174,8 +175,8 @@ async fn connect_next_available_rpc_server( for (counter, url) in urls.iter().cycle().skip(starting_position).take(urls.len()).enumerate() { let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); - if let Ok(ws_client) = WsClientBuilder::default().build(url.clone()).await { - return Ok((index, Arc::new(ws_client))) + if let Ok(ws_client) = WsClientBuilder::default().build(url).await { + return Ok((index, Arc::new(ws_client))); }; } Err(()) @@ -184,23 +185,23 @@ async fn connect_next_available_rpc_server( impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { - return Err(()) + return Err(()); } let active_client = connect_next_available_rpc_server(&urls, 0).await?; - Ok(Self { urls, active_client }) + Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) } pub async fn connect_to_new_rpc_server(&mut self) -> Result<(), ()> { let new_active = - connect_next_available_rpc_server(&self.urls, self.active_client.0 + 1).await?; - self.active_client = new_active; + connect_next_available_rpc_server(&self.urls, self.active_index + 1).await?; + self.active_client = new_active.1; + self.active_index = new_active.0; Ok(()) } async fn get_subscriptions(&self) -> Result { let import_subscription = self .active_client - .1 .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") .await .map_err(|e| { @@ -209,7 +210,6 @@ impl ClientManager { })?; let best_subscription = self .active_client - .1 .subscribe::("chain_subscribeNewHeads", None, "chain_unsubscribeNewHeads") .await .map_err(|e| { @@ -218,7 +218,6 @@ impl ClientManager { })?; let finalized_subscription = self .active_client - .1 .subscribe::( "chain_subscribeFinalizedHeads", None, @@ -246,7 +245,7 @@ impl ClientManager { params: Option>, response_sender: OneshotSender>, ) -> impl Future> { - let future_client = self.active_client.1.clone(); + let future_client = self.active_client.clone(); async move { let resp = future_client.request(&method, params.clone().map(|p| p.into())).await; @@ -254,17 +253,17 @@ impl ClientManager { // the websocket connection is dead and requires a restart. // Other errors should be forwarded to the request caller. 
if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)) + return Err(RpcDispatcherMessage::Request(method, params, response_sender)); } if let Err(err) = response_sender.send(resp) { - tracing::error!( + tracing::debug!( target: LOG_TARGET, ?err, "Recipient no longer interested in request result" ); } - return Ok(()) + Ok(()) } } } @@ -276,7 +275,7 @@ impl ReconnectingWebsocketWorker { ) -> (ReconnectingWebsocketWorker, TokioSender) { let (tx, rx) = tokio_channel(100); let worker = ReconnectingWebsocketWorker { - ws_urls: urls, + ws_urls: Some(urls), client_receiver: rx, self_sender: tx.clone(), imported_header_listeners: Vec::new(), @@ -296,11 +295,13 @@ impl ReconnectingWebsocketWorker { async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); - let Ok(mut client_manager) = ClientManager::new(self.ws_urls.clone()).await else { + let urls = self.ws_urls.take().expect("Constructor requires passing of urls; qed"); + let Ok(mut client_manager) = ClientManager::new(urls).await else { tracing::error!(target: LOG_TARGET, "No valid RPC url found. Stopping RPC worker."); return; }; let Ok(mut subscriptions) = client_manager.get_subscriptions().await else { + tracing::error!(target: LOG_TARGET, "Not able to fetch subscription after reconnect."); return; }; @@ -314,19 +315,23 @@ impl ReconnectingWebsocketWorker { if let Some(Err(req)) = pending_requests.next().await { // Put the failed requests into the queue again, they will be processed // once we have a new rpc server to connect to. - self.self_sender - .send(req) - .await - .expect("This worker owns the channel; qed"); + if let Err(err) = self.self_sender.try_send(req) { + tracing::error!( + target: LOG_TARGET, + ?err, + "Unable to retry requests, queue is unexpectedly full." + ); + return; + }; } } - if let Err(_) = client_manager.connect_to_new_rpc_server().await { + if client_manager.connect_to_new_rpc_server().await.is_err() { tracing::error!( target: LOG_TARGET, "Unable to find valid external RPC server, shutting down." ); - return + return; }; let Ok(new_subscriptions) = client_manager.get_subscriptions().await else { @@ -362,7 +367,14 @@ impl ReconnectingWebsocketWorker { }, should_retry = pending_requests.next(), if !pending_requests.is_empty() => { if let Some(Err(req)) = should_retry { - self.self_sender.send(req).await.expect("This worker owns the channel; qed"); + if let Err(err) = self.self_sender.try_send(req) { + tracing::error!( + target: LOG_TARGET, + ?err, + "Unable to retry requests, queue is unexpectedly full." + ); + return; + }; should_reconnect = true; } }, diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index bb13e6aec8c..79236ae0fde 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -54,10 +54,11 @@ pub struct RelayChainRpcClient { macro_rules! 
rpc_params { ($($param:expr),*) => { { - let mut __params = vec![]; - $( - __params.push(serde_json::to_value($param).expect("json serialization is infallible; qed.")); - )* + let mut __params = vec![ + $( + serde_json::to_value($param).expect("json serialization is infallible; qed."), + )* + ]; Some(__params) } }; diff --git a/parachain-template/node/src/command.rs b/parachain-template/node/src/command.rs index 3f3c0e51276..3cf5622a018 100644 --- a/parachain-template/node/src/command.rs +++ b/parachain-template/node/src/command.rs @@ -192,26 +192,28 @@ pub fn run() -> Result<()> { let runner = cli.create_runner(cmd)?; // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => + BenchmarkCmd::Pallet(cmd) => { if cfg!(feature = "runtime-benchmarks") { runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into()) - }, + } + }, BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { let partials = new_partial(&config)?; cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => + BenchmarkCmd::Storage(_) => { return Err(sc_cli::Error::Input( "Compile with --features=runtime-benchmarks \ to enable storage benchmarks." .into(), ) - .into()), + .into()) + }, #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { let partials = new_partial(&config)?; @@ -219,8 +221,9 @@ pub fn run() -> Result<()> { let storage = partials.backend.expose_storage(); cmd.run(config, partials.client.clone(), db, storage) }), - BenchmarkCmd::Machine(cmd) => - runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), + BenchmarkCmd::Machine(cmd) => { + runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())) + }, // NOTE: this allows the Client to leniently implement // new benchmark commands without requiring a companion MR. #[allow(unreachable_patterns)] @@ -288,7 +291,7 @@ pub fn run() -> Result<()> { info!("Parachain genesis state: {}", genesis_state); info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - if collator_options.relay_chain_rpc_urls.is_some() && cli.relay_chain_args.len() > 0 { + if !collator_options.relay_chain_rpc_urls.is_empty() && cli.relay_chain_args.len() > 0 { warn!("Detected relay chain node arguments together with --relay-chain-rpc-url. 
This command starts a minimal Polkadot node that only uses a network-related subset of all relay chain CLI options."); } diff --git a/parachain-template/node/src/service.rs b/parachain-template/node/src/service.rs index 759db03b6ef..dfb29ded9a5 100644 --- a/parachain-template/node/src/service.rs +++ b/parachain-template/node/src/service.rs @@ -143,16 +143,21 @@ async fn build_relay_chain_interface( collator_options: CollatorOptions, hwbench: Option, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { - match collator_options.relay_chain_rpc_urls { - Some(relay_chain_url) => - build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, - None => build_inprocess_relay_chain( + if !collator_options.relay_chain_rpc_urls.is_empty() { + build_minimal_relay_chain_node( + polkadot_config, + task_manager, + collator_options.relay_chain_rpc_urls, + ) + .await + } else { + build_inprocess_relay_chain( polkadot_config, parachain_config, telemetry_worker_handle, task_manager, hwbench, - ), + ) } } diff --git a/polkadot-parachain/src/command.rs b/polkadot-parachain/src/command.rs index 7e7547131ad..23a3fa88a23 100644 --- a/polkadot-parachain/src/command.rs +++ b/polkadot-parachain/src/command.rs @@ -114,20 +114,24 @@ fn load_spec(id: &str) -> std::result::Result, String> { let (id, _, para_id) = extract_parachain_id(id); Ok(match id { // - Defaul-like - "staging" => - Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()), - "tick" => + "staging" => { + Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()) + }, + "tick" => { Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/tick.json")[..], - )?), - "trick" => + )?) + }, + "trick" => { Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/trick.json")[..], - )?), - "track" => + )?) + }, + "track" => { Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/track.json")[..], - )?), + )?) + }, // -- Starters "shell" => Box::new(chain_spec::shell::get_shell_chain_spec()), @@ -164,29 +168,36 @@ fn load_spec(id: &str) -> std::result::Result, String> { )?), // -- Polkadot Collectives - "collectives-polkadot-dev" => - Box::new(chain_spec::collectives::collectives_polkadot_development_config()), - "collectives-polkadot-local" => - Box::new(chain_spec::collectives::collectives_polkadot_local_config()), - "collectives-polkadot" => + "collectives-polkadot-dev" => { + Box::new(chain_spec::collectives::collectives_polkadot_development_config()) + }, + "collectives-polkadot-local" => { + Box::new(chain_spec::collectives::collectives_polkadot_local_config()) + }, + "collectives-polkadot" => { Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/collectives-polkadot.json")[..], - )?), - "collectives-westend" => + )?) + }, + "collectives-westend" => { Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/collectives-westend.json")[..], - )?), + )?) 
+ }, // -- Contracts on Rococo - "contracts-rococo-dev" => - Box::new(chain_spec::contracts::contracts_rococo_development_config()), - "contracts-rococo-local" => - Box::new(chain_spec::contracts::contracts_rococo_local_config()), + "contracts-rococo-dev" => { + Box::new(chain_spec::contracts::contracts_rococo_development_config()) + }, + "contracts-rococo-local" => { + Box::new(chain_spec::contracts::contracts_rococo_local_config()) + }, "contracts-rococo-genesis" => Box::new(chain_spec::contracts::contracts_rococo_config()), - "contracts-rococo" => + "contracts-rococo" => { Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/contracts-rococo.json")[..], - )?), + )?) + }, // -- Penpall "penpal-kusama" => Box::new(chain_spec::penpal::get_penpal_chain_spec( @@ -208,23 +219,30 @@ fn load_spec(id: &str) -> std::result::Result, String> { path => { let path: PathBuf = path.into(); match path.runtime() { - Runtime::Statemint => - Box::new(chain_spec::statemint::StatemintChainSpec::from_json_file(path)?), - Runtime::Statemine => - Box::new(chain_spec::statemint::StatemineChainSpec::from_json_file(path)?), - Runtime::Westmint => - Box::new(chain_spec::statemint::WestmintChainSpec::from_json_file(path)?), + Runtime::Statemint => { + Box::new(chain_spec::statemint::StatemintChainSpec::from_json_file(path)?) + }, + Runtime::Statemine => { + Box::new(chain_spec::statemint::StatemineChainSpec::from_json_file(path)?) + }, + Runtime::Westmint => { + Box::new(chain_spec::statemint::WestmintChainSpec::from_json_file(path)?) + }, Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => Box::new( chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, ), - Runtime::Shell => - Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), - Runtime::Seedling => - Box::new(chain_spec::seedling::SeedlingChainSpec::from_json_file(path)?), - Runtime::ContractsRococo => - Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_file(path)?), - Runtime::Penpal(_para_id) => - Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), + Runtime::Shell => { + Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?) + }, + Runtime::Seedling => { + Box::new(chain_spec::seedling::SeedlingChainSpec::from_json_file(path)?) + }, + Runtime::ContractsRococo => { + Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_file(path)?) + }, + Runtime::Penpal(_para_id) => { + Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?) 
+ }, Runtime::Default => Box::new( chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_file(path)?, ), @@ -295,8 +313,9 @@ impl SubstrateCli for Cli { Runtime::Statemint => &statemint_runtime::VERSION, Runtime::Statemine => &statemine_runtime::VERSION, Runtime::Westmint => &westmint_runtime::VERSION, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => - &collectives_polkadot_runtime::VERSION, + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + &collectives_polkadot_runtime::VERSION + }, Runtime::Shell => &shell_runtime::VERSION, Runtime::Seedling => &seedling_runtime::VERSION, Runtime::ContractsRococo => &contracts_rococo_runtime::VERSION, @@ -545,16 +564,19 @@ pub fn run() -> Result<()> { // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => + BenchmarkCmd::Pallet(cmd) => { if cfg!(feature = "runtime-benchmarks") { runner.sync_run(|config| match config.chain_spec.runtime() { - Runtime::Statemine => - cmd.run::(config), + Runtime::Statemine => { + cmd.run::(config) + }, Runtime::Westmint => cmd.run::(config), - Runtime::Statemint => - cmd.run::(config), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => - cmd.run::(config), + Runtime::Statemint => { + cmd.run::(config) + }, + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + cmd.run::(config) + }, _ => Err(format!( "Chain '{:?}' doesn't support benchmarking", config.chain_spec.runtime() @@ -565,18 +587,20 @@ pub fn run() -> Result<()> { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into()) - }, + } + }, BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { construct_benchmark_partials!(config, |partials| cmd.run(partials.client)) }), #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => + BenchmarkCmd::Storage(_) => { return Err(sc_cli::Error::Input( "Compile with --features=runtime-benchmarks \ to enable storage benchmarks." .into(), ) - .into()), + .into()) + }, #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { construct_benchmark_partials!(config, |partials| { @@ -586,8 +610,9 @@ pub fn run() -> Result<()> { cmd.run(config, partials.client.clone(), db, storage) }) }), - BenchmarkCmd::Machine(cmd) => - runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), + BenchmarkCmd::Machine(cmd) => { + runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())) + }, // NOTE: this allows the Client to leniently implement // new benchmark commands without requiring a companion MR. 
#[allow(unreachable_patterns)] @@ -613,13 +638,14 @@ pub fn run() -> Result<()> { Runtime::Statemint => runner.async_run(|config| { Ok((cmd.run::(config), task_manager)) }), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { runner.async_run(|config| { Ok(( cmd.run::(config), task_manager, )) - }), + }) + }, Runtime::Shell => runner.async_run(|config| { Ok(( cmd.run::(config), @@ -679,7 +705,7 @@ pub fn run() -> Result<()> { info!("Parachain genesis state: {}", genesis_state); info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - if collator_options.relay_chain_rpc_urls.is_some() && cli.relaychain_args.len() > 0 { + if !collator_options.relay_chain_rpc_urls.is_empty() && cli.relaychain_args.len() > 0 { warn!("Detected relay chain node arguments together with --relay-chain-rpc-url. This command starts a minimal Polkadot node that only uses a network-related subset of all relay chain CLI options."); } diff --git a/polkadot-parachain/src/service.rs b/polkadot-parachain/src/service.rs index 08c1b0cf930..1bd36e9e27d 100644 --- a/polkadot-parachain/src/service.rs +++ b/polkadot-parachain/src/service.rs @@ -264,16 +264,21 @@ async fn build_relay_chain_interface( collator_options: CollatorOptions, hwbench: Option, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { - match collator_options.relay_chain_rpc_urls { - Some(relay_chain_url) => - build_minimal_relay_chain_node(polkadot_config, task_manager, relay_chain_url).await, - None => build_inprocess_relay_chain( + if !collator_options.relay_chain_rpc_urls.is_empty() { + build_minimal_relay_chain_node( + polkadot_config, + task_manager, + collator_options.relay_chain_rpc_urls, + ) + .await + } else { + build_inprocess_relay_chain( polkadot_config, parachain_config, telemetry_worker_handle, task_manager, hwbench, - ), + ) } } diff --git a/test/service/src/lib.rs b/test/service/src/lib.rs index 3903fa1f783..6d3b6a4d2dd 100644 --- a/test/service/src/lib.rs +++ b/test/service/src/lib.rs @@ -189,10 +189,14 @@ async fn build_relay_chain_interface( collator_options: CollatorOptions, task_manager: &mut TaskManager, ) -> RelayChainResult> { - if let Some(relay_chain_url) = collator_options.relay_chain_rpc_urls { - return build_minimal_relay_chain_node(relay_chain_config, task_manager, relay_chain_url) - .await - .map(|r| r.0) + if !collator_options.relay_chain_rpc_urls.is_empty() { + return build_minimal_relay_chain_node( + relay_chain_config, + task_manager, + collator_options.relay_chain_rpc_urls, + ) + .await + .map(|r| r.0); } let relay_chain_full_node = polkadot_test_service::new_full( @@ -427,7 +431,7 @@ pub struct TestNodeBuilder { storage_update_func_parachain: Option>, storage_update_func_relay_chain: Option>, consensus: Consensus, - relay_chain_full_node_url: Option>, + relay_chain_full_node_url: Vec, } impl TestNodeBuilder { @@ -449,7 +453,7 @@ impl TestNodeBuilder { storage_update_func_parachain: None, storage_update_func_relay_chain: None, consensus: Consensus::RelayChain, - relay_chain_full_node_url: None, + relay_chain_full_node_url: vec![], } } @@ -543,7 +547,7 @@ impl TestNodeBuilder { /// Connect to full node via RPC. 
pub fn use_external_relay_chain_node_at_url(mut self, network_address: Url) -> Self { - self.relay_chain_full_node_url = Some(vec![network_address]); + self.relay_chain_full_node_url = vec![network_address]; self } @@ -552,7 +556,7 @@ impl TestNodeBuilder { let mut localhost_url = Url::parse("ws://localhost").expect("Should be able to parse localhost Url"); localhost_url.set_port(Some(port)).expect("Should be able to set port"); - self.relay_chain_full_node_url = Some(vec![localhost_url]); + self.relay_chain_full_node_url = vec![localhost_url]; self } From 7225bb7113b874ad59c4e7025021221d807ee63d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 22 Nov 2022 12:19:25 +0100 Subject: [PATCH 18/36] Extract reconnection to extra method --- .../src/reconnecting_ws_client.rs | 105 ++++++++++++------ 1 file changed, 68 insertions(+), 37 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index c93208e027b..408b70e5f6d 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -18,12 +18,12 @@ use jsonrpsee::{ use polkadot_service::TaskManager; use std::{future::Future, sync::Arc}; use tokio::sync::mpsc::{ - channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, + channel as tokio_channel, error::TryRecvError, Receiver as TokioReceiver, Sender as TokioSender, }; use url::Url; const NOTIFICATION_CHANNEL_SIZE_LIMIT: usize = 20; -const LOG_TARGET: &str = "pooled-rpc-client"; +const LOG_TARGET: &str = "reconnecting-websocket-client"; /// Messages for communication between [`ReconnectingWsClient`] and [`ReconnectingWebsocketWorker`]. #[derive(Debug)] @@ -45,7 +45,7 @@ pub struct ReconnectingWsClient { impl ReconnectingWsClient { /// Create a new websocket client frontend. pub async fn new(urls: Vec, task_manager: &mut TaskManager) -> RelayChainResult { - tracing::info!(target: "pooled-client", "Instantiating pooled websocket client"); + tracing::info!(target: LOG_TARGET, "Instantiating reconnecting websocket client"); let (worker, sender) = ReconnectingWebsocketWorker::new(urls).await; @@ -187,7 +187,7 @@ impl ClientManager { if urls.is_empty() { return Err(()); } - let active_client = connect_next_available_rpc_server(&urls, 0).await?; + let active_client = connect_next_available_rpc_server(&urls, 1).await?; Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) } @@ -285,6 +285,58 @@ impl ReconnectingWebsocketWorker { (worker, tx) } + /// Reconnect via [`ClientManager`] and provide new notification streams. + async fn handle_reconnect( + &mut self, + client_manager: &mut ClientManager, + pending_requests: &mut FuturesUnordered< + impl Future>, + >, + ) -> Result { + let mut tmp_request_storage = Vec::new(); + loop { + match self.client_receiver.try_recv() { + Ok(val) => tmp_request_storage.push(val), + Err(TryRecvError::Empty) => break, + Err(_) => { + return Err("Can not fetch values from client receiver channel.".to_string()) + }, + } + } + + // At this point, all pending requests will return an error since the + // websocket connection is dead. So draining the pending requests should be fast. + while !pending_requests.is_empty() { + if let Some(Err(req)) = pending_requests.next().await { + // Put the failed requests into the queue again, they will be processed + // once we have a new rpc server to connect to. 
+ if let Err(err) = self.self_sender.try_send(req) { + return Err(format!( + "Unable to retry requests, queue is unexpectedly full. err: {:?}", + err + )); + }; + } + } + + for item in tmp_request_storage.into_iter() { + if let Err(err) = self.self_sender.try_send(item) { + return Err(format!( + "Unable to retry requests, queue is unexpectedly full. err: {:?}", + err + )); + }; + } + + if client_manager.connect_to_new_rpc_server().await.is_err() { + return Err(format!("Unable to find valid external RPC server, shutting down.")); + }; + + client_manager.get_subscriptions().await.map_err(|e| { + format!("Not able to create streams from newly connected RPC server, shutting down. err: {:?}", e) + }) + } + /// Run this worker to drive notification streams. /// The worker does the following: /// - Listen for [`RpcDispatcherMessage`], perform requests and register new listeners for the notification streams @@ -309,40 +361,19 @@ impl ReconnectingWebsocketWorker { loop { // This branch is taken if the websocket connection to the current RPC server is closed. if should_reconnect { - // At this point, all pending requests will return an error since the - // websocket connection is dead. So draining the pending requests should be fast. - while !pending_requests.is_empty() { - if let Some(Err(req)) = pending_requests.next().await { - // Put the failed requests into the queue again, they will be processed - // once we have a new rpc server to connect to. - if let Err(err) = self.self_sender.try_send(req) { - tracing::error!( - target: LOG_TARGET, - ?err, - "Unable to retry requests, queue is unexpectedly full." - ); - return; - }; - } + match self.handle_reconnect(&mut client_manager, &mut pending_requests).await { + Ok(new_subscriptions) => { + subscriptions = new_subscriptions; + }, + Err(message) => { + tracing::error!( + target: LOG_TARGET, + message, + "Unable to reconnect, stopping worker." + ); + return; + }, } - - if client_manager.connect_to_new_rpc_server().await.is_err() { - tracing::error!( - target: LOG_TARGET, - "Unable to find valid external RPC server, shutting down." - ); - return; - }; - - let Ok(new_subscriptions) = client_manager.get_subscriptions().await else { - tracing::error!( - target: LOG_TARGET, - "Not able to create streams from newly connected RPC server, shutting down." 
- ); - return; - }; - - subscriptions = new_subscriptions; should_reconnect = false; }; From 8e010602f90afe6a1d2db6ebda19d654249d74c6 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 22 Nov 2022 12:34:51 +0100 Subject: [PATCH 19/36] Add comment to reconnection method --- client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 408b70e5f6d..58a255c2c73 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -295,6 +295,7 @@ impl ReconnectingWebsocketWorker { ) -> Result { let mut tmp_request_storage = Vec::new(); loop { + // Drain the incoming request channel to insert retrying requests at the front later match self.client_receiver.try_recv() { Ok(val) => tmp_request_storage.push(val), Err(TryRecvError::Empty) => break, From 619edb918bd05cb905c2c7e4e0ef60a3456a955e Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 22 Nov 2022 12:54:07 +0100 Subject: [PATCH 20/36] Clean up some dependencies --- Cargo.lock | 11 +++++------ client/relay-chain-rpc-interface/Cargo.toml | 3 +-- .../src/reconnecting_ws_client.rs | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 83b22d1eafe..fb2877d69e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2018,13 +2018,12 @@ dependencies = [ "parity-scale-codec", "polkadot-service", "sc-client-api", - "sc-rpc-api", + "serde", "serde_json", "sp-api", "sp-authority-discovery", "sp-consensus-babe", "sp-core", - "sp-runtime", "sp-state-machine", "sp-storage", "tokio", @@ -10421,18 +10420,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.145" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", diff --git a/client/relay-chain-rpc-interface/Cargo.toml b/client/relay-chain-rpc-interface/Cargo.toml index 3b6ea5c7a4a..cf35592e11b 100644 --- a/client/relay-chain-rpc-interface/Cargo.toml +++ b/client/relay-chain-rpc-interface/Cargo.toml @@ -13,13 +13,11 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } 
-sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } tokio = { version = "1.21.2", features = ["sync", "time"] } futures = "0.3.25" @@ -30,3 +28,4 @@ tracing = "0.1.37" async-trait = "0.1.58" url = "2.3.1" serde_json = "1.0.87" +serde = "1.0.147" diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 58a255c2c73..24e178a3ce7 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -65,7 +65,7 @@ impl ReconnectingWsClient { params: Option>, ) -> Result where - R: sp_runtime::DeserializeOwned, + R: serde::de::DeserializeOwned, { let (tx, rx) = futures::channel::oneshot::channel(); let message = RpcDispatcherMessage::Request(method.to_string(), params, tx); From 2ca7c4b5992c2c5469fc330487cb65d4d4071bf7 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 22 Nov 2022 16:56:07 +0100 Subject: [PATCH 21/36] Fix build --- Cargo.lock | 1 + client/relay-chain-rpc-interface/Cargo.toml | 5 +++-- client/relay-chain-rpc-interface/src/rpc_client.rs | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb2877d69e4..ed15904844f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2018,6 +2018,7 @@ dependencies = [ "parity-scale-codec", "polkadot-service", "sc-client-api", + "sc-rpc-api", "serde", "serde_json", "sp-api", diff --git a/client/relay-chain-rpc-interface/Cargo.toml b/client/relay-chain-rpc-interface/Cargo.toml index cf35592e11b..e9e066f2f2a 100644 --- a/client/relay-chain-rpc-interface/Cargo.toml +++ b/client/relay-chain-rpc-interface/Cargo.toml @@ -16,10 +16,11 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } -tokio = { version = "1.21.2", features = ["sync", "time"] } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +tokio = { version = "1.21.2", features = ["sync", "time"] } futures = "0.3.25" futures-timer = "3.0.2" parity-scale-codec = "3.2.1" diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 79236ae0fde..27684232e94 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -34,10 +34,10 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_service::{BlockNumber, TaskManager}; use sc_client_api::StorageData; use sc_rpc_api::{state::ReadProof, system::Health}; +use serde::de::DeserializeOwned; use sp_api::RuntimeVersion; use sp_consensus_babe::Epoch; use sp_core::sp_std::collections::btree_map::BTreeMap; -use sp_runtime::DeserializeOwned; use sp_storage::StorageKey; use std::sync::Arc; pub use url::Url; From 0ad786f5aa958b7fcf50e5a11e96e7830f1f428a Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 22 Nov 2022 17:08:56 +0100 Subject: [PATCH 22/36] fmt --- client/cli/src/lib.rs | 2 +- 
.../src/reconnecting_ws_client.rs | 19 ++- parachain-template/node/src/command.rs | 15 +- polkadot-parachain/src/command.rs | 130 +++++++----------- test/service/src/lib.rs | 2 +- 5 files changed, 69 insertions(+), 99 deletions(-) diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c3c3c4cb4c3..aca503bebe6 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -97,7 +97,7 @@ impl PurgeChainCmd { Some('y') | Some('Y') => {}, _ => { println!("Aborted"); - return Ok(()); + return Ok(()) }, } } diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 24e178a3ce7..40a4da308dd 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -176,7 +176,7 @@ async fn connect_next_available_rpc_server( let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); if let Ok(ws_client) = WsClientBuilder::default().build(url).await { - return Ok((index, Arc::new(ws_client))); + return Ok((index, Arc::new(ws_client))) }; } Err(()) @@ -185,7 +185,7 @@ async fn connect_next_available_rpc_server( impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { - return Err(()); + return Err(()) } let active_client = connect_next_available_rpc_server(&urls, 1).await?; Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) @@ -253,7 +253,7 @@ impl ClientManager { // the websocket connection is dead and requires a restart. // Other errors should be forwarded to the request caller. if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)); + return Err(RpcDispatcherMessage::Request(method, params, response_sender)) } if let Err(err) = response_sender.send(resp) { @@ -299,9 +299,8 @@ impl ReconnectingWebsocketWorker { match self.client_receiver.try_recv() { Ok(val) => tmp_request_storage.push(val), Err(TryRecvError::Empty) => break, - Err(_) => { - return Err("Can not fetch values from client receiver channel.".to_string()) - }, + Err(_) => + return Err("Can not fetch values from client receiver channel.".to_string()), } } @@ -315,7 +314,7 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )); + )) }; } } @@ -325,12 +324,12 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )); + )) }; } if client_manager.connect_to_new_rpc_server().await.is_err() { - return Err(format!("Unable to find valid external RPC server, shutting down.")); + return Err(format!("Unable to find valid external RPC server, shutting down.")) }; client_manager.get_subscriptions().await.map_err(|e| { @@ -372,7 +371,7 @@ impl ReconnectingWebsocketWorker { message, "Unable to reconnect, stopping worker." 
); - return; + return }, } should_reconnect = false; diff --git a/parachain-template/node/src/command.rs b/parachain-template/node/src/command.rs index 3cf5622a018..b8a079e3b2e 100644 --- a/parachain-template/node/src/command.rs +++ b/parachain-template/node/src/command.rs @@ -192,28 +192,26 @@ pub fn run() -> Result<()> { let runner = cli.create_runner(cmd)?; // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => { + BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into()) - } - }, + }, BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { let partials = new_partial(&config)?; cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => { + BenchmarkCmd::Storage(_) => return Err(sc_cli::Error::Input( "Compile with --features=runtime-benchmarks \ to enable storage benchmarks." .into(), ) - .into()) - }, + .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { let partials = new_partial(&config)?; @@ -221,9 +219,8 @@ pub fn run() -> Result<()> { let storage = partials.backend.expose_storage(); cmd.run(config, partials.client.clone(), db, storage) }), - BenchmarkCmd::Machine(cmd) => { - runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())) - }, + BenchmarkCmd::Machine(cmd) => + runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), // NOTE: this allows the Client to leniently implement // new benchmark commands without requiring a companion MR. #[allow(unreachable_patterns)] diff --git a/polkadot-parachain/src/command.rs b/polkadot-parachain/src/command.rs index 23a3fa88a23..c6c56cb6e81 100644 --- a/polkadot-parachain/src/command.rs +++ b/polkadot-parachain/src/command.rs @@ -114,24 +114,20 @@ fn load_spec(id: &str) -> std::result::Result, String> { let (id, _, para_id) = extract_parachain_id(id); Ok(match id { // - Defaul-like - "staging" => { - Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()) - }, - "tick" => { + "staging" => + Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()), + "tick" => Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/tick.json")[..], - )?) - }, - "trick" => { + )?), + "trick" => Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/trick.json")[..], - )?) - }, - "track" => { + )?), + "track" => Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/track.json")[..], - )?) 
- }, + )?), // -- Starters "shell" => Box::new(chain_spec::shell::get_shell_chain_spec()), @@ -168,36 +164,29 @@ fn load_spec(id: &str) -> std::result::Result, String> { )?), // -- Polkadot Collectives - "collectives-polkadot-dev" => { - Box::new(chain_spec::collectives::collectives_polkadot_development_config()) - }, - "collectives-polkadot-local" => { - Box::new(chain_spec::collectives::collectives_polkadot_local_config()) - }, - "collectives-polkadot" => { + "collectives-polkadot-dev" => + Box::new(chain_spec::collectives::collectives_polkadot_development_config()), + "collectives-polkadot-local" => + Box::new(chain_spec::collectives::collectives_polkadot_local_config()), + "collectives-polkadot" => Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/collectives-polkadot.json")[..], - )?) - }, - "collectives-westend" => { + )?), + "collectives-westend" => Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/collectives-westend.json")[..], - )?) - }, + )?), // -- Contracts on Rococo - "contracts-rococo-dev" => { - Box::new(chain_spec::contracts::contracts_rococo_development_config()) - }, - "contracts-rococo-local" => { - Box::new(chain_spec::contracts::contracts_rococo_local_config()) - }, + "contracts-rococo-dev" => + Box::new(chain_spec::contracts::contracts_rococo_development_config()), + "contracts-rococo-local" => + Box::new(chain_spec::contracts::contracts_rococo_local_config()), "contracts-rococo-genesis" => Box::new(chain_spec::contracts::contracts_rococo_config()), - "contracts-rococo" => { + "contracts-rococo" => Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_bytes( &include_bytes!("../../parachains/chain-specs/contracts-rococo.json")[..], - )?) - }, + )?), // -- Penpall "penpal-kusama" => Box::new(chain_spec::penpal::get_penpal_chain_spec( @@ -219,30 +208,23 @@ fn load_spec(id: &str) -> std::result::Result, String> { path => { let path: PathBuf = path.into(); match path.runtime() { - Runtime::Statemint => { - Box::new(chain_spec::statemint::StatemintChainSpec::from_json_file(path)?) - }, - Runtime::Statemine => { - Box::new(chain_spec::statemint::StatemineChainSpec::from_json_file(path)?) - }, - Runtime::Westmint => { - Box::new(chain_spec::statemint::WestmintChainSpec::from_json_file(path)?) - }, + Runtime::Statemint => + Box::new(chain_spec::statemint::StatemintChainSpec::from_json_file(path)?), + Runtime::Statemine => + Box::new(chain_spec::statemint::StatemineChainSpec::from_json_file(path)?), + Runtime::Westmint => + Box::new(chain_spec::statemint::WestmintChainSpec::from_json_file(path)?), Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => Box::new( chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, ), - Runtime::Shell => { - Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?) - }, - Runtime::Seedling => { - Box::new(chain_spec::seedling::SeedlingChainSpec::from_json_file(path)?) - }, - Runtime::ContractsRococo => { - Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_file(path)?) - }, - Runtime::Penpal(_para_id) => { - Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?) 
- }, + Runtime::Shell => + Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), + Runtime::Seedling => + Box::new(chain_spec::seedling::SeedlingChainSpec::from_json_file(path)?), + Runtime::ContractsRococo => + Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_file(path)?), + Runtime::Penpal(_para_id) => + Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), Runtime::Default => Box::new( chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_file(path)?, ), @@ -313,9 +295,8 @@ impl SubstrateCli for Cli { Runtime::Statemint => &statemint_runtime::VERSION, Runtime::Statemine => &statemine_runtime::VERSION, Runtime::Westmint => &westmint_runtime::VERSION, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { - &collectives_polkadot_runtime::VERSION - }, + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + &collectives_polkadot_runtime::VERSION, Runtime::Shell => &shell_runtime::VERSION, Runtime::Seedling => &seedling_runtime::VERSION, Runtime::ContractsRococo => &contracts_rococo_runtime::VERSION, @@ -564,19 +545,16 @@ pub fn run() -> Result<()> { // Switch on the concrete benchmark sub-command- match cmd { - BenchmarkCmd::Pallet(cmd) => { + BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { runner.sync_run(|config| match config.chain_spec.runtime() { - Runtime::Statemine => { - cmd.run::(config) - }, + Runtime::Statemine => + cmd.run::(config), Runtime::Westmint => cmd.run::(config), - Runtime::Statemint => { - cmd.run::(config) - }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { - cmd.run::(config) - }, + Runtime::Statemint => + cmd.run::(config), + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + cmd.run::(config), _ => Err(format!( "Chain '{:?}' doesn't support benchmarking", config.chain_spec.runtime() @@ -587,20 +565,18 @@ pub fn run() -> Result<()> { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into()) - } - }, + }, BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { construct_benchmark_partials!(config, |partials| cmd.run(partials.client)) }), #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => { + BenchmarkCmd::Storage(_) => return Err(sc_cli::Error::Input( "Compile with --features=runtime-benchmarks \ to enable storage benchmarks." .into(), ) - .into()) - }, + .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { construct_benchmark_partials!(config, |partials| { @@ -610,9 +586,8 @@ pub fn run() -> Result<()> { cmd.run(config, partials.client.clone(), db, storage) }) }), - BenchmarkCmd::Machine(cmd) => { - runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())) - }, + BenchmarkCmd::Machine(cmd) => + runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())), // NOTE: this allows the Client to leniently implement // new benchmark commands without requiring a companion MR. 
#[allow(unreachable_patterns)] @@ -638,14 +613,13 @@ pub fn run() -> Result<()> { Runtime::Statemint => runner.async_run(|config| { Ok((cmd.run::(config), task_manager)) }), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => runner.async_run(|config| { Ok(( cmd.run::(config), task_manager, )) - }) - }, + }), Runtime::Shell => runner.async_run(|config| { Ok(( cmd.run::(config), diff --git a/test/service/src/lib.rs b/test/service/src/lib.rs index 6d3b6a4d2dd..429e8ca9c01 100644 --- a/test/service/src/lib.rs +++ b/test/service/src/lib.rs @@ -196,7 +196,7 @@ async fn build_relay_chain_interface( collator_options.relay_chain_rpc_urls, ) .await - .map(|r| r.0); + .map(|r| r.0) } let relay_chain_full_node = polkadot_test_service::new_full( From d3ee905a8ce1be0740f5803a3694fb9f67a6a078 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 23 Nov 2022 14:29:53 +0100 Subject: [PATCH 23/36] Provide alias for cli argument --- client/cli/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index aca503bebe6..edf11d72a0b 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -294,7 +294,8 @@ pub struct RunCmd { #[arg( long, value_parser = validate_relay_chain_url, - num_args = 0.. + num_args = 0.., + alias = "relay-chain-rpc-url" )] pub relay_chain_rpc_urls: Vec, } From 2b888bec972f60b85cfd07604d049d1950def38d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 25 Nov 2022 15:25:36 +0100 Subject: [PATCH 24/36] Apply review comments --- client/relay-chain-interface/src/lib.rs | 18 -------- .../src/reconnecting_ws_client.rs | 42 ++++++++++++------- .../src/rpc_client.rs | 5 +-- 3 files changed, 30 insertions(+), 35 deletions(-) diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs index 36500e51252..56de750b687 100644 --- a/client/relay-chain-interface/src/lib.rs +++ b/client/relay-chain-interface/src/lib.rs @@ -87,24 +87,6 @@ impl From for sp_blockchain::Error { } } -impl From> for RelayChainError { - fn from(e: tokio::sync::mpsc::error::SendError) -> Self { - RelayChainError::WorkerCommunicationError(format!( - "Unable to send message to RPC worker: {}", - e - )) - } -} - -impl From for RelayChainError { - fn from(e: futures::channel::oneshot::Canceled) -> Self { - RelayChainError::WorkerCommunicationError(format!( - "Unexpected channel close on RPC worker side: {}", - e - )) - } -} - /// Trait that provides all necessary methods for interaction between collator and relay chain. 
#[async_trait] pub trait RelayChainInterface: Send + Sync { diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 40a4da308dd..2b8e44db218 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -68,9 +68,22 @@ impl ReconnectingWsClient { R: serde::de::DeserializeOwned, { let (tx, rx) = futures::channel::oneshot::channel(); + let message = RpcDispatcherMessage::Request(method.to_string(), params, tx); - self.to_worker_channel.send(message).await?; - let value = rx.await??; + self.to_worker_channel.send(message).await.map_err(|err| { + RelayChainError::WorkerCommunicationError(format!( + "Unable to send message to RPC worker: {}", + err + )) + })?; + + let value = rx.await.map_err(|err| { + RelayChainError::WorkerCommunicationError(format!( + "Unexpected channel close on RPC worker side: {}", + err + )) + })??; + serde_json::from_value(value) .map_err(|_| RelayChainError::GenericError("Unable to deserialize value".to_string())) } @@ -109,7 +122,7 @@ impl ReconnectingWsClient { /// Worker that should be used in combination with [`RelayChainRpcClient`]. Must be polled to distribute header notifications to listeners. struct ReconnectingWebsocketWorker { - ws_urls: Option>, + ws_urls: Vec, /// Communication channel with the RPC client client_receiver: TokioReceiver, @@ -176,7 +189,7 @@ async fn connect_next_available_rpc_server( let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); if let Ok(ws_client) = WsClientBuilder::default().build(url).await { - return Ok((index, Arc::new(ws_client))) + return Ok((index, Arc::new(ws_client))); }; } Err(()) @@ -185,7 +198,7 @@ async fn connect_next_available_rpc_server( impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { - return Err(()) + return Err(()); } let active_client = connect_next_available_rpc_server(&urls, 1).await?; Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) @@ -253,7 +266,7 @@ impl ClientManager { // the websocket connection is dead and requires a restart. // Other errors should be forwarded to the request caller. if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)) + return Err(RpcDispatcherMessage::Request(method, params, response_sender)); } if let Err(err) = response_sender.send(resp) { @@ -275,7 +288,7 @@ impl ReconnectingWebsocketWorker { ) -> (ReconnectingWebsocketWorker, TokioSender) { let (tx, rx) = tokio_channel(100); let worker = ReconnectingWebsocketWorker { - ws_urls: Some(urls), + ws_urls: urls, client_receiver: rx, self_sender: tx.clone(), imported_header_listeners: Vec::new(), @@ -299,8 +312,9 @@ impl ReconnectingWebsocketWorker { match self.client_receiver.try_recv() { Ok(val) => tmp_request_storage.push(val), Err(TryRecvError::Empty) => break, - Err(_) => - return Err("Can not fetch values from client receiver channel.".to_string()), + Err(_) => { + return Err("Can not fetch values from client receiver channel.".to_string()) + }, } } @@ -314,7 +328,7 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )) + )); }; } } @@ -324,12 +338,12 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. 
err: {:?}", err - )) + )); }; } if client_manager.connect_to_new_rpc_server().await.is_err() { - return Err(format!("Unable to find valid external RPC server, shutting down.")) + return Err(format!("Unable to find valid external RPC server, shutting down.")); }; client_manager.get_subscriptions().await.map_err(|e| { @@ -347,7 +361,7 @@ impl ReconnectingWebsocketWorker { async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); - let urls = self.ws_urls.take().expect("Constructor requires passing of urls; qed"); + let urls = std::mem::take(&mut self.ws_urls); let Ok(mut client_manager) = ClientManager::new(urls).await else { tracing::error!(target: LOG_TARGET, "No valid RPC url found. Stopping RPC worker."); return; @@ -371,7 +385,7 @@ impl ReconnectingWebsocketWorker { message, "Unable to reconnect, stopping worker." ); - return + return; }, } should_reconnect = false; diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index 27684232e94..ceefe36428d 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -39,7 +39,6 @@ use sp_api::RuntimeVersion; use sp_consensus_babe::Epoch; use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_storage::StorageKey; -use std::sync::Arc; pub use url::Url; const LOG_TARGET: &str = "relay-chain-rpc-client"; @@ -48,7 +47,7 @@ const LOG_TARGET: &str = "relay-chain-rpc-client"; #[derive(Clone)] pub struct RelayChainRpcClient { /// Websocket client to make calls - ws_client: Arc, + ws_client: ReconnectingWsClient, } macro_rules! rpc_params { @@ -81,7 +80,7 @@ pub async fn create_client_and_start_worker( impl RelayChainRpcClient { /// Initialize new RPC Client. async fn new(ws_client: ReconnectingWsClient) -> RelayChainResult { - let client = RelayChainRpcClient { ws_client: Arc::new(ws_client) }; + let client = RelayChainRpcClient { ws_client }; Ok(client) } From 70b59f06842d1ab7a1dad2cb60837ba594792de0 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 25 Nov 2022 15:57:14 +0100 Subject: [PATCH 25/36] Rename P* to Relay* --- client/relay-chain-rpc-interface/src/lib.rs | 28 +++---- .../src/reconnecting_ws_client.rs | 42 +++++----- .../src/rpc_client.rs | 78 +++++++++---------- 3 files changed, 74 insertions(+), 74 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index 58e37b4e6a7..c8a1c4e46d9 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -19,7 +19,7 @@ use core::time::Duration; use cumulus_primitives_core::{ relay_chain::{ v2::{CommittedCandidateReceipt, OccupiedCoreAssumption, SessionIndex, ValidatorId}, - Hash as PHash, Header as PHeader, InboundHrmpMessage, + Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -59,7 +59,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn retrieve_dmq_contents( &self, para_id: ParaId, - relay_parent: PHash, + relay_parent: RelayHash, ) -> RelayChainResult> { self.rpc_client.parachain_host_dmq_contents(para_id, relay_parent).await } @@ -67,7 +67,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn retrieve_all_inbound_hrmp_channel_contents( &self, para_id: ParaId, - relay_parent: PHash, + relay_parent: RelayHash, ) -> RelayChainResult>> { self.rpc_client .parachain_host_inbound_hrmp_channels_contents(para_id, relay_parent) @@ 
-76,7 +76,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn persisted_validation_data( &self, - hash: PHash, + hash: RelayHash, para_id: ParaId, occupied_core_assumption: OccupiedCoreAssumption, ) -> RelayChainResult> { @@ -87,7 +87,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn candidate_pending_availability( &self, - hash: PHash, + hash: RelayHash, para_id: ParaId, ) -> RelayChainResult> { self.rpc_client @@ -95,17 +95,17 @@ impl RelayChainInterface for RelayChainRpcInterface { .await } - async fn session_index_for_child(&self, hash: PHash) -> RelayChainResult { + async fn session_index_for_child(&self, hash: RelayHash) -> RelayChainResult { self.rpc_client.parachain_host_session_index_for_child(hash).await } - async fn validators(&self, block_id: PHash) -> RelayChainResult> { + async fn validators(&self, block_id: RelayHash) -> RelayChainResult> { self.rpc_client.parachain_host_validators(block_id).await } async fn import_notification_stream( &self, - ) -> RelayChainResult + Send>>> { + ) -> RelayChainResult + Send>>> { let imported_headers_stream = self.rpc_client.get_imported_heads_stream()?; Ok(imported_headers_stream.boxed()) @@ -113,13 +113,13 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn finality_notification_stream( &self, - ) -> RelayChainResult + Send>>> { + ) -> RelayChainResult + Send>>> { let imported_headers_stream = self.rpc_client.get_finalized_heads_stream()?; Ok(imported_headers_stream.boxed()) } - async fn best_block_hash(&self) -> RelayChainResult { + async fn best_block_hash(&self) -> RelayChainResult { self.rpc_client.chain_get_head(None).await } @@ -133,7 +133,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn get_storage_by_key( &self, - relay_parent: PHash, + relay_parent: RelayHash, key: &[u8], ) -> RelayChainResult> { let storage_key = StorageKey(key.to_vec()); @@ -145,7 +145,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn prove_read( &self, - relay_parent: PHash, + relay_parent: RelayHash, relevant_keys: &Vec>, ) -> RelayChainResult { let cloned = relevant_keys.clone(); @@ -168,7 +168,7 @@ impl RelayChainInterface for RelayChainRpcInterface { /// 2. Check if the block is already in chain. If yes, succeed early. /// 3. Wait for the block to be imported via subscription. /// 4. If timeout is reached, we return an error. 
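Steps 1 and 2 of the list above are visible in the hunk that follows; the timeout tail (steps 3 and 4) is elided there. A minimal sketch of that wait-with-timeout pattern, assuming `tokio::time::timeout` is available in this crate and that a `RelayChainError::WaitTimeout` variant exists — illustrative only, not the verbatim implementation:

use futures::StreamExt;
use sp_runtime::traits::Header as _;
use std::time::Duration;

// Steps 3-4: wait on the import stream for the requested hash, bounded by a timeout.
async fn wait_with_timeout(
	mut head_stream: impl futures::Stream<Item = RelayHeader> + Unpin,
	wait_for_hash: RelayHash,
	timeout: Duration,
) -> RelayChainResult<()> {
	tokio::time::timeout(timeout, async {
		// Succeed as soon as the awaited header shows up in the import stream.
		while let Some(header) = head_stream.next().await {
			if header.hash() == wait_for_hash {
				return Ok(())
			}
		}
		Err(RelayChainError::GenericError("Imported header stream closed".to_string()))
	})
	.await
	.map_err(|_| RelayChainError::WaitTimeout(wait_for_hash))?
}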
- async fn wait_for_block(&self, wait_for_hash: PHash) -> RelayChainResult<()> { + async fn wait_for_block(&self, wait_for_hash: RelayHash) -> RelayChainResult<()> { let mut head_stream = self.rpc_client.get_imported_heads_stream()?; if self.rpc_client.chain_get_header(Some(wait_for_hash)).await?.is_some() { @@ -192,7 +192,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn new_best_notification_stream( &self, - ) -> RelayChainResult + Send>>> { + ) -> RelayChainResult + Send>>> { let imported_headers_stream = self.rpc_client.get_best_heads_stream()?; Ok(imported_headers_stream.boxed()) } diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 2b8e44db218..6e8093512ea 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -1,4 +1,4 @@ -use cumulus_primitives_core::relay_chain::Header as PHeader; +use cumulus_primitives_core::relay_chain::Header as RelayHeader; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::{ channel::{ @@ -28,9 +28,9 @@ const LOG_TARGET: &str = "reconnecting-websocket-client"; /// Messages for communication between [`ReconnectingWsClient`] and [`ReconnectingWebsocketWorker`]. #[derive(Debug)] pub enum RpcDispatcherMessage { - RegisterBestHeadListener(Sender), - RegisterImportListener(Sender), - RegisterFinalizationListener(Sender), + RegisterBestHeadListener(Sender), + RegisterImportListener(Sender), + RegisterFinalizationListener(Sender), Request(String, Option>, OneshotSender>), } @@ -88,15 +88,15 @@ impl ReconnectingWsClient { .map_err(|_| RelayChainError::GenericError("Unable to deserialize value".to_string())) } /// Get a stream of new best relay chain headers - pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { + let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); self.send_register_message_to_worker(RpcDispatcherMessage::RegisterBestHeadListener(tx))?; Ok(rx) } /// Get a stream of finalized relay chain headers - pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { + let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); self.send_register_message_to_worker(RpcDispatcherMessage::RegisterFinalizationListener( tx, ))?; @@ -104,8 +104,8 @@ impl ReconnectingWsClient { } /// Get a stream of all imported relay chain headers - pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { + let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); self.send_register_message_to_worker(RpcDispatcherMessage::RegisterImportListener(tx))?; Ok(rx) } @@ -130,14 +130,14 @@ struct ReconnectingWebsocketWorker { self_sender: TokioSender, /// Senders to distribute incoming header notifications to - imported_header_listeners: Vec>, - finalized_header_listeners: Vec>, - best_header_listeners: Vec>, + imported_header_listeners: Vec>, + finalized_header_listeners: Vec>, + 
best_header_listeners: Vec>, } fn handle_event_distribution( - event: Option>, - senders: &mut Vec>, + event: Option>, + senders: &mut Vec>, ) -> Result<(), String> { match event { Some(Ok(header)) => { @@ -173,9 +173,9 @@ struct ClientManager { } struct RelayChainSubscriptions { - import_subscription: Subscription, - finalized_subscription: Subscription, - best_subscription: Subscription, + import_subscription: Subscription, + finalized_subscription: Subscription, + best_subscription: Subscription, } /// Try to find a new RPC server to connect to. @@ -215,7 +215,7 @@ impl ClientManager { async fn get_subscriptions(&self) -> Result { let import_subscription = self .active_client - .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") + .subscribe::("chain_subscribeAllHeads", None, "chain_unsubscribeAllHeads") .await .map_err(|e| { tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); @@ -223,7 +223,7 @@ impl ClientManager { })?; let best_subscription = self .active_client - .subscribe::("chain_subscribeNewHeads", None, "chain_unsubscribeNewHeads") + .subscribe::("chain_subscribeNewHeads", None, "chain_unsubscribeNewHeads") .await .map_err(|e| { tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); @@ -231,7 +231,7 @@ impl ClientManager { })?; let finalized_subscription = self .active_client - .subscribe::( + .subscribe::( "chain_subscribeFinalizedHeads", None, "chain_unsubscribeFinalizedHeads", diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index ceefe36428d..8242f7e6967 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -23,7 +23,7 @@ use cumulus_primitives_core::{ PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }, - CandidateHash, Hash as PHash, Header as PHeader, InboundHrmpMessage, + CandidateHash, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -89,7 +89,7 @@ impl RelayChainRpcClient { pub async fn call_remote_runtime_function( &self, method_name: &str, - hash: PHash, + hash: RelayHash, payload: Option, ) -> RelayChainResult { let payload_bytes = @@ -148,14 +148,14 @@ impl RelayChainRpcClient { } /// Returns information regarding the current epoch. - pub async fn babe_api_current_epoch(&self, at: PHash) -> Result { + pub async fn babe_api_current_epoch(&self, at: RelayHash) -> Result { self.call_remote_runtime_function("BabeApi_current_epoch", at, None::<()>).await } /// Old method to fetch v1 session info. pub async fn parachain_host_session_info_before_version_2( &self, - at: PHash, + at: RelayHash, index: SessionIndex, ) -> Result, RelayChainError> { self.call_remote_runtime_function( @@ -169,8 +169,8 @@ impl RelayChainRpcClient { /// Scrape dispute relevant from on-chain, backing votes and resolved disputes. pub async fn parachain_host_on_chain_votes( &self, - at: PHash, - ) -> Result>, RelayChainError> { + at: RelayHash, + ) -> Result>, RelayChainError> { self.call_remote_runtime_function("ParachainHost_on_chain_votes", at, None::<()>) .await } @@ -178,7 +178,7 @@ impl RelayChainRpcClient { /// Returns code hashes of PVFs that require pre-checking by validators in the active set. 
pub async fn parachain_host_pvfs_require_precheck( &self, - at: PHash, + at: RelayHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function("ParachainHost_pvfs_require_precheck", at, None::<()>) .await @@ -187,7 +187,7 @@ impl RelayChainRpcClient { /// Submits a PVF pre-checking statement into the transaction pool. pub async fn parachain_host_submit_pvf_check_statement( &self, - at: PHash, + at: RelayHash, stmt: PvfCheckStatement, signature: ValidatorSignature, ) -> Result<(), RelayChainError> { @@ -213,8 +213,8 @@ impl RelayChainRpcClient { pub async fn state_get_read_proof( &self, storage_keys: Vec, - at: Option, - ) -> Result, RelayChainError> { + at: Option, + ) -> Result, RelayChainError> { let params = rpc_params!(storage_keys, at); self.request("state_getReadProof", params).await } @@ -223,7 +223,7 @@ impl RelayChainRpcClient { pub async fn state_get_storage( &self, storage_key: StorageKey, - at: Option, + at: Option, ) -> Result, RelayChainError> { let params = rpc_params!(storage_key, at); self.request("state_getStorage", params).await @@ -232,7 +232,7 @@ impl RelayChainRpcClient { /// Get hash of the n-th block in the canon chain. /// /// By default returns latest block hash. - pub async fn chain_get_head(&self, at: Option) -> Result { + pub async fn chain_get_head(&self, at: Option) -> Result { let params = rpc_params!(at); self.request("chain_getHead", params).await } @@ -242,7 +242,7 @@ impl RelayChainRpcClient { /// should be the successor of the number of the block. pub async fn parachain_host_validator_groups( &self, - at: PHash, + at: RelayHash, ) -> Result<(Vec>, GroupRotationInfo), RelayChainError> { self.call_remote_runtime_function("ParachainHost_validator_groups", at, None::<()>) .await @@ -251,7 +251,7 @@ impl RelayChainRpcClient { /// Get a vector of events concerning candidates that occurred within a block. pub async fn parachain_host_candidate_events( &self, - at: PHash, + at: RelayHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function("ParachainHost_candidate_events", at, None::<()>) .await @@ -260,7 +260,7 @@ impl RelayChainRpcClient { /// Checks if the given validation outputs pass the acceptance criteria. pub async fn parachain_host_check_validation_outputs( &self, - at: PHash, + at: RelayHash, para_id: ParaId, outputs: CandidateCommitments, ) -> Result { @@ -277,9 +277,9 @@ impl RelayChainRpcClient { /// data hash against an expected one and yields `None` if they're not equal. pub async fn parachain_host_assumed_validation_data( &self, - at: PHash, + at: RelayHash, para_id: ParaId, - expected_hash: PHash, + expected_hash: RelayHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function( "ParachainHost_persisted_assumed_validation_data", @@ -290,7 +290,7 @@ impl RelayChainRpcClient { } /// Get hash of last finalized block. - pub async fn chain_get_finalized_head(&self) -> Result { + pub async fn chain_get_finalized_head(&self) -> Result { self.request("chain_getFinalizedHead", None).await } @@ -298,7 +298,7 @@ impl RelayChainRpcClient { pub async fn chain_get_block_hash( &self, block_number: Option, - ) -> Result, RelayChainError> { + ) -> Result, RelayChainError> { let params = rpc_params!(block_number); self.request("chain_getBlockHash", params).await } @@ -310,7 +310,7 @@ impl RelayChainRpcClient { /// and the para already occupies a core. 
pub async fn parachain_host_persisted_validation_data( &self, - at: PHash, + at: RelayHash, para_id: ParaId, occupied_core_assumption: OccupiedCoreAssumption, ) -> Result, RelayChainError> { @@ -325,7 +325,7 @@ impl RelayChainRpcClient { /// Get the validation code from its hash. pub async fn parachain_host_validation_code_by_hash( &self, - at: PHash, + at: RelayHash, validation_code_hash: ValidationCodeHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function( @@ -340,14 +340,14 @@ impl RelayChainRpcClient { /// Cores are either free or occupied. Free cores can have paras assigned to them. pub async fn parachain_host_availability_cores( &self, - at: PHash, - ) -> Result>, RelayChainError> { + at: RelayHash, + ) -> Result>, RelayChainError> { self.call_remote_runtime_function("ParachainHost_availability_cores", at, None::<()>) .await } /// Get runtime version - pub async fn runtime_version(&self, at: PHash) -> Result { + pub async fn runtime_version(&self, at: RelayHash) -> Result { let params = rpc_params!(at); self.request("state_getRuntimeVersion", params).await } @@ -356,7 +356,7 @@ impl RelayChainRpcClient { /// This is a staging method! Do not use on production runtimes! pub async fn parachain_host_staging_get_disputes( &self, - at: PHash, + at: RelayHash, ) -> Result)>, RelayChainError> { self.call_remote_runtime_function("ParachainHost_staging_get_disputes", at, None::<()>) .await @@ -364,7 +364,7 @@ impl RelayChainRpcClient { pub async fn authority_discovery_authorities( &self, - at: PHash, + at: RelayHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function("AuthorityDiscoveryApi_authorities", at, None::<()>) .await @@ -376,7 +376,7 @@ impl RelayChainRpcClient { /// and the para already occupies a core. pub async fn parachain_host_validation_code( &self, - at: PHash, + at: RelayHash, para_id: ParaId, occupied_core_assumption: OccupiedCoreAssumption, ) -> Result, RelayChainError> { @@ -391,7 +391,7 @@ impl RelayChainRpcClient { /// Fetch the hash of the validation code used by a para, making the given `OccupiedCoreAssumption`. pub async fn parachain_host_validation_code_hash( &self, - at: PHash, + at: RelayHash, para_id: ParaId, occupied_core_assumption: OccupiedCoreAssumption, ) -> Result, RelayChainError> { @@ -406,7 +406,7 @@ impl RelayChainRpcClient { /// Get the session info for the given session, if stored. pub async fn parachain_host_session_info( &self, - at: PHash, + at: RelayHash, index: SessionIndex, ) -> Result, RelayChainError> { self.call_remote_runtime_function("ParachainHost_session_info", at, Some(index)) @@ -416,8 +416,8 @@ impl RelayChainRpcClient { /// Get header at specified hash pub async fn chain_get_header( &self, - hash: Option, - ) -> Result, RelayChainError> { + hash: Option, + ) -> Result, RelayChainError> { let params = rpc_params!(hash); self.request("chain_getHeader", params).await } @@ -426,7 +426,7 @@ impl RelayChainRpcClient { /// assigned to occupied cores in `availability_cores` and `None` otherwise. pub async fn parachain_host_candidate_pending_availability( &self, - at: PHash, + at: RelayHash, para_id: ParaId, ) -> Result, RelayChainError> { self.call_remote_runtime_function( @@ -442,7 +442,7 @@ impl RelayChainRpcClient { /// This can be used to instantiate a `SigningContext`. 
pub async fn parachain_host_session_index_for_child( &self, - at: PHash, + at: RelayHash, ) -> Result { self.call_remote_runtime_function("ParachainHost_session_index_for_child", at, None::<()>) .await @@ -451,7 +451,7 @@ impl RelayChainRpcClient { /// Get the current validators. pub async fn parachain_host_validators( &self, - at: PHash, + at: RelayHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function("ParachainHost_validators", at, None::<()>) .await @@ -462,7 +462,7 @@ impl RelayChainRpcClient { pub async fn parachain_host_inbound_hrmp_channels_contents( &self, para_id: ParaId, - at: PHash, + at: RelayHash, ) -> Result>, RelayChainError> { self.call_remote_runtime_function( "ParachainHost_inbound_hrmp_channels_contents", @@ -476,24 +476,24 @@ impl RelayChainRpcClient { pub async fn parachain_host_dmq_contents( &self, para_id: ParaId, - at: PHash, + at: RelayHash, ) -> Result, RelayChainError> { self.call_remote_runtime_function("ParachainHost_dmq_contents", at, Some(para_id)) .await } /// Get a stream of all imported relay chain headers - pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { + pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { self.ws_client.get_imported_heads_stream() } /// Get a stream of new best relay chain headers - pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { + pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { self.ws_client.get_best_heads_stream() } /// Get a stream of finalized relay chain headers - pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { + pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { self.ws_client.get_finalized_heads_stream() } } From 5957a7ae8ee98ee83fac50dae3cecfdf31a17b61 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 29 Nov 2022 19:36:47 +0000 Subject: [PATCH 26/36] Improve zombienet test --- zombienet/tests/0006-rpc_collator_builds_blocks.feature | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.feature b/zombienet/tests/0006-rpc_collator_builds_blocks.feature index 05fa672d8a5..852ab8ead1d 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.feature +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.feature @@ -18,3 +18,9 @@ dave: reports block height is at least 12 within 250 seconds eve: reports block height is at least 12 within 250 seconds one: pause dave: reports block height is at least 20 within 200 seconds +one: resume +sleep 10 +two: pause +three: pause +dave: is up +dave: reports block height is at least 30 within 200 seconds From 2df5418c2a16a445083137f9465901e2dd6983ef Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 29 Nov 2022 19:43:23 +0000 Subject: [PATCH 27/36] fmt --- .../src/reconnecting_ws_client.rs | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 6e8093512ea..94d56681561 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -89,14 +89,16 @@ impl ReconnectingWsClient { } /// Get a stream of new best relay chain headers pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + let (tx, rx) = + 
futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); self.send_register_message_to_worker(RpcDispatcherMessage::RegisterBestHeadListener(tx))?; Ok(rx) } /// Get a stream of finalized relay chain headers pub fn get_finalized_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + let (tx, rx) = + futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); self.send_register_message_to_worker(RpcDispatcherMessage::RegisterFinalizationListener( tx, ))?; @@ -105,7 +107,8 @@ impl ReconnectingWsClient { /// Get a stream of all imported relay chain headers pub fn get_imported_heads_stream(&self) -> Result, RelayChainError> { - let (tx, rx) = futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); + let (tx, rx) = + futures::channel::mpsc::channel::(NOTIFICATION_CHANNEL_SIZE_LIMIT); self.send_register_message_to_worker(RpcDispatcherMessage::RegisterImportListener(tx))?; Ok(rx) } @@ -189,7 +192,7 @@ async fn connect_next_available_rpc_server( let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); if let Ok(ws_client) = WsClientBuilder::default().build(url).await { - return Ok((index, Arc::new(ws_client))); + return Ok((index, Arc::new(ws_client))) }; } Err(()) @@ -198,7 +201,7 @@ async fn connect_next_available_rpc_server( impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { - return Err(()); + return Err(()) } let active_client = connect_next_available_rpc_server(&urls, 1).await?; Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) @@ -266,7 +269,7 @@ impl ClientManager { // the websocket connection is dead and requires a restart. // Other errors should be forwarded to the request caller. if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)); + return Err(RpcDispatcherMessage::Request(method, params, response_sender)) } if let Err(err) = response_sender.send(resp) { @@ -312,9 +315,8 @@ impl ReconnectingWebsocketWorker { match self.client_receiver.try_recv() { Ok(val) => tmp_request_storage.push(val), Err(TryRecvError::Empty) => break, - Err(_) => { - return Err("Can not fetch values from client receiver channel.".to_string()) - }, + Err(_) => + return Err("Can not fetch values from client receiver channel.".to_string()), } } @@ -328,7 +330,7 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )); + )) }; } } @@ -338,12 +340,12 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )); + )) }; } if client_manager.connect_to_new_rpc_server().await.is_err() { - return Err(format!("Unable to find valid external RPC server, shutting down.")); + return Err(format!("Unable to find valid external RPC server, shutting down.")) }; client_manager.get_subscriptions().await.map_err(|e| { @@ -385,7 +387,7 @@ impl ReconnectingWebsocketWorker { message, "Unable to reconnect, stopping worker." 
); - return; + return }, } should_reconnect = false; From b318e3fe69d12e210b4f51a327879751b16ca3b5 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 30 Nov 2022 15:07:27 +0000 Subject: [PATCH 28/36] Fix zombienet sleep --- zombienet/tests/0006-rpc_collator_builds_blocks.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.feature b/zombienet/tests/0006-rpc_collator_builds_blocks.feature index 852ab8ead1d..cab40254313 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.feature +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.feature @@ -19,7 +19,7 @@ eve: reports block height is at least 12 within 250 seconds one: pause dave: reports block height is at least 20 within 200 seconds one: resume -sleep 10 +sleep 10 seconds two: pause three: pause dave: is up From 8329760e2c8ec615f1dbce2fb66a958cd0baa496 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 12 Dec 2022 10:46:30 +0100 Subject: [PATCH 29/36] Simplify zombienet test --- zombienet/tests/0006-rpc_collator_builds_blocks.feature | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.feature b/zombienet/tests/0006-rpc_collator_builds_blocks.feature index cab40254313..512850e4788 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.feature +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.feature @@ -15,12 +15,5 @@ alice: parachain 2000 is registered within 225 seconds alice: parachain 2000 block height is at least 10 within 250 seconds dave: reports block height is at least 12 within 250 seconds -eve: reports block height is at least 12 within 250 seconds -one: pause +one: restart after 10 seconds dave: reports block height is at least 20 within 200 seconds -one: resume -sleep 10 seconds -two: pause -three: pause -dave: is up -dave: reports block height is at least 30 within 200 seconds From 513bb1300dce2bdb69215aa652b01004861edf90 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 12 Dec 2022 13:01:24 +0100 Subject: [PATCH 30/36] Reduce log clutter and fix starting position --- .../src/reconnecting_ws_client.rs | 5 ++--- zombienet/tests/0006-rpc_collator_builds_blocks.feature | 7 ++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 94d56681561..3fc0907864e 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -186,8 +186,7 @@ async fn connect_next_available_rpc_server( urls: &Vec, starting_position: usize, ) -> Result<(usize, Arc), ()> { - tracing::info!(target: LOG_TARGET, "Connecting to RPC server."); - + tracing::debug!(target: LOG_TARGET, starting_position, "Connecting to RPC server."); for (counter, url) in urls.iter().cycle().skip(starting_position).take(urls.len()).enumerate() { let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); @@ -203,7 +202,7 @@ impl ClientManager { if urls.is_empty() { return Err(()) } - let active_client = connect_next_available_rpc_server(&urls, 1).await?; + let active_client = connect_next_available_rpc_server(&urls, 0).await?; Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) } diff --git a/zombienet/tests/0006-rpc_collator_builds_blocks.feature 
b/zombienet/tests/0006-rpc_collator_builds_blocks.feature index 512850e4788..78886b7020b 100644 --- a/zombienet/tests/0006-rpc_collator_builds_blocks.feature +++ b/zombienet/tests/0006-rpc_collator_builds_blocks.feature @@ -14,6 +14,11 @@ eve: is up alice: parachain 2000 is registered within 225 seconds alice: parachain 2000 block height is at least 10 within 250 seconds +eve: reports block height is at least 12 within 250 seconds dave: reports block height is at least 12 within 250 seconds -one: restart after 10 seconds +one: restart after 1 seconds dave: reports block height is at least 20 within 200 seconds +two: restart after 1 seconds +three: restart after 20 seconds +dave: is up +dave: reports block height is at least 30 within 200 seconds From 9c104698e256e3e601b375b3193e1601348a8951 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 13 Dec 2022 11:45:13 +0100 Subject: [PATCH 31/36] Do not distribute duplicated imported and finalized blocks --- Cargo.lock | 7 +- client/relay-chain-rpc-interface/Cargo.toml | 1 + .../src/reconnecting_ws_client.rs | 87 ++++++++++++++----- 3 files changed, 69 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d1f6cdf40a..0a2bf3e17b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2120,6 +2120,7 @@ dependencies = [ "futures", "futures-timer", "jsonrpsee", + "lru", "parity-scale-codec", "polkadot-service", "sc-client-api", @@ -12598,7 +12599,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.3", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] @@ -13711,9 +13712,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0608f53c1dc0bad505d03a34bbd49fbf2ad7b51eb036123e896365532745a1" +checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" dependencies = [ "futures", "log", diff --git a/client/relay-chain-rpc-interface/Cargo.toml b/client/relay-chain-rpc-interface/Cargo.toml index 00fe3907a4f..9bc06e99fe0 100644 --- a/client/relay-chain-rpc-interface/Cargo.toml +++ b/client/relay-chain-rpc-interface/Cargo.toml @@ -30,3 +30,4 @@ async-trait = "0.1.59" url = "2.3.1" serde_json = "1.0.87" serde = "1.0.147" +lru = "0.8.1" diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index e0f0362b75e..fdfcd9fad61 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -1,4 +1,6 @@ -use cumulus_primitives_core::relay_chain::Header as RelayHeader; +use cumulus_primitives_core::relay_chain::{ + BlockNumber as RelayBlockNumber, Header as RelayHeader, +}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use futures::{ channel::{ @@ -17,8 +19,9 @@ use jsonrpsee::{ rpc_params, ws_client::WsClientBuilder, }; +use lru::LruCache; use polkadot_service::TaskManager; -use std::{future::Future, sync::Arc}; +use std::{future::Future, num::NonZeroUsize, sync::Arc}; use tokio::sync::mpsc::{ channel as tokio_channel, error::TryRecvError, Receiver as TokioReceiver, Sender as TokioSender, }; @@ -136,13 +139,8 @@ struct ReconnectingWebsocketWorker { best_header_listeners: Vec>, } -fn handle_event_distribution( - event: Option>, - senders: &mut Vec>, -) -> Result<(), String> { - match event { - Some(Ok(header)) => { - senders.retain_mut(|e| { +fn 
distribute_header(header: RelayHeader, senders: &mut Vec>) { + senders.retain_mut(|e| { match e.try_send(header.clone()) { // Receiver has been dropped, remove Sender from list. Err(error) if error.is_disconnected() => false, @@ -156,11 +154,6 @@ fn handle_event_distribution( _ => true, } }); - Ok(()) - }, - None => Err("Subscription closed".to_string()), - Some(Err(err)) => Err(format!("Error in RPC subscription: {}", err)), - } } /// Manages the active websocket client. @@ -379,7 +372,10 @@ impl ReconnectingWebsocketWorker { return; }; + let mut imported_blocks_cache = + LruCache::new(NonZeroUsize::new(40).expect("40 is nonzero; qed.")); let mut should_reconnect = false; + let mut last_seen_finalized_num: RelayBlockNumber = 0; loop { // This branch is taken if the websocket connection to the current RPC server is closed. if should_reconnect { @@ -432,21 +428,66 @@ impl ReconnectingWebsocketWorker { } }, import_event = subscriptions.import_subscription.next() => { - if let Err(err) = handle_event_distribution(import_event, &mut self.imported_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing imported header notification."); - should_reconnect = true; + match import_event { + Some(Ok(header)) => { + let hash = header.hash(); + if imported_blocks_cache.contains(&hash) { + tracing::debug!( + target: LOG_TARGET, + number = header.number, + ?hash, + "Duplicate imported block header. This might happen after switching to a new RPC node. Skipping distribution." + ); + continue; + } + imported_blocks_cache.put(hash, ()); + distribute_header(header, &mut self.imported_header_listeners); + }, + None => { + tracing::error!(target: LOG_TARGET, "Subscription closed."); + should_reconnect = true; + }, + Some(Err(err)) => { + tracing::error!(target: LOG_TARGET, ?err, "Error in RPC subscription."); + should_reconnect = true; + }, } }, best_header_event = subscriptions.best_subscription.next() => { - if let Err(err) = handle_event_distribution(best_header_event, &mut self.best_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing best header notification."); - should_reconnect = true; + match best_header_event { + Some(Ok(header)) => distribute_header(header, &mut self.best_header_listeners), + None => { + tracing::error!(target: LOG_TARGET, "Subscription closed."); + should_reconnect = true; + }, + Some(Err(err)) => { + tracing::error!(target: LOG_TARGET, ?err, "Error in RPC subscription."); + should_reconnect = true; + }, } } finalized_event = subscriptions.finalized_subscription.next() => { - if let Err(err) = handle_event_distribution(finalized_event, &mut self.finalized_header_listeners) { - tracing::error!(target: LOG_TARGET, err, "Encountered error while processing finalized header notification."); - should_reconnect = true; + match finalized_event { + Some(Ok(header)) if header.number > last_seen_finalized_num => { + last_seen_finalized_num = header.number; + distribute_header(header, &mut self.finalized_header_listeners); + }, + Some(Ok(header)) => { + tracing::debug!( + target: LOG_TARGET, + number = header.number, + last_seen_finalized_num, + "Duplicate finalized block header. This might happen after switching to a new RPC node. Skipping distribution." 
+ ); + }, + None => { + tracing::error!(target: LOG_TARGET, "Subscription closed."); + should_reconnect = true; + }, + Some(Err(err)) => { + tracing::error!(target: LOG_TARGET, ?err, "Error in RPC subscription."); + should_reconnect = true; + }, } } } From 3b27fd83977a758ff670de3265343a3305b116a0 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 13 Dec 2022 17:08:02 +0100 Subject: [PATCH 32/36] fmt --- .../src/reconnecting_ws_client.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index fdfcd9fad61..923b92ba94b 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -182,7 +182,7 @@ async fn connect_next_available_rpc_server( let index = (starting_position + counter) % urls.len(); tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); if let Ok(ws_client) = WsClientBuilder::default().build(url).await { - return Ok((index, Arc::new(ws_client))); + return Ok((index, Arc::new(ws_client))) }; } Err(()) @@ -191,7 +191,7 @@ async fn connect_next_available_rpc_server( impl ClientManager { pub async fn new(urls: Vec) -> Result { if urls.is_empty() { - return Err(()); + return Err(()) } let active_client = connect_next_available_rpc_server(&urls, 0).await?; Ok(Self { urls, active_client: active_client.1, active_index: active_client.0 }) @@ -267,7 +267,7 @@ impl ClientManager { // the websocket connection is dead and requires a restart. // Other errors should be forwarded to the request caller. if let Err(JsonRpseeError::RestartNeeded(_)) = resp { - return Err(RpcDispatcherMessage::Request(method, params, response_sender)); + return Err(RpcDispatcherMessage::Request(method, params, response_sender)) } if let Err(err) = response_sender.send(resp) { @@ -313,9 +313,8 @@ impl ReconnectingWebsocketWorker { match self.client_receiver.try_recv() { Ok(val) => tmp_request_storage.push(val), Err(TryRecvError::Empty) => break, - Err(_) => { - return Err("Can not fetch values from client receiver channel.".to_string()) - }, + Err(_) => + return Err("Can not fetch values from client receiver channel.".to_string()), } } @@ -329,7 +328,7 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )); + )) }; } } @@ -339,12 +338,12 @@ impl ReconnectingWebsocketWorker { return Err(format!( "Unable to retry requests, queue is unexpectedly full. err: {:?}", err - )); + )) }; } if client_manager.connect_to_new_rpc_server().await.is_err() { - return Err(format!("Unable to find valid external RPC server, shutting down.")); + return Err(format!("Unable to find valid external RPC server, shutting down.")) }; client_manager.get_subscriptions().await.map_err(|e| { @@ -389,7 +388,7 @@ impl ReconnectingWebsocketWorker { message, "Unable to reconnect, stopping worker." 
); - return; + return }, } should_reconnect = false; From 7a18946249f05fefc27599adb3151ccbe35569fc Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 14 Dec 2022 10:08:52 +0100 Subject: [PATCH 33/36] Apply code review suggestions --- .../src/reconnecting_ws_client.rs | 57 ++++++++++++++----- .../src/rpc_client.rs | 2 +- polkadot-parachain/src/service.rs | 1 - 3 files changed, 44 insertions(+), 16 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 923b92ba94b..262111a0dd3 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -1,3 +1,19 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + use cumulus_primitives_core::relay_chain::{ BlockNumber as RelayBlockNumber, Header as RelayHeader, }; @@ -32,7 +48,7 @@ const LOG_TARGET: &str = "reconnecting-websocket-client"; /// Messages for communication between [`ReconnectingWsClient`] and [`ReconnectingWebsocketWorker`]. #[derive(Debug)] -pub enum RpcDispatcherMessage { +enum RpcDispatcherMessage { RegisterBestHeadListener(Sender), RegisterImportListener(Sender), RegisterFinalizationListener(Sender), @@ -50,7 +66,7 @@ pub struct ReconnectingWsClient { impl ReconnectingWsClient { /// Create a new websocket client frontend. pub async fn new(urls: Vec, task_manager: &mut TaskManager) -> RelayChainResult { - tracing::info!(target: LOG_TARGET, "Instantiating reconnecting websocket client"); + tracing::debug!(target: LOG_TARGET, "Instantiating reconnecting websocket client"); let (worker, sender) = ReconnectingWebsocketWorker::new(urls).await; @@ -70,7 +86,7 @@ impl ReconnectingWsClient { { let (tx, rx) = futures::channel::oneshot::channel(); - let message = RpcDispatcherMessage::Request(method.to_string(), params, tx); + let message = RpcDispatcherMessage::Request(method.into(), params, tx); self.to_worker_channel.send(message).await.map_err(|err| { RelayChainError::WorkerCommunicationError(format!( "Unable to send message to RPC worker: {}", @@ -88,6 +104,7 @@ impl ReconnectingWsClient { serde_json::from_value(value) .map_err(|_| RelayChainError::GenericError("Unable to deserialize value".to_string())) } + /// Get a stream of new best relay chain headers pub fn get_best_heads_stream(&self) -> Result, RelayChainError> { let (tx, rx) = @@ -215,7 +232,11 @@ impl ClientManager { ) .await .map_err(|e| { - tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + tracing::error!( + target: LOG_TARGET, + ?e, + "Unable to open `chain_subscribeAllHeads` subscription." 
+ ); e })?; let best_subscription = self @@ -227,7 +248,11 @@ impl ClientManager { ) .await .map_err(|e| { - tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + tracing::error!( + target: LOG_TARGET, + ?e, + "Unable to open `chain_subscribeNewHeads` subscription." + ); e })?; let finalized_subscription = self @@ -239,7 +264,11 @@ impl ClientManager { ) .await .map_err(|e| { - tracing::error!(target: LOG_TARGET, ?e, "Unable to open subscription."); + tracing::error!( + target: LOG_TARGET, + ?e, + "Unable to open `chain_subscribeFinalizedHeads` subscription." + ); e })?; @@ -299,7 +328,7 @@ impl ReconnectingWebsocketWorker { (worker, tx) } - /// Reconnect via [`ClientManager`] and provide new notification streams. + /// Reconnect via [`ClientManager`] and provide new notification streams. async fn handle_reconnect( &mut self, client_manager: &mut ClientManager, @@ -367,7 +396,7 @@ impl ReconnectingWebsocketWorker { return; }; let Ok(mut subscriptions) = client_manager.get_subscriptions().await else { - tracing::error!(target: LOG_TARGET, "Not able to fetch subscription after reconnect."); + tracing::error!(target: LOG_TARGET, "Unable to fetch subscriptions on initial connection."); return; }; @@ -446,8 +475,8 @@ impl ReconnectingWebsocketWorker { tracing::error!(target: LOG_TARGET, "Subscription closed."); should_reconnect = true; }, - Some(Err(err)) => { - tracing::error!(target: LOG_TARGET, ?err, "Error in RPC subscription."); + Some(Err(error)) => { + tracing::error!(target: LOG_TARGET, ?error, "Error in RPC subscription."); should_reconnect = true; }, } @@ -459,8 +488,8 @@ impl ReconnectingWebsocketWorker { tracing::error!(target: LOG_TARGET, "Subscription closed."); should_reconnect = true; }, - Some(Err(err)) => { - tracing::error!(target: LOG_TARGET, ?err, "Error in RPC subscription."); + Some(Err(error)) => { + tracing::error!(target: LOG_TARGET, ?error, "Error in RPC subscription."); should_reconnect = true; }, } @@ -483,8 +512,8 @@ impl ReconnectingWebsocketWorker { tracing::error!(target: LOG_TARGET, "Subscription closed."); should_reconnect = true; }, - Some(Err(err)) => { - tracing::error!(target: LOG_TARGET, ?err, "Error in RPC subscription."); + Some(Err(error)) => { + tracing::error!(target: LOG_TARGET, ?error, "Error in RPC subscription."); should_reconnect = true; }, } diff --git a/client/relay-chain-rpc-interface/src/rpc_client.rs b/client/relay-chain-rpc-interface/src/rpc_client.rs index bdf50d50bea..d8ef632b693 100644 --- a/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -126,7 +126,7 @@ impl RelayChainRpcClient { R: DeserializeOwned + std::fmt::Debug, OR: Fn(&RelayChainError), { - self.ws_client.request(method, params.clone()).await.map_err(|err| { + self.ws_client.request(method, params).await.map_err(|err| { trace_error(&err); RelayChainError::RpcCallError(method.to_string()) }) diff --git a/polkadot-parachain/src/service.rs b/polkadot-parachain/src/service.rs index fa2228fa5c7..092c7658113 100644 --- a/polkadot-parachain/src/service.rs +++ b/polkadot-parachain/src/service.rs @@ -430,7 +430,6 @@ where sc_service::spawn_tasks(sc_service::SpawnTasksParams { rpc_builder, client: client.clone(), - transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, config: parachain_config, From 56b1f7b384c61e401457459e67b209a2cbb48fb9 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 14 Dec 2022 10:57:27 +0100 Subject: [PATCH 34/36] Move building of relay chain 
interface to `cumulus-client-service` --- Cargo.lock | 10 +++--- .../src/collator_overseer.rs | 18 +++++++--- .../src/reconnecting_ws_client.rs | 7 +++- client/service/Cargo.toml | 4 +++ client/service/src/lib.rs | 35 ++++++++++++++++++- parachain-template/node/Cargo.toml | 3 -- parachain-template/node/src/service.rs | 35 ++----------------- polkadot-parachain/Cargo.toml | 3 -- polkadot-parachain/src/service.rs | 34 ++---------------- 9 files changed, 68 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5ee49cfd39..8ce155eb0f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1783,12 +1783,16 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-pov-recovery", "cumulus-primitives-core", + "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", + "cumulus-relay-chain-minimal-node", "parking_lot 0.12.1", "polkadot-primitives", "sc-client-api", "sc-consensus", "sc-service", + "sc-sysinfo", + "sc-telemetry", "sp-api", "sp-blockchain", "sp-consensus", @@ -6539,10 +6543,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", - "cumulus-relay-chain-minimal-node", - "cumulus-relay-chain-rpc-interface", "frame-benchmarking", "frame-benchmarking-cli", "jsonrpsee", @@ -7768,10 +7769,7 @@ dependencies = [ "cumulus-client-service", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", - "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", - "cumulus-relay-chain-minimal-node", - "cumulus-relay-chain-rpc-interface", "frame-benchmarking", "frame-benchmarking-cli", "futures", diff --git a/client/relay-chain-minimal-node/src/collator_overseer.rs b/client/relay-chain-minimal-node/src/collator_overseer.rs index e0e94c6cf5b..7ed01193026 100644 --- a/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -223,8 +223,13 @@ async fn forward_collator_events( f = finality.next() => { match f { Some(header) => { - tracing::info!(target: "minimal-polkadot-node", "Received finalized block via RPC: #{} ({} -> {})", - header.number, header.parent_hash, header.hash()); + tracing::info!( + target: "minimal-polkadot-node", + "Received finalized block via RPC: #{} ({} -> {})", + header.number, + header.parent_hash, + header.hash() + ); let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; handle.block_finalized(block_info).await; } @@ -234,8 +239,13 @@ async fn forward_collator_events( i = imports.next() => { match i { Some(header) => { - tracing::info!(target: "minimal-polkadot-node", "Received imported block via RPC: #{} ({} -> {})", - header.number, header.parent_hash, header.hash()); + tracing::info!( + target: "minimal-polkadot-node", + "Received imported block via RPC: #{} ({} -> {})", + header.number, + header.parent_hash, + header.hash() + ); let block_info = BlockInfo { hash: header.hash(), parent_hash: header.parent_hash, number: header.number }; handle.block_imported(block_info).await; } diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 262111a0dd3..8ff0f605282 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -197,7 +197,12 @@ async fn connect_next_available_rpc_server( 
tracing::debug!(target: LOG_TARGET, starting_position, "Connecting to RPC server."); for (counter, url) in urls.iter().cycle().skip(starting_position).take(urls.len()).enumerate() { let index = (starting_position + counter) % urls.len(); - tracing::info!(target: LOG_TARGET, index, ?url, "Connecting to RPC node",); + tracing::info!( + target: LOG_TARGET, + index, + ?url, + "Trying to connect to next external relaychain node.", + ); if let Ok(ws_client) = WsClientBuilder::default().build(url).await { return Ok((index, Arc::new(ws_client))) }; diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index e705364a1c8..6642deef06e 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -11,6 +11,8 @@ parking_lot = "0.12.1" sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -27,3 +29,5 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-primitives-core = { path = "../../primitives/core" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } +cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } +cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 53277ca5817..737736b34ed 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -18,15 +18,19 @@ //! //! Provides functions for starting a collator node or a normal full node. +use cumulus_client_cli::CollatorOptions; use cumulus_client_consensus_common::ParachainConsensus; use cumulus_primitives_core::{CollectCollationInfo, ParaId}; -use cumulus_relay_chain_interface::RelayChainInterface; +use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult}; +use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; +use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; use polkadot_primitives::v2::CollatorPair; use sc_client_api::{ Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, UsageProvider, }; use sc_consensus::{import_queue::ImportQueueService, BlockImport}; use sc_service::{Configuration, TaskManager}; +use sc_telemetry::TelemetryWorkerHandle; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::traits::SpawnNamed; @@ -217,3 +221,32 @@ pub fn prepare_node_config(mut parachain_config: Configuration) -> Configuration parachain_config } + +/// Build a relay chain interface. +/// Will return a minimal relay chain node with RPC +/// client or an inprocess node, based on the [`CollatorOptions`] passed in. 
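As a usage note, a hedged sketch of how a parachain node service might call the helper introduced below (argument names mirror its signature; the error conversion is illustrative and not the exact call site):

// Illustrative call site inside a parachain service (surrounding variables assumed):
let (relay_chain_interface, collator_key) = build_relay_chain_interface(
	polkadot_config,
	&parachain_config,
	telemetry_worker_handle,
	&mut task_manager,
	collator_options,
	hwbench.clone(),
)
.await
.map_err(|e| sc_service::Error::Application(Box::new(e)))?;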
+pub async fn build_relay_chain_interface( + polkadot_config: Configuration, + parachain_config: &Configuration, + telemetry_worker_handle: Option, + task_manager: &mut TaskManager, + collator_options: CollatorOptions, + hwbench: Option, +) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { + if !collator_options.relay_chain_rpc_urls.is_empty() { + build_minimal_relay_chain_node( + polkadot_config, + task_manager, + collator_options.relay_chain_rpc_urls, + ) + .await + } else { + build_inprocess_relay_chain( + polkadot_config, + parachain_config, + telemetry_worker_handle, + task_manager, + hwbench, + ) + } +} diff --git a/parachain-template/node/Cargo.toml b/parachain-template/node/Cargo.toml index b3085541b56..e26d4b00efc 100644 --- a/parachain-template/node/Cargo.toml +++ b/parachain-template/node/Cargo.toml @@ -67,10 +67,7 @@ cumulus-client-network = { path = "../../client/network" } cumulus-client-service = { path = "../../client/service" } cumulus-primitives-core = { path = "../../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-relay-chain-inprocess-interface = { path = "../../client/relay-chain-inprocess-interface" } cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } -cumulus-relay-chain-rpc-interface = { path = "../../client/relay-chain-rpc-interface" } -cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/parachain-template/node/src/service.rs b/parachain-template/node/src/service.rs index 94868f9813e..2df17e2eb89 100644 --- a/parachain-template/node/src/service.rs +++ b/parachain-template/node/src/service.rs @@ -14,12 +14,11 @@ use cumulus_client_consensus_common::{ }; use cumulus_client_network::BlockAnnounceValidator; use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, + build_relay_chain_interface, prepare_node_config, start_collator, start_full_node, + StartCollatorParams, StartFullNodeParams, }; use cumulus_primitives_core::ParaId; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; -use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; // Substrate Imports use sc_consensus::ImportQueue; @@ -31,8 +30,6 @@ use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerH use sp_keystore::SyncCryptoStorePtr; use substrate_prometheus_endpoint::Registry; -use polkadot_service::CollatorPair; - /// Native executor type. 
pub struct ParachainNativeExecutor; @@ -136,32 +133,6 @@ pub fn new_partial( }) } -async fn build_relay_chain_interface( - polkadot_config: Configuration, - parachain_config: &Configuration, - telemetry_worker_handle: Option, - task_manager: &mut TaskManager, - collator_options: CollatorOptions, - hwbench: Option, -) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { - if !collator_options.relay_chain_rpc_urls.is_empty() { - build_minimal_relay_chain_node( - polkadot_config, - task_manager, - collator_options.relay_chain_rpc_urls, - ) - .await - } else { - build_inprocess_relay_chain( - polkadot_config, - parachain_config, - telemetry_worker_handle, - task_manager, - hwbench, - ) - } -} - /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. diff --git a/polkadot-parachain/Cargo.toml b/polkadot-parachain/Cargo.toml index f9b7943b4d8..d879c468403 100644 --- a/polkadot-parachain/Cargo.toml +++ b/polkadot-parachain/Cargo.toml @@ -88,9 +88,6 @@ cumulus-client-network = { path = "../client/network" } cumulus-primitives-core = { path = "../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../primitives/parachain-inherent" } cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" } -cumulus-relay-chain-inprocess-interface = { path = "../client/relay-chain-inprocess-interface" } -cumulus-relay-chain-rpc-interface = { path = "../client/relay-chain-rpc-interface" } -cumulus-relay-chain-minimal-node = { path = "../client/relay-chain-minimal-node" } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot-parachain/src/service.rs b/polkadot-parachain/src/service.rs index 092c7658113..b38858c8f3e 100644 --- a/polkadot-parachain/src/service.rs +++ b/polkadot-parachain/src/service.rs @@ -22,16 +22,14 @@ use cumulus_client_consensus_common::{ }; use cumulus_client_network::BlockAnnounceValidator; use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, + build_relay_chain_interface, prepare_node_config, start_collator, start_full_node, + StartCollatorParams, StartFullNodeParams, }; use cumulus_primitives_core::{ relay_chain::v2::{Hash as PHash, PersistedValidationData}, ParaId, }; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; -use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; -use polkadot_service::CollatorPair; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; use sp_core::Pair; use jsonrpsee::RpcModule; @@ -301,32 +299,6 @@ where Ok(params) } -async fn build_relay_chain_interface( - polkadot_config: Configuration, - parachain_config: &Configuration, - telemetry_worker_handle: Option, - task_manager: &mut TaskManager, - collator_options: CollatorOptions, - hwbench: Option, -) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { - if !collator_options.relay_chain_rpc_urls.is_empty() { - build_minimal_relay_chain_node( - polkadot_config, - task_manager, - collator_options.relay_chain_rpc_urls, - ) - .await - } else { - build_inprocess_relay_chain( - polkadot_config, - parachain_config, - telemetry_worker_handle, - task_manager, - hwbench, - ) - } -} - /// Start a shell 
node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api for shell nodes. From b065a76f3779cc6a75af037e61ec5c70ceea0323 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 14 Dec 2022 16:36:00 +0100 Subject: [PATCH 35/36] Refactoring to not push back into channel --- .../src/reconnecting_ws_client.rs | 101 ++++++++---------- 1 file changed, 46 insertions(+), 55 deletions(-) diff --git a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index 8ff0f605282..3414dd652c6 100644 --- a/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -23,8 +23,9 @@ use futures::{ mpsc::{Receiver, Sender}, oneshot::Sender as OneshotSender, }, + future::BoxFuture, stream::FuturesUnordered, - StreamExt, + FutureExt, StreamExt, }; use jsonrpsee::{ core::{ @@ -37,9 +38,9 @@ use jsonrpsee::{ }; use lru::LruCache; use polkadot_service::TaskManager; -use std::{future::Future, num::NonZeroUsize, sync::Arc}; +use std::{num::NonZeroUsize, sync::Arc}; use tokio::sync::mpsc::{ - channel as tokio_channel, error::TryRecvError, Receiver as TokioReceiver, Sender as TokioSender, + channel as tokio_channel, Receiver as TokioReceiver, Sender as TokioSender, }; use url::Url; @@ -147,9 +148,6 @@ struct ReconnectingWebsocketWorker { /// Communication channel with the RPC client client_receiver: TokioReceiver, - /// Communication channel with ourself - self_sender: TokioSender, - /// Senders to distribute incoming header notifications to imported_header_listeners: Vec>, finalized_header_listeners: Vec>, @@ -292,7 +290,7 @@ impl ClientManager { method: String, params: ArrayParams, response_sender: OneshotSender>, - ) -> impl Future> { + ) -> BoxFuture<'static, Result<(), RpcDispatcherMessage>> { let future_client = self.active_client.clone(); async move { let resp = future_client.request(&method, params.clone()).await; @@ -313,9 +311,15 @@ impl ClientManager { } Ok(()) } + .boxed() } } +enum ConnectionStatus { + Connected, + ReconnectRequired(Option), +} + impl ReconnectingWebsocketWorker { /// Create new worker. Returns the worker and a channel to register new listeners. async fn new( @@ -325,7 +329,6 @@ impl ReconnectingWebsocketWorker { let worker = ReconnectingWebsocketWorker { ws_urls: urls, client_receiver: rx, - self_sender: tx.clone(), imported_header_listeners: Vec::new(), finalized_header_listeners: Vec::new(), best_header_listeners: Vec::new(), @@ -338,48 +341,37 @@ impl ReconnectingWebsocketWorker { &mut self, client_manager: &mut ClientManager, pending_requests: &mut FuturesUnordered< - impl Future>, + BoxFuture<'static, Result<(), RpcDispatcherMessage>>, >, + first_failed_request: Option, ) -> Result { - let mut tmp_request_storage = Vec::new(); - loop { - // Drain the incoming request channel to insert retrying requests at the front later - match self.client_receiver.try_recv() { - Ok(val) => tmp_request_storage.push(val), - Err(TryRecvError::Empty) => break, - Err(_) => - return Err("Can not fetch values from client receiver channel.".to_string()), - } + let mut requests_to_retry = Vec::new(); + if let Some(req @ RpcDispatcherMessage::Request(_, _, _)) = first_failed_request { + requests_to_retry.push(req); } // At this point, all pending requests will return an error since the // websocket connection is dead. 
So draining the pending requests should be fast. while !pending_requests.is_empty() { if let Some(Err(req)) = pending_requests.next().await { - // Put the failed requests into the queue again, they will be processed - // once we have a new rpc server to connect to. - if let Err(err) = self.self_sender.try_send(req) { - return Err(format!( - "Unable to retry requests, queue is unexpectedly full. err: {:?}", - err - )) - }; + requests_to_retry.push(req); } } - for item in tmp_request_storage.into_iter() { - if let Err(err) = self.self_sender.try_send(item) { - return Err(format!( - "Unable to retry requests, queue is unexpectedly full. err: {:?}", - err - )) - }; - } - if client_manager.connect_to_new_rpc_server().await.is_err() { return Err(format!("Unable to find valid external RPC server, shutting down.")) }; + for item in requests_to_retry.into_iter() { + if let RpcDispatcherMessage::Request(method, params, response_sender) = item { + pending_requests.push(client_manager.create_request( + method, + params, + response_sender, + )); + }; + } + client_manager.get_subscriptions().await.map_err(|e| { format!("Not able to create streams from newly connected RPC server, shutting down. err: {:?}", e) }) @@ -407,12 +399,19 @@ impl ReconnectingWebsocketWorker { let mut imported_blocks_cache = LruCache::new(NonZeroUsize::new(40).expect("40 is nonzero; qed.")); - let mut should_reconnect = false; + let mut should_reconnect = ConnectionStatus::Connected; let mut last_seen_finalized_num: RelayBlockNumber = 0; loop { // This branch is taken if the websocket connection to the current RPC server is closed. - if should_reconnect { - match self.handle_reconnect(&mut client_manager, &mut pending_requests).await { + if let ConnectionStatus::ReconnectRequired(maybe_failed_request) = should_reconnect { + match self + .handle_reconnect( + &mut client_manager, + &mut pending_requests, + maybe_failed_request, + ) + .await + { Ok(new_subscriptions) => { subscriptions = new_subscriptions; }, @@ -425,8 +424,8 @@ impl ReconnectingWebsocketWorker { return }, } - should_reconnect = false; - }; + should_reconnect = ConnectionStatus::Connected; + } tokio::select! { evt = self.client_receiver.recv() => match evt { @@ -449,15 +448,7 @@ impl ReconnectingWebsocketWorker { }, should_retry = pending_requests.next(), if !pending_requests.is_empty() => { if let Some(Err(req)) = should_retry { - if let Err(err) = self.self_sender.try_send(req) { - tracing::error!( - target: LOG_TARGET, - ?err, - "Unable to retry requests, queue is unexpectedly full." 
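The pattern above (drain the failed in-flight futures locally, reconnect, then resubmit) replaces the earlier approach of pushing failed requests back into the worker's own channel. Because `create_request` now returns a `BoxFuture`, freshly created and retried request futures share one concrete type and can live in the same `FuturesUnordered`. A self-contained sketch of the pattern with stand-in types (not the actual worker code):

    use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt};

    type Request = String; // stands in for RpcDispatcherMessage::Request(..)

    fn dispatch(req: Request) -> BoxFuture<'static, Result<(), Request>> {
        // A real implementation would perform the RPC call here and hand the
        // request back as the error value so the caller can retry it.
        async move { Err(req) }.boxed()
    }

    async fn drain_and_retry(
        pending: &mut FuturesUnordered<BoxFuture<'static, Result<(), Request>>>,
    ) {
        let mut to_retry = Vec::new();
        // Once the connection is gone, every in-flight future fails quickly.
        while let Some(result) = pending.next().await {
            if let Err(req) = result {
                to_retry.push(req);
            }
        }
        // ... a new connection would be established here ...
        for req in to_retry {
            pending.push(dispatch(req));
        }
    }
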
- ); - return; - }; - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(Some(req)); } }, import_event = subscriptions.import_subscription.next() => { @@ -478,11 +469,11 @@ impl ReconnectingWebsocketWorker { }, None => { tracing::error!(target: LOG_TARGET, "Subscription closed."); - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(None); }, Some(Err(error)) => { tracing::error!(target: LOG_TARGET, ?error, "Error in RPC subscription."); - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(None); }, } }, @@ -491,11 +482,11 @@ impl ReconnectingWebsocketWorker { Some(Ok(header)) => distribute_header(header, &mut self.best_header_listeners), None => { tracing::error!(target: LOG_TARGET, "Subscription closed."); - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(None); }, Some(Err(error)) => { tracing::error!(target: LOG_TARGET, ?error, "Error in RPC subscription."); - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(None); }, } } @@ -515,11 +506,11 @@ impl ReconnectingWebsocketWorker { }, None => { tracing::error!(target: LOG_TARGET, "Subscription closed."); - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(None); }, Some(Err(error)) => { tracing::error!(target: LOG_TARGET, ?error, "Error in RPC subscription."); - should_reconnect = true; + should_reconnect = ConnectionStatus::ReconnectRequired(None); }, } } From 6431473308021d53c7c1d14c213fcc4e47fb3667 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 15 Dec 2022 10:12:18 +0100 Subject: [PATCH 36/36] FMT --- client/service/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 737736b34ed..82ce3ce4872 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -21,9 +21,9 @@ use cumulus_client_cli::CollatorOptions; use cumulus_client_consensus_common::ParachainConsensus; use cumulus_primitives_core::{CollectCollationInfo, ParaId}; +use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; use cumulus_relay_chain_interface::{RelayChainInterface, RelayChainResult}; use cumulus_relay_chain_minimal_node::build_minimal_relay_chain_node; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; use polkadot_primitives::v2::CollatorPair; use sc_client_api::{ Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, UsageProvider,
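The `ConnectionStatus` enum introduced above lets the worker loop record not only that a reconnect is needed but also which request triggered it, so that request survives the reconnection. A condensed, self-contained sketch of that state handling with stand-in types; the real worker multiplexes several subscription streams with `tokio::select!`, which is omitted here:

    use tokio::sync::mpsc;

    enum WorkerEvent {
        RequestFailed(String),
        SubscriptionClosed,
    }

    enum ConnectionStatus {
        Connected,
        ReconnectRequired(Option<String>),
    }

    async fn run(mut events: mpsc::Receiver<WorkerEvent>) {
        let mut status = ConnectionStatus::Connected;
        loop {
            // Take the status out so a captured request (if any) can be moved
            // into the reconnect path.
            if let ConnectionStatus::ReconnectRequired(maybe_failed) =
                std::mem::replace(&mut status, ConnectionStatus::Connected)
            {
                // Reconnecting and resubscribing would happen here; a request
                // carried in `maybe_failed` is resubmitted on the new connection.
                let _ = maybe_failed;
            }
            match events.recv().await {
                Some(WorkerEvent::RequestFailed(req)) =>
                    status = ConnectionStatus::ReconnectRequired(Some(req)),
                Some(WorkerEvent::SubscriptionClosed) =>
                    status = ConnectionStatus::ReconnectRequired(None),
                None => return,
            }
        }
    }
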