From bd8556680c39b68f4c579d2e18d87ef8f2e464dd Mon Sep 17 00:00:00 2001 From: Aleksandr Petrosyan Date: Wed, 22 Mar 2023 10:35:27 +0400 Subject: [PATCH] [fix] #3031: Fix the UI/UX of missing configuration parameters Signed-off-by: Aleksandr Petrosyan --- Cargo.lock | 20 + cli/src/lib.rs | 1075 +++++++++++++------------- cli/src/main.rs | 75 +- config/Cargo.toml | 1 + config/base/Cargo.toml | 1 + config/base/derive/src/documented.rs | 8 +- config/base/derive/src/proxy.rs | 13 +- config/base/derive/src/view.rs | 2 +- config/base/src/lib.rs | 116 ++- config/src/client.rs | 64 +- config/src/iroha.rs | 48 +- config/src/lib.rs | 1 - config/src/path.rs | 24 +- core/src/smartcontracts/wasm.rs | 2 +- crypto/src/lib.rs | 2 +- data_model/derive/src/api.rs | 1 + 16 files changed, 815 insertions(+), 638 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da45af262d4..5fd5dd715e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2056,6 +2056,7 @@ dependencies = [ "iroha_data_model", "iroha_primitives", "json5", + "path-absolutize", "proptest", "serde", "serde_json", @@ -2072,6 +2073,7 @@ version = "2.0.0-pre-rc.13" dependencies = [ "crossbeam", "iroha_config_derive", + "iroha_crypto", "json5", "serde", "serde_json", @@ -3033,6 +3035,24 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" +[[package]] +name = "path-absolutize" +version = "3.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f1d4993b16f7325d90c18c3c6a3327db7808752db8d208cea0acee0abd52c52" +dependencies = [ + "path-dedot", +] + +[[package]] +name = "path-dedot" +version = "3.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a81540d94551664b72b72829b12bd167c73c9d25fbac0e04fafa8023f7e4901" +dependencies = [ + "once_cell", +] + [[package]] name = "percent-encoding" version = "2.2.0" diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 
67fe8d99edb..92e0884f3a0 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -5,38 +5,41 @@ //! `Iroha` is the main instance of the peer program. `Arguments` //! should be constructed externally: (see `main.rs`). #![allow( - clippy::arithmetic_side_effects, - clippy::std_instead_of_core, - clippy::std_instead_of_alloc + clippy::arithmetic_side_effects, + clippy::std_instead_of_core, + clippy::std_instead_of_alloc )] use core::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use color_eyre::eyre::{eyre, Result, WrapErr}; -use eyre::ContextCompat as _; +use color_eyre::{ + eyre::{eyre, Result, WrapErr}, + Section, +}; use iroha_actor::{broker::*, prelude::*}; use iroha_config::{ - base::proxy::{LoadFromDisk, LoadFromEnv, Override}, - iroha::{Configuration, ConfigurationProxy}, - path::Path as ConfigPath, + base::proxy::{LoadFromDisk, LoadFromEnv, Override}, + iroha::{Configuration, ConfigurationProxy}, + path::Path as ConfigPath, }; use iroha_core::{ - block_sync::BlockSynchronizer, - handler::ThreadHandler, - kura::Kura, - prelude::{World, WorldStateView}, - queue::Queue, - sumeragi::Sumeragi, - tx::{PeerId, TransactionValidator}, - IrohaNetwork, + block_sync::BlockSynchronizer, + handler::ThreadHandler, + kura::Kura, + prelude::{World, WorldStateView}, + queue::Queue, + sumeragi::Sumeragi, + tx::{PeerId, TransactionValidator}, + IrohaNetwork, }; use iroha_data_model::prelude::*; -use iroha_genesis::{GenesisNetwork, GenesisNetworkTrait, RawGenesisBlock}; +use iroha_genesis::GenesisNetwork; use iroha_p2p::network::NetworkBaseRelayOnlinePeers; +use owo_colors::OwoColorize as _; use tokio::{ - signal, - sync::{broadcast, Notify}, - task, + signal, + sync::{broadcast, Notify}, + task, }; use torii::Torii; @@ -50,570 +53,574 @@ pub mod torii; /// constructed. #[derive(Debug)] pub struct Arguments { - /// Set this flag on the peer that should submit genesis on the network initial start. - pub submit_genesis: bool, - /// Set custom genesis file path. 
`None` if `submit_genesis` set to `false`. - pub genesis_path: Option, - /// Set custom config file path. - pub config_path: ConfigPath, + /// Set this flag on the peer that should submit genesis on the network initial start. + pub submit_genesis: bool, + /// Set custom genesis file path. `None` if `submit_genesis` set to `false`. + pub genesis_path: Option, + /// Set custom config file path. + pub config_path: ConfigPath, } /// Default configuration path static CONFIGURATION_PATH: once_cell::sync::Lazy<&'static std::path::Path> = - once_cell::sync::Lazy::new(|| std::path::Path::new("config")); + once_cell::sync::Lazy::new(|| std::path::Path::new("config")); /// Default genesis path static GENESIS_PATH: once_cell::sync::Lazy<&'static std::path::Path> = - once_cell::sync::Lazy::new(|| std::path::Path::new("genesis")); + once_cell::sync::Lazy::new(|| std::path::Path::new("genesis")); impl Default for Arguments { - fn default() -> Self { - Self { - submit_genesis: false, - genesis_path: Some(ConfigPath::default(&GENESIS_PATH)), - config_path: ConfigPath::default(&CONFIGURATION_PATH), - } - } + fn default() -> Self { + Self { + submit_genesis: false, + genesis_path: Some(ConfigPath::default(&GENESIS_PATH)), + config_path: ConfigPath::default(&CONFIGURATION_PATH), + } + } } /// Iroha is an [Orchestrator](https://en.wikipedia.org/wiki/Orchestration_%28computing%29) of the /// system. It configures, coordinates and manages transactions and queries processing, work of consensus and storage. pub struct Iroha { - /// Queue of transactions - pub queue: Arc, - /// Sumeragi consensus - pub sumeragi: Arc, - /// Kura — block storage - pub kura: Arc, - /// Block synchronization actor - pub block_sync: AlwaysAddr, - /// Torii web server - pub torii: Option, - /// Thread handlers - thread_handlers: Vec, - /// Relay that redirects messages from the network subsystem to core subsystems. - _sumeragi_relay: AlwaysAddr, // TODO: figure out if truly unused. 
- /// A boolean value indicating whether or not the peers will recieve data from the network. Used in - /// sumeragi testing. - #[cfg(debug_assertions)] - pub freeze_status: Arc, + /// Queue of transactions + pub queue: Arc, + /// Sumeragi consensus + pub sumeragi: Arc, + /// Kura — block storage + pub kura: Arc, + /// Block synchronization actor + pub block_sync: AlwaysAddr, + /// Torii web server + pub torii: Option, + /// Thread handlers + thread_handlers: Vec, + /// Relay that redirects messages from the network subsystem to core subsystems. + _sumeragi_relay: AlwaysAddr, // TODO: figure out if truly unused. + /// A boolean value indicating whether or not the peers will recieve data from the network. Used in + /// sumeragi testing. + #[cfg(debug_assertions)] + pub freeze_status: Arc, } impl Drop for Iroha { - fn drop(&mut self) { - // Drop thread handles first - let _thread_handles = core::mem::take(&mut self.thread_handlers); - } + fn drop(&mut self) { + // Drop thread handles first + let _thread_handles = core::mem::take(&mut self.thread_handlers); + } } struct FromNetworkBaseRelay { - sumeragi: Arc, - broker: Broker, - #[cfg(debug_assertions)] - freeze_status: Arc, + sumeragi: Arc, + broker: Broker, + #[cfg(debug_assertions)] + freeze_status: Arc, } #[async_trait::async_trait] impl Actor for FromNetworkBaseRelay { - async fn on_start(&mut self, ctx: &mut iroha_actor::prelude::Context) { - // to start connections - self.broker.subscribe::(ctx); - self.broker.subscribe::(ctx); - } + async fn on_start(&mut self, ctx: &mut iroha_actor::prelude::Context) { + // to start connections + self.broker.subscribe::(ctx); + self.broker.subscribe::(ctx); + } } #[async_trait::async_trait] impl Handler for FromNetworkBaseRelay { - type Result = (); + type Result = (); - async fn handle(&mut self, msg: NetworkBaseRelayOnlinePeers) { - self.sumeragi.update_online_peers(msg.online_peers); - } + async fn handle(&mut self, msg: NetworkBaseRelayOnlinePeers) { + 
self.sumeragi.update_online_peers(msg.online_peers); + } } #[async_trait::async_trait] impl Handler for FromNetworkBaseRelay { - type Result = (); - - async fn handle(&mut self, msg: iroha_core::NetworkMessage) -> Self::Result { - use iroha_core::NetworkMessage::*; - - #[cfg(debug_assertions)] - if self.freeze_status.load(Ordering::SeqCst) { - return; - } - - match msg { - SumeragiPacket(data) => { - self.sumeragi.incoming_message(data.into_v1()); - } - BlockSync(data) => self.broker.issue_send(data.into_v1()).await, - Health => {} - } - } + type Result = (); + + async fn handle(&mut self, msg: iroha_core::NetworkMessage) -> Self::Result { + use iroha_core::NetworkMessage::*; + + #[cfg(debug_assertions)] + if self.freeze_status.load(Ordering::SeqCst) { + return; + } + + match msg { + SumeragiPacket(data) => { + self.sumeragi.incoming_message(data.into_v1()); + } + BlockSync(data) => self.broker.issue_send(data.into_v1()).await, + Health => {} + } + } } impl Iroha { - /// To make `Iroha` peer work all actors should be started first. - /// After that moment it you can start it with listening to torii events. - /// - /// # Side effect - /// - Prints welcome message in the log - /// - /// # Errors - /// - Reading genesis from disk - /// - Reading telemetry configs - /// - telemetry setup - /// - Initialization of [`Sumeragi`] - #[allow(clippy::non_ascii_literal)] - pub async fn new(args: &Arguments) -> Result { - let mut config = args - .config_path - .first_existing_path() - .map_or_else( - || { - eprintln!( - "Configuration file not found. Using environment variables as fallback." 
- ); - ConfigurationProxy::default() - }, - |path| ConfigurationProxy::from_path(&path.as_path()), - ) - .override_with(ConfigurationProxy::from_env()) - .build()?; - - if style::should_disable_color() { - config.disable_panic_terminal_colors = true; - // Remove terminal colors to comply with XDG - // specifications, Rust's conventions as well as remove - // escape codes from logs redirected from STDOUT. If you - // need syntax highlighting, use JSON logging instead. - config.logger.terminal_colors = false; - } - - let telemetry = iroha_logger::init(&config.logger)?; - iroha_logger::info!( - git_commit_sha = env!("VERGEN_GIT_SHA"), - "Hyperledgerいろは2にようこそ!(translation) Welcome to Hyperledger Iroha {}!", - env!("CARGO_PKG_VERSION") - ); - - let genesis = if let Some(genesis_path) = &args.genesis_path { - GenesisNetwork::from_configuration( - args.submit_genesis, - RawGenesisBlock::from_path( - genesis_path - .first_existing_path() - .wrap_err_with(|| { - format!("Genesis block file {genesis_path:?} doesn't exist") - })? - .as_ref(), - )?, - Some(&config.genesis), - &config.sumeragi.transaction_limits, - ) - .wrap_err("Failed to initialize genesis.")? - } else { - None - }; - - Self::with_genesis(genesis, config, Broker::new(), telemetry).await - } - - fn prepare_panic_hook(notify_shutdown: Arc) { - #[cfg(not(feature = "test-network"))] - use std::panic::set_hook; - - // This is a hot-fix for tests - // - // # Problem - // - // When running tests in parallel `std::panic::set_hook()` will be set - // the same for all threads. That means, that panic in one test can - // cause another test shutdown, which we don't want. - // - // # Downside - // - // A downside of this approach is that this panic hook will not work for - // threads created by Iroha itself (e.g. Sumeragi thread). - // - // # TODO - // - // Remove this when all Rust integrations tests will be converted to a - // separate Python tests. 
- #[cfg(feature = "test-network")] - use thread_local_panic_hook::set_hook; - - set_hook(Box::new(move |info| { - // What clippy suggests is much less readable in this case - #[allow(clippy::option_if_let_else)] - let panic_message = if let Some(message) = info.payload().downcast_ref::<&str>() { - message - } else if let Some(message) = info.payload().downcast_ref::() { - message - } else { - "unspecified" - }; - - let location = info.location().map_or_else( - || "unspecified".to_owned(), - |location| format!("{}:{}", location.file(), location.line()), - ); - - iroha_logger::error!(panic_message, location, "A panic occurred, shutting down"); - - // NOTE: shutdown all currently listening waiters - notify_shutdown.notify_waiters(); - })); - } - - /// Create Iroha with specified broker, config, and genesis. - /// - /// # Errors - /// - Reading telemetry configs - /// - telemetry setup - /// - Initialization of [`Sumeragi`] - #[allow(clippy::too_many_lines)] // This is actually easier to understand as a linear sequence of init statements. 
- pub async fn with_genesis( - genesis: Option, - config: Configuration, - broker: Broker, - telemetry: Option, - ) -> Result { - if !config.disable_panic_terminal_colors { - if let Err(e) = color_eyre::install() { - let error_message = format!("{e:#}"); - iroha_logger::error!(error = %error_message, "Tried to `color_eyre::install()` twice",); - } - } - let listen_addr = config.torii.p2p_addr.clone(); - iroha_logger::info!(%listen_addr, "Starting peer"); - let network = IrohaNetwork::new( - broker.clone(), - listen_addr, - config.public_key.clone(), - config.network.actor_channel_capacity, - ) - .await - .wrap_err("Unable to start P2P-network")?; - let network_addr = network.start().await; - - let (events_sender, _) = broadcast::channel(10000); - let world = World::with( - [genesis_domain(&config)], - config.sumeragi.trusted_peers.peers.clone(), - ); - - let kura = Kura::new( - config.kura.init_mode, - std::path::Path::new(&config.kura.block_store_path), - config.kura.debug_output_new_blocks, - )?; - let wsv = WorldStateView::from_configuration(config.wsv, world, Arc::clone(&kura)); - - let transaction_validator = TransactionValidator::new(config.sumeragi.transaction_limits); - - // Validate every transaction in genesis block - if let Some(ref genesis) = genesis { - let wsv_clone = wsv.clone(); - - transaction_validator - .validate_every(genesis.iter().cloned(), &wsv_clone) - .wrap_err("Transaction validation failed in genesis block")?; - } - - let block_hashes = kura.init()?; - - let notify_shutdown = Arc::new(Notify::new()); - - let queue = Arc::new(Queue::from_configuration(&config.queue)); - if Self::start_telemetry(telemetry, &config).await? { - iroha_logger::info!("Telemetry started") - } else { - iroha_logger::warn!("Telemetry not started") - } - - let kura_thread_handler = Kura::start(Arc::clone(&kura)); - - let sumeragi = Arc::new( - // TODO: No function needs 10 parameters. It should accept one struct. 
- Sumeragi::new( - &config.sumeragi, - events_sender.clone(), - wsv, - transaction_validator, - Arc::clone(&queue), - broker.clone(), - Arc::clone(&kura), - network_addr.clone(), - ), - ); - - let freeze_status = Arc::new(AtomicBool::new(false)); - - let sumeragi_relay = FromNetworkBaseRelay { - sumeragi: Arc::clone(&sumeragi), - broker: broker.clone(), - #[cfg(debug_assertions)] - freeze_status: freeze_status.clone(), - } - .start() - .await - .expect_running(); - - let sumeragi_thread_handler = - Sumeragi::initialize_and_start_thread(Arc::clone(&sumeragi), genesis, &block_hashes); - - let block_sync = BlockSynchronizer::from_configuration( - &config.block_sync, - Arc::clone(&sumeragi), - Arc::clone(&kura), - PeerId::new(&config.torii.p2p_addr, &config.public_key), - broker.clone(), - ) - .start() - .await - .expect_running(); - - let torii = Torii::from_configuration( - config.clone(), - Arc::clone(&queue), - events_sender, - Arc::clone(¬ify_shutdown), - Arc::clone(&sumeragi), - Arc::clone(&kura), - ); - - Self::start_listening_signal(Arc::clone(¬ify_shutdown))?; - - Self::prepare_panic_hook(notify_shutdown); - - let torii = Some(torii); - Ok(Self { - queue, - sumeragi, - kura, - block_sync, - torii, - thread_handlers: vec![sumeragi_thread_handler, kura_thread_handler], - _sumeragi_relay: sumeragi_relay, - #[cfg(debug_assertions)] - freeze_status, - }) - } - - /// To make `Iroha` peer work it should be started first. After - /// that moment it will listen for incoming requests and messages. - /// - /// # Errors - /// - Forwards initialisation error. - #[iroha_futures::telemetry_future] - pub async fn start(&mut self) -> Result<()> { - iroha_logger::info!("Starting Iroha"); - self.torii - .take() - .ok_or_else(|| eyre!("Torii is unavailable. Ensure nothing `take`s the Torii instance before this line"))? - .start() - .await - .wrap_err("Failed to start Torii") - } - - /// Starts iroha in separate tokio task. 
- /// - /// # Errors - /// - Forwards initialisation error. - #[cfg(feature = "test-network")] - pub fn start_as_task(&mut self) -> Result>> { - iroha_logger::info!("Starting Iroha as task"); - let torii = self - .torii - .take() - .ok_or_else(|| eyre!("Peer already started in a different task"))?; - Ok(tokio::spawn(async move { - torii.start().await.wrap_err("Failed to start Torii") - })) - } - - #[cfg(feature = "telemetry")] - async fn start_telemetry( - telemetry: Option<( - iroha_logger::SubstrateTelemetry, - iroha_logger::FutureTelemetry, - )>, - config: &Configuration, - ) -> Result { - #[allow(unused)] - if let Some((substrate_telemetry, telemetry_future)) = telemetry { - #[cfg(feature = "dev-telemetry")] - { - iroha_telemetry::dev::start(&config.telemetry, telemetry_future) - .await - .wrap_err("Failed to setup telemetry for futures")?; - } - iroha_telemetry::ws::start(&config.telemetry, substrate_telemetry) - .await - .wrap_err("Failed to setup telemetry for websocket communication") - } else { - Ok(false) - } - } - - #[cfg(not(feature = "telemetry"))] - async fn start_telemetry( - _telemetry: Option<( - iroha_logger::SubstrateTelemetry, - iroha_logger::FutureTelemetry, - )>, - _config: &Configuration, - ) -> Result { - Ok(false) - } - - #[allow(clippy::redundant_pub_crate)] - fn start_listening_signal(notify_shutdown: Arc) -> Result> { - let (mut sigint, mut sigterm) = signal::unix::signal(signal::unix::SignalKind::interrupt()) - .and_then(|sigint| { - let sigterm = signal::unix::signal(signal::unix::SignalKind::terminate())?; - - Ok((sigint, sigterm)) - }) - .wrap_err("Failed to start listening for OS signals")?; - - let handle = task::spawn(async move { - tokio::select! 
{ - _ = sigint.recv() => { - iroha_logger::info!("SIGINT received, shutting down..."); - }, - _ = sigterm.recv() => { - iroha_logger::info!("SIGTERM received, shutting down..."); - }, - } - - // NOTE: shutdown all currently listening waiters - notify_shutdown.notify_waiters(); - }); - - Ok(handle) - } + fn prepare_panic_hook(notify_shutdown: Arc) { + #[cfg(not(feature = "test-network"))] + use std::panic::set_hook; + + // Hotfix for tests. + // + // # Problem + // + // When running tests in parallel `std::panic::set_hook()` will be set + // the same for all threads. That means, that panic in one test can + // cause another test shutdown, which we don't want. + // + // # Solution drawbacks + // + // A downside of this approach is that this panic hook will not work for + // threads created by Iroha itself (e.g. Sumeragi thread). + // + // # TODO: Remove this when all Rust integrations tests will + // be converted to separate Python tests. + #[cfg(feature = "test-network")] + use thread_local_panic_hook::set_hook; + + set_hook(Box::new(move |info| { + // What clippy suggests is much less readable in this case + #[allow(clippy::option_if_let_else)] + let panic_message = if let Some(message) = info.payload().downcast_ref::<&str>() { + message + } else if let Some(message) = info.payload().downcast_ref::() { + message + } else { + "unspecified" + }; + + let location = info.location().map_or_else( + || "unspecified".to_owned(), + |location| format!("{}:{}", location.file(), location.line()), + ); + + iroha_logger::error!(panic_message, location, "A panic occurred, shutting down"); + + // NOTE: shutdown all currently listening waiters + notify_shutdown.notify_waiters(); + })); + } + + /// Create Iroha with specified broker, config, and genesis. 
+ /// + /// # Errors + /// - Reading telemetry configs + /// - telemetry setup + /// - Initialization of [`Sumeragi`] + #[allow(clippy::too_many_lines)] // This is actually easier to understand as a linear sequence of init statements. + pub async fn with_genesis( + genesis: Option, + config: Configuration, + broker: Broker, + telemetry: Option, + ) -> Result { + if !config.disable_panic_terminal_colors { + if let Err(e) = color_eyre::install() { + let error_message = format!("{e:#}"); + iroha_logger::error!(error = %error_message, "Tried to `color_eyre::install()` twice",); + } + } + let listen_addr = config.torii.p2p_addr.clone(); + iroha_logger::info!(%listen_addr, "Starting peer"); + let network = IrohaNetwork::new( + broker.clone(), + listen_addr, + config.public_key.clone(), + config.network.actor_channel_capacity, + ) + .await + .wrap_err("Unable to start P2P-network")?; + let network_addr = network.start().await; + + let (events_sender, _) = broadcast::channel(10000); + let world = World::with( + [genesis_domain(&config)], + config.sumeragi.trusted_peers.peers.clone(), + ); + + let kura = Kura::new( + config.kura.init_mode, + std::path::Path::new(&config.kura.block_store_path), + config.kura.debug_output_new_blocks, + )?; + let wsv = WorldStateView::from_configuration(config.wsv, world, Arc::clone(&kura)); + + let transaction_validator = TransactionValidator::new(config.sumeragi.transaction_limits); + + // Validate every transaction in genesis block + if let Some(ref genesis) = genesis { + let wsv_clone = wsv.clone(); + + transaction_validator + .validate_every(genesis.iter().cloned(), &wsv_clone) + .wrap_err("Transaction validation failed in genesis block")?; + } + + let block_hashes = kura.init()?; + + let notify_shutdown = Arc::new(Notify::new()); + + let queue = Arc::new(Queue::from_configuration(&config.queue)); + if Self::start_telemetry(telemetry, &config).await? 
{ + iroha_logger::info!("Telemetry started") + } else { + iroha_logger::warn!("Telemetry not started") + } + + let kura_thread_handler = Kura::start(Arc::clone(&kura)); + + let sumeragi = Arc::new( + // TODO: No function needs 10 parameters. It should accept one struct. + Sumeragi::new( + &config.sumeragi, + events_sender.clone(), + wsv, + transaction_validator, + Arc::clone(&queue), + broker.clone(), + Arc::clone(&kura), + network_addr.clone(), + ), + ); + + let freeze_status = Arc::new(AtomicBool::new(false)); + + let sumeragi_relay = FromNetworkBaseRelay { + sumeragi: Arc::clone(&sumeragi), + broker: broker.clone(), + #[cfg(debug_assertions)] + freeze_status: freeze_status.clone(), + } + .start() + .await + .expect_running(); + + let sumeragi_thread_handler = + Sumeragi::initialize_and_start_thread(Arc::clone(&sumeragi), genesis, &block_hashes); + + let block_sync = BlockSynchronizer::from_configuration( + &config.block_sync, + Arc::clone(&sumeragi), + Arc::clone(&kura), + PeerId::new(&config.torii.p2p_addr, &config.public_key), + broker.clone(), + ) + .start() + .await + .expect_running(); + + let torii = Torii::from_configuration( + config.clone(), + Arc::clone(&queue), + events_sender, + Arc::clone(¬ify_shutdown), + Arc::clone(&sumeragi), + Arc::clone(&kura), + ); + + Self::start_listening_signal(Arc::clone(¬ify_shutdown))?; + + Self::prepare_panic_hook(notify_shutdown); + + let torii = Some(torii); + Ok(Self { + queue, + sumeragi, + kura, + block_sync, + torii, + thread_handlers: vec![sumeragi_thread_handler, kura_thread_handler], + _sumeragi_relay: sumeragi_relay, + #[cfg(debug_assertions)] + freeze_status, + }) + } + + /// To make `Iroha` peer work it should be started first. After + /// that moment it will listen for incoming requests and messages. + /// + /// # Errors + /// - Forwards initialisation error. 
+ #[iroha_futures::telemetry_future] + pub async fn start(&mut self) -> Result<()> { + iroha_logger::info!("Starting Iroha"); + self.torii + .take() + .ok_or_else(|| eyre!("Torii is unavailable. Ensure nothing `take`s the Torii instance before this line"))? + .start() + .await + .wrap_err("Failed to start Torii") + } + + /// Starts iroha in separate tokio task. + /// + /// # Errors + /// - Forwards initialisation error. + #[cfg(feature = "test-network")] + pub fn start_as_task(&mut self) -> Result>> { + iroha_logger::info!("Starting Iroha as task"); + let torii = self + .torii + .take() + .ok_or_else(|| eyre!("Peer already started in a different task"))?; + Ok(tokio::spawn(async move { + torii.start().await.wrap_err("Failed to start Torii") + })) + } + + #[cfg(feature = "telemetry")] + async fn start_telemetry( + telemetry: Option<( + iroha_logger::SubstrateTelemetry, + iroha_logger::FutureTelemetry, + )>, + config: &Configuration, + ) -> Result { + #[allow(unused)] + if let Some((substrate_telemetry, telemetry_future)) = telemetry { + #[cfg(feature = "dev-telemetry")] + { + iroha_telemetry::dev::start(&config.telemetry, telemetry_future) + .await + .wrap_err("Failed to setup telemetry for futures")?; + } + iroha_telemetry::ws::start(&config.telemetry, substrate_telemetry) + .await + .wrap_err("Failed to setup telemetry for websocket communication") + } else { + Ok(false) + } + } + + #[cfg(not(feature = "telemetry"))] + async fn start_telemetry( + _telemetry: Option<( + iroha_logger::SubstrateTelemetry, + iroha_logger::FutureTelemetry, + )>, + _config: &Configuration, + ) -> Result { + Ok(false) + } + + #[allow(clippy::redundant_pub_crate)] + fn start_listening_signal(notify_shutdown: Arc) -> Result> { + let (mut sigint, mut sigterm) = signal::unix::signal(signal::unix::SignalKind::interrupt()) + .and_then(|sigint| { + let sigterm = signal::unix::signal(signal::unix::SignalKind::terminate())?; + + Ok((sigint, sigterm)) + }) + .wrap_err("Failed to start listening 
for OS signals")?; + + let handle = task::spawn(async move { + tokio::select! { + _ = sigint.recv() => { + iroha_logger::info!("SIGINT received, shutting down..."); + }, + _ = sigterm.recv() => { + iroha_logger::info!("SIGTERM received, shutting down..."); + }, + } + + // NOTE: shutdown all currently listening waiters + notify_shutdown.notify_waiters(); + }); + + Ok(handle) + } +} + +/// Combine configs from as many sources as are available. +/// +/// # Errors +/// If a necessary field isn't specified or specified with an error in +/// the overriding structure, report an error +pub fn combine_configs(args: &Arguments) -> Result { + let config = { + let colors = style::Styling::new(); + let config = args + .config_path + .first_existing_path() + .map_or_else( + || { + eprintln!( + "Configuration file not found at {}.\n{}\n", + args.config_path.style(colors.highlight), + "Falling back to environment variables".style(colors.highlight), + ); + ConfigurationProxy::default() + }, + |path| ConfigurationProxy::from_path(&path.as_path()), + ) + .override_with(ConfigurationProxy::from_env()) + .build(); + + match config { + Ok(mut config) => { + if style::should_disable_color() { + if !config.disable_panic_terminal_colors { + println!("Overriding config: disabling panic colors."); + config.disable_panic_terminal_colors = true; + } + + // Remove terminal colors to comply with XDG + // specifications, Rust's conventions as well as remove + // escape codes from logs redirected from STDOUT. If you + // need syntax highlighting, use JSON logging instead. + if config.logger.terminal_colors { + println!("Overriding config: disabling log colors."); + config.logger.terminal_colors = false; + } + } + config + } + Err(e) => { + use iroha_config::base::derive::Error::*; + + color_eyre::install()?; + + return Err(match e { + UnknownField(_) => eyre!(e) + .wrap_err("Failed to combine configurations.") + .suggestion("Double check the spelling of fields"), + MissingField { message, .. 
} | ProvidedInferredField { message, .. } => { + eyre!(e) + .wrap_err("Failed to combine configurations.") + .suggestion(message) + } + InsaneValue { ref message, .. } => { + let msg = message.clone(); + eyre!(e) + .wrap_err("Failed to combine configurations") + .suggestion(msg) + } + _ => eyre!(e).wrap_err("Failed to combine configurations."), + }); + } + } + }; + Ok(config) } fn genesis_account(public_key: iroha_crypto::PublicKey) -> Account { - Account::new(AccountId::genesis(), [public_key]).build() + Account::new(AccountId::genesis(), [public_key]).build() } fn genesis_domain(configuration: &Configuration) -> Domain { - let account_public_key = &configuration.genesis.account_public_key; - let mut domain = Domain::new(DomainId::genesis()).build(); + let account_public_key = &configuration.genesis.account_public_key; + let mut domain = Domain::new(DomainId::genesis()).build(); - domain.accounts.insert( - ::Id::genesis(), - genesis_account(account_public_key.clone()), - ); + domain.accounts.insert( + ::Id::genesis(), + genesis_account(account_public_key.clone()), + ); - domain + domain } pub mod style { - //! Style and colouration of Iroha CLI outputs. - use owo_colors::{OwoColorize, Style}; - - /// Styling information set at run-time for pretty-printing with colour - #[derive(Clone, Copy, Debug)] - pub struct Styling { - /// Positive highlight - pub positive: Style, - /// Negative highlight. Usually error message. 
- pub negative: Style, - /// Neutral highlight - pub highlight: Style, - /// Minor message - pub minor: Style, - } - - impl Default for Styling { - fn default() -> Self { - Self { - positive: Style::new().green().bold(), - negative: Style::new().red().bold(), - highlight: Style::new().bold(), - minor: Style::new().green(), - } - } - } - - /// Determine if message colourisation is to be enabled - pub fn should_disable_color() -> bool { - supports_color::on(supports_color::Stream::Stdout).is_none() - || std::env::var("TERMINAL_COLORS") - .map(|s| !s.as_str().parse().unwrap_or(true)) - .unwrap_or(false) - } - - impl Styling { - #[must_use] - /// Constructor - pub fn new() -> Self { - if should_disable_color() { - Self::no_color() - } else { - Self::default() - } - } - - fn no_color() -> Self { - Self { - positive: Style::new(), - negative: Style::new(), - highlight: Style::new(), - minor: Style::new(), - } - } - - /// Produce documentation for argument group - pub fn or(&self, arg_group: &[&str; 2]) -> String { - format!( - "`{}` (short `{}`)", - arg_group[0].style(self.positive), - arg_group[1].style(self.minor) - ) - } - - /// Convenience method for ".json or .json5" pattern - pub fn with_json_file_ext(&self, name: &str) -> String { - let json = format!("{name}.json"); - let json5 = format!("{name}.json5"); - format!( - "`{}` or `{}`", - json.style(self.highlight), - json5.style(self.highlight) - ) - } - } + //! Style and colouration of Iroha CLI outputs. + use owo_colors::{OwoColorize, Style}; + + /// Styling information set at run-time for pretty-printing with colour + #[derive(Clone, Copy, Debug)] + pub struct Styling { + /// Positive highlight + pub positive: Style, + /// Negative highlight. Usually error message. 
+ pub negative: Style, + /// Neutral highlight + pub highlight: Style, + /// Minor message + pub minor: Style, + } + + impl Default for Styling { + fn default() -> Self { + Self { + positive: Style::new().green().bold(), + negative: Style::new().red().bold(), + highlight: Style::new().bold(), + minor: Style::new().green(), + } + } + } + + /// Determine if message colourisation is to be enabled + pub fn should_disable_color() -> bool { + supports_color::on(supports_color::Stream::Stdout).is_none() + || std::env::var("TERMINAL_COLORS") + .map(|s| !s.as_str().parse().unwrap_or(true)) + .unwrap_or(false) + } + + impl Styling { + #[must_use] + /// Constructor + pub fn new() -> Self { + if should_disable_color() { + Self::no_color() + } else { + Self::default() + } + } + + fn no_color() -> Self { + Self { + positive: Style::new(), + negative: Style::new(), + highlight: Style::new(), + minor: Style::new(), + } + } + + /// Produce documentation for argument group + pub fn or(&self, arg_group: &[&str; 2]) -> String { + format!( + "`{}` (short `{}`)", + arg_group[0].style(self.positive), + arg_group[1].style(self.minor) + ) + } + + /// Convenience method for ".json or .json5" pattern + pub fn with_json_file_ext(&self, name: &str) -> String { + let json = format!("{name}.json"); + let json5 = format!("{name}.json5"); + format!( + "`{}` or `{}`", + json.style(self.highlight), + json5.style(self.highlight) + ) + } + } } #[cfg(not(feature = "test-network"))] #[cfg(test)] mod tests { - use std::{iter::repeat, panic, thread}; - - use futures::future::join_all; - use serial_test::serial; - - use super::*; - - #[allow(clippy::panic, clippy::print_stdout)] - #[tokio::test] - #[serial] - async fn iroha_should_notify_on_panic() { - let notify = Arc::new(Notify::new()); - let hook = panic::take_hook(); - ::prepare_panic_hook(Arc::clone(¬ify)); - let waiters: Vec<_> = repeat(()).take(10).map(|_| Arc::clone(¬ify)).collect(); - let handles: Vec<_> = waiters.iter().map(|waiter| 
waiter.notified()).collect(); - thread::spawn(move || { - panic!("Test panic"); - }); - join_all(handles).await; - panic::set_hook(hook); - } + use std::{iter::repeat, panic, thread}; + + use futures::future::join_all; + use serial_test::serial; + + use super::*; + + #[allow(clippy::panic, clippy::print_stdout)] + #[tokio::test] + #[serial] + async fn iroha_should_notify_on_panic() { + let notify = Arc::new(Notify::new()); + let hook = panic::take_hook(); + ::prepare_panic_hook(Arc::clone(&notify)); + let waiters: Vec<_> = repeat(()).take(10).map(|_| Arc::clone(&notify)).collect(); + let handles: Vec<_> = waiters.iter().map(|waiter| waiter.notified()).collect(); + thread::spawn(move || { + panic!("Test panic"); + }); + join_all(handles).await; + panic::set_hook(hook); + } } diff --git a/cli/src/main.rs b/cli/src/main.rs index 35445603f23..b4441b13755 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -2,8 +2,10 @@ #![allow(clippy::print_stdout)] use std::env; +use color_eyre::eyre::WrapErr as _; use iroha::style::Styling; use iroha_config::path::Path as ConfigPath; +use iroha_genesis::{GenesisNetwork, GenesisNetworkTrait as _, RawGenesisBlock}; use owo_colors::OwoColorize as _; const HELP_ARG: [&str; 2] = ["--help", "-h"]; @@ -27,6 +29,17 @@ const REQUIRED_ENV_VARS: [(&str, &str); 7] = [ ]; #[tokio::main] +/// To make the `Iroha` peer work, all actors should be started first. +/// After that, you can start it and listen for torii events. 
+/// +/// # Side effect +/// - Prints welcome message in the log +/// +/// # Errors +/// - Reading genesis from disk +/// - Reading telemetry configs +/// - telemetry setup +/// - Initialization of [`Sumeragi`] async fn main() -> Result<(), color_eyre::Report> { let styling = Styling::new(); let mut args = iroha::Arguments::default(); @@ -43,7 +56,12 @@ async fn main() -> Result<(), color_eyre::Report> { if env::args().any(|a| SUBMIT_ARG.contains(&a.as_str())) { args.submit_genesis = true; if let Ok(genesis_path) = env::var("IROHA2_GENESIS_PATH") { - args.genesis_path = Some(ConfigPath::user_provided(&genesis_path)?); + args.genesis_path = + Some(ConfigPath::user_provided(&genesis_path) + .map_err(|e| {color_eyre::install().expect("CRITICAL"); e}) + .wrap_err_with( + || + format!("Could not read `{genesis_path}`, which is required, given you requested `--submit-genesis` on the command line."))?); } } else { args.genesis_path = None; @@ -56,6 +74,16 @@ async fn main() -> Result<(), color_eyre::Report> { .any(|group| group.contains(&arg.as_str()))) { print_help(&styling)?; + // WORKAROUND for #2212: because of how `color_eyre` + // works, we need to install the hook before creating any + // instance of `eyre::Report`, otherwise the `eyre` + // default reporting hook is going to be installed + // automatically. 
+ + // This results in a nasty repetition of the + // `color_eyre::install().unwrap()` pattern, which is the + // lesser of two evils + color_eyre::install().expect("CRITICAL"); eyre::bail!( "Unrecognised command-line flag `{}`", arg.style(styling.negative) @@ -64,12 +92,19 @@ async fn main() -> Result<(), color_eyre::Report> { } if let Ok(config_path) = env::var("IROHA2_CONFIG_PATH") { - args.config_path = ConfigPath::user_provided(&config_path)?; + args.config_path = ConfigPath::user_provided(&config_path) + .map_err(|e| { + color_eyre::install().expect("CRITICAL"); + e + }) + .wrap_err_with(|| format!("Failed to parse `{config_path}` as configuration path"))?; } if !args.config_path.exists() { // Require all the fields defined in default `config.json` // to be specified as env vars with their respective prefixes + // TODO: Consider moving these into the + // `iroha::combine_configs` and dependent functions. for var_name in REQUIRED_ENV_VARS { // Rather than short circuit and require the person to fix // the missing env vars one by one, print out the whole @@ -84,7 +119,41 @@ async fn main() -> Result<(), color_eyre::Report> { } } - ::new(&args).await?.start().await?; + let config = iroha::combine_configs(&args)?; + let telemetry = iroha_logger::init(&config.logger)?; + iroha_logger::info!( + git_commit_sha = env!("VERGEN_GIT_SHA"), + "Hyperledgerいろは2にようこそ!(translation) Welcome to Hyperledger Iroha {}!", + env!("CARGO_PKG_VERSION") + ); + + let genesis = if let Some(genesis_path) = &args.genesis_path { + GenesisNetwork::from_configuration( + args.submit_genesis, + RawGenesisBlock::from_path( + genesis_path + .first_existing_path() + .ok_or({ + color_eyre::install().expect("CRITICAL"); + color_eyre::eyre::eyre!("Genesis block file {genesis_path:?} doesn't exist") + })? + .as_ref(), + )?, + Some(&config.genesis), + &config.sumeragi.transaction_limits, + ) + .wrap_err("Failed to initialize genesis.")? 
+ } else { + None + }; + + iroha::Iroha::with_genesis( + genesis, + config, + iroha_actor::broker::Broker::new(), + telemetry, + ) + .await?; Ok(()) } diff --git a/config/Cargo.toml b/config/Cargo.toml index f27ce1db286..d31fcfb4876 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -25,6 +25,7 @@ json5 = "0.4.1" thiserror = "1.0.38" derive_more = "0.99.17" cfg-if = "1.0.0" +path-absolutize = "3.0.14" [dev-dependencies] proptest = "1.0.0" diff --git a/config/base/Cargo.toml b/config/base/Cargo.toml index a7d0ef961ef..de645864c81 100644 --- a/config/base/Cargo.toml +++ b/config/base/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true [dependencies] iroha_config_derive = { path = "derive" } +iroha_crypto = { version = "=2.0.0-pre-rc.13", path = "../../crypto" } serde = { version = "1.0.151", default-features = false, features = ["derive"] } serde_json = "1.0.91" diff --git a/config/base/derive/src/documented.rs b/config/base/derive/src/documented.rs index 91e8d144c3d..3720229cfad 100644 --- a/config/base/derive/src/documented.rs +++ b/config/base/derive/src/documented.rs @@ -193,7 +193,13 @@ fn impl_get_recursive(ast: &StructWithFields) -> proc_macro2::TokenStream { quote! { [stringify!(#ident)] => { serde_json::to_value(&#l_value) - .map_err(|e| ::iroha_config_base::derive::Error::field_error(stringify!(#ident), e))? + .map_err( + |error| + ::iroha_config_base::derive::Error::FieldDeserialization { + field: stringify!(#ident), + error + } + )? } #inner_thing2 } diff --git a/config/base/derive/src/proxy.rs b/config/base/derive/src/proxy.rs index 9b710e25a68..16459d3da0b 100644 --- a/config/base/derive/src/proxy.rs +++ b/config/base/derive/src/proxy.rs @@ -92,7 +92,7 @@ pub fn impl_load_from_env(ast: &StructWithFields) -> TokenStream { false }; let err_ty = quote! { ::iroha_config_base::derive::Error }; - let err_variant = quote! { ::iroha_config_base::derive::Error::SerdeError }; + let err_variant = quote! 
{ ::iroha_config_base::derive::Error::Json5 }; let inner = if is_string { quote! { Ok(var) } } else if as_str_attr { @@ -152,8 +152,8 @@ pub fn impl_load_from_disk(ast: &StructWithFields) -> TokenStream { let proxy_name = &ast.ident; let disk_trait = quote! { ::iroha_config_base::proxy::LoadFromDisk }; let error_ty = quote! { ::iroha_config_base::derive::Error }; - let disk_err_variant = quote! { ::iroha_config_base::derive::Error::DiskError }; - let serde_err_variant = quote! { ::iroha_config_base::derive::Error::SerdeError }; + let disk_err_variant = quote! { ::iroha_config_base::derive::Error::Disk }; + let serde_err_variant = quote! { ::iroha_config_base::derive::Error::Json5 }; let none_proxy = gen_none_fields_proxy(ast); quote! { impl #disk_trait for #proxy_name { @@ -243,21 +243,22 @@ pub fn impl_build(ast: &StructWithFields) -> TokenStream { fn gen_none_fields_check(ast: &StructWithFields) -> proc_macro2::TokenStream { let checked_fields = ast.fields.iter().map(|field| { let ident = &field.ident; - let err_variant = quote! { ::iroha_config_base::derive::Error::ProxyBuildError }; + let missing_field = quote! { ::iroha_config_base::derive::Error::MissingField }; if field.has_inner { let inner_ty = get_inner_type("Option", &field.ty); let builder_trait = quote! { ::iroha_config_base::proxy::Builder }; quote! { #ident: <#inner_ty as #builder_trait>::build( self.#ident.ok_or( - #err_variant(stringify!(#ident).to_owned()) + #missing_field{field: stringify!(#ident), message: ""} )? )? } } else { quote! { #ident: self.#ident.ok_or( - #err_variant(stringify!(#ident).to_owned()))? + #missing_field{field: stringify!(#ident), message: ""} + )? 
} } }); diff --git a/config/base/derive/src/view.rs b/config/base/derive/src/view.rs index 674ef3f8503..05d954c12eb 100644 --- a/config/base/derive/src/view.rs +++ b/config/base/derive/src/view.rs @@ -123,7 +123,7 @@ mod gen { const _: () = { use iroha_config_base::view::NoView; #( - const _: () = assert!(!iroha_config_base::view::IsHasView::<#field_types>::IS_HAS_VIEW, #messages); + const _: () = assert!(!iroha_config_base::view::IsInstanceHasView::<#field_types>::IS_HAS_VIEW, #messages); )* }; } diff --git a/config/base/src/lib.rs b/config/base/src/lib.rs index f1649a61fa1..c0e5514f3ad 100644 --- a/config/base/src/lib.rs +++ b/config/base/src/lib.rs @@ -1,5 +1,4 @@ //! Package for managing iroha configuration -#![allow(clippy::std_instead_of_core)] use std::{fmt::Debug, path::Path}; use serde::{de::DeserializeOwned, Deserialize, Deserializer, Serialize}; @@ -300,16 +299,10 @@ pub mod derive { use serde::Deserialize; use thiserror::Error; - /// Error related to deserializing specific field - #[derive(Debug, Error)] - #[error("Name of the field: {}", .field)] - pub struct FieldError { - /// Field name (known at compile time) - pub field: &'static str, - /// Serde json error - #[source] - pub error: serde_json::Error, - } + // TODO: use VERGEN to point to LTS reference on LTS branch + /// Reference to the current Dev branch configuration + pub static CONFIG_REFERENCE: &str = + "https://github.com/hyperledger/iroha/blob/iroha2-dev/docs/source/references/config.md"; // TODO: deal with `#[serde(skip)]` /// Derive `Configurable` and `Proxy` error @@ -317,37 +310,68 @@ pub mod derive { #[allow(clippy::enum_variant_names)] pub enum Error { /// Used in [`Documented`] trait for wrong query errors - #[error("Got unknown field: `{}`", Self::concat_error_string(.0))] + #[error("Got unknown field: `{}`", .0.join("."))] UnknownField(Vec), + /// Used in [`Documented`] trait for deserialization errors /// while retaining field info - #[error("Failed to (de)serialize the field: 
{}", .0.field)] + #[error("Failed to (de)serialize the field: {}", .field)] #[serde(skip)] - FieldError(#[from] FieldError), - /// Used in [`Builder`] trait for build errors - #[error("Proxy failed at build stage due to: {0}")] - ProxyBuildError(String), - /// Used in the [`LoadFromDisk`](`crate::proxy::LoadFromDisk`) trait for file read errors - #[error("Reading file from disk failed: {0}")] + FieldDeserialization { + /// Field name (known at compile time) + field: &'static str, + /// Serde json error + #[source] + error: serde_json::Error, + }, + + /// When a field is missing. + #[error("Please add `{}` to the configuration.", .field)] #[serde(skip)] - DiskError(#[from] std::io::Error), - /// Used in [`LoadFromDisk`](`crate::proxy::LoadFromDisk`) trait for deserialization errors - #[error("Deserializing JSON failed: {0}")] + MissingField { + /// Field name + field: &'static str, + /// Additional message to be added as `color_eyre::suggestion` + message: &'static str, + }, + + /// Key pair creation failed, most likely because the keys don't form a pair + #[error("Key pair creation failed")] + Crypto(#[from] iroha_crypto::Error), + + // IMO this variant should not exist. If the value is inferred, we should only warn people if the inferred value is different from the provided one. 
+ /// Inferred field was provided by accident and we don't want it to be provided, because the value is inferred from other fields + #[error("You should remove the field `{}` as its value is determined by other configuration parameters.", .field)] #[serde(skip)] - SerdeError(#[from] json5::Error), - } + ProvidedInferredField { + /// Field name + field: &'static str, + /// Additional message to be added as `color_eyre::suggestion` + message: &'static str, + }, - impl Error { - /// Construct a field error - pub const fn field_error(field: &'static str, error: serde_json::Error) -> Self { - Self::FieldError(FieldError { field, error }) - } + /// Value that is unacceptable to Iroha was encountered when deserializing the config + #[error("The value {} of {} is wrong. \nPlease change the value.", .value, .field)] + #[serde(skip)] + InsaneValue { + /// The value of the field that's incorrect + value: String, + /// Field name that contains invalid value + field: &'static str, + /// Additional message to be added as `color_eyre::suggestion` + message: String, + // docstring: &'static str, // TODO: Inline the docstring for easy access + }, - /// To be used for [`Self::UnknownField`] variant construction. - #[inline] - pub fn concat_error_string(field: &[String]) -> String { - field.join(".") - } + /// Used in the [`LoadFromDisk`](`crate::proxy::LoadFromDisk`) trait for file read errors + #[error("Reading file from disk failed.")] + #[serde(skip)] + Disk(#[from] std::io::Error), + + /// Used in [`LoadFromDisk`](`crate::proxy::LoadFromDisk`) trait for deserialization errors + #[error("Deserializing JSON failed")] + #[serde(skip)] + Json5(#[from] json5::Error), } } @@ -356,24 +380,27 @@ pub mod runtime_upgrades; pub mod view { //! 
Module for view related traits and structs - /// Marker trait to set default value `IS_HAS_VIEW` to `false` + /// Marker trait to set default value [`IsInstanceHasView::IS_INSTANCE_HAS_VIEW`] to `false` pub trait NoView { /// [`Self`] doesn't implement [`HasView`] const IS_HAS_VIEW: bool = false; } + impl NoView for T {} /// Marker traits for types for which views are implemented pub trait HasView {} /// Wrapper structure used to check if type implements `[HasView]` - /// If `T` doesn't implement [`HasView`] then `NoView::IS_HAS_VIEW` (`false`) will be used - /// Otherwise `IsHasView::IS_HAS_VIEW` (`true`) from `impl` block will shadow `NoView::IS_HAS_VIEW` - pub struct IsHasView(core::marker::PhantomData); + /// If `T` doesn't implement [`HasView`] then + /// [`NoView::IS_INSTANCE_HAS_VIEW`] (`false`) will be used. + /// Otherwise [`IsInstanceHasView::IS_INSTANCE_HAS_VIEW`] (`true`) + /// from `impl` block will shadow `NoView::IS_INSTANCE_HAS_VIEW` + pub struct IsInstanceHasView(core::marker::PhantomData); - impl IsHasView { + impl IsInstanceHasView { /// `T` implements trait [`HasView`] - pub const IS_HAS_VIEW: bool = true; + pub const IS_INSTANCE_HAS_VIEW: bool = true; } } @@ -382,7 +409,8 @@ pub mod proxy { use super::*; - /// Trait for dynamic and asynchronous configuration via maintenance endpoint for Rust structures + /// Trait for dynamic and asynchronous configuration via + /// maintenance endpoint for Rust structures pub trait Documented: Serialize + DeserializeOwned { /// Error type returned by methods of this trait type Error; @@ -394,6 +422,7 @@ pub mod proxy { fn get_inner_docs() -> String; /// Return the JSON value of a given field + /// /// # Errors /// Fails if field was unknown #[inline] @@ -402,6 +431,7 @@ pub mod proxy { } /// Get documentation of a given field + /// /// # Errors /// Fails if field was unknown #[inline] @@ -409,7 +439,9 @@ pub mod proxy { Self::get_doc_recursive([field]) } - /// Return the JSON value of a given inner field of 
arbitrary inner depth + /// Return the JSON value of a given inner field of arbitrary + /// inner depth + /// /// # Errors /// Fails if field was unknown fn get_recursive<'tl, T>(&self, inner_field: T) -> Result diff --git a/config/src/client.rs b/config/src/client.rs index 470c00d36d8..6e95fec8c50 100644 --- a/config/src/client.rs +++ b/config/src/client.rs @@ -124,20 +124,30 @@ impl ConfigurationProxy { if let Some(tx_ttl) = self.transaction_time_to_live_ms { // Really small TTL would be detrimental to performance if tx_ttl < TTL_TOO_SMALL_THRESHOLD { - eyre::bail!( - ConfigError::ProxyBuildError("`TRANSACTION_TIME_TO_LIVE_MS`, network throughput may be compromised for values less than {TTL_TOO_SMALL_THRESHOLD}".to_owned()) - ); + eyre::bail!(ConfigError::InsaneValue { + field: "TRANSACTION_TIME_TO_LIVE_MS", + value: tx_ttl.to_string(), + message: format!(", because if it's smaller than {TTL_TOO_SMALL_THRESHOLD}, Iroha wouldn't be able to produce blocks on time.") + }); } // Timeouts bigger than transaction TTL don't make sense as then transaction would be discarded before this timeout if let Some(timeout) = self.transaction_status_timeout_ms { if timeout > tx_ttl { - eyre::bail!(ConfigError::ProxyBuildError("`TRANSACTION_STATUS_TIMEOUT_MS`: {timeout} bigger than `TRANSACTION_TIME_TO_LIVE_MS`: {self.transaction_status_timeout_ms}. Consider making it smaller".to_owned())); + eyre::bail!(ConfigError::InsaneValue { + field: "TRANSACTION_STATUS_TIMEOUT_MS", + value: timeout.to_string(), + message: format!(", because it should be smaller than `TRANSACTION_TIME_TO_LIVE_MS`, which is {tx_ttl}") + }) } } } if let Some(tx_limits) = self.transaction_limits { if *tx_limits.max_wasm_size_bytes() < WASM_SIZE_TOO_SMALL_THRESHOLD { - eyre::bail!(ConfigError::ProxyBuildError("`TRANSACTION_LIMITS` parameter's `max_wasm_size` field too small at {tx_limits.max_wasm_size_bytes}. 
Consider making it bigger than {WASM_SIZE_TOO_SMALL_THRESHOLD}".to_owned())); + eyre::bail!(ConfigError::InsaneValue { + field: "TRANSACTION_LIMITS", + value: format!("{}", tx_limits.max_wasm_size_bytes()), + message: String::new() + }); } } if let Some(api_url) = &self.torii_api_url { @@ -147,16 +157,20 @@ impl ConfigurationProxy { Some((protocol, _)) => { // TODO: this is neither robust, nor useful. This should be enforced as a `FromStr` implementation. if protocol != "http" { - eyre::bail!(ConfigError::ProxyBuildError( - "`TORII_API_URL` string: `{api_url}` only supports the `HTTP` protocol currently".to_owned() - )); + eyre::bail!(ConfigError::InsaneValue { + field: "TORII_API_URL", + value: api_url.to_string(), + message: ", because we only support HTTP".to_owned(), + }); } } _ => { - eyre::bail!(ConfigError::ProxyBuildError( - "`TORII_API_URL` string: `{api_url}` should provide a connection protocol" - .to_owned() - )); + eyre::bail!(ConfigError::InsaneValue { + field: "TORII_API_URL", + value: api_url.to_string(), + message: ", because it's missing the connection protocol (e.g. `http://`)" + .to_owned(), + }); } } } @@ -166,21 +180,27 @@ impl ConfigurationProxy { match split { Some((protocol, endpoint)) => { if protocol != "http" { - eyre::bail!(ConfigError::ProxyBuildError( - "`TORII_TELEMETRY_URL` string: `{telemetry_url}` only supports HTTP" - .to_owned() - )); + eyre::bail!(ConfigError::InsaneValue { + value: telemetry_url.to_string(), + field: "TORII_TELEMETRY_URL", + message: ", because we only support HTTP".to_owned(), + }); } if endpoint.split(':').count() != 2 { - eyre::bail!(ConfigError::ProxyBuildError( - "`TORII_TELEMETRY_URL` string: `{telemetry_url}` should provide a connection port, e.g. `http://127.0.0.1:8180`".to_owned() - )); + eyre::bail!(ConfigError::InsaneValue{ + value: telemetry_url.to_string(), + field: "TORII_TELEMETRY_URL", + message: ". You haven't provided a connection port, e.g. 
`8180` in `http://127.0.0.1:8180`".to_owned(), + }); } } _ => { - eyre::bail!(ConfigError::ProxyBuildError( - "`TORII_TELEMETRY_URL` string: `{telemetry_url}` should provide a connection protocol".to_owned() - )); + eyre::bail!(ConfigError::InsaneValue { + value: telemetry_url.to_string(), + field: "TORII_TELEMETRY_URL", + message: ", because it's missing the connection protocol (e.g. `http://`)" + .to_owned() + }); } } } diff --git a/config/src/iroha.rs b/config/src/iroha.rs index dd5968306ac..d7a9d95dd55 100644 --- a/config/src/iroha.rs +++ b/config/src/iroha.rs @@ -2,7 +2,6 @@ #![allow(clippy::std_instead_of_core)] use std::fmt::Debug; -use eyre::{Result, WrapErr}; use iroha_config_base::derive::{view, Documented, Error as ConfigError, Proxy}; use iroha_crypto::prelude::*; use serde::{Deserialize, Serialize}; @@ -88,48 +87,56 @@ impl ConfigurationProxy { /// # Errors /// - If the relevant uppermost Iroha config fields were not provided. #[allow(clippy::expect_used, clippy::unwrap_in_result)] - pub fn finish(&mut self) -> Result<()> { + pub fn finish(&mut self) -> Result<(), ConfigError> { if let Some(sumeragi_proxy) = &mut self.sumeragi { // First, iroha public/private key and sumeragi keypair are interchangeable, but // the user is allowed to provide only the former, and keypair is generated automatically, // bailing out if key_pair provided in sumeragi no matter its value if sumeragi_proxy.key_pair.is_some() { - eyre::bail!(ConfigError::ProxyBuildError( - "Sumeragi should not be provided with `key_pair` directly as it is instantiated via Iroha config. Please set the `KEY_PAIR` to `null` or omit them entirely." - .to_owned())) + return Err(ConfigError::ProvidedInferredField { + field: "key_pair", + message: "Sumeragi should not be provided with `KEY_PAIR` directly. That value is computed from the other config parameters. Please set the `KEY_PAIR` to `null` or omit entirely." 
+ }); } if let (Some(public_key), Some(private_key)) = (&self.public_key, &self.private_key) { sumeragi_proxy.key_pair = Some(KeyPair::new(public_key.clone(), private_key.clone())?); } else { - eyre::bail!(ConfigError::ProxyBuildError( - "Iroha public and private key not supplied, instantiating `sumeragi` keypair is impossible. Please provide `PRIVATE_KEY` and `PUBLIC_KEY` variables." - .to_owned() - )) + return Err(ConfigError::MissingField { + field: "PUBLIC_KEY and PRIVATE_KEY", + message: "The sumeragi keypair is not provided in the example configuration. It's done this way to ensure you don't re-use the example keys in production, and know how to generate new keys. Please have a look at \n\nhttps://hyperledger.github.io/iroha-2-docs/guide/configure/keys.html\n\nto learn more.\n\n-----", + }); } // Second, torii gateway and sumeragi peer id are interchangeable too; the latter is derived from the // former and overwritten silently in case of difference if let Some(torii_proxy) = &mut self.torii { if sumeragi_proxy.peer_id.is_none() { sumeragi_proxy.peer_id = Some(iroha_data_model::peer::Id::new( - &torii_proxy.p2p_addr.clone().ok_or_else(|| { - eyre::eyre!("Torii `p2p_addr` field has `None` value") - })?, + &torii_proxy + .p2p_addr + .clone() + .ok_or(ConfigError::MissingField { + field: "p2p_addr", + message: + "`p2p_addr` should not be set to `null` or `None` explicitly.", + })?, &self.public_key.clone().expect( "Iroha `public_key` should have been initialized above at the latest", ), )); } else { // TODO: should we just warn the user that this value will be ignored? - eyre::bail!(ConfigError::ProxyBuildError( - "Sumeragi should not be provided with `peer_id` directly. It is computed from the other provided values.".to_owned() - )) + // TODO: Consider eliminating this value from the public API. + return Err(ConfigError::ProvidedInferredField { + field: "PEER_ID", + message: "The `peer_id` is computed from the key and address. 
You should remove it from the config.", + }); } } else { - eyre::bail!(ConfigError::ProxyBuildError( - "Torii config should have at least `p2p_addr` provided for sumeragi finalisation" - .to_owned() - )) + return Err(ConfigError::MissingField{ + field: "p2p_addr", + message: "Torii config should have at least `p2p_addr` provided for sumeragi finalisation", + }); } // Finally, if trusted peers were not supplied, we can fall back to inserting itself as // the only trusted one @@ -151,10 +158,9 @@ impl ConfigurationProxy { /// - Finalisation fails /// - Building fails, e.g. any of the inner fields had a `None` value when that /// is not allowed by the defaults. - pub fn build(mut self) -> Result { + pub fn build(mut self) -> Result { self.finish()?; ::build(self) - .wrap_err("Failed to build `Configuration` from `ConfigurationProxy`") } } diff --git a/config/src/lib.rs b/config/src/lib.rs index 99b164bb7b4..f24447b4bba 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -1,5 +1,4 @@ //! Aggregate configuration for different Iroha modules. - pub use iroha_config_base as base; use serde::{Deserialize, Serialize}; diff --git a/config/src/path.rs b/config/src/path.rs index 0963b260de4..8cf7c26769e 100644 --- a/config/src/path.rs +++ b/config/src/path.rs @@ -5,6 +5,8 @@ extern crate alloc; use alloc::borrow::Cow; use std::path::PathBuf; +// TODO: replace with `std::fs::absolute` when it's stable. +use path_absolutize::Absolutize as _; use InnerPath::*; /// Allowed configuration file extension that user can provide. @@ -15,13 +17,13 @@ pub const ALLOWED_CONFIG_EXTENSIONS: [&str; 2] = ["json", "json5"]; pub enum ExtensionError { /// User provided config file without extension. #[error( - "Provided config file has no extension, allowed extensions are: {:?}.", + "No valid file extension found. Allowed file extensions are: {:?}.", ALLOWED_CONFIG_EXTENSIONS )] Missing, /// User provided config file with unsupported extension. 
#[error( - "Provided config file has invalid extension `{0}`, \ + "Provided config file has an unsupported file extension `{0}`, \ allowed extensions are: {:?}.", ALLOWED_CONFIG_EXTENSIONS )] @@ -40,7 +42,7 @@ enum InnerPath { UserProvided(PathBuf), } -/// Wrapper around path to config file (i.e. config.json, genesis.json). +/// Wrapper around path to config file (e.g. `config.json`). /// /// Provides abstraction above user-provided config and default ones. #[derive(Debug, Clone)] @@ -49,8 +51,20 @@ pub struct Path(InnerPath); impl core::fmt::Display for Path { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { - Default(pth) => write!(f, "{pth:?} (default)"), - UserProvided(pth) => write!(f, "{pth:?}"), + Default(pth) => write!( + f, + "{:?} (default)", + pth.with_extension("json") + .absolutize() + .expect("Malformed default path") + ), + UserProvided(pth) => write!( + f, + "{:?} (user-provided)", + pth.with_extension("json") + .absolutize() + .expect("Malformed user-provided path") + ), } } } diff --git a/core/src/smartcontracts/wasm.rs b/core/src/smartcontracts/wasm.rs index 96a42be2c2e..b7db537eb0e 100644 --- a/core/src/smartcontracts/wasm.rs +++ b/core/src/smartcontracts/wasm.rs @@ -438,7 +438,7 @@ impl<'wrld> Runtime<'wrld> { ) -> Result<(), Trap> { const TARGET: &str = "WASM"; - let error_msg = || Trap::new(format!("{}: not a valid log level", log_level)); + let error_msg = || Trap::new(format!("{log_level}: not a valid log level")); let Ok(log_level) = log_level.try_into() else { return Err(error_msg()); }; diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index 6178254d4b2..62e70610947 100755 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -180,7 +180,7 @@ ffi::ffi_item! 
{ } /// Error when dealing with cryptographic functions -#[derive(Debug, Display)] +#[derive(Debug, Display, Deserialize)] pub enum Error { /// Returned when trying to create an algorithm which does not exist #[display(fmt = "Algorithm doesn't exist")] // TODO: which algorithm diff --git a/data_model/derive/src/api.rs b/data_model/derive/src/api.rs index bab205cb8c4..57c11b84ace 100644 --- a/data_model/derive/src/api.rs +++ b/data_model/derive/src/api.rs @@ -129,6 +129,7 @@ fn process_pub_item(input: syn::DeriveInput) -> TokenStream { } }); + #[allow(clippy::arithmetic_side_effects)] let item = quote! { pub union #ident #impl_generics #where_clause { #(#fields),*