diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 536680d5fd..786a28c9e2 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -155,32 +155,6 @@ jobs: cargo -Zgitoxide -Zgit clippy --locked --all-targets --features rocm -- -D warnings if: runner.os == 'Windows' - cargo-docs: - runs-on: ${{ fromJson(github.repository_owner == 'autonomys' && '["self-hosted", "ubuntu-22.04-x86-64"]' || '"ubuntu-22.04"') }} - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Install Protoc - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Configure cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 - with: - path: | - ~/.cargo/registry - ~/.cargo/git - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml') }} - restore-keys: | - ${{ runner.os }}-cargo- - - - name: Check Documentation - run: cargo -Zgitoxide -Zgit doc --locked --all --no-deps --lib - env: - RUSTDOCFLAGS: "-D rustdoc::broken-intra-doc-links -D rustdoc::private_intra_doc_links" - cargo-test: strategy: matrix: diff --git a/.github/workflows/rustdoc.yml b/.github/workflows/rustdoc.yml index ef24ad6e08..953b296c63 100644 --- a/.github/workflows/rustdoc.yml +++ b/.github/workflows/rustdoc.yml @@ -4,6 +4,9 @@ on: push: branches: - main + pull_request: + workflow_dispatch: + merge_group: concurrency: group: rustdoc-${{ github.workflow }}-${{ github.ref }} @@ -48,9 +51,9 @@ jobs: # Build the rust crate docs - name: Build Documentation - run: cargo -Zgitoxide -Zgit doc --all --no-deps --lib --features cuda + run: cargo -Zgitoxide -Zgit doc --locked --all --no-deps --lib --features cuda env: - RUSTDOCFLAGS: "-Z unstable-options --enable-index-page" + RUSTDOCFLAGS: "-D rustdoc::broken-intra-doc-links -D rustdoc::private_intra_doc_links -Z unstable-options --enable-index-page" - name: Deploy Docs uses: 
JamesIves/github-pages-deploy-action@881db5376404c5c8d621010bcbec0310b58d5e29 # v4.6.8 @@ -58,3 +61,4 @@ jobs: branch: gh-pages single-commit: true folder: target/doc + if: ${{ github.event_name == 'push' }} diff --git a/Cargo.lock b/Cargo.lock index e9e94555ea..24779abeb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12576,6 +12576,7 @@ dependencies = [ "anyhow", "async-trait", "futures", + "hex", "parity-scale-codec", "subspace-archiving", "subspace-core-primitives", diff --git a/crates/pallet-domains/src/lib.rs b/crates/pallet-domains/src/lib.rs index aab92ac09f..9b9b9c5089 100644 --- a/crates/pallet-domains/src/lib.rs +++ b/crates/pallet-domains/src/lib.rs @@ -53,7 +53,7 @@ use frame_support::ensure; use frame_support::pallet_prelude::StorageVersion; use frame_support::traits::fungible::{Inspect, InspectHold}; use frame_support::traits::tokens::{Fortitude, Preservation}; -use frame_support::traits::{Get, Randomness as RandomnessT}; +use frame_support::traits::{Get, Randomness as RandomnessT, Time}; use frame_support::weights::Weight; use frame_system::offchain::SubmitTransaction; use frame_system::pallet_prelude::*; @@ -86,6 +86,7 @@ use sp_subspace_mmr::{ConsensusChainMmrLeafProof, MmrProofVerifier}; pub use staking::OperatorConfig; use subspace_core_primitives::pot::PotOutput; use subspace_core_primitives::{BlockHash, SlotNumber, U256}; +use subspace_runtime_primitives::{Balance, Moment, StorageFee}; /// Maximum number of nominators to slash within a give operator at a time. 
pub const MAX_NOMINATORS_TO_SLASH: u32 = 10; @@ -201,7 +202,7 @@ mod pallet { #[cfg(not(feature = "runtime-benchmarks"))] use crate::staking_epoch::do_slash_operator; use crate::staking_epoch::{do_finalize_domain_current_epoch, Error as StakingEpochError}; - use crate::storage_proof::InvalidInherentExtrinsicData; + use crate::storage_proof::InherentExtrinsicData; use crate::weights::WeightInfo; #[cfg(not(feature = "runtime-benchmarks"))] use crate::DomainHashingFor; @@ -248,10 +249,10 @@ mod pallet { use sp_std::fmt::Debug; use sp_subspace_mmr::MmrProofVerifier; use subspace_core_primitives::{Randomness, U256}; - use subspace_runtime_primitives::{Balance, StorageFee}; + use subspace_runtime_primitives::StorageFee; #[pallet::config] - pub trait Config: frame_system::Config> { + pub trait Config: frame_system::Config + From> { type RuntimeEvent: From> + IsType<::RuntimeEvent>; // TODO: `DomainHash` can be derived from `DomainHeader`, it is still needed just for @@ -699,7 +700,7 @@ mod pallet { #[pallet::storage] pub(super) type AccumulatedTreasuryFunds = StorageValue<_, BalanceOf, ValueQuery>; - /// Storage used to keep track of which consensus block the domain runtime upgrade happen. + /// Storage used to keep track of which consensus block each domain runtime upgrade happens in. #[pallet::storage] pub(super) type DomainRuntimeUpgradeRecords = StorageMap< _, @@ -709,8 +710,8 @@ mod pallet { ValueQuery, >; - /// Temporary storage keep track of domain runtime upgrade happen in the current block, cleared - /// in the next block initialization. + /// Temporary storage to keep track of domain runtime upgrades which happened in the parent + /// block. Cleared in the current block's initialization. 
#[pallet::storage] pub type DomainRuntimeUpgrades = StorageValue<_, Vec, ValueQuery>; @@ -1856,7 +1857,7 @@ mod pallet { /// Combined fraud proof data for the InvalidInherentExtrinsic fraud proof #[pallet::storage] - pub type BlockInvalidInherentExtrinsicData = StorageValue<_, InvalidInherentExtrinsicData>; + pub type BlockInherentExtrinsicData = StorageValue<_, InherentExtrinsicData>; #[pallet::hooks] // TODO: proper benchmark @@ -1865,8 +1866,7 @@ mod pallet { let parent_number = block_number - One::one(); let parent_hash = frame_system::Pallet::::block_hash(parent_number); - // Record any previous domain runtime upgrade in `DomainRuntimeUpgradeRecords` and then do the - // domain runtime upgrade scheduled in the current block + // Record any previous domain runtime upgrades in `DomainRuntimeUpgradeRecords` for runtime_id in DomainRuntimeUpgrades::::take() { let reference_count = RuntimeRegistry::::get(runtime_id) .expect("Runtime object must be present since domain is insantiated; qed") @@ -1883,9 +1883,14 @@ mod pallet { }); } } + // Set DomainRuntimeUpgrades to an empty list. (If there are no runtime upgrades + // scheduled in the current block, we can generate a proof the list is empty.) 
+ DomainRuntimeUpgrades::::set(Vec::new()); + // Do the domain runtime upgrades scheduled in the current block, and record them in + // DomainRuntimeUpgrades do_upgrade_runtimes::(block_number); - // Store the hash of the parent consensus block for domain that have bundles submitted + // Store the hash of the parent consensus block for domains that have bundles submitted // in that consensus block for (domain_id, _) in SuccessfulBundles::::drain() { ConsensusBlockHash::::insert(domain_id, parent_number, parent_hash); @@ -1896,48 +1901,42 @@ mod pallet { } for (operator_id, slot_set) in OperatorBundleSlot::::drain() { - // NOTE: `OperatorBundleSlot` use `BTreeSet` so `last` will return the maximum value in the set + // NOTE: `OperatorBundleSlot` uses `BTreeSet` so `last` will return the maximum + // value in the set if let Some(highest_slot) = slot_set.last() { OperatorHighestSlot::::insert(operator_id, highest_slot); } } - BlockInvalidInherentExtrinsicData::::kill(); + BlockInherentExtrinsicData::::kill(); Weight::zero() } fn on_finalize(_: BlockNumberFor) { - // If this consensus block will derive any domain block, gather the necessary storage for potential fraud proof usage + // If this consensus block will derive any domain block, gather the necessary storage + // for potential fraud proof usage if SuccessfulBundles::::iter_keys().count() > 0 - || DomainRuntimeUpgrades::::exists() + || !DomainRuntimeUpgrades::::get().is_empty() { let extrinsics_shuffling_seed = Randomness::from( - Into::::into(Self::extrinsics_shuffling_seed()).to_fixed_bytes(), + Into::::into(Self::extrinsics_shuffling_seed_value()).to_fixed_bytes(), ); // There are no actual conversions here, but the trait bounds required to prove that // (and debug-print the error in expect()) are very verbose. 
- let timestamp = T::BlockTimestamp::now() - .try_into() - .map_err(|_| ()) - .expect("Moment is the same type in both pallets; qed"); - let transaction_byte_fee: Balance = T::StorageFee::transaction_byte_fee() - .try_into() - .map_err(|_| ()) - .expect("Balance is the same type in both pallets; qed"); + let timestamp = Self::timestamp_value(); // The value returned by the consensus_chain_byte_fee() runtime API - let consensus_transaction_byte_fee = - sp_domains::DOMAIN_STORAGE_FEE_MULTIPLIER * transaction_byte_fee; + let consensus_transaction_byte_fee = Self::consensus_transaction_byte_fee_value(); - let invalid_inherent_extrinsic_data = InvalidInherentExtrinsicData { + let inherent_extrinsic_data = InherentExtrinsicData { extrinsics_shuffling_seed, timestamp, consensus_transaction_byte_fee, }; - BlockInvalidInherentExtrinsicData::::set(Some(invalid_inherent_extrinsic_data)); + BlockInherentExtrinsicData::::set(Some(inherent_extrinsic_data)); } let _ = LastEpochStakingDistribution::::clear(u32::MAX, None); @@ -2096,11 +2095,17 @@ impl Pallet { Ok(HeadDomainNumber::::get(domain_id) + missed_upgrade.into()) } + /// Returns the runtime ID for the supplied `domain_id`, if that domain exists. pub fn runtime_id(domain_id: DomainId) -> Option { DomainRegistry::::get(domain_id) .map(|domain_object| domain_object.domain_config.runtime_id) } + /// Returns the list of runtime upgrades in the current block. + pub fn runtime_upgrades() -> Vec { + DomainRuntimeUpgrades::::get() + } + pub fn domain_instance_data( domain_id: DomainId, ) -> Option<(DomainInstanceData, BlockNumberFor)> { @@ -2758,12 +2763,66 @@ impl Pallet { false } + /// The external function used to access the extrinsics shuffling seed stored in + /// `BlockInherentExtrinsicData`. pub fn extrinsics_shuffling_seed() -> T::Hash { + // Fall back to recalculating if it hasn't been stored yet. 
+ BlockInherentExtrinsicData::::get() + .map(|data| H256::from(*data.extrinsics_shuffling_seed).into()) + .unwrap_or_else(|| Self::extrinsics_shuffling_seed_value()) + } + + /// The internal function used to calculate the extrinsics shuffling seed for storage into + /// `BlockInherentExtrinsicData`. + fn extrinsics_shuffling_seed_value() -> T::Hash { let subject = DOMAIN_EXTRINSICS_SHUFFLING_SEED_SUBJECT; let (randomness, _) = T::Randomness::random(subject); randomness } + /// The external function used to access the timestamp stored in + /// `BlockInherentExtrinsicData`. + pub fn timestamp() -> Moment { + // Fall back to recalculating if it hasn't been stored yet. + BlockInherentExtrinsicData::::get() + .map(|data| data.timestamp) + .unwrap_or_else(|| Self::timestamp_value()) + } + + /// The internal function used to access the timestamp for storage into + /// `BlockInherentExtrinsicData`. + fn timestamp_value() -> Moment { + // There are no actual conversions here, but the trait bounds required to prove that + // (and debug-print the error in expect()) are very verbose. + T::BlockTimestamp::now() + .try_into() + .map_err(|_| ()) + .expect("Moment is the same type in both pallets; qed") + } + + /// The external function used to access the consensus transaction byte fee stored in + /// `BlockInherentExtrinsicData`. + /// This value is returned by the consensus_chain_byte_fee() runtime API + pub fn consensus_transaction_byte_fee() -> Balance { + // Fall back to recalculating if it hasn't been stored yet. + BlockInherentExtrinsicData::::get() + .map(|data| data.consensus_transaction_byte_fee) + .unwrap_or_else(|| Self::consensus_transaction_byte_fee_value()) + } + + /// The internal function used to calculate the consensus transaction byte fee for storage into + /// `BlockInherentExtrinsicData`. 
+ fn consensus_transaction_byte_fee_value() -> Balance { + // There are no actual conversions here, but the trait bounds required to prove that + // (and debug-print the error in expect()) are very verbose. + let transaction_byte_fee: Balance = T::StorageFee::transaction_byte_fee() + .try_into() + .map_err(|_| ()) + .expect("Balance is the same type in both pallets; qed"); + + sp_domains::DOMAIN_STORAGE_FEE_MULTIPLIER * transaction_byte_fee + } + pub fn execution_receipt(receipt_hash: ReceiptHashFor) -> Option> { BlockTreeNodes::::get(receipt_hash).map(|db| db.execution_receipt) } @@ -2910,7 +2969,7 @@ impl Pallet { } // Get the domain runtime code that used to derive `receipt`, if the runtime code still present in - // the state then get it from the state otherwise from the `maybe_domain_runtime_code_at` prood. + // the state then get it from the state otherwise from the `maybe_domain_runtime_code_at` proof. pub fn get_domain_runtime_code_for_receipt( domain_id: DomainId, receipt: &ExecutionReceiptOf, diff --git a/crates/pallet-domains/src/tests.rs b/crates/pallet-domains/src/tests.rs index 20f937a669..60ac88303d 100644 --- a/crates/pallet-domains/src/tests.rs +++ b/crates/pallet-domains/src/tests.rs @@ -33,10 +33,10 @@ use sp_domains::{ }; use sp_domains_fraud_proof::fraud_proof::FraudProof; use sp_runtime::traits::{ - AccountIdConversion, BlakeTwo256, BlockNumberProvider, Hash as HashT, IdentityLookup, One, + AccountIdConversion, BlakeTwo256, BlockNumberProvider, Hash as HashT, IdentityLookup, One, Zero, }; use sp_runtime::transaction_validity::TransactionValidityError; -use sp_runtime::{BuildStorage, OpaqueExtrinsic, Saturating}; +use sp_runtime::{BuildStorage, OpaqueExtrinsic}; use sp_version::RuntimeVersion; use subspace_core_primitives::U256 as P256; use subspace_runtime_primitives::{HoldIdentifier, Moment, StorageFee, SSC}; @@ -430,14 +430,20 @@ impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { } pub(crate) fn run_to_block(block_number: 
BlockNumberFor, parent_hash: T::Hash) { - // Finalize previous block - crate::Pallet::::on_finalize(block_number.saturating_sub(One::one())); + // Finalize the previous block + // on_finalize() does not run on the genesis block + if block_number > One::one() { + crate::Pallet::::on_finalize(block_number - One::one()); + } frame_system::Pallet::::finalize(); // Initialize current block frame_system::Pallet::::set_block_number(block_number); frame_system::Pallet::::initialize(&block_number, &parent_hash, &Default::default()); - crate::Pallet::::on_initialize(block_number); + // on_initialize() does not run on the genesis block + if block_number > Zero::zero() { + crate::Pallet::::on_initialize(block_number); + } } pub(crate) fn register_genesis_domain(creator: u128, operator_ids: Vec) -> DomainId { diff --git a/crates/pallet-subspace/src/lib.rs b/crates/pallet-subspace/src/lib.rs index b1dd52d7d4..e193be29e3 100644 --- a/crates/pallet-subspace/src/lib.rs +++ b/crates/pallet-subspace/src/lib.rs @@ -1678,7 +1678,7 @@ fn check_vote( (reward_address, _signature), ) in current_reward_receivers.iter_mut() { - if public_key != &offender { + if public_key == &offender { // Revoke reward if assigned in current block. 
reward_address.take(); } diff --git a/crates/pallet-subspace/src/tests.rs b/crates/pallet-subspace/src/tests.rs index 611174f79c..66f576514b 100644 --- a/crates/pallet-subspace/src/tests.rs +++ b/crates/pallet-subspace/src/tests.rs @@ -1122,8 +1122,10 @@ fn vote_equivocation_current_voters_duplicate() { // Current block author + slot matches that of the vote let voter_keypair = Keypair::generate(); + let other_voter_keypair = Keypair::generate(); let slot = Subspace::current_slot(); let reward_address = 0; + let other_reward_address = 1; let signed_vote = create_signed_vote( &voter_keypair, @@ -1153,15 +1155,43 @@ fn vote_equivocation_current_voters_duplicate() { map }); + // Insert another voter that is not equivocating + { + let other_signed_vote = create_signed_vote( + &other_voter_keypair, + 2, + frame_system::Pallet::::block_hash(1), + slot, + Default::default(), + Default::default(), + archived_segment, + other_reward_address, + pallet::SolutionRanges::::get().current, + pallet::SolutionRanges::::get().voting_current, + ); + + CurrentBlockVoters::::mutate(|map| { + map.as_mut().unwrap().insert( + ( + PublicKey::from(other_voter_keypair.public.to_bytes()), + other_signed_vote.vote.solution().sector_index, + other_signed_vote.vote.solution().piece_offset, + other_signed_vote.vote.solution().chunk, + slot, + ), + (Some(other_reward_address), other_signed_vote.signature), + ); + }); + } + // Identical vote submitted twice leads to duplicate error assert_err!( super::check_vote::(&signed_vote, true), CheckVoteError::DuplicateVote ); - CurrentBlockVoters::::put({ - let mut map = BTreeMap::new(); - map.insert( + CurrentBlockVoters::::mutate(|map| { + map.as_mut().unwrap().insert( ( PublicKey::from(voter_keypair.public.to_bytes()), signed_vote.vote.solution().sector_index, @@ -1171,14 +1201,17 @@ fn vote_equivocation_current_voters_duplicate() { ), (Some(reward_address), RewardSignature::from([0; 64])), ); - map }); // Different vote for the same sector index and 
time slot leads to equivocation Subspace::pre_dispatch_vote(&signed_vote).unwrap(); - // Voter doesn't get reward after equivocation - assert_eq!(Subspace::find_voting_reward_addresses().len(), 0); + // Equivocating voter doesn't get reward, but the other voter does + assert_eq!(Subspace::find_voting_reward_addresses().len(), 1); + assert_eq!( + Subspace::find_voting_reward_addresses().first().unwrap(), + &other_reward_address + ); }); } diff --git a/crates/pallet-transaction-fees/src/lib.rs b/crates/pallet-transaction-fees/src/lib.rs index ac31cab436..1ec8b9fac4 100644 --- a/crates/pallet-transaction-fees/src/lib.rs +++ b/crates/pallet-transaction-fees/src/lib.rs @@ -21,11 +21,6 @@ pub mod weights; -#[cfg(not(feature = "std"))] -extern crate alloc; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; use codec::{Codec, Decode, Encode}; use frame_support::sp_runtime::traits::Zero; use frame_support::sp_runtime::SaturatedConversion; diff --git a/crates/sc-consensus-subspace-rpc/src/lib.rs b/crates/sc-consensus-subspace-rpc/src/lib.rs index 6029ec2c41..0dff89e28e 100644 --- a/crates/sc-consensus-subspace-rpc/src/lib.rs +++ b/crates/sc-consensus-subspace-rpc/src/lib.rs @@ -55,11 +55,11 @@ use std::sync::{Arc, Weak}; use std::time::Duration; use subspace_archiving::archiver::NewArchivedSegment; use subspace_core_primitives::hashes::Blake3Hash; -use subspace_core_primitives::objects::GlobalObjectMapping; +use subspace_core_primitives::objects::{GlobalObjectMapping, ObjectMappingResponse}; use subspace_core_primitives::pieces::{Piece, PieceIndex}; use subspace_core_primitives::segments::{HistorySize, SegmentHeader, SegmentIndex}; use subspace_core_primitives::solutions::Solution; -use subspace_core_primitives::{BlockHash, BlockNumber, PublicKey, SlotNumber}; +use subspace_core_primitives::{BlockHash, PublicKey, SlotNumber}; use subspace_erasure_coding::ErasureCoding; use subspace_farmer_components::FarmerProtocolInfo; use subspace_kzg::Kzg; @@ -223,19 +223,6 @@ impl 
CachedArchivedSegment { } } -/// Response to object mapping subscription, including a block height. -/// Large responses are batched, so the block height can be repeated in different responses. -#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ObjectMappingResponse { - /// The block number that the object mapping is from. - pub block_number: BlockNumber, - - /// The object mappings. - #[serde(flatten)] - pub objects: GlobalObjectMapping, -} - /// Subspace RPC configuration pub struct SubspaceRpcConfig where diff --git a/crates/sp-domains-fraud-proof/src/fraud_proof.rs b/crates/sp-domains-fraud-proof/src/fraud_proof.rs index 149a960d6b..3fc6521f62 100644 --- a/crates/sp-domains-fraud-proof/src/fraud_proof.rs +++ b/crates/sp-domains-fraud-proof/src/fraud_proof.rs @@ -497,9 +497,11 @@ pub struct InvalidExtrinsicsRootProof { /// The combined storage proofs used during verification pub invalid_inherent_extrinsic_proofs: InvalidInherentExtrinsicDataProof, - /// The individual storage proofs used during verification - // TODO: combine these proofs into `InvalidInherentExtrinsicDataProof` - pub invalid_inherent_extrinsic_proof: InvalidInherentExtrinsicProof, + /// A single domain runtime code upgrade (or "not upgraded") storage proof + pub maybe_domain_runtime_upgraded_proof: MaybeDomainRuntimeUpgradedProof, + + /// Storage proof for a change to the chains that are allowed to open a channel with each domain + pub domain_chain_allowlist_proof: DomainChainsAllowlistUpdateStorageProof, /// Optional sudo extrinsic call storage proof pub domain_sudo_call_proof: DomainSudoCallStorageProof, diff --git a/crates/sp-domains-fraud-proof/src/storage_proof.rs b/crates/sp-domains-fraud-proof/src/storage_proof.rs index 899ea77b76..d30864ca65 100644 --- a/crates/sp-domains-fraud-proof/src/storage_proof.rs +++ b/crates/sp-domains-fraud-proof/src/storage_proof.rs @@ -7,10 +7,8 @@ use 
sp_domains::proof_provider_and_verifier::{ StorageProofVerifier, VerificationError as StorageProofVerificationError, }; use sp_domains::{ - DomainAllowlistUpdates, DomainId, DomainSudoCall, DomainsDigestItem, OpaqueBundle, RuntimeId, - RuntimeObject, + DomainAllowlistUpdates, DomainId, DomainSudoCall, OpaqueBundle, RuntimeId, RuntimeObject, }; -use sp_runtime::generic::Digest; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, NumberFor}; use sp_runtime_interface::pass_by; use sp_runtime_interface::pass_by::PassBy; @@ -40,7 +38,7 @@ pub enum VerificationError { InvalidInherentExtrinsicStorageProof(StorageProofVerificationError), SuccessfulBundlesStorageProof(StorageProofVerificationError), DomainAllowlistUpdatesStorageProof(StorageProofVerificationError), - BlockDigestStorageProof(StorageProofVerificationError), + DomainRuntimeUpgradesStorageProof(StorageProofVerificationError), RuntimeRegistryStorageProof(StorageProofVerificationError), DigestStorageProof(StorageProofVerificationError), BlockFeesStorageProof(StorageProofVerificationError), @@ -55,7 +53,7 @@ pub enum FraudProofStorageKeyRequest { InvalidInherentExtrinsicData, SuccessfulBundles(DomainId), DomainAllowlistUpdates(DomainId), - BlockDigest, + DomainRuntimeUpgrades, RuntimeRegistry(RuntimeId), DomainSudoCall(DomainId), MmrRoot(Number), @@ -71,7 +69,9 @@ impl FraudProofStorageKeyRequest { Self::DomainAllowlistUpdates(_) => { VerificationError::DomainAllowlistUpdatesStorageProof(err) } - Self::BlockDigest => VerificationError::BlockDigestStorageProof(err), + Self::DomainRuntimeUpgrades => { + VerificationError::DomainRuntimeUpgradesStorageProof(err) + } Self::RuntimeRegistry(_) => VerificationError::RuntimeRegistryStorageProof(err), FraudProofStorageKeyRequest::DomainSudoCall(_) => { VerificationError::DomainSudoCallStorageProof(err) @@ -179,17 +179,6 @@ impl BasicStorageProof for DomainChainsAllowlistUpdateStor } } -#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] 
-pub struct BlockDigestProof(StorageProof); - -impl_storage_proof!(BlockDigestProof); -impl BasicStorageProof for BlockDigestProof { - type StorageValue = Digest; - fn storage_key_request(_key: Self::Key) -> FraudProofStorageKeyRequest> { - FraudProofStorageKeyRequest::BlockDigest - } -} - #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] pub struct DomainSudoCallStorageProof(StorageProof); @@ -202,7 +191,18 @@ impl BasicStorageProof for DomainSudoCallStorageProof { } } -// TODO: get the runtime id from pallet-domains since it won't change for a given domain +#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] +pub struct DomainRuntimeUpgradesProof(StorageProof); + +impl_storage_proof!(DomainRuntimeUpgradesProof); +impl BasicStorageProof for DomainRuntimeUpgradesProof { + type StorageValue = Vec; + type Key = (); + fn storage_key_request(_key: Self::Key) -> FraudProofStorageKeyRequest> { + FraudProofStorageKeyRequest::DomainRuntimeUpgrades + } +} + // The domain runtime code with storage proof // // NOTE: usually we should use the parent consensus block hash to `generate` or `verify` the @@ -286,15 +286,20 @@ where } } +/// A proof of a single domain runtime upgrade (or that there wasn't an upgrade). #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] pub struct MaybeDomainRuntimeUpgradedProof { - pub block_digest: BlockDigestProof, + /// A list of domain runtime upgrades for a block. + pub domain_runtime_upgrades: DomainRuntimeUpgradesProof, + + /// The new domain runtime code, if the domain runtime was upgraded. pub new_domain_runtime_code: Option, } impl MaybeDomainRuntimeUpgradedProof { - /// Generate the `MaybeDomainRuntimeUpgradedProof`, it is the caller's responsibility to check - /// if the domain runtime is upgraded at `block_hash` if so the `maybe_runtime_id` should be `Some`. + /// Generate the `MaybeDomainRuntimeUpgradedProof`. 
+ /// It is the caller's responsibility to check if the domain runtime is upgraded at + /// `block_hash`. If it is, the `maybe_runtime_id` should be `Some`. #[cfg(feature = "std")] #[allow(clippy::let_and_return)] pub fn generate< @@ -307,8 +312,12 @@ impl MaybeDomainRuntimeUpgradedProof { block_hash: Block::Hash, maybe_runtime_id: Option, ) -> Result { - let block_digest = - BlockDigestProof::generate(proof_provider, block_hash, (), storage_key_provider)?; + let domain_runtime_upgrades = DomainRuntimeUpgradesProof::generate( + proof_provider, + block_hash, + (), + storage_key_provider, + )?; let new_domain_runtime_code = if let Some(runtime_id) = maybe_runtime_id { Some(DomainRuntimeCodeProof::generate( proof_provider, @@ -320,7 +329,7 @@ impl MaybeDomainRuntimeUpgradedProof { None }; Ok(MaybeDomainRuntimeUpgradedProof { - block_digest, + domain_runtime_upgrades, new_domain_runtime_code, }) } @@ -330,17 +339,13 @@ impl MaybeDomainRuntimeUpgradedProof { runtime_id: RuntimeId, state_root: &Block::Hash, ) -> Result>, VerificationError> { - let block_digest = >::verify::( - self.block_digest.clone(), - (), - state_root, - )?; - - let runtime_upgraded = block_digest - .logs - .iter() - .filter_map(|log| log.as_domain_runtime_upgrade()) - .any(|upgraded_runtime_id| upgraded_runtime_id == runtime_id); + let domain_runtime_upgrades = + >::verify::( + self.domain_runtime_upgrades.clone(), + (), + state_root, + )?; + let runtime_upgraded = domain_runtime_upgrades.contains(&runtime_id); match (runtime_upgraded, self.new_domain_runtime_code.as_ref()) { (true, None) | (false, Some(_)) => { @@ -364,7 +369,7 @@ impl MaybeDomainRuntimeUpgradedProof { } #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] -pub struct InvalidInherentExtrinsicData { +pub struct InherentExtrinsicData { /// Extrinsics shuffling seed, derived from block randomness pub extrinsics_shuffling_seed: Randomness, @@ -375,7 +380,7 @@ pub struct InvalidInherentExtrinsicData { pub 
consensus_transaction_byte_fee: Balance, } -impl PassBy for InvalidInherentExtrinsicData { +impl PassBy for InherentExtrinsicData { type PassBy = pass_by::Codec; } @@ -384,85 +389,12 @@ pub struct InvalidInherentExtrinsicDataProof(StorageProof); impl_storage_proof!(InvalidInherentExtrinsicDataProof); impl BasicStorageProof for InvalidInherentExtrinsicDataProof { - type StorageValue = InvalidInherentExtrinsicData; + type StorageValue = InherentExtrinsicData; fn storage_key_request(_key: Self::Key) -> FraudProofStorageKeyRequest> { FraudProofStorageKeyRequest::InvalidInherentExtrinsicData } } -#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] -pub struct InvalidInherentExtrinsicProof { - /// Optional domain runtime code upgrade storage proof - pub maybe_domain_runtime_upgrade_proof: MaybeDomainRuntimeUpgradedProof, - - /// Change in the allowed chains storage proof - pub domain_chain_allowlist_proof: DomainChainsAllowlistUpdateStorageProof, -} - -/// The verified data from an `InvalidInherentExtrinsicProof` -#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] -pub struct InvalidInherentExtrinsicVerified { - pub maybe_domain_runtime_upgrade: Option>, - pub domain_chain_allowlist: DomainAllowlistUpdates, -} - -impl InvalidInherentExtrinsicProof { - #[cfg(feature = "std")] - #[allow(clippy::let_and_return)] - pub fn generate< - Block: BlockT, - PP: ProofProvider, - SKP: FraudProofStorageKeyProviderInstance>, - >( - storage_key_provider: &SKP, - proof_provider: &PP, - domain_id: DomainId, - block_hash: Block::Hash, - maybe_runtime_id: Option, - ) -> Result { - let maybe_domain_runtime_upgrade_proof = MaybeDomainRuntimeUpgradedProof::generate( - storage_key_provider, - proof_provider, - block_hash, - maybe_runtime_id, - )?; - let domain_chain_allowlist_proof = DomainChainsAllowlistUpdateStorageProof::generate( - proof_provider, - block_hash, - domain_id, - storage_key_provider, - )?; - - Ok(Self { - maybe_domain_runtime_upgrade_proof, - 
domain_chain_allowlist_proof, - }) - } - - pub fn verify>>( - &self, - domain_id: DomainId, - runtime_id: RuntimeId, - state_root: &Block::Hash, - ) -> Result { - let maybe_domain_runtime_upgrade = self - .maybe_domain_runtime_upgrade_proof - .verify::(runtime_id, state_root)?; - - let domain_chain_allowlist = - >::verify::( - self.domain_chain_allowlist_proof.clone(), - domain_id, - state_root, - )?; - - Ok(InvalidInherentExtrinsicVerified { - maybe_domain_runtime_upgrade, - domain_chain_allowlist, - }) - } -} - #[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] pub struct MmrRootStorageProof { storage_proof: StorageProof, diff --git a/crates/sp-domains-fraud-proof/src/verification.rs b/crates/sp-domains-fraud-proof/src/verification.rs index 6c4a624258..7fa90fdead 100644 --- a/crates/sp-domains-fraud-proof/src/verification.rs +++ b/crates/sp-domains-fraud-proof/src/verification.rs @@ -66,7 +66,8 @@ where let InvalidExtrinsicsRootProof { valid_bundle_digests, invalid_inherent_extrinsic_proofs, - invalid_inherent_extrinsic_proof, + maybe_domain_runtime_upgraded_proof, + domain_chain_allowlist_proof, domain_sudo_call_proof, } = fraud_proof; @@ -77,10 +78,13 @@ where &state_root, )?; - let inherent_extrinsic_verified = invalid_inherent_extrinsic_proof.verify::( - domain_id, - runtime_id, - &state_root, + let maybe_domain_runtime_upgrade = + maybe_domain_runtime_upgraded_proof.verify::(runtime_id, &state_root)?; + + let domain_chain_allowlist = >::verify::( + domain_chain_allowlist_proof.clone(), domain_id, &state_root )?; let domain_sudo_call = >::verify::( @@ -93,10 +97,10 @@ where let domain_inherent_extrinsic_data = DomainInherentExtrinsicData { timestamp: invalid_inherent_extrinsic_data.timestamp, - maybe_domain_runtime_upgrade: inherent_extrinsic_verified.maybe_domain_runtime_upgrade, + maybe_domain_runtime_upgrade, consensus_transaction_byte_fee: invalid_inherent_extrinsic_data .consensus_transaction_byte_fee, - domain_chain_allowlist: 
inherent_extrinsic_verified.domain_chain_allowlist, + domain_chain_allowlist, maybe_sudo_runtime_call: domain_sudo_call.maybe_call, }; diff --git a/crates/sp-domains/src/extrinsics.rs b/crates/sp-domains/src/extrinsics.rs index 896a0d231a..8d84bc561c 100644 --- a/crates/sp-domains/src/extrinsics.rs +++ b/crates/sp-domains/src/extrinsics.rs @@ -1,11 +1,9 @@ #[cfg(not(feature = "std"))] extern crate alloc; -use crate::DOMAIN_EXTRINSICS_SHUFFLING_SEED_SUBJECT; #[cfg(not(feature = "std"))] use alloc::vec::Vec; use domain_runtime_primitives::opaque::AccountId; -use hash_db::Hasher; use rand::seq::SliceRandom; use rand::SeedableRng; use rand_chacha::ChaCha8Rng; @@ -15,15 +13,6 @@ use sp_std::collections::vec_deque::VecDeque; use sp_std::fmt::Debug; use subspace_core_primitives::Randomness; -pub fn extrinsics_shuffling_seed(block_randomness: Randomness) -> Hashing::Out -where - Hashing: Hasher, -{ - let mut subject = DOMAIN_EXTRINSICS_SHUFFLING_SEED_SUBJECT.to_vec(); - subject.extend_from_slice(block_randomness.as_ref()); - Hashing::hash(&subject) -} - pub fn deduplicate_and_shuffle_extrinsics( mut extrinsics: Vec<(Option, Extrinsic)>, shuffling_seed: Randomness, diff --git a/crates/sp-domains/src/lib.rs b/crates/sp-domains/src/lib.rs index 3f169729b8..b87f5824f3 100644 --- a/crates/sp-domains/src/lib.rs +++ b/crates/sp-domains/src/lib.rs @@ -1394,9 +1394,9 @@ pub type ExecutionReceiptFor = ExecutionReceipt< /// Domain chains allowlist updates. #[derive(Default, Debug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] pub struct DomainAllowlistUpdates { - /// Chains that are allowed to open channel with this chain. + /// Chains that are allowed to open a channel with this chain. pub allow_chains: BTreeSet, - /// Chains that are not allowed to open channel with this chain. + /// Chains that are not allowed to open a channel with this chain. 
pub remove_chains: BTreeSet, } @@ -1448,14 +1448,6 @@ pub fn system_digest_final_key() -> Vec { frame_support::storage::storage_prefix("System".as_ref(), "Digest".as_ref()).to_vec() } -// TODO: This is used to keep compatible with gemini-3h, remove before next network -/// This is a representation of actual Block Fees storage in pallet-block-fees. -/// Any change in key or value there should be changed here accordingly. -pub fn operator_block_fees_final_key() -> Vec { - frame_support::storage::storage_prefix("BlockFees".as_ref(), "CollectedBlockFees".as_ref()) - .to_vec() -} - /// Hook to handle chain rewards. pub trait OnChainRewards { fn on_chain_rewards(chain_id: ChainId, reward: Balance); @@ -1476,15 +1468,19 @@ pub enum OperatorRewardSource { } sp_api::decl_runtime_apis! { - /// API necessary for domains pallet. + /// APIs used to access the domains pallet. + // When updating this version, document new APIs with "Only present in API versions" comments. + // TODO: when removing this version, also remove "Only present in API versions" comments and + // deprecated attributes. + #[api_version(3)] pub trait DomainsApi { /// Submits the transaction bundle via an unsigned extrinsic. fn submit_bundle_unsigned(opaque_bundle: OpaqueBundle, Block::Hash, DomainHeader, Balance>); - // Submit singleton receipt via an unsigned extrinsic. + // Submits a singleton receipt via an unsigned extrinsic. fn submit_receipt_unsigned(singleton_receipt: SealedSingletonReceipt, Block::Hash, DomainHeader, Balance>); - /// Extract the bundles stored successfully from the given extrinsics. + /// Extracts the bundles successfully stored from the given extrinsics. fn extract_successful_bundles( domain_id: DomainId, extrinsics: Vec, @@ -1493,22 +1489,41 @@ sp_api::decl_runtime_apis! { /// Generates a randomness seed for extrinsics shuffling. fn extrinsics_shuffling_seed() -> Randomness; - /// Returns the WASM bundle for given `domain_id`. 
+ /// Returns the current WASM bundle for the given `domain_id`. fn domain_runtime_code(domain_id: DomainId) -> Option>; - /// Returns the runtime id for given `domain_id`. + /// Returns the runtime id for the given `domain_id`. fn runtime_id(domain_id: DomainId) -> Option; - /// Returns the domain instance data for given `domain_id`. + /// Returns the list of runtime upgrades in the current block. + /// Only present in API versions 2 and later. + fn runtime_upgrades() -> Vec; + + /// Returns the domain instance data for the given `domain_id`. fn domain_instance_data(domain_id: DomainId) -> Option<(DomainInstanceData, NumberFor)>; - /// Returns the current timestamp at given height. + /// Returns the current timestamp at the current height. + fn domain_timestamp() -> Moment; + + /// Returns the current timestamp at the current height. + #[allow(clippy::deprecated_semver)] + #[deprecated(since = "3", note = "Use `domain_timestamp()` instead")] fn timestamp() -> Moment; + /// Returns the consensus transaction byte fee that will used to charge the domain + /// transaction for consensus chain storage fees. + fn consensus_transaction_byte_fee() -> Balance; + + /// Returns the consensus chain byte fee that will used to charge the domain transaction + /// for consensus chain storage fees. + #[allow(clippy::deprecated_semver)] + #[deprecated(since = "3", note = "Use `consensus_transaction_byte_fee()` instead")] + fn consensus_chain_byte_fee() -> Balance; + /// Returns the current Tx range for the given domain Id. fn domain_tx_range(domain_id: DomainId) -> U256; - /// Return the genesis state root if not pruned + /// Returns the genesis state root if not pruned. fn genesis_state_root(domain_id: DomainId) -> Option; /// Returns the best execution chain number. @@ -1523,38 +1538,34 @@ sp_api::decl_runtime_apis! { /// Returns true if there are any ERs in the challenge period with non empty extrinsics. 
fn non_empty_er_exists(domain_id: DomainId) -> bool; - /// Returns the current best number of the domain. + /// Returns the current best block number for the domain. fn domain_best_number(domain_id: DomainId) -> Option>; - /// Returns the execution receipt + /// Returns the execution receipt with the given hash. fn execution_receipt(receipt_hash: HeaderHashFor) -> Option>; - /// Returns the current epoch and the next epoch operators of the given domain + /// Returns the current epoch and the next epoch operators of the given domain. fn domain_operators(domain_id: DomainId) -> Option<(BTreeMap, Vec)>; - /// Returns the execution receipt hash of the given domain and domain block number + /// Returns the execution receipt hash of the given domain and domain block number. fn receipt_hash(domain_id: DomainId, domain_number: HeaderNumberFor) -> Option>; - /// Return the consensus chain byte fee that will used to charge the domain transaction for consensus - /// chain storage fee - fn consensus_chain_byte_fee() -> Balance; - - /// Returns the latest confirmed domain block number and hash + /// Returns the latest confirmed domain block number and hash. fn latest_confirmed_domain_block(domain_id: DomainId) -> Option<(HeaderNumberFor, HeaderHashFor)>; - /// Return if the receipt is exist and pending to prune + /// Returns if the receipt is exist and pending to prune fn is_bad_er_pending_to_prune(domain_id: DomainId, receipt_hash: HeaderHashFor) -> bool; - /// Return the balance of the storage fund account + /// Returns the balance of the storage fund account. fn storage_fund_account_balance(operator_id: OperatorId) -> Balance; - /// Return if the domain runtime code is upgraded since `at` + /// Returns true if the given domain's runtime code has been upgraded since `at`. fn is_domain_runtime_upgraded_since(domain_id: DomainId, at: NumberFor) -> Option; - /// Return domain sudo call. + /// Returns the domain sudo calls for the given domain, if any. 
fn domain_sudo_call(domain_id: DomainId) -> Option>; - /// Return last confirmed domain block execution receipt. + /// Returns the last confirmed domain block execution receipt. fn last_confirmed_domain_block_receipt(domain_id: DomainId) ->Option>; } diff --git a/crates/subspace-core-primitives/src/objects.rs b/crates/subspace-core-primitives/src/objects.rs index 838b66f1b0..467e5a7f80 100644 --- a/crates/subspace-core-primitives/src/objects.rs +++ b/crates/subspace-core-primitives/src/objects.rs @@ -24,6 +24,7 @@ extern crate alloc; use crate::hashes::Blake3Hash; use crate::pieces::PieceIndex; +use crate::BlockNumber; #[cfg(not(feature = "std"))] use alloc::vec::Vec; use core::default::Default; @@ -172,3 +173,17 @@ impl GlobalObjectMapping { } } } + +/// Response to object mapping subscription, including a block height. +/// Large responses are batched, so the block height can be repeated in different responses. +#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +pub struct ObjectMappingResponse { + /// The block number that the object mapping is from. + pub block_number: BlockNumber, + + /// The object mappings. + #[cfg_attr(feature = "serde", serde(flatten))] + pub objects: GlobalObjectMapping, +} diff --git a/crates/subspace-fake-runtime-api/src/lib.rs b/crates/subspace-fake-runtime-api/src/lib.rs index 30a2b1bad9..7a4f5e1100 100644 --- a/crates/subspace-fake-runtime-api/src/lib.rs +++ b/crates/subspace-fake-runtime-api/src/lib.rs @@ -210,11 +210,27 @@ sp_api::impl_runtime_apis! 
{ unreachable!() } + fn runtime_upgrades() -> Vec { + unreachable!() + } + fn domain_instance_data(_domain_id: DomainId) -> Option<(DomainInstanceData, NumberFor)> { unreachable!() } - fn timestamp() -> Moment{ + fn domain_timestamp() -> Moment { + unreachable!() + } + + fn timestamp() -> Moment { + unreachable!() + } + + fn consensus_transaction_byte_fee() -> Balance { + unreachable!() + } + + fn consensus_chain_byte_fee() -> Balance { unreachable!() } @@ -258,10 +274,6 @@ sp_api::impl_runtime_apis! { unreachable!() } - fn consensus_chain_byte_fee() -> Balance { - unreachable!() - } - fn latest_confirmed_domain_block(_domain_id: DomainId) -> Option<(DomainNumber, DomainHash)>{ unreachable!() } diff --git a/crates/subspace-farmer/src/cluster/plotter.rs b/crates/subspace-farmer/src/cluster/plotter.rs index cc11200e15..b1cfab45fb 100644 --- a/crates/subspace-farmer/src/cluster/plotter.rs +++ b/crates/subspace-farmer/src/cluster/plotter.rs @@ -496,6 +496,24 @@ where } } Err(error) => match error.kind() { + RequestErrorKind::TimedOut => { + if let Some(delay) = retry_backoff_policy.next_backoff() { + debug!("Plotter request timed out, retrying"); + + tokio::time::sleep(delay).await; + continue; + } else { + progress_updater + .update_progress_and_events( + progress_sender, + SectorPlottingProgress::Error { + error: "Plotter request timed out, exiting".to_string(), + }, + ) + .await; + return None; + } + } RequestErrorKind::NoResponders => { if let Some(delay) = retry_backoff_policy.next_backoff() { debug!("No plotters, retrying"); @@ -514,7 +532,7 @@ where return None; } } - RequestErrorKind::TimedOut | RequestErrorKind::Other => { + RequestErrorKind::Other => { progress_updater .update_progress_and_events( progress_sender, diff --git a/crates/subspace-gateway-rpc/src/lib.rs b/crates/subspace-gateway-rpc/src/lib.rs index f50b3e521c..6dd689b46b 100644 --- a/crates/subspace-gateway-rpc/src/lib.rs +++ b/crates/subspace-gateway-rpc/src/lib.rs @@ -5,11 +5,10 @@ use 
jsonrpsee::proc_macros::rpc; use jsonrpsee::types::{ErrorObject, ErrorObjectOwned}; use std::fmt; use std::ops::{Deref, DerefMut}; -use subspace_core_primitives::hashes::{blake3_hash, Blake3Hash}; use subspace_core_primitives::objects::GlobalObjectMapping; use subspace_data_retrieval::object_fetcher::{self, ObjectFetcher}; use subspace_data_retrieval::piece_getter::PieceGetter; -use tracing::{debug, error, trace}; +use tracing::{debug, error}; const SUBSPACE_ERROR: i32 = 9000; @@ -33,17 +32,6 @@ pub enum Error { /// The object fetcher failed. #[error(transparent)] ObjectFetcherError(#[from] object_fetcher::Error), - - /// The returned object data did not match the hash in the mapping. - #[error( - "Invalid object hash, mapping had {mapping_hash:?}, but fetched data had {data_hash:?}" - )] - InvalidObjectHash { - /// The expected hash from the mapping. - mapping_hash: Blake3Hash, - /// The actual hash of the returned object data. - data_hash: Blake3Hash, - }, } impl From for ErrorObjectOwned { @@ -144,31 +132,8 @@ where return Err(Error::TooManyMappings { count }); } - let mut objects = Vec::with_capacity(count); - // TODO: fetch concurrently - for mapping in mappings.objects() { - let data = self - .object_fetcher - .fetch_object(mapping.piece_index, mapping.offset) - .await?; - - let data_hash = blake3_hash(&data); - if data_hash != mapping.hash { - error!( - ?data_hash, - data_size = %data.len(), - ?mapping.hash, - "Retrieved data did not match mapping hash", - ); - trace!(data = %hex::encode(data), "Retrieved data"); - return Err(Error::InvalidObjectHash { - mapping_hash: mapping.hash, - data_hash, - }); - } - - objects.push(data.into()); - } + let objects = self.object_fetcher.fetch_objects(mappings).await?; + let objects = objects.into_iter().map(HexData::from).collect(); Ok(objects) } diff --git a/crates/subspace-gateway/src/commands/http/server.rs b/crates/subspace-gateway/src/commands/http/server.rs index d222509e63..6e30883b1a 100644 --- 
a/crates/subspace-gateway/src/commands/http/server.rs +++ b/crates/subspace-gateway/src/commands/http/server.rs @@ -1,12 +1,9 @@ //! HTTP server which fetches objects from the DSN based on a hash, using a mapping indexer service. use actix_web::{web, App, HttpResponse, HttpServer, Responder}; -use serde::{Deserialize, Deserializer, Serialize}; -use std::default::Default; use std::sync::Arc; -use subspace_core_primitives::hashes::{blake3_hash, Blake3Hash}; -use subspace_core_primitives::pieces::PieceIndex; -use subspace_core_primitives::BlockNumber; +use subspace_core_primitives::hashes::Blake3Hash; +use subspace_core_primitives::objects::ObjectMappingResponse; use subspace_data_retrieval::object_fetcher::ObjectFetcher; use subspace_data_retrieval::piece_getter::PieceGetter; use tracing::{debug, error, trace}; @@ -21,108 +18,101 @@ where pub(crate) http_endpoint: String, } -/// Object mapping format from the indexer service. -#[derive(Serialize, Deserialize, Debug, Default)] -#[serde(rename_all = "camelCase")] -struct ObjectMapping { - hash: Blake3Hash, - piece_index: PieceIndex, - piece_offset: u32, - #[serde(deserialize_with = "string_to_u32")] - block_number: BlockNumber, -} - -/// Utility function to deserialize a JSON string into a u32. -fn string_to_u32<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - s.parse::().map_err(serde::de::Error::custom) -} - -/// Requests an object mapping with `hash` from the indexer service. -async fn request_object_mapping(endpoint: &str, hash: Blake3Hash) -> anyhow::Result { +/// Requests the object mappings for `hashes` from the indexer service. +/// Multiple hashes are separated by `+`. 
+async fn request_object_mapping( + endpoint: &str, + hashes: &Vec, +) -> anyhow::Result { let client = reqwest::Client::new(); - let object_mappings_url = format!("{}/objects/{}", endpoint, hex::encode(hash)); + let hash_list = hashes.iter().map(hex::encode).collect::>(); + let object_mappings_url = format!("{}/objects/{}", endpoint, hash_list.join("+")); - debug!(?hash, ?object_mappings_url, "Requesting object mapping..."); + debug!( + ?hashes, + ?object_mappings_url, + "Requesting object mappings..." + ); + + let response = client.get(&object_mappings_url).send().await?.json().await; - let response = client - .get(&object_mappings_url) - .send() - .await? - .json::() - .await; match &response { Ok(json) => { - trace!(?hash, ?json, "Received object mapping"); + trace!(?hashes, ?json, "Received object mappings"); } Err(err) => { - error!(?hash, ?err, ?object_mappings_url, "Request failed"); + error!(?hashes, ?err, ?object_mappings_url, "Request failed"); } } response.map_err(|err| err.into()) } -/// Fetches a DSN object with `hash`, using the mapping indexer service. +/// Fetches the DSN objects with `hashes`, using the mapping indexer service. +/// Multiple hashes are separated by `+`. 
async fn serve_object( - hash: web::Path, + hashes: web::Path, additional_data: web::Data>>, ) -> impl Responder where PG: PieceGetter + Send + Sync + 'static, { let server_params = additional_data.into_inner(); - let hash = hash.into_inner(); + let hashes = hashes.into_inner(); + let hashes = hashes + .split('+') + .map(|s| { + let mut hash = Blake3Hash::default(); + hex::decode_to_slice(s, hash.as_mut()).map(|()| hash) + }) + .try_collect::>(); + + let Ok(hashes) = hashes else { + return HttpResponse::BadRequest().finish(); + }; - let Ok(object_mapping) = request_object_mapping(&server_params.indexer_endpoint, hash).await + let Ok(object_mappings) = + request_object_mapping(&server_params.indexer_endpoint, &hashes).await else { return HttpResponse::BadRequest().finish(); }; - if object_mapping.hash != hash { - error!( - ?object_mapping, - ?hash, - "Returned object mapping doesn't match requested hash" - ); - return HttpResponse::ServiceUnavailable().finish(); + for object_mapping in object_mappings.objects.objects() { + if !hashes.contains(&object_mapping.hash) { + error!( + ?object_mapping, + ?hashes, + "Returned object mapping wasn't in requested hashes" + ); + return HttpResponse::ServiceUnavailable().finish(); + } } let object_fetcher_result = server_params .object_fetcher - .fetch_object(object_mapping.piece_index, object_mapping.piece_offset) + .fetch_objects(object_mappings.objects) .await; - let object = match object_fetcher_result { - Ok(object) => { - trace!(?hash, size = %object.len(), "Object fetched successfully"); - - let data_hash = blake3_hash(&object); - if data_hash != hash { - error!( - ?data_hash, - data_size = %object.len(), - ?hash, - "Retrieved data doesn't match requested mapping hash" - ); - trace!(data = %hex::encode(object), "Retrieved data"); - return HttpResponse::ServiceUnavailable().finish(); - } - - object + let objects = match object_fetcher_result { + Ok(objects) => { + trace!( + ?hashes, + count = %objects.len(), + sizes = 
?objects.iter().map(|object| object.len()), + "Objects fetched successfully" + ); + objects } Err(err) => { - error!(?hash, ?err, "Failed to fetch object"); + error!(?hashes, ?err, "Failed to fetch objects"); return HttpResponse::ServiceUnavailable().finish(); } }; + // TODO: return a multi-part response, with one part per object HttpResponse::Ok() .content_type("application/octet-stream") - .body(object) + .body(objects.concat()) } /// Starts the DSN object HTTP server. @@ -135,7 +125,7 @@ where HttpServer::new(move || { App::new() .app_data(web::Data::new(server_params.clone())) - .route("/data/{hash}", web::get().to(serve_object::)) + .route("/data/{hashes}", web::get().to(serve_object::)) }) .bind(http_endpoint)? .run() diff --git a/crates/subspace-gateway/src/main.rs b/crates/subspace-gateway/src/main.rs index 65ae756983..70856c154a 100644 --- a/crates/subspace-gateway/src/main.rs +++ b/crates/subspace-gateway/src/main.rs @@ -1,5 +1,7 @@ //! Subspace gateway implementation. +#![feature(iterator_try_collect)] + mod commands; mod node_client; mod piece_getter; diff --git a/crates/subspace-runtime/src/lib.rs b/crates/subspace-runtime/src/lib.rs index 547b7db129..ed930abdf7 100644 --- a/crates/subspace-runtime/src/lib.rs +++ b/crates/subspace-runtime/src/lib.rs @@ -1038,7 +1038,7 @@ impl FraudProofStorageKeyProvider> for StorageKeyProvider { fn storage_key(req: FraudProofStorageKeyRequest>) -> Vec { match req { FraudProofStorageKeyRequest::InvalidInherentExtrinsicData => { - pallet_domains::BlockInvalidInherentExtrinsicData::::hashed_key().to_vec() + pallet_domains::BlockInherentExtrinsicData::::hashed_key().to_vec() } FraudProofStorageKeyRequest::SuccessfulBundles(domain_id) => { pallet_domains::SuccessfulBundles::::hashed_key_for(domain_id) @@ -1046,7 +1046,9 @@ impl FraudProofStorageKeyProvider> for StorageKeyProvider { FraudProofStorageKeyRequest::DomainAllowlistUpdates(domain_id) => { Messenger::domain_allow_list_update_storage_key(domain_id) } - 
FraudProofStorageKeyRequest::BlockDigest => sp_domains::system_digest_final_key(), + FraudProofStorageKeyRequest::DomainRuntimeUpgrades => { + pallet_domains::DomainRuntimeUpgrades::::hashed_key().to_vec() + } FraudProofStorageKeyRequest::RuntimeRegistry(runtime_id) => { pallet_domains::RuntimeRegistry::::hashed_key_for(runtime_id) } @@ -1268,14 +1270,30 @@ impl_runtime_apis! { Domains::runtime_id(domain_id) } + fn runtime_upgrades() -> Vec { + Domains::runtime_upgrades() + } + fn domain_instance_data(domain_id: DomainId) -> Option<(DomainInstanceData, NumberFor)> { Domains::domain_instance_data(domain_id) } - fn timestamp() -> Moment{ + fn domain_timestamp() -> Moment { + Domains::timestamp() + } + + fn timestamp() -> Moment { Timestamp::now() } + fn consensus_transaction_byte_fee() -> Balance { + Domains::consensus_transaction_byte_fee() + } + + fn consensus_chain_byte_fee() -> Balance { + DOMAIN_STORAGE_FEE_MULTIPLIER * TransactionFees::transaction_byte_fee() + } + fn domain_tx_range(domain_id: DomainId) -> U256 { Domains::domain_tx_range(domain_id) } @@ -1319,10 +1337,6 @@ impl_runtime_apis! { Domains::receipt_hash(domain_id, domain_number) } - fn consensus_chain_byte_fee() -> Balance { - DOMAIN_STORAGE_FEE_MULTIPLIER * TransactionFees::transaction_byte_fee() - } - fn latest_confirmed_domain_block(domain_id: DomainId) -> Option<(DomainNumber, DomainHash)>{ Domains::latest_confirmed_domain_block(domain_id) } diff --git a/domains/client/block-preprocessor/src/inherents.rs b/domains/client/block-preprocessor/src/inherents.rs index fdf0c88543..73653b7293 100644 --- a/domains/client/block-preprocessor/src/inherents.rs +++ b/domains/client/block-preprocessor/src/inherents.rs @@ -13,7 +13,7 @@ //! Deriving these extrinsics during fraud proof verification should be possible since //! verification environment will have access to consensus chain. 
-use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_domains::{DomainId, DomainsApi, DomainsDigestItem}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; @@ -69,25 +69,40 @@ where CBlock: BlockT, Block: BlockT, { - let header = consensus_client.header(consensus_block_hash)?.ok_or( - sp_blockchain::Error::MissingHeader(format!( - "No header found for {consensus_block_hash:?}" - )), - )?; - let runtime_api = consensus_client.runtime_api(); + let runtime_id = runtime_api .runtime_id(consensus_block_hash, domain_id)? .ok_or(sp_blockchain::Error::Application(Box::from(format!( "No RuntimeId found for {domain_id:?}" ))))?; - Ok(header - .digest() - .logs - .iter() - .filter_map(|log| log.as_domain_runtime_upgrade()) - .any(|upgraded_runtime_id| upgraded_runtime_id == runtime_id)) + // The runtime_upgrades() API is only present in API versions 2 and later. On earlier versions, + // we need to call legacy code. + // TODO: remove version check before next network + let domains_api_version = runtime_api + .api_version::>(consensus_block_hash)? + // It is safe to return a default version of 1, since there will always be version 1. + .unwrap_or(1); + + let is_upgraded = if domains_api_version >= 2 { + let runtime_upgrades = runtime_api.runtime_upgrades(consensus_block_hash)?; + runtime_upgrades.contains(&runtime_id) + } else { + let header = consensus_client.header(consensus_block_hash)?.ok_or( + sp_blockchain::Error::MissingHeader(format!( + "No header found for {consensus_block_hash:?}" + )), + )?; + header + .digest() + .logs + .iter() + .filter_map(|log| log.as_domain_runtime_upgrade()) + .any(|upgraded_runtime_id| upgraded_runtime_id == runtime_id) + }; + + Ok(is_upgraded) } /// Returns new upgraded runtime if upgraded did happen in the provided consensus block. 
@@ -102,27 +117,9 @@ where CBlock: BlockT, Block: BlockT, { - let header = consensus_client.header(consensus_block_hash)?.ok_or( - sp_blockchain::Error::MissingHeader(format!( - "No header found for {consensus_block_hash:?}" - )), - )?; - - let runtime_api = consensus_client.runtime_api(); - let runtime_id = runtime_api - .runtime_id(consensus_block_hash, domain_id)? - .ok_or(sp_blockchain::Error::Application(Box::from(format!( - "No RuntimeId found for {domain_id:?}" - ))))?; - - if header - .digest() - .logs - .iter() - .filter_map(|log| log.as_domain_runtime_upgrade()) - .any(|upgraded_runtime_id| upgraded_runtime_id == runtime_id) - { - let new_domain_runtime = runtime_api + if is_runtime_upgraded::<_, _, Block>(consensus_client, consensus_block_hash, domain_id)? { + let new_domain_runtime = consensus_client + .runtime_api() .domain_runtime_code(consensus_block_hash, domain_id)? .ok_or_else(|| { sp_blockchain::Error::Application(Box::from(format!( @@ -198,8 +195,22 @@ where let consensus_block_hash = self .maybe_consensus_block_hash .unwrap_or(self.consensus_client.info().best_hash); + let runtime_api = self.consensus_client.runtime_api(); - let timestamp = runtime_api.timestamp(consensus_block_hash)?; + // Some APIs are only present in API versions 3 and later. On earlier versions, we need to + // call legacy code. + // TODO: remove version check before next network + let domains_api_version = runtime_api + .api_version::>(consensus_block_hash)? + // It is safe to return a default version of 1, since there will always be version 1. + .unwrap_or(1); + + let timestamp = if domains_api_version >= 3 { + runtime_api.domain_timestamp(consensus_block_hash)? + } else { + #[allow(deprecated)] + runtime_api.timestamp(consensus_block_hash)? 
+ }; let timestamp_provider = sp_timestamp::InherentDataProvider::new(InherentType::new(timestamp)); @@ -211,8 +222,12 @@ where let runtime_upgrade_provider = sp_executive::InherentDataProvider::new(maybe_runtime_upgrade_code); - let consensus_chain_byte_fee = - runtime_api.consensus_chain_byte_fee(consensus_block_hash)?; + let consensus_chain_byte_fee = if domains_api_version >= 3 { + runtime_api.consensus_transaction_byte_fee(consensus_block_hash)? + } else { + #[allow(deprecated)] + runtime_api.consensus_chain_byte_fee(consensus_block_hash)? + }; let storage_price_provider = sp_block_fees::InherentDataProvider::new(consensus_chain_byte_fee); diff --git a/domains/client/block-preprocessor/src/lib.rs b/domains/client/block-preprocessor/src/lib.rs index de37e7fec5..bc7e98f38d 100644 --- a/domains/client/block-preprocessor/src/lib.rs +++ b/domains/client/block-preprocessor/src/lib.rs @@ -18,7 +18,7 @@ use crate::inherents::is_runtime_upgraded; use codec::Encode; use domain_runtime_primitives::opaque::AccountId; use sc_client_api::BlockBackend; -use sp_api::{ApiError, Core, ProvideRuntimeApi}; +use sp_api::{ApiError, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::H256; use sp_domains::core_api::DomainCoreApi; @@ -29,8 +29,7 @@ use sp_domains::{ }; use sp_messenger::MessengerApi; use sp_mmr_primitives::MmrApi; -use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header, NumberFor}; -use sp_runtime::DigestItem; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, NumberFor}; use sp_state_machine::LayoutV1; use sp_subspace_mmr::ConsensusChainMmrLeafProof; use sp_weights::Weight; @@ -208,13 +207,6 @@ where Vec>, Vec<(Option, Block::Extrinsic)>, )> { - let consensus_spec_version = self - .consensus_client - .runtime_api() - .version(at_consensus_hash) - .map_err(sp_blockchain::Error::RuntimeApiError)? 
- .spec_version; - let mut inboxed_bundles = Vec::with_capacity(bundles.len()); let mut valid_extrinsics = Vec::new(); @@ -230,42 +222,16 @@ where // NOTE: The receipt's `domain_block_number` is verified by the consensus runtime while // the `domain_block_hash` is not (which is take care of by the fraud proof) so we can't // check the parent domain block hash here. - // TODO: remove consensus runtime version check before next network - if consensus_spec_version >= 6 - && bundle.receipt().domain_block_number != parent_domain_number - { - // If there consensus runtime just upgraded to spec version 6, which bring the receipt - // gap check, the bundle that included in the same block is doesn't perform the check - // because the new runtime take effect in the next block so skip the check here too. - let is_consensus_runtime_upgraded_to_6 = { - let consensus_header = self - .consensus_client - .header(at_consensus_hash)? - .ok_or_else(|| { - sp_blockchain::Error::Backend(format!( - "Consensus block header of {at_consensus_hash:?} unavailable" - )) - })?; - - let runtime_upgraded = consensus_header - .digest() - .logs() - .iter() - .any(|di| di == &DigestItem::RuntimeEnvironmentUpdated); - - runtime_upgraded && consensus_spec_version == 6 - }; - if !is_consensus_runtime_upgraded_to_6 { - return Err(sp_blockchain::Error::RuntimeApiError( - ApiError::Application( - format!( - "Unexpected bundle in consensus block: {:?}, something must be wrong", - at_consensus_hash - ) - .into(), - ), - )); - } + if bundle.receipt().domain_block_number != parent_domain_number { + return Err(sp_blockchain::Error::RuntimeApiError( + ApiError::Application( + format!( + "Unexpected bundle in consensus block: {:?}, something must be wrong", + at_consensus_hash + ) + .into(), + ), + )); } let extrinsic_root = bundle.extrinsics_root(); diff --git a/domains/client/domain-operator/src/domain_bundle_proposer.rs b/domains/client/domain-operator/src/domain_bundle_proposer.rs index 
28964f2dd0..fe4afdf5a5 100644 --- a/domains/client/domain-operator/src/domain_bundle_proposer.rs +++ b/domains/client/domain-operator/src/domain_bundle_proposer.rs @@ -154,6 +154,13 @@ where let (domain_bundle_limit, storage_fund_balance, transaction_byte_fee) = { let consensus_runtime_api = self.consensus_client.runtime_api(); + // Some APIs are only present in API versions 3 and later. On earlier versions, we need to + // call legacy code. + // TODO: remove version check before next network + let domains_api_version = consensus_runtime_api + .api_version::>(consensus_best_hash)? + // It is safe to return a default version of 1, since there will always be version 1. + .unwrap_or(1); let domain_bundle_limit = consensus_runtime_api .domain_bundle_limit(consensus_best_hash, self.domain_id)? @@ -166,8 +173,12 @@ where let storage_fund_balance = consensus_runtime_api .storage_fund_account_balance(consensus_best_hash, operator_id)?; - let transaction_byte_fee = - consensus_runtime_api.consensus_chain_byte_fee(consensus_best_hash)?; + let transaction_byte_fee = if domains_api_version >= 3 { + consensus_runtime_api.consensus_transaction_byte_fee(consensus_best_hash)? + } else { + #[allow(deprecated)] + consensus_runtime_api.consensus_chain_byte_fee(consensus_best_hash)? 
+ }; ( domain_bundle_limit, diff --git a/domains/client/domain-operator/src/fraud_proof.rs b/domains/client/domain-operator/src/fraud_proof.rs index 3d7d395e84..a15d79d486 100644 --- a/domains/client/domain-operator/src/fraud_proof.rs +++ b/domains/client/domain-operator/src/fraud_proof.rs @@ -14,8 +14,7 @@ use sp_domain_digests::AsPredigest; use sp_domains::core_api::DomainCoreApi; use sp_domains::proof_provider_and_verifier::StorageProofProvider; use sp_domains::{ - DomainId, DomainsApi, DomainsDigestItem, ExtrinsicDigest, HeaderHashingFor, InvalidBundleType, - RuntimeId, + DomainId, DomainsApi, ExtrinsicDigest, HeaderHashingFor, InvalidBundleType, RuntimeId, }; use sp_domains_fraud_proof::execution_prover::ExecutionProver; use sp_domains_fraud_proof::fraud_proof::{ @@ -389,14 +388,20 @@ where &self.storage_key_provider, )?; - let invalid_inherent_extrinsic_proof = InvalidInherentExtrinsicProof::generate( + let maybe_domain_runtime_upgraded_proof = MaybeDomainRuntimeUpgradedProof::generate( &self.storage_key_provider, self.consensus_client.as_ref(), - domain_id, consensus_block_hash, maybe_runtime_id, )?; + let domain_chain_allowlist_proof = DomainChainsAllowlistUpdateStorageProof::generate( + self.consensus_client.as_ref(), + consensus_block_hash, + domain_id, + &self.storage_key_provider, + )?; + let domain_sudo_call_proof = DomainSudoCallStorageProof::generate( self.consensus_client.as_ref(), consensus_block_hash, @@ -412,7 +417,8 @@ where proof: FraudProofVariant::InvalidExtrinsicsRoot(InvalidExtrinsicsRootProof { valid_bundle_digests, invalid_inherent_extrinsic_proofs, - invalid_inherent_extrinsic_proof, + maybe_domain_runtime_upgraded_proof, + domain_chain_allowlist_proof, domain_sudo_call_proof, }), }; @@ -425,13 +431,6 @@ where domain_id: DomainId, at: CBlock::Hash, ) -> Result, FraudProofError> { - let header = - self.consensus_client - .header(at)? 
- .ok_or(sp_blockchain::Error::MissingHeader(format!( - "No header found for {at:?}" - )))?; - let runtime_id = self .consensus_client .runtime_api() @@ -439,13 +438,13 @@ where .ok_or(sp_blockchain::Error::Application(Box::from(format!( "No RuntimeId found for {domain_id:?}" ))))?; + // This API is only present in API versions 2 and later, but it is safe to call + // unconditionally, because: + // - on Mainnet, there are no domains yet, and + // - on Taurus, there are no invalid execution receipts yet. + let runtime_upgrades = self.consensus_client.runtime_api().runtime_upgrades(at)?; - let is_runtime_upgraded = header - .digest() - .logs - .iter() - .filter_map(|log| log.as_domain_runtime_upgrade()) - .any(|upgraded_runtime_id| upgraded_runtime_id == runtime_id); + let is_runtime_upgraded = runtime_upgrades.contains(&runtime_id); Ok(is_runtime_upgraded.then_some(runtime_id)) } diff --git a/domains/client/domain-operator/src/tests.rs b/domains/client/domain-operator/src/tests.rs index e914098c86..d201ac9d8e 100644 --- a/domains/client/domain-operator/src/tests.rs +++ b/domains/client/domain-operator/src/tests.rs @@ -57,6 +57,7 @@ use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use sp_weights::Weight; use std::collections::{BTreeMap, VecDeque}; use std::sync::Arc; +use std::time::Duration; use subspace_core_primitives::pot::PotOutput; use subspace_runtime_primitives::opaque::Block as CBlock; use subspace_runtime_primitives::{Balance, SSC}; @@ -64,6 +65,9 @@ use subspace_test_service::{ produce_block_with, produce_blocks, produce_blocks_until, MockConsensusNode, }; use tempfile::TempDir; +use tracing::error; + +const TIMEOUT: Duration = Duration::from_mins(2); fn number_of(consensus_node: &MockConsensusNode, block_hash: Hash) -> u32 { consensus_node @@ -788,13 +792,13 @@ async fn test_executor_inherent_timestamp_is_set() { let consensus_timestamp = ferdie .client .runtime_api() - .timestamp(ferdie.client.info().best_hash) + 
 .domain_timestamp(ferdie.client.info().best_hash) .unwrap(); let domain_timestamp = bob .client .runtime_api() - .timestamp(bob.client.info().best_hash) + .domain_timestamp(bob.client.info().best_hash) .unwrap(); assert_eq!( @@ -1165,7 +1169,7 @@ async fn test_invalid_state_transition_proof_creation_and_verification( ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidStateTransition(proof) = &fp.proof { match (trace_diff_type, mismatch_trace_index) { @@ -1215,7 +1219,7 @@ async fn test_invalid_state_transition_proof_creation_and_verification( } }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -1229,14 +1233,19 @@ async fn test_invalid_state_transition_proof_creation_and_verification( .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - // When the system domain node process the primary block that contains the `bad_submit_bundle_tx`, + // When the system domain node processes the primary block that contains `bad_submit_bundle_tx`, // it will generate and submit a fraud proof - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts 
last, because they are the least useful test failure message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -1318,7 +1327,7 @@ async fn test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific bundle_to_tx(opaque_bundle) }; - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -1349,7 +1358,7 @@ async fn test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::InherentExtrinsic(_) = proof.invalid_bundle_type { @@ -1360,7 +1369,7 @@ async fn test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -1374,12 +1383,17 @@ async fn test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); 
+ // We check for timeouts last, because they are the least useful test failure message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -1461,7 +1475,7 @@ async fn test_false_invalid_bundles_inherent_extrinsic_proof_creation_and_verifi ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::InherentExtrinsic(_) = proof.invalid_bundle_type { @@ -1472,7 +1486,7 @@ async fn test_false_invalid_bundles_inherent_extrinsic_proof_creation_and_verifi false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -1486,12 +1500,17 @@ async fn test_false_invalid_bundles_inherent_extrinsic_proof_creation_and_verifi .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -1571,7 +1590,7 @@ async fn test_true_invalid_bundles_undecodeable_tx_proof_creation_and_verificati bundle_to_tx(opaque_bundle) }; - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -1602,7 +1621,7 @@ async fn test_true_invalid_bundles_undecodeable_tx_proof_creation_and_verificati ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::UndecodableTx(_) = proof.invalid_bundle_type { @@ -1613,7 +1632,7 @@ async fn test_true_invalid_bundles_undecodeable_tx_proof_creation_and_verificati false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -1627,12 +1646,17 @@ async fn test_true_invalid_bundles_undecodeable_tx_proof_creation_and_verificati .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least 
useful test failure message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -1714,7 +1738,7 @@ async fn test_false_invalid_bundles_undecodeable_tx_proof_creation_and_verificat ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::UndecodableTx(_) = proof.invalid_bundle_type { @@ -1725,7 +1749,7 @@ async fn test_false_invalid_bundles_undecodeable_tx_proof_creation_and_verificat false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -1739,12 +1763,17 @@ async fn test_false_invalid_bundles_undecodeable_tx_proof_creation_and_verificat .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -1835,7 +1864,7 @@ async fn test_true_invalid_bundles_illegal_xdm_proof_creation_and_verification() bundle_to_tx(opaque_bundle) }; - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -1866,7 +1895,7 @@ async fn test_true_invalid_bundles_illegal_xdm_proof_creation_and_verification() ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::InvalidXDM(extrinsic_index) = proof.invalid_bundle_type { @@ -1878,9 +1907,9 @@ async fn test_true_invalid_bundles_illegal_xdm_proof_creation_and_verification() false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -1893,12 +1922,17 @@ async fn test_true_invalid_bundles_illegal_xdm_proof_creation_and_verification() .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree 
ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -1999,7 +2033,7 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica bundle_to_tx(opaque_bundle) }; - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -2030,7 +2064,7 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::IllegalTx(extrinsic_index) = proof.invalid_bundle_type { @@ -2042,7 +2076,7 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2056,12 +2090,17 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune 
the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2162,7 +2201,7 @@ async fn test_false_invalid_bundles_illegal_extrinsic_proof_creation_and_verific ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::IllegalTx(extrinsic_index) = proof.invalid_bundle_type { @@ -2174,7 +2213,7 @@ async fn test_false_invalid_bundles_illegal_extrinsic_proof_creation_and_verific false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2188,12 +2227,17 @@ async fn test_false_invalid_bundles_illegal_extrinsic_proof_creation_and_verific .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2251,7 +2295,7 @@ async fn test_true_invalid_bundle_weight_proof_creation_and_verification() { bundle_to_tx(opaque_bundle) }; - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -2282,7 +2326,7 @@ async fn test_true_invalid_bundle_weight_proof_creation_and_verification() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if InvalidBundleType::InvalidBundleWeight == proof.invalid_bundle_type { @@ -2293,7 +2337,7 @@ async fn test_true_invalid_bundle_weight_proof_creation_and_verification() { false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2307,12 +2351,17 @@ async fn test_true_invalid_bundle_weight_proof_creation_and_verification() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure 
message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2392,7 +2441,7 @@ async fn test_false_invalid_bundle_weight_proof_creation_and_verification() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if InvalidBundleType::InvalidBundleWeight == proof.invalid_bundle_type { @@ -2403,7 +2452,7 @@ async fn test_false_invalid_bundle_weight_proof_creation_and_verification() { false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2417,12 +2466,17 @@ async fn test_false_invalid_bundle_weight_proof_creation_and_verification() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2503,7 +2557,7 @@ async fn test_false_invalid_bundles_non_exist_extrinsic_proof_creation_and_verif ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundlesProofData::Bundle(_) = proof.proof_data { @@ -2514,7 +2568,7 @@ async fn test_false_invalid_bundles_non_exist_extrinsic_proof_creation_and_verif false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2528,12 +2582,17 @@ async fn test_false_invalid_bundles_non_exist_extrinsic_proof_creation_and_verif .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2585,7 +2644,7 @@ async fn test_invalid_block_fees_proof_creation() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { matches!( fp.proof, @@ -2593,7 +2652,7 @@ async fn test_invalid_block_fees_proof_creation() { ) }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2607,14 +2666,19 @@ async fn test_invalid_block_fees_proof_creation() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - // When the domain node operator process the primary block that contains the `bad_submit_bundle_tx`, + // When the domain node operator processes the primary block that contains `bad_submit_bundle_tx`, // it will generate and submit a fraud proof - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2686,7 +2750,7 @@ async fn test_invalid_transfers_fraud_proof() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { matches!( fp.proof, @@ -2694,7 +2758,7 @@ async fn test_invalid_transfers_fraud_proof() { ) }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2708,14 +2772,19 @@ async fn test_invalid_transfers_fraud_proof() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - // When the domain node operator process the primary block that contains the `bad_submit_bundle_tx`, + // When the domain node operator processes the primary block that contains `bad_submit_bundle_tx`, // it will generate and submit a fraud proof - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2782,7 +2851,7 @@ async fn test_invalid_domain_block_hash_proof_creation() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { matches!( fp.proof, @@ -2790,7 +2859,7 @@ async fn test_invalid_domain_block_hash_proof_creation() { ) }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2804,14 +2873,19 @@ async fn test_invalid_domain_block_hash_proof_creation() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - // When the domain node operator process the primary block that contains the `bad_submit_bundle_tx`, + // When the domain node operator processes the primary block that contains `bad_submit_bundle_tx`, // it will generate and submit a fraud proof - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -2878,7 +2952,7 @@ async fn test_invalid_domain_extrinsics_root_proof_creation() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { matches!( fp.proof, @@ -2886,7 +2960,7 @@ async fn test_invalid_domain_extrinsics_root_proof_creation() { ) }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -2900,14 +2974,19 @@ async fn test_invalid_domain_extrinsics_root_proof_creation() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - // When the domain node operator process the primary block that contains the `bad_submit_bundle_tx`, + // When the domain node operator processes the primary block that contains `bad_submit_bundle_tx`, // it will generate and submit a fraud proof - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -3177,7 +3256,7 @@ async fn test_valid_bundle_proof_generation_and_verification() { (bundle.receipt().clone(), bundle_to_tx(bundle)) }; - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree let mut import_tx_stream = ferdie.transaction_pool.import_notification_stream(); produce_block_with!( @@ -3194,7 +3273,7 @@ async fn test_valid_bundle_proof_generation_and_verification() { .does_receipt_exist(bad_receipt.hash::()) .unwrap()); - // When the domain node operator process the primary block that contains the `bad_submit_bundle_tx`, + // When the domain node operator processes the primary block that contains `bad_submit_bundle_tx`, // it will generate and submit a fraud proof while let Some(ready_tx_hash) = import_tx_stream.next().await { let ready_tx = ferdie @@ -3239,7 +3318,7 @@ async fn test_valid_bundle_proof_generation_and_verification() { } // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie .does_receipt_exist(bad_receipt.hash::()) @@ -4664,7 +4743,7 @@ async fn test_bad_receipt_chain() { ) }; - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree produce_block_with!( ferdie.produce_block_with_slot_at( @@ -4689,7 +4768,7 @@ async fn test_bad_receipt_chain() { ) && fp.targeted_bad_receipt_hash() == bad_receipt_hash }); - // Produce more bundle with bad ER that use previous bad ER as parent + // Produce 
more bundles with bad ERs that use the previous bad ER as an ancestor let mut parent_bad_receipt_hash = bad_receipt_hash; let mut bad_receipt_descendants = vec![]; for _ in 0..7 { @@ -4738,9 +4817,12 @@ async fn test_bad_receipt_chain() { } // The fraud proof should be submitted - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); - // The first bad ER should be pruned and its descendants are marked as pending to prune + // The first bad ER should be pruned, and its descendants marked as pending to prune ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); @@ -4766,7 +4848,7 @@ async fn test_bad_receipt_chain() { .head_receipt_number(ferdie_best_hash, EVM_DOMAIN_ID) .unwrap(); assert_eq!(head_domain_number - head_receipt_number, 9); - // The previou bundle will be rejected as there is a receipt gap + // The previous bundle will be rejected as there is a receipt gap match ferdie .submit_transaction(bundle_to_tx(stale_bundle)) .await @@ -4821,7 +4903,7 @@ async fn test_bad_receipt_chain() { let bob_best_number = bob.client.info().best_number; assert_eq!(alice_best_number, bob_best_number); - // Bad receipt should be pruned as singletone receipt submitting + // The bad receipt and its descendants should be pruned immediately for receipt_hash in vec![bad_receipt_hash] .into_iter() .chain(bad_receipt_descendants) @@ -4832,7 +4914,7 @@ async fn test_bad_receipt_chain() { assert!(!ferdie.does_receipt_exist(receipt_hash).unwrap()); } - // The receipt gap should be fill up + // The receipt gap should be filled up let ferdie_best_hash = ferdie.client.info().best_hash; let head_domain_number = ferdie .client @@ -4849,6 +4931,8 @@ async fn test_bad_receipt_chain() { assert_eq!(bob_best_number, bob.client.info().best_number); produce_blocks!(ferdie, bob, 
15).await.unwrap(); + // We check for timeouts last, because they are the least useful test failure message. + assert!(!timed_out); } #[tokio::test(flavor = "multi_thread")] @@ -4881,12 +4965,12 @@ async fn test_domain_chain_storage_price_should_be_aligned_with_the_consensus_ch let consensus_chain_byte_fee = ferdie .client .runtime_api() - .consensus_chain_byte_fee(ferdie.client.info().best_hash) + .consensus_transaction_byte_fee(ferdie.client.info().best_hash) .unwrap(); let operator_consensus_chain_byte_fee = alice .client .runtime_api() - .consensus_chain_byte_fee(alice.client.info().best_hash) + .consensus_transaction_byte_fee(alice.client.info().best_hash) .unwrap(); assert!(operator_consensus_chain_byte_fee.is_zero()); assert!(!consensus_chain_byte_fee.is_zero()); @@ -4898,12 +4982,12 @@ async fn test_domain_chain_storage_price_should_be_aligned_with_the_consensus_ch let consensus_chain_byte_fee = ferdie .client .runtime_api() - .consensus_chain_byte_fee(ferdie.client.info().best_hash) + .consensus_transaction_byte_fee(ferdie.client.info().best_hash) .unwrap(); let operator_consensus_chain_byte_fee = alice .client .runtime_api() - .consensus_chain_byte_fee(alice.client.info().best_hash) + .consensus_transaction_byte_fee(alice.client.info().best_hash) .unwrap(); assert_eq!(consensus_chain_byte_fee, operator_consensus_chain_byte_fee); } @@ -5430,7 +5514,7 @@ async fn test_xdm_false_invalid_fraud_proof() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::InvalidXDM(extrinsic_index) = proof.invalid_bundle_type { @@ -5442,9 +5526,9 @@ async fn test_xdm_false_invalid_fraud_proof() { false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains 
`bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -5457,12 +5541,17 @@ async fn test_xdm_false_invalid_fraud_proof() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } // TODO: this test is flaky and sometime hang forever in CI thus disable it temporary, @@ -5676,7 +5765,7 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { ) }; - // Wait for the fraud proof that target the bad ER + // Wait for the fraud proof that targets the bad ER let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { if let FraudProofVariant::InvalidBundles(proof) = &fp.proof { if let InvalidBundleType::InvalidXDM(extrinsic_index) = proof.invalid_bundle_type { @@ -5688,9 +5777,9 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { false }); - // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // Produce a consensus block that contains `bad_submit_bundle_tx` and the bad receipt should // be added to the consensus chain block tree - // Produce a block that contains the `bad_submit_bundle_tx` + // Produce a block that contains `bad_submit_bundle_tx` produce_block_with!( ferdie.produce_block_with_slot_at( slot, @@ -5703,10 +5792,15 @@ async fn test_stale_fork_xdm_true_invalid_fraud_proof() { .unwrap(); assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); - let _ = wait_for_fraud_proof_fut.await; + let timed_out = tokio::time::timeout(TIMEOUT, wait_for_fraud_proof_fut) + .await + .inspect_err(|_| error!("fraud proof was not created before the timeout")) + .is_err(); // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified - // and executed, thus pruned the bad receipt from the block tree + // and executed, and prune the bad receipt from the block tree ferdie.produce_blocks(1).await.unwrap(); assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + // We check for timeouts last, because they are the least useful test failure message. 
+ assert!(!timed_out); } diff --git a/domains/test/primitives/src/lib.rs b/domains/test/primitives/src/lib.rs index b0cfeeafde..12474d68e7 100644 --- a/domains/test/primitives/src/lib.rs +++ b/domains/test/primitives/src/lib.rs @@ -9,7 +9,7 @@ sp_api::decl_runtime_apis! { /// Api that returns the timestamp pub trait TimestampApi { /// Api to construct inherent timestamp extrinsic from given time - fn timestamp() -> Moment; + fn domain_timestamp() -> Moment; } } @@ -27,6 +27,6 @@ sp_api::decl_runtime_apis! { fn get_open_channel_for_chain(dst_chain_id: ChainId) -> Option; /// Api to get the current domain transaction byte fee - fn consensus_chain_byte_fee() -> Balance; + fn consensus_transaction_byte_fee() -> Balance; } } diff --git a/domains/test/runtime/auto-id/src/lib.rs b/domains/test/runtime/auto-id/src/lib.rs index 0f77507876..ca1d80a13d 100644 --- a/domains/test/runtime/auto-id/src/lib.rs +++ b/domains/test/runtime/auto-id/src/lib.rs @@ -982,7 +982,7 @@ impl_runtime_apis! { Messenger::get_open_channel_for_chain(dst_chain_id).map(|(c, _)| c) } - fn consensus_chain_byte_fee() -> Balance { + fn consensus_transaction_byte_fee() -> Balance { BlockFees::consensus_chain_byte_fee() } } diff --git a/domains/test/runtime/evm/src/lib.rs b/domains/test/runtime/evm/src/lib.rs index 26dce324a9..a46ad36a43 100644 --- a/domains/test/runtime/evm/src/lib.rs +++ b/domains/test/runtime/evm/src/lib.rs @@ -1506,7 +1506,7 @@ impl_runtime_apis! { } impl domain_test_primitives::TimestampApi for Runtime { - fn timestamp() -> Moment { + fn domain_timestamp() -> Moment { Timestamp::now() } } @@ -1520,7 +1520,7 @@ impl_runtime_apis! 
{ Messenger::get_open_channel_for_chain(dst_chain_id).map(|(c, _)| c) } - fn consensus_chain_byte_fee() -> Balance { + fn consensus_transaction_byte_fee() -> Balance { BlockFees::consensus_chain_byte_fee() } } diff --git a/shared/subspace-data-retrieval/Cargo.toml b/shared/subspace-data-retrieval/Cargo.toml index 5f53ed11c2..1843628c25 100644 --- a/shared/subspace-data-retrieval/Cargo.toml +++ b/shared/subspace-data-retrieval/Cargo.toml @@ -15,6 +15,7 @@ include = [ anyhow = "1.0.89" async-trait = "0.1.83" futures = "0.3.31" +hex = "0.4.3" parity-scale-codec = { version = "3.6.12", features = ["derive"] } subspace-archiving = { version = "0.1.0", path = "../../crates/subspace-archiving" } subspace-core-primitives = { version = "0.1.0", path = "../../crates/subspace-core-primitives" } diff --git a/shared/subspace-data-retrieval/src/object_fetcher.rs b/shared/subspace-data-retrieval/src/object_fetcher.rs index b1c50444ff..85800987ef 100644 --- a/shared/subspace-data-retrieval/src/object_fetcher.rs +++ b/shared/subspace-data-retrieval/src/object_fetcher.rs @@ -21,6 +21,8 @@ use crate::segment_downloading::{download_segment, SegmentDownloadingError}; use parity_scale_codec::{Compact, CompactLen, Decode, Encode}; use std::sync::Arc; use subspace_archiving::archiver::{Segment, SegmentItem}; +use subspace_core_primitives::hashes::{blake3_hash, Blake3Hash}; +use subspace_core_primitives::objects::{GlobalObject, GlobalObjectMapping}; use subspace_core_primitives::pieces::{Piece, PieceIndex, RawRecord}; use subspace_core_primitives::segments::{RecordedHistorySegment, SegmentIndex}; use subspace_erasure_coding::ErasureCoding; @@ -28,8 +30,9 @@ use tracing::{debug, trace, warn}; /// The maximum amount of segment padding. /// -/// This is the difference between the compact encoding of lengths 1 to 63, and the compact -/// encoding of lengths 2^14 to 2^30 - 1. 
+/// This is the difference between the lengths of the compact encodings of the minimum and maximum +/// block sizes, in any domain. As of January 2025, the minimum block size is (potentially) 63 or +/// less, and the maximum block size is in the range 2^14 to 2^30 - 1. /// pub const MAX_SEGMENT_PADDING: usize = 3; @@ -43,100 +46,99 @@ pub const MAX_SUPPORTED_OBJECT_LENGTH: usize = 1024 * 1024 * 1024 - 1; #[derive(Debug, thiserror::Error)] pub enum Error { /// Supplied piece index is not a source piece - #[error("Piece index {piece_index} is not a source piece, offset: {piece_offset}")] - NotSourcePiece { - piece_index: PieceIndex, - piece_offset: u32, - }, + #[error("Piece index is not a source piece, object: {mapping:?}")] + NotSourcePiece { mapping: GlobalObject }, /// Supplied piece offset is too large - #[error("Piece offset {piece_offset} is too large, must be less than {}, piece index: {piece_index}", RawRecord::SIZE)] - PieceOffsetTooLarge { - piece_index: PieceIndex, - piece_offset: u32, - }, + #[error( + "Piece offset is too large, must be less than {}, object: {mapping:?}", + RawRecord::SIZE + )] + PieceOffsetTooLarge { mapping: GlobalObject }, /// No item in segment at offset - #[error("Offset {offset_in_segment} in segment {segment_index} is not an item, current progress: {progress}, object: {piece_index:?}, {piece_offset}")] + #[error("Offset {offset_in_segment} in segment {segment_index} is not an item, current progress: {progress}, object: {mapping:?}")] NoSegmentItem { progress: usize, offset_in_segment: usize, segment_index: SegmentIndex, - piece_index: PieceIndex, - piece_offset: u32, + mapping: GlobalObject, }, /// Unexpected item in first segment at offset - #[error("Offset {offset_in_segment} in first segment {segment_index} has unexpected item, current progress: {segment_progress}, object: {piece_index:?}, {piece_offset}, item: {segment_item:?}")] + #[error("Offset {offset_in_segment} in first segment {segment_index} has unexpected item, 
current progress: {segment_progress}, object: {mapping:?}, item: {segment_item:?}")] UnexpectedFirstSegmentItem { segment_progress: usize, offset_in_segment: usize, segment_index: SegmentIndex, segment_item: Box, - piece_index: PieceIndex, - piece_offset: u32, + mapping: GlobalObject, }, /// Unexpected item in continuing segment at offset - #[error("Continuing segment {segment_index} has unexpected item, collected data: {collected_data}, object: {piece_index:?}, {piece_offset}, item: {segment_item:?}")] + #[error("Continuing segment {segment_index} has unexpected item, collected data: {collected_data}, object: {mapping:?}, item: {segment_item:?}")] UnexpectedContinuingSegmentItem { collected_data: usize, segment_index: SegmentIndex, segment_item: Box, - piece_index: PieceIndex, - piece_offset: u32, + mapping: GlobalObject, }, /// Object not found after downloading expected number of segments - #[error("Object segment range {first_segment_index}..={last_segment_index} did not contain full object, object: {piece_index:?}, {piece_offset}")] + #[error("Object segment range {first_segment_index}..={last_segment_index} did not contain full object, object: {mapping:?}")] TooManySegments { first_segment_index: SegmentIndex, last_segment_index: SegmentIndex, - piece_index: PieceIndex, - piece_offset: u32, + mapping: GlobalObject, }, /// Object is too large error #[error( - "Data length {data_length} exceeds maximum object size {max_object_len} for object: {piece_index:?}, {piece_offset}" + "Data length {data_length} exceeds maximum object size {max_object_len} for object: {mapping:?}" )] ObjectTooLarge { data_length: usize, max_object_len: usize, - piece_index: PieceIndex, - piece_offset: u32, + mapping: GlobalObject, }, /// Length prefix is too large error #[error( - "Length prefix length {length_prefix_len} exceeds maximum object size {max_object_len} for object: {piece_index:?}, {piece_offset}" + "Length prefix length {length_prefix_len} exceeds maximum object size 
{max_object_len} for object: {mapping:?}" )] LengthPrefixTooLarge { length_prefix_len: usize, max_object_len: usize, - piece_index: PieceIndex, - piece_offset: u32, + mapping: GlobalObject, + }, + + /// Hash doesn't match data + #[error("Incorrect data hash {data_hash:?} for {data_size} byte object: {mapping:?}")] + InvalidDataHash { + data_hash: Blake3Hash, + data_size: usize, + mapping: GlobalObject, }, /// Object decoding error - #[error("Object data decoding error: {source:?}")] + #[error("Object data decoding error: {source:?}, object: {mapping:?}")] ObjectDecoding { - #[from] source: parity_scale_codec::Error, + mapping: GlobalObject, }, /// Segment getter error - #[error("Getting segment failed: {source:?}")] + #[error("Getting segment failed: {source:?}, object: {mapping:?}")] SegmentGetter { - #[from] source: SegmentDownloadingError, + mapping: GlobalObject, }, /// Piece getter error - #[error("Getting piece caused an error: {source:?}")] + #[error("Getting piece caused an error: {source:?}, object: {mapping:?}")] PieceGetterError { - #[from] source: anyhow::Error, + mapping: GlobalObject, }, /// Piece getter couldn't find the piece @@ -192,82 +194,101 @@ where } } - /// Assemble the object in `piece_index` at `piece_offset` by fetching necessary pieces using - /// the piece getter and putting the object's bytes together. + /// Assemble the objects in `mapping` by fetching necessary pieces using the piece getter, and + /// putting the objects' bytes together. /// - /// The caller should check the object's hash to make sure the correct bytes are returned. - pub async fn fetch_object( + /// Checks the objects' hashes to make sure the correct bytes are returned. 
+ pub async fn fetch_objects( &self, - piece_index: PieceIndex, - piece_offset: u32, - ) -> Result, Error> { - // Validate parameters - if !piece_index.is_source() { - tracing::debug!( - %piece_index, - piece_offset, - "Invalid piece index for object: must be a source piece", - ); - - // Parity pieces contain effectively random data, and can't be used to fetch objects - return Err(Error::NotSourcePiece { + mappings: GlobalObjectMapping, + ) -> Result>, Error> { + let mut objects = Vec::with_capacity(mappings.objects().len()); + + // TODO: sort mappings in piece index order, and keep pieces until they're no longer needed + for &mapping in mappings.objects() { + let GlobalObject { + hash, piece_index, - piece_offset, - }); - } + offset, + } = mapping; - if piece_offset >= RawRecord::SIZE as u32 { - tracing::debug!( - %piece_index, - piece_offset, - RawRecord_SIZE = RawRecord::SIZE, - "Invalid piece offset for object: must be less than the size of a raw record", - ); + // Validate parameters + if !piece_index.is_source() { + debug!( + ?mapping, + "Invalid piece index for object: must be a source piece", + ); - return Err(Error::PieceOffsetTooLarge { - piece_index, - piece_offset, - }); - } + // Parity pieces contain effectively random data, and can't be used to fetch objects + return Err(Error::NotSourcePiece { mapping }); + } - // Try fast object assembling from individual pieces - if let Some(data) = self.fetch_object_fast(piece_index, piece_offset).await? 
{ - tracing::debug!( - %piece_index, - piece_offset, - len = %data.len(), - "Fetched object using fast object assembling", - ); + if offset >= RawRecord::SIZE as u32 { + debug!( + ?mapping, + RawRecord_SIZE = RawRecord::SIZE, + "Invalid piece offset for object: must be less than the size of a raw record", + ); - return Ok(data); - } + return Err(Error::PieceOffsetTooLarge { mapping }); + } - // Regular object assembling from segments - let data = self.fetch_object_regular(piece_index, piece_offset).await?; + // Try fast object assembling from individual pieces, + // then regular object assembling from segments + let data = match self.fetch_object_fast(mapping).await? { + Some(data) => data, + None => { + let data = self.fetch_object_regular(mapping).await?; - tracing::debug!( - %piece_index, - piece_offset, - len = %data.len(), - "Fetched object using regular object assembling", - ); + debug!( + ?mapping, + len = %data.len(), + "Fetched object using regular object assembling", + + ); + + data + } + }; + + let data_hash = blake3_hash(&data); + if data_hash != hash { + debug!( + ?data_hash, + data_size = %data.len(), + ?mapping, + "Retrieved data doesn't match requested mapping hash" + ); + trace!(data = %hex::encode(&data), "Retrieved data"); + + return Err(Error::InvalidDataHash { + data_hash, + data_size: data.len(), + mapping, + }); + } - Ok(data) + objects.push(data); + } + + Ok(objects) } /// Fast object fetching and assembling where the object doesn't cross piece (super fast) or /// segment (just fast) boundaries, returns `Ok(None)` if fast retrieval is not guaranteed. // TODO: return already downloaded pieces from fetch_object_fast() and pass them to fetch_object_regular() - async fn fetch_object_fast( - &self, - piece_index: PieceIndex, - piece_offset: u32, - ) -> Result>, Error> { + async fn fetch_object_fast(&self, mapping: GlobalObject) -> Result>, Error> { + let GlobalObject { + piece_index, + offset, + .. 
+ } = mapping; + // If the offset is before the last few bytes of a segment, we might be able to do very // fast object retrieval without assembling and processing the whole segment. // // The last few bytes might contain padding if a piece is the last piece in the segment. - let before_max_padding = piece_offset as usize <= RawRecord::SIZE - 1 - MAX_SEGMENT_PADDING; + let before_max_padding = offset as usize <= RawRecord::SIZE - 1 - MAX_SEGMENT_PADDING; let piece_position_in_segment = piece_index.source_position(); let data_shards = RecordedHistorySegment::NUM_RAW_RECORDS as u32; let last_data_piece_in_segment = piece_position_in_segment >= data_shards - 1; @@ -275,8 +296,7 @@ where if last_data_piece_in_segment && !before_max_padding { trace!( piece_position_in_segment, - %piece_index, - piece_offset, + ?mapping, "Fast object retrieval not possible: last source piece in segment, \ and start of object length bytes is in potential segment padding", ); @@ -290,7 +310,7 @@ where // // The last few bytes might contain padding if a piece is the last piece in the segment. 
let bytes_available_in_segment = - (data_shards - piece_position_in_segment) * RawRecord::SIZE as u32 - piece_offset; + (data_shards - piece_position_in_segment) * RawRecord::SIZE as u32 - offset; let Some(bytes_available_in_segment) = bytes_available_in_segment.checked_sub(MAX_SEGMENT_PADDING as u32) else { @@ -302,9 +322,7 @@ where let mut read_records_data = Vec::::with_capacity(RawRecord::SIZE * 2); let mut next_source_piece_index = piece_index; - let piece = self - .read_piece(next_source_piece_index, piece_index, piece_offset) - .await?; + let piece = self.read_piece(next_source_piece_index, mapping).await?; next_source_piece_index = next_source_piece_index.next_source_index(); // Discard piece data before the offset read_records_data.extend( @@ -312,7 +330,7 @@ where .record() .to_raw_record_chunks() .flatten() - .skip(piece_offset as usize) + .skip(offset as usize) .copied(), ); @@ -322,7 +340,7 @@ where read_records_data.truncate(read_records_data.len() - MAX_SEGMENT_PADDING); } - let data_length = self.decode_data_length(&read_records_data, piece_index, piece_offset)?; + let data_length = self.decode_data_length(&read_records_data, mapping)?; let data_length = if let Some(data_length) = data_length { data_length @@ -333,25 +351,21 @@ where %next_source_piece_index, piece_position_in_segment, bytes_available_in_segment, - %piece_index, - piece_offset, + ?mapping, "Part of object length bytes is in next piece, fetching", ); - let piece = self - .read_piece(next_source_piece_index, piece_index, piece_offset) - .await?; + let piece = self.read_piece(next_source_piece_index, mapping).await?; next_source_piece_index = next_source_piece_index.next_source_index(); read_records_data.extend(piece.record().to_raw_record_chunks().flatten().copied()); - self.decode_data_length(&read_records_data, piece_index, piece_offset)? + self.decode_data_length(&read_records_data, mapping)? 
.expect("Extra RawRecord is larger than the length encoding; qed") } else { trace!( piece_position_in_segment, bytes_available_in_segment, - %piece_index, - piece_offset, + ?mapping, "Fast object retrieval not possible: last source piece in segment, \ and part of object length bytes is in potential segment padding", ); @@ -366,8 +380,7 @@ where data_length, bytes_available_in_segment, piece_position_in_segment, - %piece_index, - piece_offset, + ?mapping, "Fast object retrieval not possible: part of object data bytes is in \ potential segment padding", ); @@ -384,7 +397,7 @@ where .filter(|i| i.is_source()) .take(remaining_piece_count) .collect::>(); - self.read_pieces(remaining_piece_indexes) + self.read_pieces(&remaining_piece_indexes, mapping) .await? .into_iter() .for_each(|piece| { @@ -394,35 +407,46 @@ where } // Decode the data, and return it if it's valid - let read_records_data = Vec::::decode(&mut read_records_data.as_slice())?; + let read_records_data = Vec::::decode(&mut read_records_data.as_slice()) + .map_err(|source| Error::ObjectDecoding { source, mapping })?; + + debug!( + ?mapping, + len = %read_records_data.len(), + "Fetched object using fast object assembling", + ); Ok(Some(read_records_data)) } /// Fetch and assemble an object that can cross segment boundaries, which requires assembling /// and iterating over full segments. - async fn fetch_object_regular( - &self, - piece_index: PieceIndex, - piece_offset: u32, - ) -> Result, Error> { + async fn fetch_object_regular(&self, mapping: GlobalObject) -> Result, Error> { + let GlobalObject { + piece_index, + offset, + .. 
+ } = mapping; + let mut segment_index = piece_index.segment_index(); let piece_position_in_segment = piece_index.source_position(); // Used to access the data after it is converted to raw bytes let offset_in_segment = - piece_position_in_segment as usize * RawRecord::SIZE + piece_offset as usize; + piece_position_in_segment as usize * RawRecord::SIZE + offset as usize; - tracing::trace!( + trace!( %segment_index, offset_in_segment, piece_position_in_segment, - %piece_index, - piece_offset, + ?mapping, "Fetching object from segment(s)", ); let mut data = { - let items = self.read_segment(segment_index).await?.into_items(); + let items = self + .read_segment(segment_index, mapping) + .await? + .into_items(); // Go through the segment until we reach the offset. // Unconditional progress is enum variant + compact encoding of number of elements let mut progress = 1 + Compact::compact_len(&(items.len() as u64)); @@ -441,8 +465,7 @@ where progress, offset_in_segment, ?segment_index, - %piece_index, - piece_offset, + ?mapping, "Failed to find item at offset in segment" ); @@ -450,18 +473,16 @@ where progress, offset_in_segment, segment_index, - piece_index, - piece_offset, + mapping, } })?; - tracing::trace!( + trace!( progress, %segment_index, offset_in_segment, piece_position_in_segment, - %piece_index, - piece_offset, + ?mapping, segment_item = format!("{segment_item:?}").chars().take(50).collect::(), "Found item at offset in first segment", ); @@ -484,8 +505,7 @@ where segment_progress = progress, offset_in_segment, %segment_index, - %piece_index, - piece_offset, + ?mapping, segment_item = format!("{segment_item:?}").chars().take(50).collect::(), "Unexpected segment item in first segment", ); @@ -494,32 +514,29 @@ where segment_progress: progress, offset_in_segment, segment_index, - piece_index, - piece_offset, + mapping, segment_item: Box::new(segment_item), }); } } }; - tracing::trace!( + trace!( %segment_index, offset_in_segment, piece_position_in_segment, - 
%piece_index, - piece_offset, + ?mapping, data_len = data.len(), "Got data at offset in first segment", ); // Return an error if the length is unreasonably large, before we get the next segment - if let Some(data_length) = - self.decode_data_length(data.as_slice(), piece_index, piece_offset)? - { + if let Some(data_length) = self.decode_data_length(data.as_slice(), mapping)? { // If we have the whole object, decode and return it. // TODO: use tokio Bytes type to re-use the same allocation by stripping the length at the start if data.len() >= data_length { - return Ok(Vec::::decode(&mut data.as_slice())?); + return Vec::::decode(&mut data.as_slice()) + .map_err(|source| Error::ObjectDecoding { source, mapping }); } } @@ -528,17 +545,21 @@ where // headers and optional padding. loop { segment_index += SegmentIndex::ONE; - let items = self.read_segment(segment_index).await?.into_items(); + let items = self + .read_segment(segment_index, mapping) + .await? + .into_items(); for segment_item in items { match segment_item { SegmentItem::BlockContinuation { bytes, .. } => { data.extend_from_slice(&bytes); if let Some(data_length) = - self.decode_data_length(data.as_slice(), piece_index, piece_offset)? + self.decode_data_length(data.as_slice(), mapping)? 
{ if data.len() >= data_length { - return Ok(Vec::::decode(&mut data.as_slice())?); + return Vec::::decode(&mut data.as_slice()) + .map_err(|source| Error::ObjectDecoding { source, mapping }); } } } @@ -552,8 +573,7 @@ where debug!( collected_data = ?data.len(), %segment_index, - %piece_index, - piece_offset, + ?mapping, segment_item = format!("{segment_item:?}").chars().take(50).collect::(), "Unexpected segment item in continuing segment", ); @@ -561,8 +581,7 @@ where return Err(Error::UnexpectedContinuingSegmentItem { collected_data: data.len(), segment_index, - piece_index, - piece_offset, + mapping, segment_item: Box::new(segment_item), }); } @@ -572,66 +591,70 @@ where } /// Read the whole segment by its index (just records, skipping witnesses). - async fn read_segment(&self, segment_index: SegmentIndex) -> Result { - Ok(download_segment( + /// + /// The mapping is only used for error reporting. + async fn read_segment( + &self, + segment_index: SegmentIndex, + mapping: GlobalObject, + ) -> Result { + download_segment( segment_index, &self.piece_getter, self.erasure_coding.clone(), ) - .await?) + .await + .map_err(|source| Error::SegmentGetter { source, mapping }) } /// Concurrently read multiple pieces, and return them in the supplied order. - async fn read_pieces(&self, piece_indexes: Vec) -> Result, Error> { + /// + /// The mapping is only used for error reporting. + async fn read_pieces( + &self, + piece_indexes: &Vec, + mapping: GlobalObject, + ) -> Result, Error> { download_pieces(piece_indexes, &self.piece_getter) .await - .map_err(|source| Error::PieceGetterError { source }) + .map_err(|source| { + debug!( + ?piece_indexes, + error = ?source, + ?mapping, + "Error fetching pieces during object assembling" + ); + + Error::PieceGetterError { source, mapping } + }) } /// Read and return a single piece. /// - /// The mapping piece index and offset are only used for error reporting. + /// The mapping is only used for error reporting. 
async fn read_piece( &self, piece_index: PieceIndex, - mapping_piece_index: PieceIndex, - mapping_piece_offset: u32, + mapping: GlobalObject, ) -> Result { - let piece = self - .piece_getter - .get_piece(piece_index) + download_pieces(&vec![piece_index], &self.piece_getter) .await - .inspect_err(|source| { + .map(|pieces| { + pieces + .first() + .expect("download_pieces always returns exact pieces or error") + .clone() + }) + .map_err(|source| { debug!( %piece_index, error = ?source, - %mapping_piece_index, - mapping_piece_offset, + ?mapping, "Error fetching piece during object assembling" ); - })?; - - if let Some(piece) = piece { - trace!( - %piece_index, - %mapping_piece_index, - mapping_piece_offset, - "Fetched piece during object assembling" - ); - Ok(piece) - } else { - debug!( - %piece_index, - %mapping_piece_index, - mapping_piece_offset, - "Piece not found during object assembling" - ); - - Err(Error::PieceNotFound { - piece_index: mapping_piece_index, - })? - } + Error::PieceGetterError { source, mapping } + }) } /// Validate and decode the encoded length of `data`, including the encoded length bytes. @@ -640,12 +663,11 @@ where /// Returns `Ok(Some(data_length_encoded_length + data_length))` if the length is valid, /// `Ok(None)` if there aren't enough bytes to decode the length, otherwise an error. /// - /// The mapping piece index and offset are only used for error reporting. + /// The mapping is only used for error reporting. 
fn decode_data_length( &self, mut data: &[u8], - mapping_piece_index: PieceIndex, - mapping_piece_offset: u32, + mapping: GlobalObject, ) -> Result, Error> { let data_length = match Compact::::decode(&mut data) { Ok(Compact(data_length)) => { @@ -654,16 +676,14 @@ where debug!( data_length, max_object_len = self.max_object_len, - %mapping_piece_index, - mapping_piece_offset, + ?mapping, "Data length exceeds object size limit for object fetcher" ); return Err(Error::ObjectTooLarge { data_length, max_object_len: self.max_object_len, - piece_index: mapping_piece_index, - piece_offset: mapping_piece_offset, + mapping, }); } @@ -677,23 +697,20 @@ where debug!( length_prefix_len = data.len(), max_object_len = self.max_object_len, - %mapping_piece_index, - mapping_piece_offset, + ?mapping, "Length prefix exceeds object size limit for object fetcher" ); return Err(Error::LengthPrefixTooLarge { length_prefix_len: data.len(), max_object_len: self.max_object_len, - piece_index: mapping_piece_index, - piece_offset: mapping_piece_offset, + mapping, }); } debug!( ?err, - %mapping_piece_index, - mapping_piece_offset, + ?mapping, "Not enough bytes to decode data length for object" ); @@ -706,8 +723,7 @@ where trace!( data_length, data_length_encoded_length, - %mapping_piece_index, - mapping_piece_offset, + ?mapping, "Decoded data length for object" ); diff --git a/shared/subspace-data-retrieval/src/piece_fetcher.rs b/shared/subspace-data-retrieval/src/piece_fetcher.rs index 49f13a2ff4..ea702a1606 100644 --- a/shared/subspace-data-retrieval/src/piece_fetcher.rs +++ b/shared/subspace-data-retrieval/src/piece_fetcher.rs @@ -28,7 +28,7 @@ use tracing::{debug, trace}; // This code was copied and modified from subspace_service::sync_from_dsn::download_and_reconstruct_blocks(): // pub async fn download_pieces( - piece_indexes: Vec, + piece_indexes: &Vec, piece_getter: &PG, ) -> anyhow::Result> where diff --git a/test/subspace-test-runtime/src/lib.rs 
b/test/subspace-test-runtime/src/lib.rs index fdbb1cdfd5..db14548917 100644 --- a/test/subspace-test-runtime/src/lib.rs +++ b/test/subspace-test-runtime/src/lib.rs @@ -1109,7 +1109,7 @@ impl FraudProofStorageKeyProvider> for StorageKeyProvider { fn storage_key(req: FraudProofStorageKeyRequest>) -> Vec { match req { FraudProofStorageKeyRequest::InvalidInherentExtrinsicData => { - pallet_domains::BlockInvalidInherentExtrinsicData::::hashed_key().to_vec() + pallet_domains::BlockInherentExtrinsicData::::hashed_key().to_vec() } FraudProofStorageKeyRequest::SuccessfulBundles(domain_id) => { pallet_domains::SuccessfulBundles::::hashed_key_for(domain_id) @@ -1117,7 +1117,9 @@ impl FraudProofStorageKeyProvider> for StorageKeyProvider { FraudProofStorageKeyRequest::DomainAllowlistUpdates(domain_id) => { Messenger::domain_allow_list_update_storage_key(domain_id) } - FraudProofStorageKeyRequest::BlockDigest => sp_domains::system_digest_final_key(), + FraudProofStorageKeyRequest::DomainRuntimeUpgrades => { + pallet_domains::DomainRuntimeUpgrades::::hashed_key().to_vec() + } FraudProofStorageKeyRequest::RuntimeRegistry(runtime_id) => { pallet_domains::RuntimeRegistry::::hashed_key_for(runtime_id) } @@ -1316,14 +1318,30 @@ impl_runtime_apis! { Domains::runtime_id(domain_id) } + fn runtime_upgrades() -> Vec { + Domains::runtime_upgrades() + } + fn domain_instance_data(domain_id: DomainId) -> Option<(DomainInstanceData, NumberFor)> { Domains::domain_instance_data(domain_id) } - fn timestamp() -> Moment{ + fn domain_timestamp() -> Moment { + Domains::timestamp() + } + + fn timestamp() -> Moment { Timestamp::now() } + fn consensus_transaction_byte_fee() -> Balance { + Domains::consensus_transaction_byte_fee() + } + + fn consensus_chain_byte_fee() -> Balance { + DOMAIN_STORAGE_FEE_MULTIPLIER * TransactionFees::transaction_byte_fee() + } + fn domain_tx_range(_: DomainId) -> U256 { U256::MAX } @@ -1367,10 +1385,6 @@ impl_runtime_apis! 
{ Domains::receipt_hash(domain_id, domain_number) } - fn consensus_chain_byte_fee() -> Balance { - DOMAIN_STORAGE_FEE_MULTIPLIER * TransactionFees::transaction_byte_fee() - } - fn latest_confirmed_domain_block(domain_id: DomainId) -> Option<(DomainNumber, DomainHash)>{ Domains::latest_confirmed_domain_block(domain_id) } diff --git a/test/subspace-test-service/src/lib.rs b/test/subspace-test-service/src/lib.rs index 8e0efe006a..f493a68abf 100644 --- a/test/subspace-test-service/src/lib.rs +++ b/test/subspace-test-service/src/lib.rs @@ -809,11 +809,11 @@ impl MockConsensusNode { .is_some()) } - /// Return a future that only resolve if a fraud proof that the given `fraud_proof_predict` + /// Return a future that only resolves if a fraud proof that the given `fraud_proof_predicate` /// return true is submitted to the consensus tx pool pub fn wait_for_fraud_proof( &self, - fraud_proof_predict: FP, + fraud_proof_predicate: FP, ) -> Pin + Send>> where FP: Fn(&FraudProofFor) -> bool + Send + 'static, @@ -833,7 +833,7 @@ impl MockConsensusNode { pallet_domains::Call::submit_fraud_proof { fraud_proof }, ) = ext.function { - if fraud_proof_predict(&fraud_proof) { + if fraud_proof_predicate(&fraud_proof) { break; } }