From 923486f34c24340e675436b5f43d6ba4be91157e Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Wed, 28 Jul 2021 05:40:21 +0000
Subject: [PATCH] Use bulk verification for sync_aggregate signature (#2415)

## Proposed Changes

Add the `sync_aggregate` from `BeaconBlock` to the bulk signature verifier for blocks. This necessitates a new signature set constructor for the sync aggregate, which differs from the others because sync aggregates are verified with [`eth2_fast_aggregate_verify`](https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.7/specs/altair/bls.md#eth2_fast_aggregate_verify), per [`process_sync_aggregate`](https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.7/specs/altair/beacon-chain.md#sync-aggregate-processing). I chose to return an optional signature set, with `None` representing the case where the signature is valid by virtue of being the point at infinity and therefore requires no further checking.

To "dogfood" the changes and prevent duplication, the consensus logic now uses the signature set approach as well whenever it needs to verify signatures (which should only happen in testing, AFAIK). The EF tests pass with the current code, and reassuringly they failed before I adopted the `eth2_fast_aggregate_verify` semantics.

As a result of this change, Altair block processing should be a little faster, and, importantly, we will no longer accidentally verify signatures when replaying blocks from the database.
---
 .../beacon_chain/src/block_verification.rs    | 44 ++++++-----
 .../src/per_block_processing.rs               | 10 ++-
 .../altair/sync_committee.rs                  | 61 +++++++--------
 .../block_signature_verifier.rs               | 45 ++++++++---
 .../per_block_processing/signature_sets.rs    | 74 ++++++++++++++++++-
 crypto/bls/src/generic_aggregate_signature.rs | 17 ++---
 crypto/bls/tests/tests.rs                     | 12 +++
 testing/ef_tests/src/cases/operations.rs      |  2 +-
 8 files changed, 186 insertions(+), 79 deletions(-)

diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index ce8826f2505..934c852540a 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -69,7 +69,8 @@ use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp
 use tree_hash::TreeHash;
 use types::{
     BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256,
-    InconsistentFork, PublicKey, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
+    InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock,
+    SignedBeaconBlockHeader, Slot,
 };

 /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed.
@@ -1394,22 +1395,31 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>(
     state: &'a BeaconState<T::EthSpec>,
     validator_pubkey_cache: &'a ValidatorPubkeyCache<T>,
     spec: &'a ChainSpec,
-) -> BlockSignatureVerifier<'a, T::EthSpec, impl Fn(usize) -> Option<Cow<'a, PublicKey>> + Clone> {
-    BlockSignatureVerifier::new(
-        state,
-        move |validator_index| {
-            // Disallow access to any validator pubkeys that are not in the current beacon
-            // state.
- if validator_index < state.validators().len() { - validator_pubkey_cache - .get(validator_index) - .map(|pk| Cow::Borrowed(pk)) - } else { - None - } - }, - spec, - ) +) -> BlockSignatureVerifier< + 'a, + T::EthSpec, + impl Fn(usize) -> Option> + Clone, + impl Fn(&'a PublicKeyBytes) -> Option>, +> { + let get_pubkey = move |validator_index| { + // Disallow access to any validator pubkeys that are not in the current beacon state. + if validator_index < state.validators().len() { + validator_pubkey_cache + .get(validator_index) + .map(Cow::Borrowed) + } else { + None + } + }; + + let decompressor = move |pk_bytes| { + // Map compressed pubkey to validator index. + let validator_index = validator_pubkey_cache.get_index(pk_bytes)?; + // Map validator index to pubkey (respecting guard on unknown validators). + get_pubkey(validator_index) + }; + + BlockSignatureVerifier::new(state, get_pubkey, decompressor, spec) } /// Verify that `header` was signed with a valid signature from its proposer. diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 57714e68dd5..a38382d2680 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -2,6 +2,7 @@ use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; use safe_arith::{ArithError, SafeArith}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; +use std::borrow::Cow; use tree_hash::TreeHash; use types::*; @@ -102,6 +103,7 @@ pub fn per_block_processing( BlockSignatureVerifier::verify_entire_block( state, |i| get_pubkey_from_state(state, i), + |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned), signed_block, block_root, spec @@ -130,7 +132,13 @@ pub fn per_block_processing( process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; if let BeaconBlockRef::Altair(inner) = block { - process_sync_aggregate(state, &inner.body.sync_aggregate, proposer_index, spec)?; + process_sync_aggregate( + state, + &inner.body.sync_aggregate, + proposer_index, + verify_signatures, + spec, + )?; } Ok(()) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 7c8714386c3..ac1e247e3fb 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,55 +1,44 @@ use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; +use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures}; use safe_arith::SafeArith; -use tree_hash::TreeHash; +use std::borrow::Cow; use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR}; -use types::{BeaconState, ChainSpec, Domain, EthSpec, SigningData, SyncAggregate, Unsigned}; +use types::{BeaconState, ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned}; pub fn process_sync_aggregate( state: &mut BeaconState, aggregate: &SyncAggregate, proposer_index: u64, + verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - // Verify sync committee aggregate signature signing over the previous slot block root - let previous_slot = 
state.slot().saturating_sub(1u64); - let current_sync_committee = state.current_sync_committee()?.clone(); - let committee_pubkeys = ¤t_sync_committee.pubkeys; - let participant_pubkeys = committee_pubkeys - .iter() - .zip(aggregate.sync_committee_bits.iter()) - .flat_map(|(pubkey, bit)| { - if bit { - // FIXME(altair): accelerate pubkey decompression with a cache - Some(pubkey.decompress()) - } else { - None - } - }) - .collect::, _>>() - .map_err(|_| SyncAggregateInvalid::PubkeyInvalid)?; + // Verify sync committee aggregate signature signing over the previous slot block root + if verify_signatures.is_true() { + // This decompression could be avoided with a cache, but we're not likely + // to encounter this case in practice due to the use of pre-emptive signature + // verification (which uses the `ValidatorPubkeyCache`). + let decompressor = |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned); - let domain = spec.get_domain( - previous_slot.epoch(T::slots_per_epoch()), - Domain::SyncCommittee, - &state.fork(), - state.genesis_validators_root(), - ); + // Check that the signature is over the previous block root. + let previous_slot = state.slot().saturating_sub(1u64); + let previous_block_root = *state.get_block_root(previous_slot)?; - let signing_root = SigningData { - object_root: *state.get_block_root(previous_slot)?, - domain, - } - .tree_hash_root(); + let signature_set = sync_aggregate_signature_set( + decompressor, + aggregate, + state.slot(), + previous_block_root, + state, + spec, + )?; - let pubkey_refs = participant_pubkeys.iter().collect::>(); - if !aggregate - .sync_committee_signature - .eth2_fast_aggregate_verify(signing_root, &pubkey_refs) - { - return Err(SyncAggregateInvalid::SignatureInvalid.into()); + // If signature set is `None` then the signature is valid (infinity). + if signature_set.map_or(false, |signature| !signature.verify()) { + return Err(SyncAggregateInvalid::SignatureInvalid.into()); + } } // Compute participant and proposer rewards diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 3a1f6002c3b..d63bee47aa0 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -3,7 +3,7 @@ use super::signature_sets::{Error as SignatureSetError, *}; use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; -use bls::{verify_signature_sets, PublicKey, SignatureSet}; +use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ @@ -63,27 +63,36 @@ impl From> for Error { /// /// This allows for optimizations related to batch BLS operations (see the /// `Self::verify_entire_block(..)` function). -pub struct BlockSignatureVerifier<'a, T, F> +pub struct BlockSignatureVerifier<'a, T, F, D> where T: EthSpec, F: Fn(usize) -> Option> + Clone, + D: Fn(&'a PublicKeyBytes) -> Option>, { get_pubkey: F, + decompressor: D, state: &'a BeaconState, spec: &'a ChainSpec, sets: Vec>, } -impl<'a, T, F> BlockSignatureVerifier<'a, T, F> +impl<'a, T, F, D> BlockSignatureVerifier<'a, T, F, D> where T: EthSpec, F: Fn(usize) -> Option> + Clone, + D: Fn(&'a PublicKeyBytes) -> Option>, { /// Create a new verifier without any included signatures. 
See the `include...` functions to /// add signatures, and the `verify` - pub fn new(state: &'a BeaconState, get_pubkey: F, spec: &'a ChainSpec) -> Self { + pub fn new( + state: &'a BeaconState, + get_pubkey: F, + decompressor: D, + spec: &'a ChainSpec, + ) -> Self { Self { get_pubkey, + decompressor, state, spec, sets: vec![], @@ -100,11 +109,12 @@ where pub fn verify_entire_block( state: &'a BeaconState, get_pubkey: F, + decompressor: D, block: &'a SignedBeaconBlock, block_root: Option, spec: &'a ChainSpec, ) -> Result<()> { - let mut verifier = Self::new(state, get_pubkey, spec); + let mut verifier = Self::new(state, get_pubkey, decompressor, spec); verifier.include_all_signatures(block, block_root)?; verifier.verify() } @@ -146,12 +156,7 @@ where block_root: Option, ) -> Result<()> { self.include_block_proposal(block, block_root)?; - self.include_randao_reveal(block)?; - self.include_proposer_slashings(block)?; - self.include_attester_slashings(block)?; - self.include_attestations(block)?; - // Deposits are not included because they can legally have invalid signatures. - self.include_exits(block)?; + self.include_all_signatures_except_proposal(block)?; Ok(()) } @@ -168,6 +173,7 @@ where self.include_attestations(block)?; // Deposits are not included because they can legally have invalid signatures. self.include_exits(block)?; + self.include_sync_aggregate(block)?; Ok(()) } @@ -308,4 +314,21 @@ where Ok(()) }) } + + /// Include the signature of the block's sync aggregate (if it exists) for verification. + pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + if let Some(sync_aggregate) = block.message().body().sync_aggregate() { + if let Some(signature_set) = sync_aggregate_signature_set( + &self.decompressor, + sync_aggregate, + block.slot(), + block.parent_root(), + &self.state, + &self.spec, + )? { + self.sets.push(signature_set); + } + } + Ok(()) + } } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 46eef6b65ac..2a5d6de17c0 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -11,7 +11,7 @@ use types::{ DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot, - SignedVoluntaryExit, SigningData, SyncAggregatorSelectionData, Unsigned, + SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -36,6 +36,8 @@ pub enum Error { /// The public keys supplied do not match the number of objects requiring keys. Block validity /// was not determined. MismatchedPublicKeyLen { pubkey_len: usize, other_len: usize }, + /// Pubkey decompression failed. The block is invalid. + PublicKeyDecompressionFailed, /// The public key bytes stored in the `BeaconState` were not valid. This is a serious internal /// error. BadBlsBytes { validator_index: u64 }, @@ -517,3 +519,73 @@ where Ok(SignatureSet::single_pubkey(signature, pubkey, message)) } + +/// Signature set verifier for a block's `sync_aggregate` (Altair and later). +/// +/// The `slot` should be the slot of the block that the sync aggregate is included in, which may be +/// different from `state.slot()`. 
The `block_root` should be the block root that the sync aggregate +/// signs over. It's passed in rather than extracted from the `state` because when verifying a batch +/// of blocks the `state` will not yet have had the blocks applied. +/// +/// Returns `Ok(None)` in the case where `sync_aggregate` has 0 signatures. The spec +/// uses a separate function `eth2_fast_aggregate_verify` for this, but we can equivalently +/// check the exceptional case eagerly and do a `fast_aggregate_verify` in the case where the +/// check fails (by returning `Some(signature_set)`). +pub fn sync_aggregate_signature_set<'a, T, D>( + decompressor: D, + sync_aggregate: &'a SyncAggregate, + slot: Slot, + block_root: Hash256, + state: &'a BeaconState, + spec: &ChainSpec, +) -> Result>> +where + T: EthSpec, + D: Fn(&'a PublicKeyBytes) -> Option>, +{ + // Allow the point at infinity to count as a signature for 0 validators as per + // `eth2_fast_aggregate_verify` from the spec. + if sync_aggregate.sync_committee_bits.is_zero() + && sync_aggregate.sync_committee_signature.is_infinity() + { + return Ok(None); + } + + let committee_pubkeys = &state + .get_built_sync_committee(slot.epoch(T::slots_per_epoch()), spec)? + .pubkeys; + + let participant_pubkeys = committee_pubkeys + .iter() + .zip(sync_aggregate.sync_committee_bits.iter()) + .filter_map(|(pubkey, bit)| { + if bit { + Some(decompressor(pubkey)) + } else { + None + } + }) + .collect::>>() + .ok_or(Error::PublicKeyDecompressionFailed)?; + + let previous_slot = slot.saturating_sub(1u64); + + let domain = spec.get_domain( + previous_slot.epoch(T::slots_per_epoch()), + Domain::SyncCommittee, + &state.fork(), + state.genesis_validators_root(), + ); + + let message = SigningData { + object_root: block_root, + domain, + } + .tree_hash_root(); + + Ok(Some(SignatureSet::multiple_pubkeys( + &sync_aggregate.sync_committee_signature, + participant_pubkeys, + message, + ))) +} diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 7c3361bc126..62f6349e872 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -110,6 +110,11 @@ where self.point.is_none() } + /// Returns `true` if `self` is equal to the point at infinity. + pub fn is_infinity(&self) -> bool { + self.is_infinity + } + /// Returns a reference to the underlying BLS point. pub(crate) fn point(&self) -> Option<&AggSig> { self.point.as_ref() @@ -189,18 +194,6 @@ where } } - /// Wrapper to `fast_aggregate_verify` accepting the infinity signature when `pubkeys` is empty. - pub fn eth2_fast_aggregate_verify( - &self, - msg: Hash256, - pubkeys: &[&GenericPublicKey], - ) -> bool { - if pubkeys.is_empty() && self.is_infinity { - return true; - } - self.fast_aggregate_verify(msg, pubkeys) - } - /// Verify that `self` represents an aggregate signature where all `pubkeys` have signed their /// corresponding message in `msgs`. /// diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index 3289f0ab229..ad498dbfa87 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -34,6 +34,7 @@ macro_rules! test_suite { AggregateSignature::deserialize(&INFINITY_SIGNATURE).unwrap(), AggregateSignature::infinity(), ); + assert!(AggregateSignature::infinity().is_infinity()); } #[test] @@ -297,6 +298,17 @@ macro_rules! test_suite { .assert_single_message_verify(true) } + /// Adding two infinity signatures should yield the infinity signature. 
+    #[test]
+    fn add_two_infinity_signatures() {
+        let tester = AggregateSignatureTester::new_with_single_msg(1)
+            .infinity_sig()
+            .aggregate_infinity_sig();
+        assert!(tester.sig.is_infinity());
+        assert_eq!(tester.sig, AggregateSignature::infinity());
+        tester.assert_single_message_verify(false)
+    }
+
     /// The wrong signature should not verify.
     #[test]
     fn fast_aggregate_verify_wrong_signature() {
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index c9fbec0cbe2..293195662db 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -198,7 +198,7 @@ impl<E: EthSpec> Operation<E> for SyncAggregate<E> {
         spec: &ChainSpec,
     ) -> Result<(), BlockProcessingError> {
         let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64;
-        process_sync_aggregate(state, self, proposer_index, spec)
+        process_sync_aggregate(state, self, proposer_index, VerifySignatures::True, spec)
     }
 }
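
## Usage sketches

The new constructor returns `Ok(None)` when the aggregate is the point at infinity with no participation bits, so callers only verify a `SignatureSet` when there is real work to do. Below is a minimal sketch of how a caller consumes that `Option`, mirroring the new `process_sync_aggregate` code; the helper name `sync_aggregate_is_valid` and the exact import paths are illustrative, not part of the diff:

```rust
use std::borrow::Cow;
// Import paths are approximate; the constructor lives in
// `per_block_processing/signature_sets.rs` within the `state_processing` crate.
use state_processing::signature_sets::sync_aggregate_signature_set;
use types::{BeaconState, ChainSpec, EthSpec, PublicKeyBytes, SignedBeaconBlock};

/// Hypothetical helper: verify a single block's sync aggregate on its own.
fn sync_aggregate_is_valid<T: EthSpec>(
    block: &SignedBeaconBlock<T>,
    state: &BeaconState<T>,
    spec: &ChainSpec,
) -> bool {
    // Decompress pubkeys on the fly; the beacon node instead supplies a closure
    // backed by its `ValidatorPubkeyCache` (see `get_signature_verifier`).
    let decompressor = |pk_bytes: &PublicKeyBytes| pk_bytes.decompress().ok().map(Cow::Owned);

    match block.message().body().sync_aggregate() {
        // Pre-Altair blocks carry no sync aggregate, so there is nothing to check.
        None => true,
        Some(sync_aggregate) => match sync_aggregate_signature_set(
            decompressor,
            sync_aggregate,
            block.slot(),
            block.parent_root(),
            state,
            spec,
        ) {
            // `None` means zero participation bits plus the point-at-infinity
            // signature, which `eth2_fast_aggregate_verify` accepts outright.
            Ok(None) => true,
            // Otherwise the set verifies like any other signature set (or gets
            // pushed into the bulk verifier's `sets` vector for batching).
            Ok(Some(signature_set)) => signature_set.verify(),
            // Unknown sync committee, pubkey decompression failure, etc.
            Err(_) => false,
        },
    }
}
```

Returning an `Option` rather than a dedicated "always valid" set keeps the `eth2_fast_aggregate_verify` special case out of the batch verifier entirely.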
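On the block-import path the same set rides along with every other block signature in one batched BLS verification. A rough sketch of the call shape after this change, using the consensus-crate helpers rather than the beacon node's `ValidatorPubkeyCache` (module paths again approximate):

```rust
use std::borrow::Cow;
// Paths approximate; both items are defined in the `state_processing` crate.
use state_processing::{
    block_signature_verifier::BlockSignatureVerifier,
    signature_sets::get_pubkey_from_state,
};
use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock};

/// Hypothetical wrapper: batch-verify every signature in a block, including the
/// newly added sync aggregate set, in a single bulk verification.
fn verify_block_signatures<T: EthSpec>(
    state: &BeaconState<T>,
    block: &SignedBeaconBlock<T>,
    block_root: Option<Hash256>,
    spec: &ChainSpec,
) -> bool {
    BlockSignatureVerifier::verify_entire_block(
        state,
        // Indexed lookups (proposer, attesters, exits, ...) come from the state.
        |i| get_pubkey_from_state(state, i),
        // Sync committee pubkeys are stored compressed, so the verifier also
        // needs a decompressor; the beacon node passes a cache-backed closure.
        |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned),
        block,
        block_root,
        spec,
    )
    .is_ok()
}
```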