refactor: organize consensus-core
ncitron committed Aug 26, 2024
1 parent bb38e21 commit ea5d459
Showing 6 changed files with 260 additions and 240 deletions.
5 changes: 0 additions & 5 deletions config/src/config.rs
@@ -10,7 +10,6 @@ use figment::{
use serde::Deserialize;

use common::config::types::Forks;
use consensus_core::calculate_fork_version;

use crate::base::BaseConfig;
use crate::cli::CliConfig;
@@ -73,10 +72,6 @@ impl Config {
}
}

pub fn fork_version(&self, slot: u64) -> Vec<u8> {
calculate_fork_version(&self.forks, slot).to_vec()
}

pub fn to_base_config(&self) -> BaseConfig {
BaseConfig {
rpc_bind_ip: self.rpc_bind_ip.unwrap_or(IpAddr::V4(Ipv4Addr::LOCALHOST)),
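
Note: the removed Config::fork_version helper only wrapped calculate_fork_version and converted the result to a Vec<u8>. A call site that previously asked the config for a fork version would now hand the forks table to consensus-core directly; a minimal, hypothetical sketch (the names config, update, store, slot, and genesis_root are assumptions, not part of this diff):

// Before: let version = config.fork_version(slot);
// After: pass the forks straight into the verification entry point.
verify_update(&update, slot, &store, genesis_root, &config.forks)?;
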
221 changes: 98 additions & 123 deletions consensus-core/src/consensus_core.rs
@@ -2,98 +2,116 @@ use std::cmp;

use alloy::primitives::B256;
use eyre::Result;
use ssz_types::{BitVector, FixedVector};
use ssz_types::BitVector;
use tracing::{info, warn};
use tree_hash::TreeHash;
use zduny_wasm_timer::{SystemTime, UNIX_EPOCH};

use common::config::types::Forks;

use crate::errors::ConsensusError;
use crate::proof::{
is_current_committee_proof_valid, is_finality_proof_valid, is_next_committee_proof_valid,
};
use crate::types::bls::{PublicKey, Signature};
use crate::types::{
FinalityUpdate, GenericUpdate, Header, LightClientStore, OptimisticUpdate, SyncCommittee,
Update,
Bootstrap, FinalityUpdate, GenericUpdate, Header, LightClientStore, OptimisticUpdate, Update
};
use crate::utils::{
calc_sync_period, compute_domain, compute_fork_data_root, compute_signing_root, is_proof_valid,
calculate_fork_version, compute_committee_sign_root, compute_fork_data_root, get_participating_keys
};

pub fn get_participating_keys(
committee: &SyncCommittee,
bitfield: &BitVector<typenum::U512>,
) -> Result<Vec<PublicKey>> {
let mut pks: Vec<PublicKey> = Vec::new();
pub fn verify_bootstrap(bootstrap: &Bootstrap, checkpoint: B256) -> Result<()> {
let committee_valid = is_current_committee_proof_valid(
&bootstrap.header,
&bootstrap.current_sync_committee,
&bootstrap.current_sync_committee_branch,
);

let header_hash = bootstrap.header.tree_hash_root();
let header_valid = header_hash == checkpoint;

bitfield.iter().enumerate().for_each(|(i, bit)| {
if bit {
let pk = committee.pubkeys[i].clone();
pks.push(pk);
}
});
if !header_valid {
return Err(ConsensusError::InvalidHeaderHash(checkpoint, header_hash).into());
}

if !committee_valid {
return Err(ConsensusError::InvalidCurrentSyncCommitteeProof.into());
}

Ok(pks)
Ok(())
}

pub fn get_bits(bitfield: &BitVector<typenum::U512>) -> u64 {
bitfield.iter().filter(|v| *v).count() as u64
pub fn verify_update(
update: &Update,
expected_current_slot: u64,
store: &LightClientStore,
genesis_root: B256,
forks: &Forks,
) -> Result<()> {
let update = GenericUpdate::from(update);
verify_generic_update(&update, expected_current_slot, store, genesis_root, forks)
}

pub fn is_finality_proof_valid(
attested_header: &Header,
finality_header: &Header,
finality_branch: &[B256],
) -> bool {
is_proof_valid(attested_header, finality_header, finality_branch, 6, 41)
pub fn verify_finality_update(
update: &FinalityUpdate,
expected_current_slot: u64,
store: &LightClientStore,
genesis_root: B256,
forks: &Forks,
) -> Result<()> {
let update = GenericUpdate::from(update);
verify_generic_update(&update, expected_current_slot, store, genesis_root, forks)
}

pub fn is_next_committee_proof_valid(
attested_header: &Header,
next_committee: &SyncCommittee,
next_committee_branch: &[B256],
) -> bool {
is_proof_valid(
attested_header,
next_committee,
next_committee_branch,
5,
23,
)
pub fn verify_optimistic_update(
update: &OptimisticUpdate,
expected_current_slot: u64,
store: &LightClientStore,
genesis_root: B256,
forks: &Forks,
) -> Result<()> {
let update = GenericUpdate::from(update);
verify_generic_update(&update, expected_current_slot, store, genesis_root, forks)
}

pub fn is_current_committee_proof_valid(
attested_header: &Header,
current_committee: &SyncCommittee,
current_committee_branch: &[B256],
) -> bool {
is_proof_valid(
attested_header,
current_committee,
current_committee_branch,
5,
22,
)
pub fn apply_bootstrap(store: &mut LightClientStore, bootstrap: &Bootstrap) {
*store = LightClientStore {
finalized_header: bootstrap.header.clone(),
current_sync_committee: bootstrap.current_sync_committee.clone(),
next_sync_committee: None,
optimistic_header: bootstrap.header.clone(),
previous_max_active_participants: 0,
current_max_active_participants: 0,
};
}

pub fn safety_threshold(store: &LightClientStore) -> u64 {
cmp::max(
store.current_max_active_participants,
store.previous_max_active_participants,
) / 2
pub fn apply_update(store: &mut LightClientStore, update: &Update) -> Option<B256> {
let update = GenericUpdate::from(update);
apply_generic_update(store, &update)
}

pub fn has_sync_update(update: &GenericUpdate) -> bool {
update.next_sync_committee.is_some() && update.next_sync_committee_branch.is_some()
pub fn apply_finality_update(
store: &mut LightClientStore,
update: &FinalityUpdate,
) -> Option<B256> {
let update = GenericUpdate::from(update);
apply_generic_update(store, &update)
}

pub fn has_finality_update(update: &GenericUpdate) -> bool {
update.finalized_header.is_some() && update.finality_branch.is_some()
pub fn apply_optimistic_update(
store: &mut LightClientStore,
update: &OptimisticUpdate,
) -> Option<B256> {
let update = GenericUpdate::from(update);
apply_generic_update(store, &update)
}


// implements state changes from apply_light_client_update and process_light_client_update in
// the specification
/// Returns the new checkpoint if one is created, otherwise None
pub fn apply_generic_update(store: &mut LightClientStore, update: &GenericUpdate) -> Option<B256> {
fn apply_generic_update(store: &mut LightClientStore, update: &GenericUpdate) -> Option<B256> {
let committee_bits = get_bits(&update.sync_aggregate.sync_committee_bits);

store.current_max_active_participants =
@@ -169,7 +187,7 @@ pub fn apply_generic_update(store: &mut LightClientStore, update: &GenericUpdate

// implements checks from validate_light_client_update and process_light_client_update in the
// specification
pub fn verify_generic_update(
fn verify_generic_update(
update: &GenericUpdate,
expected_current_slot: u64,
store: &LightClientStore,
@@ -259,59 +277,33 @@ pub fn verify_generic_update(
Ok(())
}

pub fn verify_update(
update: &Update,
expected_current_slot: u64,
store: &LightClientStore,
genesis_root: B256,
forks: &Forks,
) -> Result<()> {
let update = GenericUpdate::from(update);

verify_generic_update(&update, expected_current_slot, store, genesis_root, forks)
}

pub fn verify_finality_update(
update: &FinalityUpdate,
expected_current_slot: u64,
store: &LightClientStore,
genesis_root: B256,
forks: &Forks,
) -> Result<()> {
let update = GenericUpdate::from(update);
pub fn expected_current_slot(now: SystemTime, genesis_time: u64) -> u64 {
let now = now.duration_since(UNIX_EPOCH).unwrap();
let since_genesis = now - std::time::Duration::from_secs(genesis_time);

verify_generic_update(&update, expected_current_slot, store, genesis_root, forks)
since_genesis.as_secs() / 12
}

pub fn apply_update(store: &mut LightClientStore, update: &Update) -> Option<B256> {
let update = GenericUpdate::from(update);
apply_generic_update(store, &update)
pub fn calc_sync_period(slot: u64) -> u64 {
// 32 slots per epoch
let epoch = slot / 32;
// 256 epochs per sync committee
epoch / 256
}

pub fn apply_finality_update(
store: &mut LightClientStore,
update: &FinalityUpdate,
) -> Option<B256> {
let update = GenericUpdate::from(update);
apply_generic_update(store, &update)
pub fn get_bits(bitfield: &BitVector<typenum::U512>) -> u64 {
bitfield.iter().filter(|v| *v).count() as u64
}

pub fn apply_optimistic_update(
store: &mut LightClientStore,
update: &OptimisticUpdate,
) -> Option<B256> {
let update = GenericUpdate::from(update);
apply_generic_update(store, &update)
fn has_sync_update(update: &GenericUpdate) -> bool {
update.next_sync_committee.is_some() && update.next_sync_committee_branch.is_some()
}

pub fn expected_current_slot(now: SystemTime, genesis_time: u64) -> u64 {
let now = now.duration_since(UNIX_EPOCH).unwrap();
let since_genesis = now - std::time::Duration::from_secs(genesis_time);

since_genesis.as_secs() / 12
fn has_finality_update(update: &GenericUpdate) -> bool {
update.finalized_header.is_some() && update.finality_branch.is_some()
}

pub fn verify_sync_committee_signture(
fn verify_sync_committee_signture(
pks: &[PublicKey],
attested_header: &Header,
signature: &Signature,
@@ -322,26 +314,9 @@ pub fn verify_sync_committee_signture(
signature.verify(signing_root.as_slice(), pks)
}

pub fn compute_committee_sign_root(header: B256, fork_data_root: B256) -> B256 {
let domain_type = [7, 00, 00, 00];
let domain = compute_domain(domain_type, fork_data_root);
compute_signing_root(header, domain)
}

pub fn calculate_fork_version(forks: &Forks, slot: u64) -> FixedVector<u8, typenum::U4> {
let epoch = slot / 32;

let version = if epoch >= forks.deneb.epoch {
forks.deneb.fork_version
} else if epoch >= forks.capella.epoch {
forks.capella.fork_version
} else if epoch >= forks.bellatrix.epoch {
forks.bellatrix.fork_version
} else if epoch >= forks.altair.epoch {
forks.altair.fork_version
} else {
forks.genesis.fork_version
};

FixedVector::from(version.as_slice().to_vec())
fn safety_threshold(store: &LightClientStore) -> u64 {
cmp::max(
store.current_max_active_participants,
store.previous_max_active_participants,
) / 2
}
4 changes: 3 additions & 1 deletion consensus-core/src/lib.rs
@@ -1,6 +1,8 @@
pub mod errors;
pub mod types;
pub mod utils;

mod consensus_core;
mod proof;
mod utils;

pub use crate::consensus_core::*;
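
With proof and utils made private modules, the diff suggests downstream code reaches the verification functions through the crate root and the still-public errors and types modules; an illustrative import, with the exact item paths assumed from the module list above:

use consensus_core::{apply_finality_update, verify_finality_update};
use consensus_core::errors::ConsensusError;
use consensus_core::types::{FinalityUpdate, LightClientStore};
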
70 changes: 70 additions & 0 deletions consensus-core/src/proof.rs
@@ -0,0 +1,70 @@
use alloy::primitives::B256;
use sha2::{Digest, Sha256};
use tree_hash::TreeHash;

use crate::types::{Header, SyncCommittee};

pub fn is_finality_proof_valid(
attested_header: &Header,
finality_header: &Header,
finality_branch: &[B256],
) -> bool {
is_proof_valid(attested_header, finality_header, finality_branch, 6, 41)
}

pub fn is_next_committee_proof_valid(
attested_header: &Header,
next_committee: &SyncCommittee,
next_committee_branch: &[B256],
) -> bool {
is_proof_valid(
attested_header,
next_committee,
next_committee_branch,
5,
23,
)
}

pub fn is_current_committee_proof_valid(
attested_header: &Header,
current_committee: &SyncCommittee,
current_committee_branch: &[B256],
) -> bool {
is_proof_valid(
attested_header,
current_committee,
current_committee_branch,
5,
22,
)
}

fn is_proof_valid<T: TreeHash>(
attested_header: &Header,
leaf_object: &T,
branch: &[B256],
depth: usize,
index: usize,
) -> bool {
if branch.len() != depth {
return false;
}

let mut derived_root = leaf_object.tree_hash_root();
let mut hasher = Sha256::new();

for (i, node) in branch.iter().enumerate() {
if (index / 2usize.pow(i as u32)) % 2 != 0 {
hasher.update(node);
hasher.update(derived_root);
} else {
hasher.update(derived_root);
hasher.update(node);
}

derived_root = B256::from_slice(&hasher.finalize_reset());
}

derived_root == attested_header.state_root
}
