Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

support for v2 txhashset (continuing to use v1 in mainnet and floonet) #3051

Merged
merged 1 commit into from
Sep 24, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 20 additions & 6 deletions chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{
Block, BlockHeader, BlockSums, Committed, Output, OutputIdentifier, Transaction, TxKernel,
TxKernelEntry,
};
use crate::core::global;
use crate::core::pow;
Expand Down Expand Up @@ -176,9 +175,24 @@ impl Chain {
// open the txhashset, creating a new one if necessary
let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;

let mut header_pmmr =
PMMRHandle::new(&db_root, "header", "header_head", false, true, None)?;
let mut sync_pmmr = PMMRHandle::new(&db_root, "header", "sync_head", false, true, None)?;
let mut header_pmmr = PMMRHandle::new(
&db_root,
"header",
"header_head",
false,
true,
ProtocolVersion(1),
None,
)?;
let mut sync_pmmr = PMMRHandle::new(
&db_root,
"header",
"sync_head",
false,
true,
ProtocolVersion(1),
None,
)?;

setup_head(
&genesis,
Expand Down Expand Up @@ -661,7 +675,7 @@ impl Chain {
pub fn kernel_data_write(&self, reader: &mut dyn Read) -> Result<(), Error> {
let mut count = 0;
let mut stream = StreamingReader::new(reader, ProtocolVersion::local());
while let Ok(_kernel) = TxKernelEntry::read(&mut stream) {
while let Ok(_kernel) = TxKernel::read(&mut stream) {
count += 1;
}

Expand Down Expand Up @@ -1146,7 +1160,7 @@ impl Chain {
}

/// as above, for kernels
pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernelEntry)> {
pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
self.txhashset.read().last_n_kernel(distance)
}

Expand Down
111 changes: 76 additions & 35 deletions chain/src/txhashset/txhashset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,8 @@ use crate::core::core::committed::Committed;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR};
use crate::core::core::{
Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel, TxKernelEntry,
};
use crate::core::ser::{PMMRIndexHashable, PMMRable};
use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel};
use crate::core::ser::{PMMRIndexHashable, PMMRable, ProtocolVersion};
use crate::error::{Error, ErrorKind};
use crate::store::{Batch, ChainStore};
use crate::txhashset::{RewindableKernelView, UTXOView};
Expand Down Expand Up @@ -62,14 +60,16 @@ impl<T: PMMRable> PMMRHandle<T> {
file_name: &str,
prunable: bool,
fixed_size: bool,
version: ProtocolVersion,
header: Option<&BlockHeader>,
) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(root_dir).join(sub_dir).join(file_name);
fs::create_dir_all(path.clone())?;
let path_str = path.to_str().ok_or(Error::from(ErrorKind::Other(
"invalid file path".to_owned(),
)))?;
let backend = PMMRBackend::new(path_str.to_string(), prunable, fixed_size, header)?;
let backend =
PMMRBackend::new(path_str.to_string(), prunable, fixed_size, version, header)?;
let last_pos = backend.unpruned_size();
Ok(PMMRHandle { backend, last_pos })
}
Expand Down Expand Up @@ -113,33 +113,78 @@ impl TxHashSet {
commit_index: Arc<ChainStore>,
header: Option<&BlockHeader>,
) -> Result<TxHashSet, Error> {
Ok(TxHashSet {
output_pmmr_h: PMMRHandle::new(
&root_dir,
TXHASHSET_SUBDIR,
OUTPUT_SUBDIR,
true,
true,
header,
)?,
rproof_pmmr_h: PMMRHandle::new(
&root_dir,
TXHASHSET_SUBDIR,
RANGE_PROOF_SUBDIR,
true,
true,
header,
)?,
kernel_pmmr_h: PMMRHandle::new(
let output_pmmr_h = PMMRHandle::new(
&root_dir,
TXHASHSET_SUBDIR,
OUTPUT_SUBDIR,
true,
true,
ProtocolVersion(1),
header,
)?;

let rproof_pmmr_h = PMMRHandle::new(
&root_dir,
TXHASHSET_SUBDIR,
RANGE_PROOF_SUBDIR,
true,
true,
ProtocolVersion(1),
header,
)?;

let mut maybe_kernel_handle: Option<PMMRHandle<TxKernel>> = None;
let versions = vec![ProtocolVersion(2), ProtocolVersion(1)];
for version in versions {
let handle = PMMRHandle::new(
&root_dir,
TXHASHSET_SUBDIR,
KERNEL_SUBDIR,
false, // not prunable
false, // variable size kernel data file
version,
None,
)?,
commit_index,
})
)?;
if handle.last_pos == 0 {
debug!(
"attempting to open (empty) kernel PMMR using {:?} - SUCCESS",
version
);
maybe_kernel_handle = Some(handle);
break;
}
let kernel: Option<TxKernel> = ReadonlyPMMR::at(&handle.backend, 1).get_data(1);
if let Some(kernel) = kernel {
if kernel.verify().is_ok() {
debug!(
"attempting to open kernel PMMR using {:?} - SUCCESS",
version
);
maybe_kernel_handle = Some(handle);
break;
} else {
debug!(
"attempting to open kernel PMMR using {:?} - FAIL (verify failed)",
version
);
}
} else {
debug!(
"attempting to open kernel PMMR using {:?} - FAIL (read failed)",
version
);
}
}
if let Some(kernel_pmmr_h) = maybe_kernel_handle {
Ok(TxHashSet {
output_pmmr_h,
rproof_pmmr_h,
kernel_pmmr_h,
commit_index,
})
} else {
Err(ErrorKind::TxHashSetErr(format!("failed to open kernel PMMR")).into())
}
}

/// Close all backend file handles
Expand Down Expand Up @@ -192,7 +237,7 @@ impl TxHashSet {
}

/// as above, for kernels
pub fn last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernelEntry)> {
pub fn last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos)
.get_last_n_insertions(distance)
}
Expand Down Expand Up @@ -247,9 +292,9 @@ impl TxHashSet {
let mut index = max_index + 1;
while index > min_index {
index -= 1;
if let Some(t) = pmmr.get_data(index) {
if &t.kernel.excess == excess {
return Some((t.kernel, index));
if let Some(kernel) = pmmr.get_data(index) {
if &kernel.excess == excess {
return Some((kernel, index));
}
}
}
Expand All @@ -258,8 +303,6 @@ impl TxHashSet {

/// Get MMR roots.
pub fn roots(&self) -> TxHashSetRoots {
// let header_pmmr =
// ReadonlyPMMR::at(&self.header_pmmr_h.backend, self.header_pmmr_h.last_pos);
let output_pmmr =
ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
let rproof_pmmr =
Expand All @@ -268,7 +311,6 @@ impl TxHashSet {
ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);

TxHashSetRoots {
// header_root: header_pmmr.root(),
output_root: output_pmmr.root(),
rproof_root: rproof_pmmr.root(),
kernel_root: kernel_pmmr.root(),
Expand Down Expand Up @@ -1192,8 +1234,7 @@ impl<'a> Extension<'a> {
.kernel_pmmr
.get_data(n)
.ok_or::<Error>(ErrorKind::TxKernelNotFound.into())?;

tx_kernels.push(kernel.kernel);
tx_kernels.push(kernel);
}

if tx_kernels.len() >= KERNEL_BATCH_SIZE || n >= self.kernel_pmmr.unpruned_size() {
Expand Down
70 changes: 11 additions & 59 deletions core/src/core/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -344,15 +344,22 @@ impl Readable for TxKernel {
}
}

/// We store kernels in the kernel MMR.
/// Note: These are "variable size" to support different kernel feature variants.
impl PMMRable for TxKernel {
    // The kernel is stored in the MMR directly (no wrapper entry type).
    type E = Self;

    /// The MMR element for a kernel is simply a clone of the kernel itself.
    fn as_elmt(&self) -> Self::E {
        self.clone()
    }
}

/// Kernels are "variable size" but we need to implement FixedLength for legacy reasons.
/// At some point we will refactor the MMR backend so this is no longer required.
/// LEN = 0 here — presumably the variable-size backend path ignores it and tracks
/// per-element sizes in a separate size file; confirm against the store backend.
impl FixedLength for TxKernel {
    const LEN: usize = 0;
}

impl KernelFeatures {
/// Is this a coinbase kernel?
pub fn is_coinbase(&self) -> bool {
Expand Down Expand Up @@ -494,61 +501,6 @@ impl TxKernel {
}
}

/// Wrapper around a tx kernel used when maintaining them in the MMR.
/// These will be useful once we implement relative lockheights via relative kernels
/// as a kernel may have an optional rel_kernel but we will not want to store these
/// directly in the kernel MMR.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxKernelEntry {
    /// The underlying tx kernel.
    pub kernel: TxKernel,
}

/// Serialize an entry by writing the wrapped kernel directly, so the
/// on-disk bytes are identical to a bare `TxKernel`.
impl Writeable for TxKernelEntry {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        self.kernel.write(writer)
    }
}

/// Deserialize an entry by reading a bare `TxKernel` and wrapping it.
impl Readable for TxKernelEntry {
    fn read(reader: &mut dyn Reader) -> Result<TxKernelEntry, ser::Error> {
        TxKernel::read(reader).map(|kernel| TxKernelEntry { kernel })
    }
}

impl TxKernelEntry {
/// The excess on the underlying tx kernel.
pub fn excess(&self) -> Commitment {
self.kernel.excess
}

/// Verify the underlying tx kernel.
pub fn verify(&self) -> Result<(), Error> {
self.kernel.verify()
}

/// Build a new tx kernel entry from a kernel.
pub fn from_kernel(kernel: &TxKernel) -> TxKernelEntry {
TxKernelEntry {
kernel: kernel.clone(),
}
}
}

impl From<TxKernel> for TxKernelEntry {
fn from(kernel: TxKernel) -> Self {
TxKernelEntry { kernel }
}
}

/// Fixed serialized size of a kernel entry:
/// 17 bytes for features, fee and lock_height (presumably 1 + 8 + 8 — confirm
/// against TxKernel serialization), plus the excess commitment and the
/// aggregated signature.
impl FixedLength for TxKernelEntry {
    const LEN: usize = 17 // features plus fee and lock_height
        + secp::constants::PEDERSEN_COMMITMENT_SIZE
        + secp::constants::AGG_SIGNATURE_SIZE;
}

/// Enum of possible tx weight verification options -
///
/// * As "transaction" checks tx (as block) weight does not exceed max_block_weight.
Expand Down
6 changes: 1 addition & 5 deletions store/src/pmmr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -204,13 +204,9 @@ impl<T: PMMRable> PMMRBackend<T> {
data_dir: P,
prunable: bool,
fixed_size: bool,
version: ProtocolVersion,
header: Option<&BlockHeader>,
) -> io::Result<PMMRBackend<T>> {
// Note: Explicit protocol version here.
// Regardless of our "default" protocol version we have existing MMR files
// and we need to be able to support these across upgrades.
let version = ProtocolVersion(1);

let data_dir = data_dir.as_ref();

// Are we dealing with "fixed size" data elements or "variable size" data elements
Expand Down
15 changes: 14 additions & 1 deletion store/src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,17 @@ pub struct AppendOnlyFile<T> {
_marker: marker::PhantomData<T>,
}

impl AppendOnlyFile<SizeEntry> {
    /// Sum of `size` over every entry in positions `[0, buffer_start_pos)`.
    /// Used to cross-check the size file against the data file length.
    fn sum_sizes(&self) -> io::Result<u64> {
        (0..self.buffer_start_pos).try_fold(0u64, |acc, pos| {
            let entry = self.read_as_elmt(pos)?;
            Ok(acc + entry.size as u64)
        })
    }
}

impl<T> AppendOnlyFile<T>
where
T: Debug + Readable + Writeable,
Expand Down Expand Up @@ -215,8 +226,9 @@ where
// This will occur during "fast sync" as we do not sync the size_file
// and must build it locally.
// And we can *only* do this after init() the data file (so we know sizes).
let expected_size = aof.size()?;
if let SizeInfo::VariableSize(ref mut size_file) = &mut aof.size_info {
if size_file.size()? == 0 {
if size_file.sum_sizes()? != expected_size {
aof.rebuild_size_file()?;

// (Re)init the entire file as we just rebuilt the size_file
Expand Down Expand Up @@ -517,6 +529,7 @@ where
if let SizeInfo::VariableSize(ref mut size_file) = &mut self.size_info {
// Note: Reading from data file and writing sizes to the associated (tmp) size_file.
let tmp_path = size_file.path.with_extension("tmp");
debug!("rebuild_size_file: {:?}", tmp_path);

// Scope the reader and writer to within the block so we can safely replace files later on.
{
Expand Down
2 changes: 1 addition & 1 deletion store/tests/lmdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ impl PhatChunkStruct {
}

impl Readable for PhatChunkStruct {
fn read(reader: &mut Reader) -> Result<PhatChunkStruct, ser::Error> {
fn read(reader: &mut dyn Reader) -> Result<PhatChunkStruct, ser::Error> {
let mut retval = PhatChunkStruct::new();
for _ in 0..TEST_ALLOC_SIZE {
retval.phatness = reader.read_u64()?;
Expand Down
Loading