Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-20 19:11:08 +03:00)
[PIBD] PMMR Desegmenter Structure (Pt. 1) (#3667)
* Initial commit of WIP PIBD explorations
* Correct calling for obtaining and validating first segment
* Update test to properly iterate through each segment of the test PMMRs, validating each segment as it goes
* Update test to fully segment and validate PMMRs from compacted and uncompacted sample data; also contains a method of running the test against live chain data
* Remove logger change
* Change test file name
* Change directory reference in test for CI
* Add initial (experimental) structure for PIBD desegmenting
* Move bitmap desegmentation logic into desegmenter
* Add txhashset methods to apply PIBD segments (note this only works for fully unpruned trees at the moment)
* Change last_pos to mmr_size
* Fix to pmmr::peaks call
* Don't verify PoW when copying headers
* Prepare for commit of work thus far
* Update test paths
* A few updates based on early review
Parent: 53414ae105
Commit: 2f5cfbe4eb
8 changed files with 587 additions and 10 deletions
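At a glance, the intended flow (exercised end to end by the new test_pibd_copy test below) is: ask the serving chain for a Segmenter, ask the receiving chain for a Desegmenter against the same archive header, stream bitmap segments first, finalize the bitmap, then stream output, rangeproof and kernel segments. A condensed, illustrative sketch of that loop, with error handling elided and `segment_height` standing in for whatever segment size the caller picks:

    // Illustrative only - condensed from the test in this commit, not a complete sync implementation.
    let archive_header = src_chain.txhashset_archive_header().unwrap();
    let segmenter = src_chain.segmenter().unwrap();
    let mut desegmenter = dest_chain.desegmenter(&archive_header).unwrap();

    // Bitmap segments first; the finalized bitmap is needed to validate output segments.
    let bitmap_mmr_size = desegmenter.expected_bitmap_mmr_size();
    for sid in SegmentIdentifier::traversal_iter(bitmap_mmr_size, segment_height) {
        let (seg, output_root_hash) = segmenter.bitmap_segment(sid).unwrap();
        desegmenter.add_bitmap_segment(seg, output_root_hash).unwrap();
    }
    desegmenter.finalize_bitmap().unwrap();

    // Then outputs, rangeproofs and kernels, applied strictly in order.
    for sid in SegmentIdentifier::traversal_iter(archive_header.output_mmr_size, segment_height) {
        let (seg, _bitmap_root) = segmenter.output_segment(sid).unwrap();
        desegmenter.add_output_segment(seg).unwrap();
    }
    // ...rangeproof and kernel segments follow the same pattern.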
@@ -27,7 +27,7 @@ use crate::error::{Error, ErrorKind};
use crate::pipe;
use crate::store;
use crate::txhashset;
-use crate::txhashset::{PMMRHandle, Segmenter, TxHashSet};
+use crate::txhashset::{Desegmenter, PMMRHandle, Segmenter, TxHashSet};
use crate::types::{
    BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus,
};
@@ -153,6 +153,7 @@ pub struct Chain {
    txhashset: Arc<RwLock<txhashset::TxHashSet>>,
    header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
    pibd_segmenter: Arc<RwLock<Option<Segmenter>>>,
    pibd_desegmenter: Arc<RwLock<Option<Desegmenter>>>,
    // POW verification function
    pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
    denylist: Arc<RwLock<Vec<Hash>>>,
@@ -202,6 +203,7 @@ impl Chain {
            txhashset: Arc::new(RwLock::new(txhashset)),
            header_pmmr: Arc::new(RwLock::new(header_pmmr)),
            pibd_segmenter: Arc::new(RwLock::new(None)),
            pibd_desegmenter: Arc::new(RwLock::new(None)),
            pow_verifier,
            denylist: Arc::new(RwLock::new(vec![])),
            archive_mode,
@@ -863,6 +865,43 @@ impl Chain {
        ))
    }

    /// Instantiate the desegmenter (in the same lazy fashion as the segmenter, though this
    /// should not be as expensive an operation)
    pub fn desegmenter(&self, archive_header: &BlockHeader) -> Result<Desegmenter, Error> {
        // Use our cached desegmenter if we have one and the associated header matches.
        if let Some(d) = self.pibd_desegmenter.read().as_ref() {
            if d.header() == archive_header {
                return Ok(d.clone());
            }
        }
        // If we have no desegmenter, or the headers don't match, initialize a new one.
        // TODO: (Check whether we can do this.. we *should* be able to modify this as the desegmenter
        // is in flight and we cross a horizon boundary, but needs more thinking)
        let desegmenter = self.init_desegmenter(archive_header)?;
        let mut cache = self.pibd_desegmenter.write();
        *cache = Some(desegmenter.clone());

        return Ok(desegmenter);
    }

    /// Initialize a desegmenter, which is capable of extending the txhashset by appending
    /// PIBD segments of the three PMMR trees + the bitmap PMMR.
    /// The header should be the same header as selected for the txhashset.zip archive.
    fn init_desegmenter(&self, header: &BlockHeader) -> Result<Desegmenter, Error> {
        debug!(
            "init_desegmenter: initializing new desegmenter for {} at {}",
            header.hash(),
            header.height
        );

        Ok(Desegmenter::new(
            self.txhashset(),
            self.header_pmmr.clone(),
            header.clone(),
            self.store.clone(),
        ))
    }

    /// To support the ability to download the txhashset from multiple peers in parallel,
    /// the peers must all agree on the exact binary representation of the txhashset.
    /// This means compacting and rewinding to the exact same header.
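desegmenter() follows the same caching idiom the existing segmenter() accessor uses: take the read lock to reuse a cached instance whose header still matches, and only take the write lock to replace it. A minimal, generic sketch of that idiom (illustrative only; the Cache type and numeric key below are hypothetical, not part of this diff):

    use std::sync::Arc;
    use parking_lot::RwLock; // assumption: a parking_lot-style RwLock, as used via crate::util::RwLock in this diff

    struct Cache<T: Clone> {
        slot: Arc<RwLock<Option<(u64, T)>>>, // (header-derived key, cached value)
    }

    impl<T: Clone> Cache<T> {
        fn get_or_init(&self, key: u64, init: impl FnOnce() -> T) -> T {
            // Fast path: shared read lock, clone the cached value if the key matches.
            if let Some((k, v)) = self.slot.read().as_ref() {
                if *k == key {
                    return v.clone();
                }
            }
            // Slow path: build a new value and overwrite the cache under the write lock.
            let value = init();
            *self.slot.write() = Some((key, value.clone()));
            value
        }
    }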
@@ -16,12 +16,14 @@
//! kernel) more conveniently and transactionally.

mod bitmap_accumulator;
mod desegmenter;
mod rewindable_kernel_view;
mod segmenter;
mod txhashset;
mod utxo_view;

pub use self::bitmap_accumulator::*;
pub use self::desegmenter::*;
pub use self::rewindable_kernel_view::*;
pub use self::segmenter::*;
pub use self::txhashset::*;
@@ -21,7 +21,7 @@ use croaring::Bitmap;

use crate::core::core::hash::{DefaultHashable, Hash};
use crate::core::core::pmmr::segment::{Segment, SegmentIdentifier, SegmentProof};
-use crate::core::core::pmmr::{self, ReadablePMMR, ReadonlyPMMR, VecBackend, PMMR};
+use crate::core::core::pmmr::{self, Backend, ReadablePMMR, ReadonlyPMMR, VecBackend, PMMR};
use crate::core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use crate::error::{Error, ErrorKind};
use enum_primitive::FromPrimitive;
@@ -190,6 +190,17 @@ impl BitmapAccumulator {
    pub fn readonly_pmmr(&self) -> ReadonlyPMMR<BitmapChunk, VecBackend<BitmapChunk>> {
        ReadonlyPMMR::at(&self.backend, self.backend.size())
    }

    /// Return a raw in-memory bitmap of this accumulator
    pub fn as_bitmap(&self) -> Result<Bitmap, Error> {
        let mut bitmap = Bitmap::create();
        for (chunk_count, chunk_index) in self.backend.leaf_idx_iter(0).enumerate() {
            // TODO: Unwrap
            let chunk = self.backend.get_data(chunk_index).unwrap();
            bitmap.add_many(&chunk.set_iter(chunk_count * 1024).collect::<Vec<u32>>());
        }
        Ok(bitmap)
    }
}

/// A bitmap "chunk" representing 1024 contiguous bits of the overall bitmap.
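as_bitmap() works because each BitmapChunk leaf covers a fixed 1024-bit window, so chunk i contributes bit positions offset by i * 1024. A small standalone sketch of that offset arithmetic (plain Rust, independent of the types in this diff):

    // Hypothetical helper: given per-chunk set bit indices (each in 0..1024),
    // produce the absolute positions in the overall bitmap.
    fn absolute_positions(chunks: &[Vec<u32>]) -> Vec<u32> {
        chunks
            .iter()
            .enumerate()
            .flat_map(|(chunk_idx, local_bits)| {
                let offset = (chunk_idx as u32) * 1024;
                local_bits.iter().map(move |b| offset + b)
            })
            .collect()
    }

    // e.g. chunk 0 with bits {3, 10} and chunk 2 with bit {5}
    // yields absolute positions {3, 10, 2053}.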
chain/src/txhashset/desegmenter.rs (new file, 243 lines)
@@ -0,0 +1,243 @@
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Manages the reconstitution of a txhashset from segments produced by the
//! segmenter

use std::sync::Arc;

use crate::core::core::hash::Hash;
use crate::core::core::pmmr;
use crate::core::core::{BlockHeader, OutputIdentifier, Segment, TxKernel};
use crate::error::Error;
use crate::txhashset::{BitmapAccumulator, BitmapChunk, TxHashSet};
use crate::util::secp::pedersen::RangeProof;
use crate::util::RwLock;

use crate::store;
use crate::txhashset;

use croaring::Bitmap;

/// Desegmenter for rebuilding a txhashset from PIBD segments
#[derive(Clone)]
pub struct Desegmenter {
    txhashset: Arc<RwLock<TxHashSet>>,
    header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
    archive_header: BlockHeader,
    store: Arc<store::ChainStore>,

    bitmap_accumulator: BitmapAccumulator,
    bitmap_segments: Vec<Segment<BitmapChunk>>,
    output_segments: Vec<Segment<OutputIdentifier>>,
    rangeproof_segments: Vec<Segment<RangeProof>>,
    kernel_segments: Vec<Segment<TxKernel>>,

    bitmap_mmr_leaf_count: u64,
    bitmap_mmr_size: u64,
    // In-memory 'raw' bitmap corresponding to contents of bitmap accumulator
    bitmap_cache: Option<Bitmap>,
}

impl Desegmenter {
    /// Create a new desegmenter based on the provided txhashset and the specified block header
    pub fn new(
        txhashset: Arc<RwLock<TxHashSet>>,
        header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
        archive_header: BlockHeader,
        store: Arc<store::ChainStore>,
    ) -> Desegmenter {
        let mut retval = Desegmenter {
            txhashset,
            header_pmmr,
            archive_header,
            store,
            bitmap_accumulator: BitmapAccumulator::new(),
            bitmap_segments: vec![],
            output_segments: vec![],
            rangeproof_segments: vec![],
            kernel_segments: vec![],

            bitmap_mmr_leaf_count: 0,
            bitmap_mmr_size: 0,

            bitmap_cache: None,
        };
        retval.calc_bitmap_mmr_sizes();
        retval
    }

    /// Return a reference to the header used for validation
    pub fn header(&self) -> &BlockHeader {
        &self.archive_header
    }

    /// Return the expected size of the bitmap MMR
    pub fn expected_bitmap_mmr_size(&self) -> u64 {
        self.bitmap_mmr_size
    }

    /// 'Finalize' the bitmap accumulator, storing an in-memory copy of the bitmap for
    /// use in further validation and setting the accumulator on the underlying txhashset
    /// TODO: Could be called automatically when we have the calculated number of
    /// required segments for the archive header
    /// TODO: Accumulator will likely need to be stored locally to deal with server
    /// being shut down and restarted
    pub fn finalize_bitmap(&mut self) -> Result<(), Error> {
        debug!(
            "pibd_desegmenter: caching bitmap - accumulator root: {}",
            self.bitmap_accumulator.root()
        );
        self.bitmap_cache = Some(self.bitmap_accumulator.as_bitmap()?);

        // Set the txhashset's bitmap accumulator
        let mut header_pmmr = self.header_pmmr.write();
        let mut txhashset = self.txhashset.write();
        let mut batch = self.store.batch()?;
        txhashset::extending(
            &mut header_pmmr,
            &mut txhashset,
            &mut batch,
            |ext, _batch| {
                let extension = &mut ext.extension;
                // TODO: Unwrap
                extension.set_bitmap_accumulator(self.bitmap_accumulator.clone());
                Ok(())
            },
        )?;
        Ok(())
    }

    // Calculate and store the number of leaves and positions in the bitmap MMR given the number
    // of outputs specified in the header. Should be called whenever the header changes.
    fn calc_bitmap_mmr_sizes(&mut self) {
        // Number of leaves (BitmapChunks)
        self.bitmap_mmr_leaf_count =
            (pmmr::n_leaves(self.archive_header.output_mmr_size) + 1023) / 1024;
        debug!(
            "pibd_desegmenter - expected number of leaves in bitmap MMR: {}",
            self.bitmap_mmr_leaf_count
        );
        // Total size of the bitmap PMMR
        self.bitmap_mmr_size = pmmr::peaks(self.bitmap_mmr_leaf_count)
            .last()
            .unwrap_or(&pmmr::insertion_to_pmmr_index(self.bitmap_mmr_leaf_count))
            .clone();
        debug!(
            "pibd_desegmenter - expected size of bitmap MMR: {}",
            self.bitmap_mmr_size
        );
    }

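To get a feel for the numbers, calc_bitmap_mmr_sizes() derives the bitmap MMR shape purely from output_mmr_size in the archive header: one BitmapChunk leaf per 1024 outputs, and the MMR size that leaf count implies. A hedged standalone check of the same arithmetic, using the standard identity that an MMR with n leaves occupies 2n - popcount(n) positions (the 3,000,000-output figure is illustrative, not from a real chain):

    // Illustrative only: derives (bitmap leaf count, bitmap MMR size) from a leaf count,
    // rather than calling the crate's pmmr helpers as the code above does.
    fn expected_bitmap_mmr_shape(output_leaves: u64) -> (u64, u64) {
        // one BitmapChunk leaf covers 1024 outputs
        let bitmap_leaves = (output_leaves + 1023) / 1024;
        // an MMR with n leaves occupies 2n - popcount(n) positions
        let bitmap_mmr_size = 2 * bitmap_leaves - u64::from(bitmap_leaves.count_ones());
        (bitmap_leaves, bitmap_mmr_size)
    }

    // expected_bitmap_mmr_shape(3_000_000) == (2930, 5853)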
    /// Adds and validates a bitmap chunk
    /// TODO: Still experimenting, this expects chunks received to be in order
    pub fn add_bitmap_segment(
        &mut self,
        segment: Segment<BitmapChunk>,
        output_root_hash: Hash,
    ) -> Result<(), Error> {
        debug!("pibd_desegmenter: add bitmap segment");
        segment.validate_with(
            self.bitmap_mmr_size, // Last MMR pos at the height being validated, in this case of the bitmap root
            None,
            self.archive_header.output_root, // Output root we're checking for
            self.archive_header.output_mmr_size,
            output_root_hash, // Other root
            true,
        )?;
        // All okay, add leaves to bitmap accumulator
        let (_sid, _hash_pos, _hashes, _leaf_pos, leaf_data, _proof) = segment.parts();
        for chunk in leaf_data.into_iter() {
            self.bitmap_accumulator.append_chunk(chunk)?;
        }
        Ok(())
    }

    /// Adds an output segment
    /// TODO: Still experimenting, expects chunks received to be in order
    pub fn add_output_segment(&self, segment: Segment<OutputIdentifier>) -> Result<(), Error> {
        debug!("pibd_desegmenter: add output segment");
        segment.validate_with(
            self.archive_header.output_mmr_size, // Last MMR pos at the height being validated
            self.bitmap_cache.as_ref(),
            self.archive_header.output_root, // Output root we're checking for
            self.archive_header.output_mmr_size,
            self.bitmap_accumulator.root(), // Other root
            false,
        )?;
        let mut header_pmmr = self.header_pmmr.write();
        let mut txhashset = self.txhashset.write();
        let mut batch = self.store.batch()?;
        txhashset::extending(
            &mut header_pmmr,
            &mut txhashset,
            &mut batch,
            |ext, _batch| {
                let extension = &mut ext.extension;
                extension.apply_output_segment(segment)?;
                Ok(())
            },
        )?;
        Ok(())
    }

    /// Adds a rangeproof segment
    /// TODO: Still experimenting, expects chunks received to be in order
    pub fn add_rangeproof_segment(&self, segment: Segment<RangeProof>) -> Result<(), Error> {
        debug!("pibd_desegmenter: add rangeproof segment");
        segment.validate(
            self.archive_header.output_mmr_size, // Last MMR pos at the height being validated
            self.bitmap_cache.as_ref(),
            self.archive_header.range_proof_root, // Range proof root we're checking for
        )?;
        let mut header_pmmr = self.header_pmmr.write();
        let mut txhashset = self.txhashset.write();
        let mut batch = self.store.batch()?;
        txhashset::extending(
            &mut header_pmmr,
            &mut txhashset,
            &mut batch,
            |ext, _batch| {
                let extension = &mut ext.extension;
                extension.apply_rangeproof_segment(segment)?;
                Ok(())
            },
        )?;
        Ok(())
    }

    /// Adds a kernel segment
    /// TODO: Still experimenting, expects chunks received to be in order
    pub fn add_kernel_segment(&self, segment: Segment<TxKernel>) -> Result<(), Error> {
        debug!("pibd_desegmenter: add kernel segment");
        segment.validate(
            self.archive_header.kernel_mmr_size, // Last MMR pos at the height being validated
            None,
            self.archive_header.kernel_root, // Kernel root we're checking for
        )?;
        let mut header_pmmr = self.header_pmmr.write();
        let mut txhashset = self.txhashset.write();
        let mut batch = self.store.batch()?;
        txhashset::extending(
            &mut header_pmmr,
            &mut txhashset,
            &mut batch,
            |ext, _batch| {
                let extension = &mut ext.extension;
                extension.apply_kernel_segment(segment)?;
                Ok(())
            },
        )?;
        Ok(())
    }
}
@@ -22,7 +22,9 @@ use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::{
    self, Backend, ReadablePMMR, ReadonlyPMMR, RewindablePMMR, VecBackend, PMMR,
};
-use crate::core::core::{Block, BlockHeader, KernelFeatures, Output, OutputIdentifier, TxKernel};
+use crate::core::core::{
+    Block, BlockHeader, KernelFeatures, Output, OutputIdentifier, Segment, TxKernel,
+};
use crate::core::global;
use crate::core::ser::{PMMRable, ProtocolVersion};
use crate::error::{Error, ErrorKind};
@@ -1177,6 +1179,11 @@ impl<'a> Extension<'a> {
        )
    }

    /// Sets the bitmap accumulator (as received during PIBD sync)
    pub fn set_bitmap_accumulator(&mut self, accumulator: BitmapAccumulator) {
        self.bitmap_accumulator = accumulator;
    }

    // Prune output and rangeproof PMMRs based on provided pos.
    // Input is not valid if we cannot prune successfully.
    fn apply_input(&mut self, commit: Commitment, pos: CommitPos) -> Result<(), Error> {
@@ -1232,6 +1239,33 @@ impl<'a> Extension<'a> {
        Ok(1 + output_pos)
    }

    /// Apply an output segment to the output PMMR. Must be called in order.
    /// TODO: Not complete
    pub fn apply_output_segment(
        &mut self,
        segment: Segment<OutputIdentifier>,
    ) -> Result<(), Error> {
        let (_sid, _hash_pos, _hashes, _leaf_pos, leaf_data, _proof) = segment.parts();
        for output_identifier in leaf_data {
            self.output_pmmr
                .push(&output_identifier)
                .map_err(&ErrorKind::TxHashSetErr)?;
        }
        Ok(())
    }

    /// Apply a rangeproof segment to the rangeproof PMMR. Must be called in order.
    /// TODO: Not complete
    pub fn apply_rangeproof_segment(&mut self, segment: Segment<RangeProof>) -> Result<(), Error> {
        let (_sid, _hash_pos, _hashes, _leaf_pos, leaf_data, _proof) = segment.parts();
        for proof in leaf_data {
            self.rproof_pmmr
                .push(&proof)
                .map_err(&ErrorKind::TxHashSetErr)?;
        }
        Ok(())
    }

    /// Apply kernels to the kernel MMR.
    /// Validate any NRD relative height locks via the "recent" kernel index.
    /// Note: This is used for both block processing and tx validation.
@@ -1251,6 +1285,18 @@ impl<'a> Extension<'a> {
        Ok(())
    }

    /// Apply a kernel segment to the kernel PMMR. Must be called in order.
    /// TODO: Not complete
    pub fn apply_kernel_segment(&mut self, segment: Segment<TxKernel>) -> Result<(), Error> {
        let (_sid, _hash_pos, _hashes, _leaf_pos, leaf_data, _proof) = segment.parts();
        for kernel in leaf_data {
            self.kernel_pmmr
                .push(&kernel)
                .map_err(&ErrorKind::TxHashSetErr)?;
        }
        Ok(())
    }

    /// Push kernel onto MMR (hash and data files).
    fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<u64, Error> {
        let pos = self
chain/tests/test_pibd_copy.rs (new file, 235 lines)
@@ -0,0 +1,235 @@
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use grin_chain as chain;
use grin_core as core;
use grin_util as util;

#[macro_use]
extern crate log;

use std::sync::Arc;

use crate::chain::types::{NoopAdapter, Options};
use crate::core::core::{hash::Hashed, pmmr::segment::SegmentIdentifier};
use crate::core::{genesis, global, pow};

use self::chain_test_helper::clean_output_dir;

mod chain_test_helper;

fn test_pibd_copy_impl(is_test_chain: bool, src_root_dir: &str, dest_root_dir: &str) {
    global::set_local_chain_type(global::ChainTypes::Mainnet);
    let mut genesis = genesis::genesis_main();
    // Height at which to read kernel segments (lower than thresholds defined in spec - for testing)
    let mut target_segment_height = 11;

    if is_test_chain {
        global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
        genesis = pow::mine_genesis_block().unwrap();
        target_segment_height = 3;
    }

    {
        debug!("Reading Chain, genesis block: {}", genesis.hash());
        let dummy_adapter = Arc::new(NoopAdapter {});

        // The original chain we're reading from
        let src_chain = Arc::new(
            chain::Chain::init(
                src_root_dir.into(),
                dummy_adapter.clone(),
                genesis.clone(),
                pow::verify_size,
                false,
            )
            .unwrap(),
        );

        // And the output chain we're writing to
        let dest_chain = Arc::new(
            chain::Chain::init(
                dest_root_dir.into(),
                dummy_adapter,
                genesis.clone(),
                pow::verify_size,
                false,
            )
            .unwrap(),
        );

        // For test compaction purposes
        /*src_chain.compact().unwrap();
        src_chain
            .validate(true)
            .expect("Source chain validation failed, stop");*/

        let sh = src_chain.get_header_by_height(0).unwrap();
        debug!("Source Genesis - {}", sh.hash());

        let dh = dest_chain.get_header_by_height(0).unwrap();
        debug!("Destination Genesis - {}", dh.hash());

        let horizon_header = src_chain.txhashset_archive_header().unwrap();

        debug!("Horizon header: {:?}", horizon_header);

        // Copy the headers from source to output in chunks
        let dest_sync_head = dest_chain.header_head().unwrap();
        let copy_chunk_size = 1000;
        let mut copied_header_index = 1;
        let mut src_headers = vec![];
        while copied_header_index <= horizon_header.height {
            let h = src_chain.get_header_by_height(copied_header_index).unwrap();
            src_headers.push(h);
            copied_header_index += 1;
            if copied_header_index % copy_chunk_size == 0 {
                debug!(
                    "Copying headers to {} of {}",
                    copied_header_index, horizon_header.height
                );
                dest_chain
                    .sync_block_headers(&src_headers, dest_sync_head, Options::SKIP_POW)
                    .unwrap();
                src_headers = vec![];
            }
        }
        if !src_headers.is_empty() {
            dest_chain
                .sync_block_headers(&src_headers, dest_sync_head, Options::NONE)
                .unwrap();
        }

        // Init segmenter (note this still has to be lazily initialized somewhere on a peer)
        // This is going to use the same block as horizon_header
        let segmenter = src_chain.segmenter().unwrap();
        // Init desegmenter
        let mut desegmenter = dest_chain.desegmenter(&horizon_header).unwrap();

        // And total size of the bitmap PMMR
        let bitmap_mmr_size = desegmenter.expected_bitmap_mmr_size();
        debug!(
            "Bitmap Segments required: {}",
            SegmentIdentifier::count_segments_required(bitmap_mmr_size, target_segment_height)
        );
        // TODO: This can probably be derived from the PMMR we'll eventually be building
        // (check if total size is equal to total size at horizon header)
        let identifier_iter =
            SegmentIdentifier::traversal_iter(bitmap_mmr_size, target_segment_height);

        for sid in identifier_iter {
            debug!("Getting bitmap segment with Segment Identifier {:?}", sid);
            let (bitmap_segment, output_root_hash) = segmenter.bitmap_segment(sid).unwrap();
            debug!(
                "Bitmap segmenter reports output root hash is {:?}",
                output_root_hash
            );
            // Add segment to desegmenter / validate
            if let Err(e) = desegmenter.add_bitmap_segment(bitmap_segment, output_root_hash) {
                panic!("Unable to add bitmap segment: {}", e);
            }
        }

        // Finalize the desegmenter bitmap, which means we've received all bitmap MMR chunks and
        // are ready to use it to validate outputs
        desegmenter.finalize_bitmap().unwrap();

        // OUTPUTS - Read + Validate
        let identifier_iter = SegmentIdentifier::traversal_iter(
            horizon_header.output_mmr_size,
            target_segment_height,
        );

        for sid in identifier_iter {
            debug!("Getting output segment with Segment Identifier {:?}", sid);
            let (output_segment, bitmap_root_hash) = segmenter.output_segment(sid).unwrap();
            debug!(
                "Output segmenter reports bitmap hash is {:?}",
                bitmap_root_hash
            );
            // Add segment to desegmenter / validate
            if let Err(e) = desegmenter.add_output_segment(output_segment) {
                panic!("Unable to add output segment: {}", e);
            }
        }

        // PROOFS - Read + Validate
        let identifier_iter = SegmentIdentifier::traversal_iter(
            horizon_header.output_mmr_size,
            target_segment_height,
        );

        for sid in identifier_iter {
            debug!(
                "Getting rangeproof segment with Segment Identifier {:?}",
                sid
            );
            let rangeproof_segment = segmenter.rangeproof_segment(sid).unwrap();
            // Add segment to desegmenter / validate
            if let Err(e) = desegmenter.add_rangeproof_segment(rangeproof_segment) {
                panic!("Unable to add rangeproof segment: {}", e);
            }
        }

        // KERNELS - Read + Validate
        let identifier_iter = SegmentIdentifier::traversal_iter(
            horizon_header.kernel_mmr_size,
            target_segment_height,
        );

        for sid in identifier_iter {
            debug!("Getting kernel segment with Segment Identifier {:?}", sid);
            let kernel_segment = segmenter.kernel_segment(sid).unwrap();
            if let Err(e) = desegmenter.add_kernel_segment(kernel_segment) {
                panic!("Unable to add kernel segment: {}", e);
            }
        }

        let dest_txhashset = dest_chain.txhashset();
        debug!("Dest TxHashset Roots: {:?}", dest_txhashset.read().roots());
    }
}

#[test]
fn test_pibd_copy_sample() {
    util::init_test_logger();
    // Note there is now a 'test' in grin_wallet_controller/build_chain
    // that can be manually tweaked to create a
    // small test chain with actual transaction data

    // Test on both the uncompacted and the compacted sample chains
    let src_root_dir = format!("./tests/test_data/chain_raw");
    let dest_root_dir = format!("./tests/test_output/.segment_copy");
    clean_output_dir(&dest_root_dir);
    test_pibd_copy_impl(true, &src_root_dir, &dest_root_dir);
    let src_root_dir = format!("./tests/test_data/chain_compacted");
    clean_output_dir(&dest_root_dir);
    test_pibd_copy_impl(true, &src_root_dir, &dest_root_dir);
    clean_output_dir(&dest_root_dir);
}

#[test]
#[ignore]
// Note this test is intended to be run manually, as testing the copy of an
// entire live chain is beyond the capability of current CI.
// As above, but run on a real instance of a chain pointed wherever you like.
fn test_pibd_copy_real() {
    util::init_test_logger();
    // if testing against a real chain, insert location here
    let src_root_dir = format!("/Users/yeastplume/Projects/grin_project/server/chain_data");
    let dest_root_dir = format!("/Users/yeastplume/Projects/grin_project/server/.chain_data_copy");
    clean_output_dir(&dest_root_dir);
    test_pibd_copy_impl(false, &src_root_dir, &dest_root_dir);
    clean_output_dir(&dest_root_dir);
}
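Assuming the usual cargo test conventions for this workspace (the package is grin_chain, the test target matches the file name), the ignored test above can be pointed at a local chain directory and run manually with something like:

    cargo test -p grin_chain --test test_pibd_copy test_pibd_copy_real -- --ignored --nocapture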
@@ -87,7 +87,7 @@ fn test_pibd_chain_validation_impl(is_test_chain: bool, src_root_dir: &str) {
    println!("BITMAP PMMR NUM_LEAVES: {}", bitmap_mmr_num_leaves);

    // And total size of the bitmap PMMR
-   let bitmap_pmmr_size = pmmr::peaks(bitmap_mmr_num_leaves + 1)
+   let bitmap_pmmr_size = pmmr::peaks(bitmap_mmr_num_leaves)
        .last()
        .unwrap_or(&pmmr::insertion_to_pmmr_index(bitmap_mmr_num_leaves))
        .clone();
@@ -87,7 +87,8 @@ impl SegmentIdentifier {
    /// Returns the number of segments required in order to read a
    /// pmmr of size `target_mmr_size` in segments of height `segment_height`
    pub fn count_segments_required(target_mmr_size: u64, segment_height: u8) -> usize {
-       pmmr::n_leaves(target_mmr_size) as usize / (1 << segment_height as usize)
+       let d = 1 << segment_height;
+       ((pmmr::n_leaves(target_mmr_size) + d - 1) / d) as usize
    }
}

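The change above swaps truncating division for ceiling division, so a trailing partial segment is counted. As a hedged illustration (numbers chosen for the example, not taken from the codebase): with segment_height = 4 each segment holds 2^4 = 16 leaves, so an MMR with 1000 leaves needs 63 segments; the old expression yielded 62 and dropped the final 8-leaf segment.

    // illustrative check of the ceiling-division form
    let leaves: u64 = 1000;
    let d: u64 = 1 << 4; // segment_height = 4
    assert_eq!((leaves + d - 1) / d, 63);
    assert_eq!(leaves / d, 62); // old behaviour, misses the last partial segment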
@@ -126,17 +127,17 @@ impl<T> Segment<T> {
        self.identifier.idx * self.segment_capacity()
    }

-   /// Number of leaves in this segment. Equal to capacity except for the final segment, which can be smaller
-   fn segment_unpruned_size(&self, last_pos: u64) -> u64 {
+   // Number of leaves in this segment. Equal to capacity except for the final segment, which can be smaller
+   fn segment_unpruned_size(&self, mmr_size: u64) -> u64 {
        min(
            self.segment_capacity(),
-           pmmr::n_leaves(last_pos).saturating_sub(self.leaf_offset()),
+           pmmr::n_leaves(mmr_size).saturating_sub(self.leaf_offset()),
        )
    }

    /// Whether the segment is full (segment size == capacity)
-   fn full_segment(&self, last_pos: u64) -> bool {
-       self.segment_unpruned_size(last_pos) == self.segment_capacity()
+   fn full_segment(&self, mmr_size: u64) -> bool {
+       self.segment_unpruned_size(mmr_size) == self.segment_capacity()
    }

    /// Inclusive range of MMR positions for this segment
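Continuing the illustrative numbers above: for the last segment of a 1000-leaf MMR at segment_height = 4, leaf_offset() is 62 * 16 = 992, so segment_unpruned_size is min(16, 1000 - 992) = 8 and full_segment() is false; every earlier segment reports the full 16. A hedged sketch of the same arithmetic, outside the Segment type:

    let capacity: u64 = 16;         // 1 << segment_height
    let leaf_offset: u64 = 62 * 16; // idx 62, the final segment
    let n_leaves: u64 = 1000;       // leaves in the whole MMR
    let unpruned = capacity.min(n_leaves.saturating_sub(leaf_offset));
    assert_eq!(unpruned, 8);
    assert_ne!(unpruned, capacity); // not a full segment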