[PIBD_IMPL] Catch-Up functionality + Fixes based on testing (#3702)

* ensure desegmenter attempts to apply correct block after a resume

* ensure txhashset's committed implementation takes into account output bitmap for summing purposes

* remove check to de-apply outputs during segment application

* return removal of spent outputs during pibd

* remove unneeded status

* remove unneeded change to rewind function
This commit is contained in:
Yeastplume 2022-03-30 13:47:42 +01:00 committed by GitHub
parent 50450ba71a
commit 6a7b66b329
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 74 additions and 12 deletions

View file

@ -197,6 +197,7 @@ impl Desegmenter {
if local_kernel_mmr_size == self.archive_header.kernel_mmr_size
&& local_output_mmr_size == self.archive_header.output_mmr_size
&& local_rangeproof_mmr_size == self.archive_header.output_mmr_size
&& self.bitmap_cache.is_some()
{
// All is complete
return true;
@ -206,6 +207,22 @@ impl Desegmenter {
false
}
/// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets
/// match the bitmap (particularly in the case of outputs being spent after a PIBD catch-up)
pub fn check_update_leaf_set_state(&self) -> Result<(), Error> {
	// Take write locks on the header PMMR and txhashset, then perform the
	// leaf-set reconciliation inside a single extension against a fresh batch.
	let mut header_pmmr = self.header_pmmr.write();
	let mut txhashset = self.txhashset.write();
	let mut batch = self.store.batch()?;
	txhashset::extending(&mut header_pmmr, &mut txhashset, &mut batch, |ext, _| {
		// Only reconcile when a downloaded output bitmap is actually held.
		match &self.bitmap_cache {
			Some(bitmap) => ext.extension.update_leaf_sets(bitmap),
			None => Ok(()),
		}
	})?;
	Ok(())
}
/// TODO: This is largely copied from chain.rs txhashset_write and related functions,
/// the idea being that these will eventually be broken out to perform validation while
/// segments are still being downloaded and applied. Current validation logic is all tied up
@ -782,7 +799,7 @@ impl Desegmenter {
// with naught but a humble genesis block. We need segment 0, (and
// also need to skip the genesis block when applying the segment)
let cur_segment_count = if local_output_mmr_size == 1 {
let mut cur_segment_count = if local_output_mmr_size == 1 {
0
} else {
SegmentIdentifier::count_segments_required(
@ -791,9 +808,12 @@ impl Desegmenter {
)
};
// TODO: When resuming, the output pmmr size has increased by one and this
// returns 1 segment ahead of where it should, requiring a small rewind on startup
// Figure out why
// When resuming, we need to ensure we're getting the previous segment if needed
let theoretical_pmmr_size =
SegmentIdentifier::pmmr_size(cur_segment_count, self.default_output_segment_height);
if local_output_mmr_size < theoretical_pmmr_size {
cur_segment_count -= 1;
}
let total_segment_count = SegmentIdentifier::count_segments_required(
self.archive_header.output_mmr_size,
@ -892,7 +912,7 @@ impl Desegmenter {
// with naught but a humble genesis block. We need segment 0, (and
// also need to skip the genesis block when applying the segment)
let cur_segment_count = if local_rangeproof_mmr_size == 1 {
let mut cur_segment_count = if local_rangeproof_mmr_size == 1 {
0
} else {
SegmentIdentifier::count_segments_required(
@ -901,6 +921,13 @@ impl Desegmenter {
)
};
// When resuming, we need to ensure we're getting the previous segment if needed
let theoretical_pmmr_size =
SegmentIdentifier::pmmr_size(cur_segment_count, self.default_rangeproof_segment_height);
if local_rangeproof_mmr_size < theoretical_pmmr_size {
cur_segment_count -= 1;
}
let total_segment_count = SegmentIdentifier::count_segments_required(
self.archive_header.output_mmr_size,
self.default_rangeproof_segment_height,
@ -980,7 +1007,7 @@ impl Desegmenter {
local_kernel_mmr_size = txhashset.kernel_mmr_size();
}
let cur_segment_count = if local_kernel_mmr_size == 1 {
let mut cur_segment_count = if local_kernel_mmr_size == 1 {
0
} else {
SegmentIdentifier::count_segments_required(
@ -989,6 +1016,13 @@ impl Desegmenter {
)
};
// When resuming, we need to ensure we're getting the previous segment if needed
let theoretical_pmmr_size =
SegmentIdentifier::pmmr_size(cur_segment_count, self.default_kernel_segment_height);
if local_kernel_mmr_size < theoretical_pmmr_size {
cur_segment_count -= 1;
}
let total_segment_count = SegmentIdentifier::count_segments_required(
self.archive_header.kernel_mmr_size,
self.default_kernel_segment_height,

View file

@ -1363,6 +1363,18 @@ impl<'a> Extension<'a> {
Ok(1 + output_pos)
}
/// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets
/// match the bitmap (particularly in the case of outputs being spent after a PIBD catch-up)
pub fn update_leaf_sets(&mut self, bitmap: &Bitmap) -> Result<(), Error> {
	// Guard against an empty bitmap: `maximum()` returns None in that case
	// and the previous `unwrap()` would panic. An empty bitmap produces an
	// empty flip range, so there is nothing to remove and we can return early.
	let max = match bitmap.maximum() {
		Some(max) => max,
		None => return Ok(()),
	};
	// Set bits mark unspent outputs, so flipping the bitmap over its full
	// range enumerates the spent insertion indices.
	let flipped = bitmap.flip(0..max as u64 + 1);
	for spent_pmmr_index in flipped.iter() {
		let pos0 = pmmr::insertion_to_pmmr_index(spent_pmmr_index.into());
		// Drop the spent position from both leaf sets so they agree with
		// the downloaded output bitmap.
		self.output_pmmr.remove_from_leaf_set(pos0);
		self.rproof_pmmr.remove_from_leaf_set(pos0);
	}
	Ok(())
}
/// Order and sort output segments and hashes, returning an array
/// of elements that can be applied in order to a pmmr
fn sort_pmmr_hashes_and_leaves(
@ -1423,8 +1435,6 @@ impl<'a> Extension<'a> {
.map_err(&ErrorKind::TxHashSetErr)?;
}
let pmmr_index = pmmr::pmmr_leaf_to_insertion_index(pos0);
// Remove any elements that may be spent but not fully
// pruned
match pmmr_index {
Some(i) => {
if !self.bitmap_cache.contains(i as u32) {
@ -1472,7 +1482,7 @@ impl<'a> Extension<'a> {
match pmmr_index {
Some(i) => {
if !self.bitmap_cache.contains(i as u32) {
self.output_pmmr.remove_from_leaf_set(pos0);
self.rproof_pmmr.remove_from_leaf_set(pos0);
}
}
None => {}
@ -1574,7 +1584,8 @@ impl<'a> Extension<'a> {
}
/// Rewinds the MMRs to the provided block, rewinding to the last output pos
/// and last kernel pos of that block.
/// and last kernel pos of that block. If `updated_bitmap` is supplied, the
/// bitmap accumulator will be replaced with its contents
pub fn rewind(&mut self, header: &BlockHeader, batch: &Batch<'_>) -> Result<(), Error> {
debug!(
"Rewind extension to {} at {} from {} at {}",
@ -1785,7 +1796,8 @@ impl<'a> Extension<'a> {
Ok(())
}
/// Validate full kernel sums against the provided header (for overage and kernel_offset).
/// Validate full kernel sums against the provided header and unspent output bitmap
/// (for overage and kernel_offset).
/// This is an expensive operation as we need to retrieve all the UTXOs and kernels
/// from the respective MMRs.
/// For a significantly faster way of validating full kernel sums see BlockSums.

View file

@ -124,8 +124,13 @@ impl SegmentIdentifier {
((pmmr::n_leaves(target_mmr_size) + d - 1) / d) as usize
}
/// Return pmmr size of number of segments of the given height
pub fn pmmr_size(num_segments: usize, height: u8) -> u64 {
	// Each segment holds 2^height leaves; convert the total leaf count
	// into the corresponding pmmr position.
	let leaf_count = (num_segments as u64) << height;
	pmmr::insertion_to_pmmr_index(leaf_count)
}
/// Maximum number of leaves in a segment, given by `2**height`
fn segment_capacity(&self) -> u64 {
pub fn segment_capacity(&self) -> u64 {
1 << self.height
}

View file

@ -172,6 +172,17 @@ impl StateSync {
// All segments in, validate
if let Some(d) = desegmenter.read().as_ref() {
if d.check_progress(self.sync_state.clone()) {
if let Err(e) = d.check_update_leaf_set_state() {
error!("error updating PIBD leaf set: {}", e);
self.sync_state.update_pibd_progress(
false,
true,
0,
1,
&archive_header,
);
return false;
}
if let Err(e) = d.validate_complete_state(
self.sync_state.clone(),
stop_state.clone(),