// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the persistent Backend for the prunable MMR tree.

use std::fs;
use std::io;
use std::marker;
use std::path::Path;

use croaring::Bitmap;

use core::core::BlockHeader;
use core::core::hash::{Hash, Hashed};
use core::core::pmmr::{self, family, Backend};
use core::core::prune_list::PruneList;
use core::ser::{self, PMMRable};

use leaf_set::LeafSet;
use rm_log::RemoveLog;
use types::{prune_noop, read_ordered_vec, write_vec, AppendOnlyFile};
use util::LOGGER;

const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
const PMMR_LEAF_FILE: &'static str = "pmmr_leaf.bin";
const PMMR_RM_LOG_FILE: &'static str = "pmmr_rm_log.bin";
const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";

/// PMMR persistent backend implementation. Relies on multiple facilities to
/// handle writing, reading and pruning.
///
/// * A main storage file appends Hash instances as they come.
///   This AppendOnlyFile is also backed by a mmap for reads.
/// * An in-memory backend buffers the latest batch of writes to ensure the
///   PMMR can always read recent values even if they haven't been flushed to
///   disk yet.
/// * A leaf_set tracks unpruned (unremoved) leaf positions in the MMR.
/// * A prune_list tracks the positions of pruned (and compacted) roots in the
///   MMR.
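///
/// A minimal usage sketch (hedged: `TestElem` is a hypothetical `PMMRable`
/// type, and `some_hash`/`some_elem` stand in for real values):
///
/// ```ignore
/// let mut backend: PMMRBackend<TestElem> =
/// 	PMMRBackend::new("target/pmmr_db".to_string(), None)?;
/// backend.append(1, vec![(some_hash, Some(some_elem))])?;
/// backend.sync()?;
/// ```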
pub struct PMMRBackend<T>
where
	T: PMMRable,
{
	data_dir: String,
	hash_file: AppendOnlyFile,
	data_file: AppendOnlyFile,
	leaf_set: LeafSet,
	pruned_nodes: PruneList,
	_marker: marker::PhantomData<T>,
}

impl<T> Backend<T> for PMMRBackend<T>
where
	T: PMMRable + ::std::fmt::Debug,
{
	/// Append the provided Hashes to the backend storage.
	fn append(&mut self, position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> {
		for d in data {
			self.hash_file.append(&mut ser::ser_vec(&d.0).unwrap());
			if let Some(elem) = d.1 {
				self.data_file.append(&mut ser::ser_vec(&elem).unwrap());

				// Add the new position to our leaf_set.
				self.leaf_set.add(position);
			}
		}
		Ok(())
	}

	fn get_from_file(&self, position: u64) -> Option<Hash> {
		let shift = self.pruned_nodes.get_shift(position)?;

		// Read PMMR
		// The MMR starts at 1, our binary backend starts at 0.
		let pos = position - 1;

		// Must be on disk, doing a read at the correct position
		let hash_record_len = 32;
		let file_offset = ((pos - shift) as usize) * hash_record_len;
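		// e.g. position 5 (0-based pos 4) with a cumulative shift of 2
		// reads bytes [64, 96) of the hash file: (4 - 2) * 32 = 64.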
		let data = self.hash_file.read(file_offset, hash_record_len);
		match ser::deserialize(&mut &data[..]) {
			Ok(h) => Some(h),
			Err(e) => {
				error!(
					LOGGER,
					"Corrupted storage, could not read an entry from hash store: {:?}", e
				);
				None
			}
		}
	}

	fn get_data_from_file(&self, position: u64) -> Option<T> {
		let shift = self.pruned_nodes.get_leaf_shift(position)?;

		let pos = pmmr::n_leaves(position) - 1;
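		// e.g. MMR position 4 is the third leaf, so it maps to data file
		// index n_leaves(4) - 1 = 2.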

		// Must be on disk, doing a read at the correct position
		let record_len = T::len();
		let file_offset = ((pos - shift) as usize) * record_len;
		let data = self.data_file.read(file_offset, record_len);
		match ser::deserialize(&mut &data[..]) {
			Ok(d) => Some(d),
			Err(e) => {
				error!(
					LOGGER,
					"Corrupted storage, could not read an entry from data store: {:?}", e
				);
				None
			}
		}
	}

	/// Get the hash at pos.
	/// Return None if pos is a leaf and it has been removed (or pruned or
	/// compacted).
	fn get_hash(&self, pos: u64) -> Option<Hash> {
		if pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) {
			return None;
		}
		self.get_from_file(pos)
	}

	/// Get the data at pos.
	/// Return None if it has been removed or if pos is not a leaf node.
	fn get_data(&self, pos: u64) -> Option<T> {
		if !pmmr::is_leaf(pos) {
			return None;
		}
		if !self.leaf_set.includes(pos) {
			return None;
		}
		self.get_data_from_file(pos)
	}

	/// Rewind the PMMR backend to the given position.
	fn rewind(
		&mut self,
		position: u64,
		rewind_add_pos: &Bitmap,
		rewind_rm_pos: &Bitmap,
	) -> Result<(), String> {
		// First rewind the leaf_set with the necessary added and removed positions.
		self.leaf_set.rewind(rewind_add_pos, rewind_rm_pos);

		// Rewind the hash file accounting for pruned/compacted pos
		let shift = self.pruned_nodes.get_shift(position).unwrap_or(0);
		let record_len = 32u64;
		let file_pos = (position - shift) * record_len;
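		// e.g. rewinding to position 10 with a cumulative shift of 4
		// truncates the hash file to (10 - 4) * 32 = 192 bytes.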
		self.hash_file.rewind(file_pos);

		// Rewind the data file accounting for pruned/compacted pos
		let leaf_shift = self.pruned_nodes.get_leaf_shift(position).unwrap_or(0);
		let flatfile_pos = pmmr::n_leaves(position);
		let record_len = T::len() as u64;
		let file_pos = (flatfile_pos - leaf_shift) * record_len;
		self.data_file.rewind(file_pos);

		Ok(())
	}

	/// Remove by insertion position.
	fn remove(&mut self, pos: u64) -> Result<(), String> {
		self.leaf_set.remove(pos);
		Ok(())
	}

	/// Return data file path
	fn get_data_file_path(&self) -> String {
		self.data_file.path()
	}

	/// Save a snapshot of the leaf_set, tagged with the hash of the provided
	/// block header.
	fn snapshot(&self, header: &BlockHeader) -> Result<(), String> {
		self.leaf_set
			.snapshot(header)
			.map_err(|_| format!("Failed to save copy of leaf_set for {}", header.hash()))?;
		Ok(())
	}

	fn dump_stats(&self) {
		debug!(
			LOGGER,
			"pmmr backend: unpruned: {}, hashes: {}, data: {}, leaf_set: {}, prune_list: {}",
			self.unpruned_size().unwrap_or(0),
			self.hash_size().unwrap_or(0),
			self.data_size().unwrap_or(0),
			self.leaf_set.len(),
			self.pruned_nodes.pruned_nodes.len(),
		);
	}
}

impl<T> PMMRBackend<T>
where
	T: PMMRable + ::std::fmt::Debug,
{
	/// Instantiates a new PMMR backend.
	/// Use the provided dir to store its files.
	pub fn new(data_dir: String, header: Option<&BlockHeader>) -> io::Result<PMMRBackend<T>> {
		let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
		let pruned_nodes = PruneList {
			pruned_nodes: prune_list,
		};
		let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE))?;
		let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;

		let leaf_set_path = format!("{}/{}", data_dir, PMMR_LEAF_FILE);
		let rm_log_path = format!("{}/{}", data_dir, PMMR_RM_LOG_FILE);

		if let Some(header) = header {
			let leaf_snapshot_path = format!("{}/{}.{}", data_dir, PMMR_LEAF_FILE, header.hash());
			LeafSet::copy_snapshot(leaf_set_path.clone(), leaf_snapshot_path.clone())?;
		}

		// If we need to migrate an old rm_log to a new leaf_set do it here before we
		// start. Do *not* migrate if we already have a leaf_set.
		let mut leaf_set = LeafSet::open(leaf_set_path.clone())?;
		if leaf_set.is_empty() && Path::new(&rm_log_path).exists() {
			let mut rm_log = RemoveLog::open(rm_log_path)?;
			debug!(
				LOGGER,
				"pmmr: leaf_set: {}, rm_log: {}",
				leaf_set.len(),
				rm_log.len()
			);
			debug!(LOGGER, "pmmr: migrating rm_log -> leaf_set");

			if let Some(header) = header {
				// Rewind the rm_log back to the height of the header we care about.
				debug!(
					LOGGER,
					"pmmr: first rewinding rm_log to height {}", header.height
				);
				rm_log.rewind(header.height as u32)?;
			}

			// We have no PMMR to call unpruned_size() on yet, so derive the
			// last position from the hash file size plus the total shift.
			let last_pos = {
				let total_shift = pruned_nodes.get_shift(::std::u64::MAX).unwrap();
				let record_len = 32;
				let sz = hash_file.size()?;
				sz / record_len + total_shift
			};

			migrate_rm_log(&mut leaf_set, &rm_log, &pruned_nodes, last_pos)?;
		}

		let leaf_set = LeafSet::open(leaf_set_path)?;

		Ok(PMMRBackend {
			data_dir,
			hash_file,
			data_file,
			leaf_set,
			pruned_nodes,
			_marker: marker::PhantomData,
		})
	}

	/// Whether the given position has been pruned: true if any node on the
	/// path from pos up to the root is in the prune_list.
	fn is_pruned(&self, pos: u64) -> bool {
		let path = pmmr::path(pos, self.unpruned_size().unwrap_or(0));
		path.iter()
			.any(|x| self.pruned_nodes.pruned_nodes.contains(x))
	}

	/// Number of elements in the PMMR stored by this backend. Only produces the
	/// fully sync'd size.
	pub fn unpruned_size(&self) -> io::Result<u64> {
		let total_shift = self.pruned_nodes.get_shift(::std::u64::MAX).unwrap();

		let record_len = 32;
		let sz = self.hash_file.size()?;
		Ok(sz / record_len + total_shift)
	}

	/// Number of elements in the underlying stored data. Extremely dependent on
	/// pruning and compaction.
	pub fn data_size(&self) -> io::Result<u64> {
		let record_len = T::len() as u64;
		self.data_file.size().map(|sz| sz / record_len)
	}

	/// Size of the underlying hashed data. Extremely dependent on pruning
	/// and compaction.
	pub fn hash_size(&self) -> io::Result<u64> {
		self.hash_file.size().map(|sz| sz / 32)
	}

	/// Syncs all files to disk. A call to sync is required to ensure all the
	/// data has been successfully written to disk.
	pub fn sync(&mut self) -> io::Result<()> {
		if let Err(e) = self.hash_file.flush() {
			return Err(io::Error::new(
				io::ErrorKind::Interrupted,
				format!("Could not write to log hash storage, disk full? {:?}", e),
			));
		}
		if let Err(e) = self.data_file.flush() {
			return Err(io::Error::new(
				io::ErrorKind::Interrupted,
				format!("Could not write to log data storage, disk full? {:?}", e),
			));
		}
		self.leaf_set.flush()?;

		Ok(())
	}

	/// Discard the current, non synced state of the backend.
	pub fn discard(&mut self) {
		self.hash_file.discard();
		self.leaf_set.discard();
		self.data_file.discard();
	}

	/// Return the data file path
	pub fn data_file_path(&self) -> String {
		self.get_data_file_path()
	}

	/// Takes the leaf_set at a given cutoff_pos and generates an updated
	/// prune_list. Saves the updated prune_list to disk.
	/// Compacts the hash and data files based on the prune_list and saves
	/// both to disk.
	///
	/// A cutoff position limits compaction on recent data.
	/// This will be the last position of a particular block to keep things
	/// aligned. The block_marker in the db/index for the particular block
	/// will have a suitable output_pos. This is used to enforce a horizon
	/// after which the local node should have all the data to allow rewinding.
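	///
	/// A hedged calling sketch (`horizon_pos` is an assumed variable; the
	/// empty bitmaps mean there is no in-flight rewind to account for):
	///
	/// ```ignore
	/// let empty = Bitmap::create();
	/// backend.check_compact(horizon_pos, &empty, &empty, &prune_noop)?;
	/// ```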
	pub fn check_compact<P>(
		&mut self,
		cutoff_pos: u64,
		rewind_add_pos: &Bitmap,
		rewind_rm_pos: &Bitmap,
		prune_cb: P,
	) -> io::Result<bool>
	where
		P: Fn(&[u8]),
	{
		// Paths for tmp hash and data files.
		let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
		let tmp_prune_file_data = format!("{}/{}.dataprune", self.data_dir, PMMR_DATA_FILE);

		// Calculate the sets of leaf positions and node positions to remove based
		// on the cutoff_pos provided.
		let (leaves_removed, pos_to_rm) = self.pos_to_rm(cutoff_pos, rewind_add_pos, rewind_rm_pos);

		// 1. Save compact copy of the hash file, skipping removed data.
		{
			let record_len = 32;

			let off_to_rm = map_vec!(pos_to_rm, |pos| {
				let shift = self.pruned_nodes.get_shift(pos.into()).unwrap();
				((pos as u64) - 1 - shift) * record_len
			});

			self.hash_file.save_prune(
				tmp_prune_file_hash.clone(),
				off_to_rm,
				record_len,
				&prune_noop,
			)?;
		}

		// 2. Save compact copy of the data file, skipping removed leaves.
		{
			let record_len = T::len() as u64;

			let leaf_pos_to_rm = pos_to_rm
				.iter()
				.filter(|&x| pmmr::is_leaf(x.into()))
				.map(|x| x as u64)
				.collect::<Vec<_>>();

			let off_to_rm = map_vec!(leaf_pos_to_rm, |&pos| {
				let flat_pos = pmmr::n_leaves(pos);
				let shift = self.pruned_nodes.get_leaf_shift(pos).unwrap();
				(flat_pos - 1 - shift) * record_len
			});

			self.data_file.save_prune(
				tmp_prune_file_data.clone(),
				off_to_rm,
				record_len,
				prune_cb,
			)?;
		}

		// 3. Update the prune list and save it in place.
		{
			for pos in leaves_removed.iter() {
				self.pruned_nodes.add(pos.into());
			}

			// The prune_list contains *only* non-leaf roots.
			// Contrast this with the leaf_set that contains *only* leaves.
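			// e.g. after pruning leaves 1 and 2 their subtree folds up, so
			// the prune_list ends up holding just their parent root (pos 3).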
			self.pruned_nodes
				.pruned_nodes
				.retain(|&x| !pmmr::is_leaf(x));

			write_vec(
				format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
				&self.pruned_nodes.pruned_nodes,
			)?;
		}

		// 4. Rename the compact copy of hash file and reopen it.
		fs::rename(
			tmp_prune_file_hash.clone(),
			format!("{}/{}", self.data_dir, PMMR_HASH_FILE),
		)?;
		self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE))?;

		// 5. Rename the compact copy of the data file and reopen it.
		fs::rename(
			tmp_prune_file_data.clone(),
			format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
		)?;
		self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;

		// 6. Write the leaf_set to disk.
		// Optimize the bitmap storage in the process.
		self.leaf_set.flush()?;

		Ok(true)
	}

	/// Calculate the sets of leaf positions and full node positions to remove
	/// from the hash and data files, based on the cutoff_pos and any in-flight
	/// rewind state. Returns (leaves_removed, pos_to_rm) as bitmaps.
	fn pos_to_rm(
		&self,
		cutoff_pos: u64,
		rewind_add_pos: &Bitmap,
		rewind_rm_pos: &Bitmap,
	) -> (Bitmap, Bitmap) {
		let mut expanded = Bitmap::create();

		let leaf_pos_to_rm = self.leaf_set.removed_pre_cutoff(
			cutoff_pos,
			rewind_add_pos,
			rewind_rm_pos,
			&self.pruned_nodes,
		);

		for x in leaf_pos_to_rm.iter() {
			expanded.add(x);
			let mut current = x as u64;
			loop {
				let (parent, sibling) = family(current);
				let sibling_pruned = self.is_pruned(sibling);

				// If the sibling was previously pruned, push it back onto the
				// list of pos to remove so we can remove it too and traverse
				// up to the parent.
				if sibling_pruned {
					expanded.add(sibling as u32);
				}

				// A parent can only be removed if both its children are.
				if sibling_pruned || expanded.contains(sibling as u32) {
					expanded.add(parent as u32);
					current = parent;
				} else {
					break;
				}
			}
		}
		(leaf_pos_to_rm, removed_excl_roots(expanded))
	}
}

/// Filter remove list to exclude roots.
/// We want to keep roots around so we have hashes for Merkle proofs.
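/// e.g. if positions {1, 2, 3} were removed, positions 1 and 2 pass the
/// filter (their parent 3 is itself removed) while 3, the root of the pruned
/// subtree, is excluded from removal and kept on disk.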
fn removed_excl_roots(removed: Bitmap) -> Bitmap {
	removed
		.iter()
		.filter(|pos| {
			let (parent_pos, _) = family(*pos as u64);
			removed.contains(parent_pos as u32)
		})
		.collect()
}

fn migrate_rm_log(
	leaf_set: &mut LeafSet,
	rm_log: &RemoveLog,
	prune_list: &PruneList,
	last_pos: u64,
) -> io::Result<()> {
	info!(
		LOGGER,
		"Migrating rm_log -> leaf_set. Might take a little while... {} pos", last_pos
	);

	// Check every leaf: if it is neither pruned nor removed, add it to the
	// leaf_set.
	for x in 1..=last_pos {
		if pmmr::is_leaf(x) && !prune_list.is_pruned(x) && !rm_log.includes(x) {
			leaf_set.add(x);
		}
	}

	leaf_set.flush()?;
	Ok(())
}