// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Utility structs to handle the 3 MMRs (output, rangeproof,
//! kernel) along the overall header MMR conveniently and transactionally.

use std::collections::HashSet;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;

use croaring::Bitmap;

use util::secp::pedersen::{Commitment, RangeProof};

use core::core::committed::Committed;
use core::core::hash::{Hash, Hashed};
use core::core::merkle_proof::MerkleProof;
use core::core::pmmr::{self, ReadonlyPMMR, RewindablePMMR, DBPMMR, PMMR};
use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, TxKernel};
use core::global;
use core::ser::{PMMRIndexHashable, PMMRable};

use error::{Error, ErrorKind};
use grin_store;
use grin_store::pmmr::{HashOnlyMMRBackend, PMMRBackend, PMMR_FILES};
use grin_store::types::prune_noop;
use store::{Batch, ChainStore};
use txhashset::{RewindableKernelView, UTXOView};
use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::{file, secp_static, zip};

const HEADERHASHSET_SUBDIR: &'static str = "header";
const TXHASHSET_SUBDIR: &'static str = "txhashset";

const HEADER_HEAD_SUBDIR: &'static str = "header_head";
const SYNC_HEAD_SUBDIR: &'static str = "sync_head";

const OUTPUT_SUBDIR: &'static str = "output";
const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";

const TXHASHSET_ZIP: &'static str = "txhashset_snapshot.zip";

struct HashOnlyMMRHandle {
	backend: HashOnlyMMRBackend,
	last_pos: u64,
}

impl HashOnlyMMRHandle {
	fn new(root_dir: &str, sub_dir: &str, file_name: &str) -> Result<HashOnlyMMRHandle, Error> {
		let path = Path::new(root_dir).join(sub_dir).join(file_name);
		fs::create_dir_all(path.clone())?;
		let backend = HashOnlyMMRBackend::new(path.to_str().unwrap().to_string())?;
		let last_pos = backend.unpruned_size()?;
		Ok(HashOnlyMMRHandle { backend, last_pos })
	}
}

struct PMMRHandle<T>
where
	T: PMMRable,
{
	backend: PMMRBackend<T>,
	last_pos: u64,
}

impl<T> PMMRHandle<T>
where
	T: PMMRable + ::std::fmt::Debug,
{
	fn new(
		root_dir: &str,
		sub_dir: &str,
		file_name: &str,
		prunable: bool,
		header: Option<&BlockHeader>,
	) -> Result<PMMRHandle<T>, Error> {
		let path = Path::new(root_dir).join(sub_dir).join(file_name);
		fs::create_dir_all(path.clone())?;
		let backend = PMMRBackend::new(path.to_str().unwrap().to_string(), prunable, header)?;
		let last_pos = backend.unpruned_size()?;
		Ok(PMMRHandle { backend, last_pos })
	}
}

/// An easy to manipulate structure holding the 3 sum trees necessary to
/// validate blocks and capturing the Output set, the range proofs and the
/// kernels. Also handles the index of Commitments to positions in the
/// output and range proof pmmr trees.
///
/// Note that the index is never authoritative, only the trees are
/// guaranteed to indicate whether an output is spent or not. The index
/// may have commitments that have already been spent, even with
/// pruning enabled.
pub struct TxHashSet {
	/// Header MMR to support the header_head chain.
	/// This is rewound and applied transactionally with the
	/// output, rangeproof and kernel MMRs during an extension or a
	/// readonly_extension.
	/// It can also be rewound and applied separately via a header_extension.
	/// Note: the header MMR is backed by the database and maintains just the hash file.
	header_pmmr_h: HashOnlyMMRHandle,

	/// Header MMR to support exploratory sync_head.
	/// The header_head and sync_head chains can diverge so we need to maintain
	/// multiple header MMRs during the sync process.
	///
	/// Note: this is rewound and applied separately to the other MMRs
	/// via a "sync_extension".
	/// Note: the sync MMR is backed by the database and maintains just the hash file.
	sync_pmmr_h: HashOnlyMMRHandle,

	output_pmmr_h: PMMRHandle<OutputIdentifier>,
	rproof_pmmr_h: PMMRHandle<RangeProof>,
	kernel_pmmr_h: PMMRHandle<TxKernel>,

	// chain store used as index of commitments to MMR positions
	commit_index: Arc<ChainStore>,
}

impl TxHashSet {
	/// Open an existing or new set of backends for the TxHashSet
	pub fn open(
		root_dir: String,
		commit_index: Arc<ChainStore>,
		header: Option<&BlockHeader>,
	) -> Result<TxHashSet, Error> {
		Ok(TxHashSet {
			header_pmmr_h: HashOnlyMMRHandle::new(
				&root_dir,
				HEADERHASHSET_SUBDIR,
				HEADER_HEAD_SUBDIR,
			)?,
			sync_pmmr_h: HashOnlyMMRHandle::new(&root_dir, HEADERHASHSET_SUBDIR, SYNC_HEAD_SUBDIR)?,
			output_pmmr_h: PMMRHandle::new(
				&root_dir,
				TXHASHSET_SUBDIR,
				OUTPUT_SUBDIR,
				true,
				header,
			)?,
			rproof_pmmr_h: PMMRHandle::new(
				&root_dir,
				TXHASHSET_SUBDIR,
				RANGE_PROOF_SUBDIR,
				true,
				header,
			)?,
			kernel_pmmr_h: PMMRHandle::new(
				&root_dir,
				TXHASHSET_SUBDIR,
				KERNEL_SUBDIR,
				false,
				None,
			)?,
			commit_index,
		})
	}

	/// Check if an output is unspent.
	/// We look in the index to find the output MMR pos.
	/// Then we check the entry in the output MMR and confirm the hash matches.
	pub fn is_unspent(&mut self, output_id: &OutputIdentifier) -> Result<(Hash, u64), Error> {
		match self.commit_index.get_output_pos(&output_id.commit) {
			Ok(pos) => {
				let output_pmmr: PMMR<OutputIdentifier, _> =
					PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
				if let Some(hash) = output_pmmr.get_hash(pos) {
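					// Note: a PMMR leaf hash commits to the entry's 0-based
					// insertion position as well as its data (hence `pos - 1`
					// below); a mismatch means the index entry is stale or
					// points at a different output.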
					if hash == output_id.hash_with_index(pos - 1) {
						Ok((hash, pos))
					} else {
						Err(ErrorKind::TxHashSetErr(format!("txhashset hash mismatch")).into())
					}
				} else {
					Err(ErrorKind::OutputNotFound.into())
				}
			}
			Err(grin_store::Error::NotFoundErr(_)) => Err(ErrorKind::OutputNotFound.into()),
			Err(e) => Err(ErrorKind::StoreErr(e, format!("txhashset unspent check")).into()),
		}
	}

	/// Returns the last N nodes inserted into the tree (i.e. the 'bottom'
	/// nodes at level 0).
	/// TODO: These need to return the actual data from the flat-files instead
	/// of hashes now.
	pub fn last_n_output(&mut self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
		let output_pmmr: PMMR<OutputIdentifier, _> =
			PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
		output_pmmr.get_last_n_insertions(distance)
	}

	/// As above, for range proofs.
	pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, RangeProof)> {
		let rproof_pmmr: PMMR<RangeProof, _> =
			PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
		rproof_pmmr.get_last_n_insertions(distance)
	}

	/// As above, for kernels.
	pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, TxKernel)> {
		let kernel_pmmr: PMMR<TxKernel, _> =
			PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
		kernel_pmmr.get_last_n_insertions(distance)
	}

	/// Returns outputs from the given insertion (leaf) index up to the
	/// specified limit. Also returns the last index actually populated.
	pub fn outputs_by_insertion_index(
		&mut self,
		start_index: u64,
		max_count: u64,
	) -> (u64, Vec<OutputIdentifier>) {
		let output_pmmr: PMMR<OutputIdentifier, _> =
			PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
		output_pmmr.elements_from_insertion_index(start_index, max_count)
	}

	/// Highest output insertion index available.
	pub fn highest_output_insertion_index(&mut self) -> u64 {
		pmmr::n_leaves(self.output_pmmr_h.last_pos)
	}
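
	// For illustration (standard MMR layout assumed): with `last_pos` 7 the
	// leaves sit at positions 1, 2, 4 and 5, while 3, 6 and 7 are internal
	// nodes, so `pmmr::n_leaves(7)` reports 4 insertions.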

	/// As above, for rangeproofs.
	pub fn rangeproofs_by_insertion_index(
		&mut self,
		start_index: u64,
		max_count: u64,
	) -> (u64, Vec<RangeProof>) {
		let rproof_pmmr: PMMR<RangeProof, _> =
			PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
		rproof_pmmr.elements_from_insertion_index(start_index, max_count)
	}

	/// Get MMR roots.
	pub fn roots(&mut self) -> TxHashSetRoots {
		let header_pmmr: DBPMMR<BlockHeader, _> =
			DBPMMR::at(&mut self.header_pmmr_h.backend, self.header_pmmr_h.last_pos);
		let output_pmmr: PMMR<OutputIdentifier, _> =
			PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
		let rproof_pmmr: PMMR<RangeProof, _> =
			PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
		let kernel_pmmr: PMMR<TxKernel, _> =
			PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);

		TxHashSetRoots {
			header_root: header_pmmr.root(),
			output_root: output_pmmr.root(),
			rproof_root: rproof_pmmr.root(),
			kernel_root: kernel_pmmr.root(),
		}
	}

	/// Build a new Merkle proof for the given position.
	pub fn merkle_proof(&mut self, commit: Commitment) -> Result<MerkleProof, String> {
		let pos = self.commit_index.get_output_pos(&commit).unwrap();
		let output_pmmr: PMMR<OutputIdentifier, _> =
			PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
		output_pmmr.merkle_proof(pos)
	}

	/// Compact the MMR data files and flush the rm logs.
	pub fn compact(&mut self) -> Result<(), Error> {
		let commit_index = self.commit_index.clone();
		let head_header = commit_index.head_header()?;
		let current_height = head_header.height;

		// horizon for compacting is based on current_height
		let horizon = current_height.saturating_sub(global::cut_through_horizon().into());
		let horizon_header = self.commit_index.get_header_by_height(horizon)?;

		let batch = self.commit_index.batch()?;
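
		// As with rewind, this bitmap holds the positions of outputs spent
		// between the horizon header and the current head (see
		// `input_pos_to_rewind`, defined later in this file).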
		let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, &batch)?;

		{
			let clean_output_index = |commit: &[u8]| {
				let _ = batch.delete_output_pos(commit);
			};

			self.output_pmmr_h.backend.check_compact(
				horizon_header.output_mmr_size,
				&rewind_rm_pos,
				clean_output_index,
			)?;

			self.rproof_pmmr_h.backend.check_compact(
				horizon_header.output_mmr_size,
				&rewind_rm_pos,
				&prune_noop,
			)?;
		}

		// Finally commit the batch, saving everything to the db.
		batch.commit()?;

		Ok(())
	}
}

/// Starts a new unit of work to extend (or rewind) the chain with additional
/// blocks. Accepts a closure that will operate within that unit of work.
/// The closure has access to an Extension object that allows the addition
/// of blocks to the txhashset and the checking of the current tree roots.
///
/// The unit of work is always discarded (always rollback) as this is read-only.
pub fn extending_readonly<'a, F, T>(trees: &'a mut TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&mut Extension) -> Result<T, Error>,
{
	let commit_index = trees.commit_index.clone();
	let batch = commit_index.batch()?;

	// We want to use the current head of the most work chain unless
	// we explicitly rewind the extension.
	let header = batch.head_header()?;

	trace!("Starting new txhashset (readonly) extension.");

	let res = {
		let mut extension = Extension::new(trees, &batch, header);
		extension.force_rollback();

		// TODO - header_mmr may be out ahead via the header_head
		// TODO - do we need to handle this via an explicit rewind on the header_mmr?

		inner(&mut extension)
	};

	trace!("Rolling back txhashset (readonly) extension.");

	trees.header_pmmr_h.backend.discard();
	trees.output_pmmr_h.backend.discard();
	trees.rproof_pmmr_h.backend.discard();
	trees.kernel_pmmr_h.backend.discard();

	trace!("TxHashSet (readonly) extension done.");

	res
}

/// Readonly view on the UTXO set.
/// Based on the current txhashset output_pmmr.
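///
/// A minimal usage sketch (hypothetical caller; `trees` and the closure body
/// are assumptions, not part of this API's contract):
///
/// ```ignore
/// txhashset::utxo_view(&trees, |utxo| {
///     // read-only checks against the UTXO set go here
///     Ok(())
/// })?;
/// ```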
pub fn utxo_view<'a, F, T>(trees: &'a TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&UTXOView) -> Result<T, Error>,
{
	let res: Result<T, Error>;
	{
		let output_pmmr =
			ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos);

		// Create a new batch here to pass into the utxo_view.
		// Discard it (rollback) after we finish with the utxo_view.
		let batch = trees.commit_index.batch()?;
		let utxo = UTXOView::new(output_pmmr, &batch);
		res = inner(&utxo);
	}
	res
}

/// Rewindable (but still readonly) view on the kernel MMR.
/// The underlying backend is readonly. But we permit the PMMR to be "rewound"
/// via last_pos.
/// We create a new db batch for this view and discard it (rollback)
/// when we are done with the view.
pub fn rewindable_kernel_view<'a, F, T>(trees: &'a TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&mut RewindableKernelView) -> Result<T, Error>,
{
	let res: Result<T, Error>;
	{
		let kernel_pmmr =
			RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos);

		// Create a new batch here to pass into the kernel_view.
		// Discard it (rollback) after we finish with the kernel_view.
		let batch = trees.commit_index.batch()?;
		let header = batch.head_header()?;
		let mut view = RewindableKernelView::new(kernel_pmmr, &batch, header);
		res = inner(&mut view);
	}
	res
}

/// Starts a new unit of work to extend the chain with additional blocks,
/// accepting a closure that will work within that unit of work. The closure
/// has access to an Extension object that allows the addition of blocks to
/// the txhashset and the checking of the current tree roots.
///
/// If the closure returns an error, modifications are canceled and the unit
/// of work is abandoned. Otherwise, the unit of work is permanently applied.
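///
/// A minimal usage sketch (hypothetical caller; `trees`, `batch` and `block`
/// are assumed to be in scope):
///
/// ```ignore
/// txhashset::extending(&mut trees, &mut batch, |extension| {
///     extension.apply_block(&block)
/// })?;
/// // The child batch is committed internally on success; committing the
/// // outer batch remains the caller's responsibility.
/// ```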
pub fn extending<'a, F, T>(
	trees: &'a mut TxHashSet,
	batch: &'a mut Batch,
	inner: F,
) -> Result<T, Error>
where
	F: FnOnce(&mut Extension) -> Result<T, Error>,
{
	let sizes: (u64, u64, u64, u64);
	let res: Result<T, Error>;
	let rollback: bool;

	// We want to use the current head of the most work chain unless
	// we explicitly rewind the extension.
	let header = batch.head_header()?;

	// create a child transaction so if the state is rolled back by itself, all
	// index saving can be undone
	let child_batch = batch.child()?;
	{
		trace!("Starting new txhashset extension.");

		// TODO - header_mmr may be out ahead via the header_head
		// TODO - do we need to handle this via an explicit rewind on the header_mmr?
		let mut extension = Extension::new(trees, &child_batch, header);
		res = inner(&mut extension);

		rollback = extension.rollback;
		sizes = extension.sizes();
	}

	match res {
		Err(e) => {
			debug!("Error returned, discarding txhashset extension: {}", e);
			trees.header_pmmr_h.backend.discard();
			trees.output_pmmr_h.backend.discard();
			trees.rproof_pmmr_h.backend.discard();
			trees.kernel_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				trace!("Rolling back txhashset extension. sizes {:?}", sizes);
				trees.header_pmmr_h.backend.discard();
				trees.output_pmmr_h.backend.discard();
				trees.rproof_pmmr_h.backend.discard();
				trees.kernel_pmmr_h.backend.discard();
			} else {
				trace!("Committing txhashset extension. sizes {:?}", sizes);
				child_batch.commit()?;
				trees.header_pmmr_h.backend.sync()?;
				trees.output_pmmr_h.backend.sync()?;
				trees.rproof_pmmr_h.backend.sync()?;
				trees.kernel_pmmr_h.backend.sync()?;
				trees.header_pmmr_h.last_pos = sizes.0;
				trees.output_pmmr_h.last_pos = sizes.1;
				trees.rproof_pmmr_h.last_pos = sizes.2;
				trees.kernel_pmmr_h.last_pos = sizes.3;
			}

			trace!("TxHashSet extension done.");
			Ok(r)
		}
	}
}

/// Start a new sync MMR unit of work. This MMR tracks the sync_head.
/// This is used during header sync to validate batches of headers as they arrive
/// without needing to repeatedly rewind the header MMR that continues to track
/// the header_head as they diverge during sync.
pub fn sync_extending<'a, F, T>(
	trees: &'a mut TxHashSet,
	batch: &'a mut Batch,
	inner: F,
) -> Result<T, Error>
where
	F: FnOnce(&mut HeaderExtension) -> Result<T, Error>,
{
	let size: u64;
	let res: Result<T, Error>;
	let rollback: bool;

	// We want to use the current sync_head unless
	// we explicitly rewind the extension.
	let head = batch.get_sync_head()?;
	let header = batch.get_block_header(&head.last_block_h)?;

	// create a child transaction so if the state is rolled back by itself, all
	// index saving can be undone
	let child_batch = batch.child()?;
	{
		trace!("Starting new txhashset sync_head extension.");
		let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
		let mut extension = HeaderExtension::new(pmmr, &child_batch, header);

		res = inner(&mut extension);

		rollback = extension.rollback;
		size = extension.size();
	}

	match res {
		Err(e) => {
			debug!(
				"Error returned, discarding txhashset sync_head extension: {}",
				e
			);
			trees.sync_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				trace!("Rolling back txhashset sync_head extension. size {:?}", size);
				trees.sync_pmmr_h.backend.discard();
			} else {
				trace!("Committing txhashset sync_head extension. size {:?}", size);
				child_batch.commit()?;
				trees.sync_pmmr_h.backend.sync()?;
				trees.sync_pmmr_h.last_pos = size;
			}
			trace!("TxHashSet sync_head extension done.");
			Ok(r)
		}
	}
}

/// Start a new header MMR unit of work. This MMR tracks the header_head.
/// This MMR can be extended individually beyond the other (output, rangeproof and kernel) MMRs
/// to allow headers to be validated before we receive the full block data.
pub fn header_extending<'a, F, T>(
	trees: &'a mut TxHashSet,
	batch: &'a mut Batch,
	inner: F,
) -> Result<T, Error>
where
	F: FnOnce(&mut HeaderExtension) -> Result<T, Error>,
{
	let size: u64;
	let res: Result<T, Error>;
	let rollback: bool;

	// We want to use the current head of the header chain unless
	// we explicitly rewind the extension.
	let head = batch.header_head()?;
	let header = batch.get_block_header(&head.last_block_h)?;

	// create a child transaction so if the state is rolled back by itself, all
	// index saving can be undone
	let child_batch = batch.child()?;
	{
		trace!("Starting new txhashset header extension.");
		let pmmr = DBPMMR::at(
			&mut trees.header_pmmr_h.backend,
			trees.header_pmmr_h.last_pos,
		);
		let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
		res = inner(&mut extension);

		rollback = extension.rollback;
		size = extension.size();
	}

	match res {
		Err(e) => {
			debug!(
				"Error returned, discarding txhashset header extension: {}",
				e
			);
			trees.header_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				trace!("Rolling back txhashset header extension. size {:?}", size);
				trees.header_pmmr_h.backend.discard();
			} else {
				trace!("Committing txhashset header extension. size {:?}", size);
				child_batch.commit()?;
				trees.header_pmmr_h.backend.sync()?;
				trees.header_pmmr_h.last_pos = size;
			}
			trace!("TxHashSet header extension done.");
			Ok(r)
		}
	}
}

/// A header extension to allow the header MMR to extend beyond the other MMRs individually.
/// This is to allow headers to be validated against the MMR before we have the full block data.
pub struct HeaderExtension<'a> {
	header: BlockHeader,

	pmmr: DBPMMR<'a, BlockHeader, HashOnlyMMRBackend>,

	/// Rollback flag.
	rollback: bool,

	/// Batch in which the extension occurs, public so it can be used within
	/// an `extending` closure. Just be careful using it that way as it will
	/// get rolled back with the extension (i.e. on a losing fork).
	pub batch: &'a Batch<'a>,
}

impl<'a> HeaderExtension<'a> {
	fn new(
		pmmr: DBPMMR<'a, BlockHeader, HashOnlyMMRBackend>,
		batch: &'a Batch,
		header: BlockHeader,
	) -> HeaderExtension<'a> {
		HeaderExtension {
			header,
			pmmr,
			rollback: false,
			batch,
		}
	}

	/// Apply a new header to the header MMR extension.
	/// This may be either the header MMR or the sync MMR depending on the
	/// extension.
	pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
		self.pmmr.push(&header).map_err(&ErrorKind::TxHashSetErr)?;
		self.header = header.clone();
		Ok(())
	}

	/// Rewind the header extension to the specified header.
	/// Note the close relationship between header height and insertion index.
	pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
		debug!(
			"Rewind header extension to {} at {}",
			header.hash(),
			header.height
		);
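
		// The header at height h is the (h + 1)-th leaf inserted (genesis at
		// height 0 is leaf 1). For example, height 2 maps to insertion 3,
		// which sits at MMR position 4 (leaves occupy positions 1, 2, 4, 5,
		// 8, ...).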
		let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1);
		self.pmmr
			.rewind(header_pos)
			.map_err(&ErrorKind::TxHashSetErr)?;

		// Update our header to reflect the one we rewound to.
		self.header = header.clone();

		Ok(())
	}

	/// Truncate the header MMR (rewind all the way back to pos 0).
	/// Used when rebuilding the header MMR by reapplying all headers
	/// including the genesis block header.
	pub fn truncate(&mut self) -> Result<(), Error> {
		debug!("Truncating header extension.");
		self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// The size of the header MMR.
	pub fn size(&self) -> u64 {
		self.pmmr.unpruned_size()
	}

	/// TODO - think about how to optimize this.
	/// Requires *all* header hashes to be iterated over in ascending order.
	pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> {
		debug!(
			"About to rebuild header extension from {:?} to {:?}.",
			genesis.hash(),
			head.last_block_h,
		);

		let mut header_hashes = vec![];
		let mut current = self.batch.get_block_header(&head.last_block_h)?;
		while current.height > 0 {
			header_hashes.push(current.hash());
			current = self.batch.get_block_header(&current.previous)?;
		}

		header_hashes.reverse();

		// Truncate the extension (back to pos 0).
		self.truncate()?;

		// Re-apply the genesis header after truncation.
		self.apply_header(&genesis)?;

		if header_hashes.len() > 0 {
			debug!(
				"Re-applying {} headers to extension, from {:?} to {:?}.",
				header_hashes.len(),
				header_hashes.first().unwrap(),
				header_hashes.last().unwrap(),
			);

			for h in header_hashes {
				let header = self.batch.get_block_header(&h)?;
				self.validate_root(&header)?;
				self.apply_header(&header)?;
			}
		}
		Ok(())
	}

	/// The root of the header MMR for convenience.
	pub fn root(&self) -> Hash {
		self.pmmr.root()
	}

	/// Validate the prev_root of the header against the root of the current header MMR.
	pub fn validate_root(&self, header: &BlockHeader) -> Result<(), Error> {
		// If we are validating the genesis block then we have no prev_root.
		// So we are done here.
		if header.height == 0 {
			return Ok(());
		}

		if self.root() != header.prev_root {
			Err(ErrorKind::InvalidRoot.into())
		} else {
			Ok(())
		}
	}
}

/// Allows the application of new blocks on top of the sum trees in a
/// reversible manner within a unit of work provided by the `extending`
/// function.
pub struct Extension<'a> {
	header: BlockHeader,

	header_pmmr: DBPMMR<'a, BlockHeader, HashOnlyMMRBackend>,
	output_pmmr: PMMR<'a, OutputIdentifier, PMMRBackend<OutputIdentifier>>,
	rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
	kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,

	/// Rollback flag.
	rollback: bool,

	/// Batch in which the extension occurs, public so it can be used within
	/// an `extending` closure. Just be careful using it that way as it will
	/// get rolled back with the extension (i.e. on a losing fork).
	pub batch: &'a Batch<'a>,
}

impl<'a> Committed for Extension<'a> {
	fn inputs_committed(&self) -> Vec<Commitment> {
		vec![]
	}

	fn outputs_committed(&self) -> Vec<Commitment> {
		let mut commitments = vec![];
		for n in 1..self.output_pmmr.unpruned_size() + 1 {
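			// Walk every MMR position, keeping only leaves; internal nodes
			// carry hashes rather than output data.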
			if pmmr::is_leaf(n) {
				if let Some(out) = self.output_pmmr.get_data(n) {
					commitments.push(out.commit);
				}
			}
		}
		commitments
	}

	fn kernels_committed(&self) -> Vec<Commitment> {
		let mut commitments = vec![];
		for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
			if pmmr::is_leaf(n) {
				if let Some(kernel) = self.kernel_pmmr.get_data(n) {
					commitments.push(kernel.excess);
				}
			}
		}
		commitments
	}
}

impl<'a> Extension<'a> {
	fn new(trees: &'a mut TxHashSet, batch: &'a Batch, header: BlockHeader) -> Extension<'a> {
		Extension {
			header,
			header_pmmr: DBPMMR::at(
				&mut trees.header_pmmr_h.backend,
				trees.header_pmmr_h.last_pos,
			),
			output_pmmr: PMMR::at(
				&mut trees.output_pmmr_h.backend,
				trees.output_pmmr_h.last_pos,
			),
			rproof_pmmr: PMMR::at(
				&mut trees.rproof_pmmr_h.backend,
				trees.rproof_pmmr_h.last_pos,
			),
			kernel_pmmr: PMMR::at(
				&mut trees.kernel_pmmr_h.backend,
				trees.kernel_pmmr_h.last_pos,
			),
			rollback: false,
			batch,
		}
	}

	/// Build a view of the current UTXO set based on the output PMMR.
	pub fn utxo_view(&'a self) -> UTXOView<'a> {
		UTXOView::new(self.output_pmmr.readonly_pmmr(), self.batch)
	}

	// TODO - move this into "utxo_view"
	/// Verify we are not attempting to spend any coinbase outputs
	/// that have not sufficiently matured.
	pub fn verify_coinbase_maturity(&self, inputs: &Vec<Input>, height: u64) -> Result<(), Error> {
		// Find the greatest output pos of any coinbase
		// outputs we are attempting to spend.
		let pos = inputs
			.iter()
			.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
			.filter_map(|x| self.batch.get_output_pos(&x.commitment()).ok())
			.max()
			.unwrap_or(0);

		if pos > 0 {
			// If we have not yet reached 1,000 / 1,440 blocks then
			// we can fail immediately as coinbase cannot be mature.
			if height < global::coinbase_maturity() {
				return Err(ErrorKind::ImmatureCoinbase.into());
			}

			// Find the "cutoff" pos in the output MMR based on the
			// header from 1,000 blocks ago.
			let cutoff_height = height.checked_sub(global::coinbase_maturity()).unwrap_or(0);
			let cutoff_header = self.batch.get_header_by_height(cutoff_height)?;
			let cutoff_pos = cutoff_header.output_mmr_size;
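
			// Worked example (assuming a maturity of 1_000 blocks): at height
			// 1_500 the cutoff is the output MMR size recorded in the header
			// at height 500; any coinbase output inserted after that position
			// has fewer than 1_000 confirmations and is still immature.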

			// If any output pos exceeds the cutoff_pos
			// we know they have not yet sufficiently matured.
			if pos > cutoff_pos {
				return Err(ErrorKind::ImmatureCoinbase.into());
			}
		}

		Ok(())
	}

	/// Apply a new block to the existing state.
	///
	/// Applies the following -
	///   * header
	///   * outputs
	///   * inputs
	///   * kernels
	///
	pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
		self.apply_header(&b.header)?;

		for out in b.outputs() {
			let pos = self.apply_output(out)?;
			// Update the output_pos index for the new output.
			self.batch.save_output_pos(&out.commitment(), pos)?;
		}

		for input in b.inputs() {
			self.apply_input(input)?;
		}

		for kernel in b.kernels() {
			self.apply_kernel(kernel)?;
		}

		// Update the header on the extension to reflect the block we just applied.
		self.header = b.header.clone();

		Ok(())
	}

	fn apply_input(&mut self, input: &Input) -> Result<(), Error> {
		let commit = input.commitment();
		let pos_res = self.batch.get_output_pos(&commit);
		if let Ok(pos) = pos_res {
			let output_id_hash = OutputIdentifier::from_input(input).hash_with_index(pos - 1);
			if let Some(read_hash) = self.output_pmmr.get_hash(pos) {
				// check hash from pmmr matches hash from input (or corresponding output)
				// if not then the input is not being honest about
				// what it is attempting to spend...
				let read_elem = self.output_pmmr.get_data(pos);
				let read_elem_hash = read_elem
					.expect("no output at pos")
					.hash_with_index(pos - 1);
				if output_id_hash != read_hash || output_id_hash != read_elem_hash {
					return Err(
						ErrorKind::TxHashSetErr(format!("output pmmr hash mismatch")).into(),
					);
				}
			}

			// Now prune the output_pmmr, rproof_pmmr and their storage.
			// Input is not valid if we cannot prune successfully (to spend an unspent
			// output).
			match self.output_pmmr.prune(pos) {
				Ok(true) => {
					self.rproof_pmmr
						.prune(pos)
						.map_err(|s| ErrorKind::TxHashSetErr(s))?;
				}
				Ok(false) => return Err(ErrorKind::AlreadySpent(commit).into()),
				Err(s) => return Err(ErrorKind::TxHashSetErr(s).into()),
			}
		} else {
			return Err(ErrorKind::AlreadySpent(commit).into());
		}
		Ok(())
	}

	fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> {
		let commit = out.commitment();

		if let Ok(pos) = self.batch.get_output_pos(&commit) {
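			// An existing unspent output with the same commitment would make
			// the new one ambiguous, so reject outright duplicates here.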
			if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
				if out_mmr.commitment() == commit {
					return Err(ErrorKind::DuplicateCommitment(commit).into());
				}
			}
		}
		// push the new output to the MMR.
		let output_pos = self
			.output_pmmr
			.push(OutputIdentifier::from_output(out))
			.map_err(&ErrorKind::TxHashSetErr)?;

		// push the rangeproof to the MMR.
		let rproof_pos = self
			.rproof_pmmr
			.push(out.proof)
			.map_err(&ErrorKind::TxHashSetErr)?;

		// The output and rproof MMRs should be exactly the same size
		// and we should have inserted to both in exactly the same pos.
		{
			if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
				return Err(
					ErrorKind::Other(format!("output vs rproof MMRs different sizes")).into(),
				);
			}

			if output_pos != rproof_pos {
				return Err(ErrorKind::Other(format!("output vs rproof MMRs different pos")).into());
			}
		}

		Ok(output_pos)
	}

	/// Push kernel onto MMR (hash and data files).
	fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
		self.kernel_pmmr
			.push(kernel.clone())
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
		self.header_pmmr
			.push(&header)
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// TODO - move this into "utxo_view"
	/// Build a Merkle proof for the given output and the block
	/// this extension is currently referencing.
	/// Note: this relies on the MMR being stable even after pruning/compaction.
	/// We need the hash of each sibling pos from the pos up to the peak
	/// including the sibling leaf node which may have been removed.
	pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
		debug!("txhashset: merkle_proof: output: {:?}", output.commit);
		// then calculate the Merkle Proof based on the known pos
		let pos = self.batch.get_output_pos(&output.commit)?;
		let merkle_proof = self
			.output_pmmr
			.merkle_proof(pos)
			.map_err(&ErrorKind::TxHashSetErr)?;

		Ok(merkle_proof)
	}

	/// Saves a snapshot of the output and rangeproof MMRs to disk.
	/// Specifically - saves a snapshot of the utxo file, tagged with
	/// the block hash as filename suffix.
	/// Needed for fast-sync (utxo file needs to be rewound before sending
	/// across).
	pub fn snapshot(&mut self) -> Result<(), Error> {
		self.output_pmmr
			.snapshot(&self.header)
			.map_err(|e| ErrorKind::Other(e))?;
		self.rproof_pmmr
			.snapshot(&self.header)
			.map_err(|e| ErrorKind::Other(e))?;
		Ok(())
	}

	/// Rewinds the MMRs to the provided block, rewinding to the last output pos
	/// and last kernel pos of that block.
	pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
		debug!("Rewind to header {} at {}", header.hash(), header.height);

		// We need to build bitmaps of added and removed output positions
		// so we can correctly rewind all operations applied to the output MMR
		// after the position we are rewinding to (these operations will be
		// undone during rewind).
		// Rewound output pos will be removed from the MMR.
		// Rewound input (spent) pos will be added back to the MMR.
		let rewind_rm_pos = input_pos_to_rewind(header, &self.header, &self.batch)?;

		let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1);

		self.rewind_to_pos(
			header_pos,
			header.output_mmr_size,
			header.kernel_mmr_size,
			&rewind_rm_pos,
		)?;

		// Update our header to reflect the one we rewound to.
		self.header = header.clone();

		Ok(())
	}

	/// Rewinds the MMRs to the provided positions, given the output and
	/// kernel we want to rewind to.
	fn rewind_to_pos(
		&mut self,
		header_pos: u64,
		output_pos: u64,
		kernel_pos: u64,
		rewind_rm_pos: &Bitmap,
	) -> Result<(), Error> {
		debug!(
			"txhashset: rewind_to_pos: header {}, output {}, kernel {}",
			header_pos, output_pos, kernel_pos,
		);

		self.header_pmmr
			.rewind(header_pos)
			.map_err(&ErrorKind::TxHashSetErr)?;
		self.output_pmmr
			.rewind(output_pos, rewind_rm_pos)
			.map_err(&ErrorKind::TxHashSetErr)?;
		self.rproof_pmmr
			.rewind(output_pos, rewind_rm_pos)
			.map_err(&ErrorKind::TxHashSetErr)?;
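		// Kernels are never spent or pruned, so there are no removed
		// positions to restore when rewinding the kernel MMR.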
		self.kernel_pmmr
			.rewind(kernel_pos, &Bitmap::create())
			.map_err(&ErrorKind::TxHashSetErr)?;
		Ok(())
	}

	/// Current root hashes and sums (if applicable) for the Output, range proof
	/// and kernel sum trees.
	pub fn roots(&self) -> TxHashSetRoots {
		TxHashSetRoots {
			header_root: self.header_pmmr.root(),
			output_root: self.output_pmmr.root(),
			rproof_root: self.rproof_pmmr.root(),
			kernel_root: self.kernel_pmmr.root(),
		}
	}

	/// Get the root of the current header MMR.
	pub fn header_root(&self) -> Hash {
		self.header_pmmr.root()
	}

	/// Validate the following MMR roots against the latest header applied -
	/// * output
	/// * rangeproof
	/// * kernel
	///
	/// Note we do not validate the header MMR root here as we need to validate
	/// a header against the state of the MMR *prior* to applying it.
	/// Each header commits to the root of the MMR of all previous headers,
	/// not including the header itself.
	///
	pub fn validate_roots(&self) -> Result<(), Error> {
		// If we are validating the genesis block then we have no outputs or
		// kernels. So we are done here.
		if self.header.height == 0 {
			return Ok(());
		}

		let roots = self.roots();

		if roots.output_root != self.header.output_root
			|| roots.rproof_root != self.header.range_proof_root
			|| roots.kernel_root != self.header.kernel_root
		{
			Err(ErrorKind::InvalidRoot.into())
		} else {
			Ok(())
		}
	}

	/// Validate the provided header by comparing its prev_root to the
	/// root of the current header MMR.
	pub fn validate_header_root(&self, header: &BlockHeader) -> Result<(), Error> {
		if header.height == 0 {
			return Ok(());
		}

		let roots = self.roots();
		if roots.header_root != header.prev_root {
			Err(ErrorKind::InvalidRoot.into())
		} else {
			Ok(())
		}
	}

	/// Validate the header, output and kernel MMR sizes against the block header.
	pub fn validate_sizes(&self) -> Result<(), Error> {
		// If we are validating the genesis block then we have no outputs or
		// kernels. So we are done here.
		if self.header.height == 0 {
			return Ok(());
		}

		let (header_mmr_size, output_mmr_size, rproof_mmr_size, kernel_mmr_size) = self.sizes();
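		// The header MMR contains height + 1 headers (genesis through the
		// current header), so its expected size is one short of the position
		// where the (height + 2)-th insertion would land; e.g. at height 0
		// this is insertion_to_pmmr_index(2) - 1 = 1, the lone genesis leaf.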
|
|
|
|
let expected_header_mmr_size = pmmr::insertion_to_pmmr_index(self.header.height + 2) - 1;
|
|
|
|
|
|
|
|
if header_mmr_size != expected_header_mmr_size {
|
|
|
|
Err(ErrorKind::InvalidMMRSize.into())
|
|
|
|
} else if output_mmr_size != self.header.output_mmr_size {
|
|
|
|
Err(ErrorKind::InvalidMMRSize.into())
|
|
|
|
} else if kernel_mmr_size != self.header.kernel_mmr_size {
|
2018-09-26 11:59:00 +03:00
|
|
|
Err(ErrorKind::InvalidMMRSize.into())
|
|
|
|
} else if output_mmr_size != rproof_mmr_size {
|
2018-09-05 12:51:29 +03:00
|
|
|
Err(ErrorKind::InvalidMMRSize.into())
|
|
|
|
} else {
|
|
|
|
Ok(())
|
2018-04-24 22:53:01 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-07 16:21:41 +03:00
|
|
|
fn validate_mmrs(&self) -> Result<(), Error> {
|
|
|
|
let now = Instant::now();
|
|
|
|
|
2018-02-10 01:32:16 +03:00
|
|
|
// validate all hashes and sums within the trees
|
2018-10-15 19:16:34 +03:00
|
|
|
if let Err(e) = self.header_pmmr.validate() {
|
|
|
|
return Err(ErrorKind::InvalidTxHashSet(e).into());
|
|
|
|
}
|
2018-03-05 22:33:44 +03:00
|
|
|
if let Err(e) = self.output_pmmr.validate() {
|
2018-07-01 01:36:38 +03:00
|
|
|
return Err(ErrorKind::InvalidTxHashSet(e).into());
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
if let Err(e) = self.rproof_pmmr.validate() {
|
2018-07-01 01:36:38 +03:00
|
|
|
return Err(ErrorKind::InvalidTxHashSet(e).into());
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
if let Err(e) = self.kernel_pmmr.validate() {
|
2018-07-01 01:36:38 +03:00
|
|
|
return Err(ErrorKind::InvalidTxHashSet(e).into());
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
|
2018-05-07 16:21:41 +03:00
|
|
|
debug!(
|
2018-10-15 19:16:34 +03:00
|
|
|
"txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s",
|
|
|
|
self.header_pmmr.unpruned_size(),
|
2018-09-12 10:19:05 +03:00
|
|
|
self.output_pmmr.unpruned_size(),
|
|
|
|
self.rproof_pmmr.unpruned_size(),
|
|
|
|
self.kernel_pmmr.unpruned_size(),
|
2018-05-07 16:21:41 +03:00
|
|
|
now.elapsed().as_secs(),
|
|
|
|
);
|
2018-04-24 22:53:01 +03:00
|
|
|
|
2018-05-07 16:21:41 +03:00
|
|
|
Ok(())
|
|
|
|
}
|
2018-02-10 01:32:16 +03:00
|
|
|
|
2018-09-20 11:19:32 +03:00
|
|
|
/// Validate full kernel sums against the provided header (for overage and kernel_offset).
|
|
|
|
/// This is an expensive operation as we need to retrieve all the UTXOs and kernels
|
|
|
|
/// from the respective MMRs.
|
|
|
|
/// For a significantly faster way of validating full kernel sums see BlockSums.
|
2018-09-26 11:59:00 +03:00
|
|
|
pub fn validate_kernel_sums(&self) -> Result<((Commitment, Commitment)), Error> {
|
|
|
|
let (utxo_sum, kernel_sum) = self.verify_kernel_sums(
|
|
|
|
self.header.total_overage(),
|
|
|
|
self.header.total_kernel_offset(),
|
|
|
|
)?;
|
2018-09-20 11:19:32 +03:00
|
|
|
Ok((utxo_sum, kernel_sum))
|
|
|
|
}

    /// Validate the txhashset state against the provided block header.
    /// A "fast validation" will skip rangeproof verification and kernel signature verification.
    pub fn validate(
        &self,
        fast_validation: bool,
        status: &TxHashsetWriteStatus,
    ) -> Result<(Commitment, Commitment), Error> {
        self.validate_mmrs()?;
        self.validate_roots()?;
        self.validate_sizes()?;

        if self.header.height == 0 {
            let zero_commit = secp_static::commit_to_zero_value();
            return Ok((zero_commit.clone(), zero_commit.clone()));
        }

        // The real magicking happens here. Sum of kernel excesses should equal
        // sum of unspent outputs minus total supply.
        let (output_sum, kernel_sum) = self.validate_kernel_sums()?;

        // These are the expensive verification steps (skipped for "fast validation").
        if !fast_validation {
            // Verify the rangeproof associated with each unspent output.
            self.verify_rangeproofs(status)?;

            // Verify all the kernel signatures.
            self.verify_kernel_signatures(status)?;
        }

        Ok((output_sum, kernel_sum))
    }
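
    // Illustrative call sites (hypothetical names, for clarity only):
    //
    //   // full validation: check rangeproofs and kernel signatures too
    //   let (output_sum, kernel_sum) = extension.validate(false, status)?;
    //
    //   // fast validation: trust rangeproofs and kernel signatures, only
    //   // checking MMR integrity, roots, sizes and kernel sums
    //   let (output_sum, kernel_sum) = extension.validate(true, status)?;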

    /// Rebuild the index of MMR positions to the corresponding unspent
    /// outputs, by iterating over the whole output MMR data. This is a costly
    /// operation performed only when we receive a full new chain state.
    pub fn rebuild_index(&self) -> Result<(), Error> {
        for n in 1..self.output_pmmr.unpruned_size() + 1 {
            // non-pruned leaves only
            if pmmr::bintree_postorder_height(n) == 0 {
                if let Some(out) = self.output_pmmr.get_data(n) {
                    self.batch.save_output_pos(&out.commit, n)?;
                }
            }
        }
        Ok(())
    }
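
    // A note on the leaf test above: in this 1-indexed PMMR layout,
    // pmmr::bintree_postorder_height(n) == 0 exactly when position n is a
    // leaf. Among the first seven positions, 1, 2, 4 and 5 are leaves,
    // 3 and 6 are their parents, and 7 is the peak above them.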

    /// Force the rollback of this extension, no matter the result.
    pub fn force_rollback(&mut self) {
        self.rollback = true;
    }

    /// Dumps the output MMR.
    /// We use this after compacting for visual confirmation that it worked.
    pub fn dump_output_pmmr(&self) {
        debug!("-- outputs --");
        self.output_pmmr.dump_from_file(false);
        debug!("--");
        self.output_pmmr.dump_stats();
        debug!("-- end of outputs --");
    }

    /// Dumps the state of the 3 MMRs (output, rangeproof, kernel) via debug
    /// logging. Short version only prints the output tree.
    pub fn dump(&self, short: bool) {
        debug!("-- outputs --");
        self.output_pmmr.dump(short);
        if !short {
            debug!("-- range proofs --");
            self.rproof_pmmr.dump(short);
            debug!("-- kernels --");
            self.kernel_pmmr.dump(short);
        }
    }

    /// Sizes of each of the MMRs (header, output, rangeproof, kernel).
    pub fn sizes(&self) -> (u64, u64, u64, u64) {
        (
            self.header_pmmr.unpruned_size(),
            self.output_pmmr.unpruned_size(),
            self.rproof_pmmr.unpruned_size(),
            self.kernel_pmmr.unpruned_size(),
        )
    }

    fn verify_kernel_signatures(&self, status: &TxHashsetWriteStatus) -> Result<(), Error> {
        let now = Instant::now();

        let mut kern_count = 0;
        let total_kernels = pmmr::n_leaves(self.kernel_pmmr.unpruned_size());
        for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
            if pmmr::is_leaf(n) {
                if let Some(kernel) = self.kernel_pmmr.get_data(n) {
                    kernel.verify()?;
                    kern_count += 1;
                }
            }
            if n % 20 == 0 {
                status.on_validation(kern_count, total_kernels, 0, 0);
            }
        }

        debug!(
            "txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
            kern_count,
            self.kernel_pmmr.unpruned_size(),
            now.elapsed().as_secs(),
        );

        Ok(())
    }
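
    // Design note: kernels are never pruned from the kernel MMR, so walking
    // every position and filtering with pmmr::is_leaf() visits each kernel
    // exactly once; on_validation() is only invoked every 20 positions to
    // keep progress reporting cheap relative to signature verification.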

    fn verify_rangeproofs(&self, status: &TxHashsetWriteStatus) -> Result<(), Error> {
        let now = Instant::now();

        let mut commits: Vec<Commitment> = vec![];
        let mut proofs: Vec<RangeProof> = vec![];

        let mut proof_count = 0;
        let total_rproofs = pmmr::n_leaves(self.output_pmmr.unpruned_size());
        for n in 1..self.output_pmmr.unpruned_size() + 1 {
            if pmmr::is_leaf(n) {
                if let Some(out) = self.output_pmmr.get_data(n) {
                    if let Some(rp) = self.rproof_pmmr.get_data(n) {
                        commits.push(out.commit);
                        proofs.push(rp);
                    } else {
                        // TODO - rangeproof not found
                        return Err(ErrorKind::OutputNotFound.into());
                    }
                    proof_count += 1;

                    if proofs.len() >= 1000 {
                        Output::batch_verify_proofs(&commits, &proofs)?;
                        commits.clear();
                        proofs.clear();
                        debug!(
                            "txhashset: verify_rangeproofs: verified {} rangeproofs",
                            proof_count,
                        );
                    }
                }
            }
            if n % 20 == 0 {
                status.on_validation(0, 0, proof_count, total_rproofs);
            }
        }

        // verify any remaining proofs in the final, partially filled batch
        if !proofs.is_empty() {
            Output::batch_verify_proofs(&commits, &proofs)?;
            commits.clear();
            proofs.clear();
            debug!(
                "txhashset: verify_rangeproofs: verified {} rangeproofs",
                proof_count,
            );
        }

        debug!(
            "txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
            proof_count,
            self.rproof_pmmr.unpruned_size(),
            now.elapsed().as_secs(),
        );
        Ok(())
    }
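
    // Design note: rangeproofs are accumulated and verified in batches of
    // 1000 above because batch verification amortizes the expensive curve
    // operations across many proofs, which is substantially cheaper than
    // verifying each proof individually; the final partially filled batch
    // is flushed after the loop.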
}

/// Packages the txhashset data files into a zip and returns a File handle to
/// the resulting file.
pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
    let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
    let zip_path = Path::new(&root_dir).join(TXHASHSET_ZIP);
    // create the zip archive
    {
        // Temp txhashset directory
        let temp_txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR.to_string() + "_zip");
        // Remove the temp dir if it exists
        if temp_txhashset_path.exists() {
            fs::remove_dir_all(&temp_txhashset_path)?;
        }
        // Copy the txhashset files into the temp dir
        file::copy_dir_to(&txhashset_path, &temp_txhashset_path)?;
        // Check for and remove any files that are not supposed to be there
        check_and_remove_files(&temp_txhashset_path, header)?;
        // Compress the zip
        zip::compress(&temp_txhashset_path, &File::create(zip_path.clone())?)
            .map_err(|ze| ErrorKind::Other(ze.to_string()))?;
    }

    // open it again to read it back
    let zip_file = File::open(zip_path)?;
    Ok(zip_file)
}
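
// Design note: zip_read stages a copy of the live txhashset under
// "<root_dir>/txhashset_zip" before compressing, so that
// check_and_remove_files() can prune anything unexpected from the snapshot
// without touching the node's live txhashset files.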

/// Extracts the txhashset data from a zip file and writes the content into
/// the txhashset storage dir.
pub fn zip_write(
    root_dir: String,
    txhashset_data: File,
    header: &BlockHeader,
) -> Result<(), Error> {
    let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
    fs::create_dir_all(txhashset_path.clone())?;
    zip::decompress(txhashset_data, &txhashset_path)
        .map_err(|ze| ErrorKind::Other(ze.to_string()))?;
    check_and_remove_files(&txhashset_path, header)
}
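
// Illustrative round trip (hypothetical call sites, for clarity only):
//
//   // on the node serving the chain state:
//   let zip = zip_read(root_dir.clone(), &header)?;
//
//   // on the node receiving it, once the zip has been transferred:
//   zip_write(root_dir, zip, &header)?;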

/// Check a txhashset directory and remove any unexpected files or
/// directories.
fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Result<(), Error> {
    // First compare the subdirectories
    let subdirectories_expected: HashSet<_> = [OUTPUT_SUBDIR, KERNEL_SUBDIR, RANGE_PROOF_SUBDIR]
        .iter()
        .cloned()
        .map(|s| String::from(s))
        .collect();

    let subdirectories_found: HashSet<_> = fs::read_dir(txhashset_path)?
        .filter_map(|entry| {
            entry.ok().and_then(|e| {
                e.path()
                    .file_name()
                    .and_then(|n| n.to_str().map(|s| String::from(s)))
            })
        }).collect();

    let dir_difference: Vec<String> = subdirectories_found
        .difference(&subdirectories_expected)
        .cloned()
        .collect();

    // Remove unexpected directories if needed
    if !dir_difference.is_empty() {
        debug!("Unexpected folder(s) found in txhashset folder, removing.");
        for diff in dir_difference {
            let diff_path = txhashset_path.join(diff);
            file::delete(diff_path)?;
        }
    }

    // Then compare the files found in the subdirectories
    let pmmr_files_expected: HashSet<_> = PMMR_FILES
        .iter()
        .cloned()
        .map(|s| {
            if s.contains("pmmr_leaf.bin") {
                format!("{}.{}", s, header.hash())
            } else {
                String::from(s)
            }
        }).collect();

    let subdirectories = fs::read_dir(txhashset_path)?;
    for subdirectory in subdirectories {
        let subdirectory_path = subdirectory?.path();
        let pmmr_files = fs::read_dir(&subdirectory_path)?;
        let pmmr_files_found: HashSet<_> = pmmr_files
            .filter_map(|entry| {
                entry.ok().and_then(|e| {
                    e.path()
                        .file_name()
                        .and_then(|n| n.to_str().map(|s| String::from(s)))
                })
            }).collect();
        let difference: Vec<String> = pmmr_files_found
            .difference(&pmmr_files_expected)
            .cloned()
            .collect();
        if !difference.is_empty() {
            debug!(
                "Unexpected file(s) found in txhashset subfolder {:?}, removing.",
                &subdirectory_path
            );
            for diff in difference {
                let diff_path = subdirectory_path.join(diff);
                file::delete(diff_path)?;
            }
        }
    }
    Ok(())
}
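
// Note: pmmr_files_expected above special-cases "pmmr_leaf.bin" because a
// snapshot's leaf set file carries the block header hash as a suffix
// ("pmmr_leaf.bin.<hash>"), while the other PMMR files keep their plain
// names.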

/// Given a block header to rewind to and the block header at the
/// head of the current chain state, we need to calculate the positions
/// of all inputs (spent outputs) we need to "undo" during a rewind.
/// We do this by leveraging the "block_input_bitmap" cache and OR'ing
/// the set of bitmaps together for the set of blocks being rewound.
pub fn input_pos_to_rewind(
    block_header: &BlockHeader,
    head_header: &BlockHeader,
    batch: &Batch,
) -> Result<Bitmap, Error> {
    let mut current = head_header.hash();
    let mut height = head_header.height;

    if head_header.height < block_header.height {
        debug!(
            "input_pos_to_rewind: {} < {}, nothing to rewind",
            head_header.height, block_header.height
        );
        return Ok(Bitmap::create());
    }

    // Batch up the block input bitmaps, running fast_or() on every batch of
    // 256 bitmaps to avoid maintaining a huge vec of bitmaps.
    let bitmap_fast_or = |b_res, block_input_bitmaps: &mut Vec<Bitmap>| -> Option<Bitmap> {
        if let Some(b) = b_res {
            block_input_bitmaps.push(b);
            if block_input_bitmaps.len() < 256 {
                return None;
            }
        }
        let bitmap = Bitmap::fast_or(&block_input_bitmaps.iter().collect::<Vec<&Bitmap>>());
        block_input_bitmaps.clear();
        block_input_bitmaps.push(bitmap.clone());
        Some(bitmap)
    };

    let mut block_input_bitmaps: Vec<Bitmap> = vec![];
    let bh = block_header.hash();

    while current != bh {
        // We cache recent block headers and block_input_bitmaps
        // internally in our db layer (commit_index).
        // I/O should be minimized or eliminated here for most
        // rewind scenarios.
        if let Ok(b_res) = batch.get_block_input_bitmap(&current) {
            bitmap_fast_or(Some(b_res), &mut block_input_bitmaps);
        }
        if height == 0 {
            break;
        }
        height -= 1;
        current = batch.get_hash_by_height(height)?;
    }

    let bitmap = bitmap_fast_or(None, &mut block_input_bitmaps).unwrap();
    Ok(bitmap)
}
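
// Illustrative sketch (hypothetical names, for clarity only): rewinding from
// the current head back to a fork point might gather the spent positions as
//
//   let spent = input_pos_to_rewind(&fork_header, &head_header, &batch)?;
//   for pos in spent.iter() {
//       // each pos is an output MMR position to un-spend during the rewind
//   }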