2018-03-02 23:47:27 +03:00
|
|
|
// Copyright 2018 The Grin Developers
|
2017-09-28 02:46:32 +03:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
//! Utility structs to handle the 3 hashtrees (output, range proof, kernel) more
|
2017-09-28 02:46:32 +03:00
|
|
|
//! conveniently and transactionally.
|
|
|
|
|
|
|
|
use std::fs;
|
|
|
|
use std::collections::HashMap;
|
2018-02-10 01:32:16 +03:00
|
|
|
use std::fs::File;
|
|
|
|
use std::path::{Path, PathBuf};
|
2017-09-28 02:46:32 +03:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
2018-02-17 20:56:22 +03:00
|
|
|
use util::static_secp_instance;
|
2018-03-04 03:19:54 +03:00
|
|
|
use util::secp::pedersen::{Commitment, RangeProof};
|
2018-02-10 01:32:16 +03:00
|
|
|
|
2018-03-13 05:39:22 +03:00
|
|
|
use core::consensus::REWARD;
|
2018-03-04 03:19:54 +03:00
|
|
|
use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier,
|
|
|
|
OutputStoreable, TxKernel};
|
|
|
|
use core::core::pmmr::{self, MerkleProof, PMMR};
|
2018-03-06 20:58:33 +03:00
|
|
|
use core::global;
|
2018-02-22 16:45:13 +03:00
|
|
|
use core::core::hash::{Hash, Hashed};
|
2018-03-06 20:58:33 +03:00
|
|
|
use core::ser::{self, PMMRIndexHashable, PMMRable};
|
2018-02-22 16:45:13 +03:00
|
|
|
|
2017-09-28 02:46:32 +03:00
|
|
|
use grin_store;
|
2018-03-03 12:08:36 +03:00
|
|
|
use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
|
2018-03-06 20:58:33 +03:00
|
|
|
use grin_store::types::prune_noop;
|
2018-03-13 21:22:34 +03:00
|
|
|
use keychain::BlindingFactor;
|
2018-03-05 22:33:44 +03:00
|
|
|
use types::{ChainStore, Error, PMMRFileMetadataCollection, TxHashSetRoots};
|
2018-03-04 03:19:54 +03:00
|
|
|
use util::{zip, LOGGER};
|
2017-09-28 02:46:32 +03:00
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
/// Subdirectory (under the chain data dir) holding all three MMR backends.
const TXHASHSET_SUBDIR: &'static str = "txhashset";
/// Subdirectory for the output MMR backend files.
const OUTPUT_SUBDIR: &'static str = "output";
/// Subdirectory for the range proof MMR backend files.
const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
/// Subdirectory for the kernel MMR backend files.
const KERNEL_SUBDIR: &'static str = "kernel";
/// Filename used when zipping up a txhashset snapshot for transfer.
const TXHASHSET_ZIP: &'static str = "txhashset_snapshot.zip";
|
2017-09-28 02:46:32 +03:00
|
|
|
|
2017-09-29 21:44:25 +03:00
|
|
|
/// Thin wrapper pairing a PMMR file backend with the last position written
/// to it, so a PMMR view can be (re)constructed over the backend on demand.
struct PMMRHandle<T>
where
	T: PMMRable,
{
	// file-backed storage for the MMR hashes and data
	backend: PMMRBackend<T>,
	// last MMR position written; used as the size when opening a PMMR view
	last_pos: u64,
}
|
|
|
|
|
2017-09-29 21:44:25 +03:00
|
|
|
impl<T> PMMRHandle<T>
where
	T: PMMRable + ::std::fmt::Debug,
{
	/// Open (or create) the backend files for this MMR under
	/// `<root_dir>/txhashset/<file_name>`, optionally resuming from
	/// previously recorded file metadata. The handle's `last_pos` is
	/// initialized from the backend's unpruned size.
	fn new(
		root_dir: String,
		file_name: &str,
		index_md: Option<PMMRFileMetadata>,
	) -> Result<PMMRHandle<T>, Error> {
		let path = Path::new(&root_dir).join(TXHASHSET_SUBDIR).join(file_name);
		fs::create_dir_all(path.clone())?;
		let be = PMMRBackend::new(path.to_str().unwrap().to_string(), index_md)?;
		let sz = be.unpruned_size()?;
		Ok(PMMRHandle {
			backend: be,
			last_pos: sz,
		})
	}

	/// Return last written positions of hash file and data file
	pub fn last_file_positions(&self) -> PMMRFileMetadata {
		self.backend.last_file_positions()
	}
}
|
|
|
|
|
|
|
|
/// An easy to manipulate structure holding the 3 sum trees necessary to
/// validate blocks and capturing the Output set, the range proofs and the
/// kernels. Also handles the index of Commitments to positions in the
/// output and range proof pmmr trees.
///
/// Note that the index is never authoritative, only the trees are
/// guaranteed to indicate whether an output is spent or not. The index
/// may have commitments that have already been spent, even with
/// pruning enabled.
pub struct TxHashSet {
	// handle to the output MMR backend
	output_pmmr_h: PMMRHandle<OutputStoreable>,
	// handle to the range proof MMR backend
	rproof_pmmr_h: PMMRHandle<RangeProof>,
	// handle to the kernel MMR backend
	kernel_pmmr_h: PMMRHandle<TxKernel>,

	// chain store used as index of commitments to MMR positions
	commit_index: Arc<ChainStore>,
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
impl TxHashSet {
|
|
|
|
/// Open an existing or new set of backends for the TxHashSet
|
2018-03-04 03:19:54 +03:00
|
|
|
pub fn open(
|
|
|
|
root_dir: String,
|
2018-03-03 12:08:36 +03:00
|
|
|
commit_index: Arc<ChainStore>,
|
2018-03-04 03:19:54 +03:00
|
|
|
last_file_positions: Option<PMMRFileMetadataCollection>,
|
2018-03-05 22:33:44 +03:00
|
|
|
) -> Result<TxHashSet, Error> {
|
2018-03-06 20:58:33 +03:00
|
|
|
let output_file_path: PathBuf = [&root_dir, TXHASHSET_SUBDIR, OUTPUT_SUBDIR]
|
|
|
|
.iter()
|
|
|
|
.collect();
|
2018-03-05 22:33:44 +03:00
|
|
|
fs::create_dir_all(output_file_path.clone())?;
|
2018-02-22 16:45:13 +03:00
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
let rproof_file_path: PathBuf = [&root_dir, TXHASHSET_SUBDIR, RANGE_PROOF_SUBDIR]
|
2018-03-04 03:19:54 +03:00
|
|
|
.iter()
|
|
|
|
.collect();
|
2018-02-22 16:45:13 +03:00
|
|
|
fs::create_dir_all(rproof_file_path.clone())?;
|
|
|
|
|
2018-03-06 20:58:33 +03:00
|
|
|
let kernel_file_path: PathBuf = [&root_dir, TXHASHSET_SUBDIR, KERNEL_SUBDIR]
|
|
|
|
.iter()
|
|
|
|
.collect();
|
2018-02-10 01:32:16 +03:00
|
|
|
fs::create_dir_all(kernel_file_path.clone())?;
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
let mut output_md = None;
|
2018-03-03 12:08:36 +03:00
|
|
|
let mut rproof_md = None;
|
|
|
|
let mut kernel_md = None;
|
|
|
|
|
|
|
|
if let Some(p) = last_file_positions {
|
2018-03-05 22:33:44 +03:00
|
|
|
output_md = Some(p.output_file_md);
|
2018-03-03 12:08:36 +03:00
|
|
|
rproof_md = Some(p.rproof_file_md);
|
|
|
|
kernel_md = Some(p.kernel_file_md);
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
Ok(TxHashSet {
|
|
|
|
output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, output_md)?,
|
2018-03-03 12:08:36 +03:00
|
|
|
rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, rproof_md)?,
|
|
|
|
kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, kernel_md)?,
|
2017-09-28 02:46:32 +03:00
|
|
|
commit_index: commit_index,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-03-02 23:47:27 +03:00
|
|
|
/// Check if an output is unspent.
|
2018-01-17 06:03:40 +03:00
|
|
|
/// We look in the index to find the output MMR pos.
|
|
|
|
/// Then we check the entry in the output MMR and confirm the hash matches.
|
2018-03-02 23:47:27 +03:00
|
|
|
pub fn is_unspent(&mut self, output_id: &OutputIdentifier) -> Result<Hash, Error> {
|
2018-02-22 16:45:13 +03:00
|
|
|
match self.commit_index.get_output_pos(&output_id.commit) {
|
2017-11-22 23:14:42 +03:00
|
|
|
Ok(pos) => {
|
2018-03-04 03:19:54 +03:00
|
|
|
let output_pmmr: PMMR<OutputStoreable, _> =
|
2018-03-05 22:33:44 +03:00
|
|
|
PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
|
2018-02-22 16:45:13 +03:00
|
|
|
if let Some((hash, _)) = output_pmmr.get(pos, false) {
|
2018-03-05 18:05:42 +03:00
|
|
|
if hash == output_id.hash_with_index(pos) {
|
2018-03-02 23:47:27 +03:00
|
|
|
Ok(hash)
|
2018-01-17 06:03:40 +03:00
|
|
|
} else {
|
2018-03-05 22:33:44 +03:00
|
|
|
Err(Error::TxHashSetErr(format!("txhashset hash mismatch")))
|
2018-01-17 06:03:40 +03:00
|
|
|
}
|
2017-11-22 23:14:42 +03:00
|
|
|
} else {
|
2018-01-17 06:03:40 +03:00
|
|
|
Err(Error::OutputNotFound)
|
2017-11-22 23:14:42 +03:00
|
|
|
}
|
|
|
|
}
|
2018-01-17 06:03:40 +03:00
|
|
|
Err(grin_store::Error::NotFoundErr) => Err(Error::OutputNotFound),
|
2018-03-05 22:33:44 +03:00
|
|
|
Err(e) => Err(Error::StoreErr(e, format!("txhashset unspent check"))),
|
2017-09-28 02:46:32 +03:00
|
|
|
}
|
|
|
|
}
|
2017-10-24 21:11:58 +03:00
|
|
|
|
2017-10-28 00:57:04 +03:00
|
|
|
/// returns the last N nodes inserted into the tree (i.e. the 'bottom'
|
|
|
|
/// nodes at level 0
|
2018-02-22 16:45:13 +03:00
|
|
|
/// TODO: These need to return the actual data from the flat-files instead of hashes now
|
2018-03-05 22:33:44 +03:00
|
|
|
pub fn last_n_output(&mut self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> {
|
|
|
|
let output_pmmr: PMMR<OutputStoreable, _> =
|
|
|
|
PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
|
|
|
|
output_pmmr.get_last_n_insertions(distance)
|
2017-10-28 00:57:04 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// as above, for range proofs
|
2018-02-22 16:45:13 +03:00
|
|
|
pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> {
|
2018-03-04 03:19:54 +03:00
|
|
|
let rproof_pmmr: PMMR<RangeProof, _> =
|
|
|
|
PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
|
2017-10-28 00:57:04 +03:00
|
|
|
rproof_pmmr.get_last_n_insertions(distance)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// as above, for kernels
|
2018-02-22 16:45:13 +03:00
|
|
|
pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> {
|
2018-03-04 03:19:54 +03:00
|
|
|
let kernel_pmmr: PMMR<TxKernel, _> =
|
|
|
|
PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
|
2017-10-28 00:57:04 +03:00
|
|
|
kernel_pmmr.get_last_n_insertions(distance)
|
|
|
|
}
|
|
|
|
|
2018-02-10 01:32:16 +03:00
|
|
|
/// Output and kernel MMR indexes at the end of the provided block
|
2018-03-09 00:36:51 +03:00
|
|
|
pub fn indexes_at(&self, bh: &Hash) -> Result<(u64, u64), Error> {
|
|
|
|
self.commit_index.get_block_marker(bh).map_err(&From::from)
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
/// Last file positions of Output set.. hash file,data file
|
2018-03-03 12:08:36 +03:00
|
|
|
pub fn last_file_metadata(&self) -> PMMRFileMetadataCollection {
|
|
|
|
PMMRFileMetadataCollection::new(
|
2018-03-05 22:33:44 +03:00
|
|
|
self.output_pmmr_h.last_file_positions(),
|
2018-03-03 12:08:36 +03:00
|
|
|
self.rproof_pmmr_h.last_file_positions(),
|
2018-03-04 03:19:54 +03:00
|
|
|
self.kernel_pmmr_h.last_file_positions(),
|
2018-03-03 12:08:36 +03:00
|
|
|
)
|
|
|
|
}
|
2018-03-04 03:19:54 +03:00
|
|
|
|
2017-10-24 21:11:58 +03:00
|
|
|
/// Get sum tree roots
|
2018-02-22 16:45:13 +03:00
|
|
|
/// TODO: Return data instead of hashes
|
2018-03-04 03:19:54 +03:00
|
|
|
pub fn roots(&mut self) -> (Hash, Hash, Hash) {
|
|
|
|
let output_pmmr: PMMR<OutputStoreable, _> =
|
2018-03-05 22:33:44 +03:00
|
|
|
PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
|
2018-03-04 03:19:54 +03:00
|
|
|
let rproof_pmmr: PMMR<RangeProof, _> =
|
|
|
|
PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
|
|
|
|
let kernel_pmmr: PMMR<TxKernel, _> =
|
|
|
|
PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
|
2017-10-28 00:57:04 +03:00
|
|
|
(output_pmmr.root(), rproof_pmmr.root(), kernel_pmmr.root())
|
2017-10-24 21:11:58 +03:00
|
|
|
}
|
2018-03-06 20:58:33 +03:00
|
|
|
|
|
|
|
/// Compact the MMR data files and flush the rm logs
|
|
|
|
pub fn compact(&mut self) -> Result<(), Error> {
|
|
|
|
let commit_index = self.commit_index.clone();
|
2018-03-13 21:22:34 +03:00
|
|
|
let head = commit_index.head()?;
|
|
|
|
let current_height = head.height;
|
|
|
|
|
|
|
|
// horizon for compacting is based on current_height
|
|
|
|
let horizon = (current_height as u32).saturating_sub(global::cut_through_horizon());
|
|
|
|
|
2018-03-06 20:58:33 +03:00
|
|
|
let clean_output_index = |commit: &[u8]| {
|
2018-03-13 21:22:34 +03:00
|
|
|
// do we care if this fails?
|
2018-03-06 20:58:33 +03:00
|
|
|
let _ = commit_index.delete_output_pos(commit);
|
|
|
|
};
|
2018-03-13 21:22:34 +03:00
|
|
|
|
2018-03-06 20:58:33 +03:00
|
|
|
let min_rm = (horizon / 10) as usize;
|
2018-03-13 21:22:34 +03:00
|
|
|
|
2018-03-06 20:58:33 +03:00
|
|
|
self.output_pmmr_h
|
|
|
|
.backend
|
|
|
|
.check_compact(min_rm, horizon, clean_output_index)?;
|
2018-03-13 21:22:34 +03:00
|
|
|
|
2018-03-06 20:58:33 +03:00
|
|
|
self.rproof_pmmr_h
|
|
|
|
.backend
|
|
|
|
.check_compact(min_rm, horizon, &prune_noop)?;
|
|
|
|
Ok(())
|
|
|
|
}
|
2017-09-28 02:46:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Starts a new unit of work to extend the chain with additional blocks,
/// accepting a closure that will work within that unit of work. The closure
/// has access to an Extension object that allows the addition of blocks to
/// the txhashset and the checking of the current tree roots.
///
/// If the closure returns an error, modifications are canceled and the unit
/// of work is abandoned. Otherwise, the unit of work is permanently applied.
pub fn extending<'a, F, T>(trees: &'a mut TxHashSet, inner: F) -> Result<T, Error>
where
	F: FnOnce(&mut Extension) -> Result<T, Error>,
{
	let sizes: (u64, u64, u64);
	let res: Result<T, Error>;
	let rollback: bool;
	// inner scope: the Extension mutably borrows the trees, so it must be
	// dropped before we can discard/sync the backends below
	{
		let commit_index = trees.commit_index.clone();

		debug!(LOGGER, "Starting new txhashset extension.");
		let mut extension = Extension::new(trees, commit_index);
		res = inner(&mut extension);

		rollback = extension.rollback;
		// persist the position indexes only on a successful, non-rollback run
		if res.is_ok() && !rollback {
			extension.save_indexes()?;
		}
		sizes = extension.sizes();
	}
	match res {
		Err(e) => {
			// abandon all uncommitted changes in every backend
			debug!(LOGGER, "Error returned, discarding txhashset extension.");
			trees.output_pmmr_h.backend.discard();
			trees.rproof_pmmr_h.backend.discard();
			trees.kernel_pmmr_h.backend.discard();
			Err(e)
		}
		Ok(r) => {
			if rollback {
				// caller explicitly requested a rollback (e.g. validation-only run)
				debug!(LOGGER, "Rollbacking txhashset extension.");
				trees.output_pmmr_h.backend.discard();
				trees.rproof_pmmr_h.backend.discard();
				trees.kernel_pmmr_h.backend.discard();
			} else {
				// commit: sync all backends to disk and advance last_pos markers
				debug!(LOGGER, "Committing txhashset extension.");
				trees.output_pmmr_h.backend.sync()?;
				trees.rproof_pmmr_h.backend.sync()?;
				trees.kernel_pmmr_h.backend.sync()?;
				trees.output_pmmr_h.last_pos = sizes.0;
				trees.rproof_pmmr_h.last_pos = sizes.1;
				trees.kernel_pmmr_h.last_pos = sizes.2;
			}

			debug!(LOGGER, "TxHashSet extension done.");
			Ok(r)
		}
	}
}
|
|
|
|
|
|
|
|
/// Allows the application of new blocks on top of the sum trees in a
/// reversible manner within a unit of work provided by the `extending`
/// function.
pub struct Extension<'a> {
	// mutable PMMR views over the three backends for the lifetime of the extension
	output_pmmr: PMMR<'a, OutputStoreable, PMMRBackend<OutputStoreable>>,
	rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
	kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,

	// index of commitments to MMR positions, backed by the chain store
	commit_index: Arc<ChainStore>,
	// output positions added during this extension, persisted by save_indexes()
	new_output_commits: HashMap<Commitment, u64>,
	// per-block (output pos, kernel pos) markers, persisted by save_indexes()
	new_block_markers: HashMap<Hash, (u64, u64)>,
	// when true, `extending` discards all changes instead of committing
	rollback: bool,
}
|
|
|
|
|
|
|
|
impl<'a> Extension<'a> {
|
|
|
|
	// constructor
	// Opens PMMR views at each handle's recorded last_pos; starts with empty
	// index maps and rollback disabled.
	fn new(trees: &'a mut TxHashSet, commit_index: Arc<ChainStore>) -> Extension<'a> {
		Extension {
			output_pmmr: PMMR::at(
				&mut trees.output_pmmr_h.backend,
				trees.output_pmmr_h.last_pos,
			),
			rproof_pmmr: PMMR::at(
				&mut trees.rproof_pmmr_h.backend,
				trees.rproof_pmmr_h.last_pos,
			),
			kernel_pmmr: PMMR::at(
				&mut trees.kernel_pmmr_h.backend,
				trees.kernel_pmmr_h.last_pos,
			),
			commit_index: commit_index,
			new_output_commits: HashMap::new(),
			new_block_markers: HashMap::new(),
			rollback: false,
		}
	}
|
|
|
|
|
|
|
|
	/// Apply a new set of blocks on top the existing sum trees. Blocks are
	/// applied in order of the provided Vec. If pruning is enabled, inputs also
	/// prune MMR data.
	pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
		// first applying coinbase outputs. due to the construction of PMMRs the
		// last element, when its a leaf, can never be pruned as it has no parent
		// yet and it will be needed to calculate that hash. to work around this,
		// we insert coinbase outputs first to add at least one output of padding
		for out in &b.outputs {
			if out.features.contains(OutputFeatures::COINBASE_OUTPUT) {
				self.apply_output(out)?;
			}
		}

		// then doing inputs guarantees an input can't spend an output in the
		// same block, enforcing block cut-through
		for input in &b.inputs {
			self.apply_input(input, b.header.height)?;
		}

		// now all regular, non coinbase outputs
		for out in &b.outputs {
			if !out.features.contains(OutputFeatures::COINBASE_OUTPUT) {
				self.apply_output(out)?;
			}
		}

		// then applying all kernels
		for kernel in &b.kernels {
			self.apply_kernel(kernel)?;
		}

		// finally, recording the PMMR positions after this block for future rewind
		let last_output_pos = self.output_pmmr.unpruned_size();
		let last_kernel_pos = self.kernel_pmmr.unpruned_size();
		self.new_block_markers
			.insert(b.hash(), (last_output_pos, last_kernel_pos));

		Ok(())
	}
|
|
|
|
|
2018-03-09 00:36:51 +03:00
|
|
|
	// Persist the output-position and block-marker indexes accumulated during
	// this extension to the underlying chain store. Called by `extending` only
	// on a successful, non-rollback run.
	fn save_indexes(&self) -> Result<(), Error> {
		// store all new output pos in the index
		for (commit, pos) in &self.new_output_commits {
			self.commit_index.save_output_pos(commit, *pos)?;
		}
		for (bh, tag) in &self.new_block_markers {
			self.commit_index.save_block_marker(bh, tag)?;
		}
		Ok(())
	}
|
|
|
|
|
2018-01-08 04:23:23 +03:00
|
|
|
	// Spend a single input: verify it references an unspent output in the
	// output MMR (and, for coinbase outputs, that maturity is satisfied via
	// the input's Merkle proof), then prune the output and its range proof.
	fn apply_input(&mut self, input: &Input, height: u64) -> Result<(), Error> {
		let commit = input.commitment();
		let pos_res = self.get_output_pos(&commit);
		if let Ok(pos) = pos_res {
			let output_id_hash = OutputIdentifier::from_input(input).hash_with_index(pos);
			if let Some((read_hash, read_elem)) = self.output_pmmr.get(pos, true) {
				// check hash from pmmr matches hash from input (or corresponding output)
				// if not then the input is not being honest about
				// what it is attempting to spend...
				if output_id_hash != read_hash
					|| output_id_hash
						!= read_elem
							.expect("no output at position")
							.hash_with_index(pos)
				{
					return Err(Error::TxHashSetErr(format!("output pmmr hash mismatch")));
				}

				// check coinbase maturity with the Merkle Proof on the input
				if input.features.contains(OutputFeatures::COINBASE_OUTPUT) {
					let header = self.commit_index.get_block_header(&input.block_hash())?;
					input.verify_maturity(read_hash, &header, height)?;
				}
			}

			// Now prune the output_pmmr, rproof_pmmr and their storage.
			// Input is not valid if we cannot prune successfully (to spend an unspent
			// output).
			match self.output_pmmr.prune(pos, height as u32) {
				Ok(true) => {
					// output pruned; prune the matching range proof at the same pos
					self.rproof_pmmr
						.prune(pos, height as u32)
						.map_err(|s| Error::TxHashSetErr(s))?;
				}
				// already pruned => this output was already spent
				Ok(false) => return Err(Error::AlreadySpent(commit)),
				Err(s) => return Err(Error::TxHashSetErr(s)),
			}
		} else {
			// no index entry at all => not an unspent output we know of
			return Err(Error::AlreadySpent(commit));
		}
		Ok(())
	}
|
|
|
|
|
|
|
|
	// Add a single output: reject duplicates of currently-unspent commitments,
	// then push the output and its range proof onto their respective MMRs and
	// record the new position in the in-memory index.
	fn apply_output(&mut self, out: &Output) -> Result<(), Error> {
		let commit = out.commitment();

		if let Ok(pos) = self.get_output_pos(&commit) {
			// we need to check whether the commitment is in the current MMR view
			// as well as the index doesn't support rewind and is non-authoritative
			// (non-historical node will have a much smaller one)
			// note that this doesn't show the commitment *never* existed, just
			// that this is not an existing unspent commitment right now
			if let Some((hash, _)) = self.output_pmmr.get(pos, false) {
				// processing a new fork so we may get a position on the old
				// fork that exists but matches a different node
				// filtering that case out
				if hash == OutputStoreable::from_output(out).hash() {
					return Err(Error::DuplicateCommitment(commit));
				}
			}
		}
		// push new outputs in their MMR and save them in the index
		let pos = self.output_pmmr
			.push(OutputStoreable::from_output(out))
			.map_err(&Error::TxHashSetErr)?;
		self.new_output_commits.insert(out.commitment(), pos);

		// push range proofs in their MMR and file
		self.rproof_pmmr
			.push(out.proof)
			.map_err(&Error::TxHashSetErr)?;
		Ok(())
	}
|
|
|
|
|
|
|
|
	// Append a single kernel to the kernel MMR (and its backing file).
	fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
		// push kernels in their MMR and file
		self.kernel_pmmr
			.push(kernel.clone())
			.map_err(&Error::TxHashSetErr)?;

		Ok(())
	}
|
|
|
|
|
2018-03-02 23:47:27 +03:00
|
|
|
	/// Build a Merkle proof for the given output and the block by
	/// rewinding the MMR to the last pos of the block.
	/// Note: this relies on the MMR being stable even after pruning/compaction.
	/// We need the hash of each sibling pos from the pos up to the peak
	/// including the sibling leaf node which may have been removed.
	pub fn merkle_proof_via_rewind(
		&mut self,
		output: &OutputIdentifier,
		block_header: &BlockHeader,
	) -> Result<MerkleProof, Error> {
		debug!(
			LOGGER,
			"txhashset: merkle_proof_via_rewind: rewinding to block {:?}",
			block_header.hash()
		);

		// rewind to the specified block
		self.rewind(block_header)?;
		// then calculate the Merkle Proof based on the known pos
		let pos = self.get_output_pos(&output.commit)?;
		let merkle_proof = self.output_pmmr
			.merkle_proof(pos)
			.map_err(&Error::TxHashSetErr)?;

		Ok(merkle_proof)
	}
|
|
|
|
|
2018-02-10 01:32:16 +03:00
|
|
|
	/// Rewinds the MMRs to the provided block, using the last output and
	/// last kernel of the block we want to rewind to.
	pub fn rewind(&mut self, block_header: &BlockHeader) -> Result<(), Error> {
		let hash = block_header.hash();
		let height = block_header.height;
		debug!(LOGGER, "Rewind to header {} at {}", hash, height);

		// rewind each MMR
		// the block marker stored at apply_block time gives the target positions
		let (out_pos_rew, kern_pos_rew) = self.commit_index.get_block_marker(&hash)?;
		self.rewind_pos(height, out_pos_rew, kern_pos_rew)?;
		Ok(())
	}
|
2017-11-15 23:37:40 +03:00
|
|
|
|
2018-02-10 01:32:16 +03:00
|
|
|
	/// Rewinds the MMRs to the provided positions, given the output and
	/// kernel we want to rewind to.
	/// Note: the range proof MMR is rewound to the same position as the
	/// output MMR (they grow in lockstep, one proof per output).
	pub fn rewind_pos(
		&mut self,
		height: u64,
		out_pos_rew: u64,
		kern_pos_rew: u64,
	) -> Result<(), Error> {
		debug!(
			LOGGER,
			"Rewind txhashset to output pos: {}, kernel pos: {}", out_pos_rew, kern_pos_rew,
		);

		self.output_pmmr
			.rewind(out_pos_rew, height as u32)
			.map_err(&Error::TxHashSetErr)?;
		self.rproof_pmmr
			.rewind(out_pos_rew, height as u32)
			.map_err(&Error::TxHashSetErr)?;
		self.kernel_pmmr
			.rewind(kern_pos_rew, height as u32)
			.map_err(&Error::TxHashSetErr)?;

		Ok(())
	}
|
|
|
|
|
2017-12-25 03:27:13 +03:00
|
|
|
fn get_output_pos(&self, commit: &Commitment) -> Result<u64, grin_store::Error> {
|
|
|
|
if let Some(pos) = self.new_output_commits.get(commit) {
|
|
|
|
Ok(*pos)
|
|
|
|
} else {
|
|
|
|
self.commit_index.get_output_pos(commit)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
	/// Current root hashes and sums (if applicable) for the Output, range proof
	/// and kernel sum trees.
	pub fn roots(&self) -> TxHashSetRoots {
		TxHashSetRoots {
			output_root: self.output_pmmr.root(),
			rproof_root: self.rproof_pmmr.root(),
			kernel_root: self.kernel_pmmr.root(),
		}
	}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
	/// Validate the current txhashset state against a block header.
	/// Checks internal MMR consistency, the three roots against the header,
	/// and the global kernel-sum/output-sum balance equation.
	pub fn validate(&self, header: &BlockHeader) -> Result<(), Error> {
		// validate all hashes and sums within the trees
		if let Err(e) = self.output_pmmr.validate() {
			return Err(Error::InvalidTxHashSet(e));
		}
		if let Err(e) = self.rproof_pmmr.validate() {
			return Err(Error::InvalidTxHashSet(e));
		}
		if let Err(e) = self.kernel_pmmr.validate() {
			return Err(Error::InvalidTxHashSet(e));
		}

		// validate the tree roots against the block header
		let roots = self.roots();
		if roots.output_root != header.output_root || roots.rproof_root != header.range_proof_root
			|| roots.kernel_root != header.kernel_root
		{
			return Err(Error::InvalidRoot);
		}

		// the real magicking: the sum of all kernel excess should equal the sum
		// of all Output commitments, minus the total supply
		// NOTE(review): sum_kernel_offsets/sum_kernels/sum_outputs are defined
		// elsewhere in this file — semantics assumed from their names.
		let kernel_offset = self.sum_kernel_offsets(&header)?;
		let kernel_sum = self.sum_kernels(kernel_offset)?;
		let output_sum = self.sum_outputs()?;

		// supply is the sum of the coinbase outputs from all the block headers
		let supply = header.height * REWARD;

		{
			let secp = static_secp_instance();
			let secp = secp.lock().unwrap();

			// commit to the total supply, then subtract it from the output sum
			let over_commit = secp.commit_value(supply)?;
			let adjusted_sum_output = secp.commit_sum(vec![output_sum], vec![over_commit])?;
			if adjusted_sum_output != kernel_sum {
				return Err(Error::InvalidTxHashSet(
					"Differing Output commitment and kernel excess sums.".to_owned(),
				));
			}
		}

		Ok(())
	}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
	/// Rebuild the index of MMR positions to the corresponding Output and kernel
	/// by iterating over the whole MMR data. This is a costly operation
	/// performed only when we receive a full new chain state.
	pub fn rebuild_index(&self) -> Result<(), Error> {
		for n in 1..self.output_pmmr.unpruned_size() + 1 {
			// non-pruned leaves only
			if pmmr::bintree_postorder_height(n) == 0 {
				if let Some((_, out)) = self.output_pmmr.get(n, true) {
					// leaves always carry data, so the expect cannot fire here
					self.commit_index
						.save_output_pos(&out.expect("not a leaf node").commit, n)?;
				}
			}
		}
		Ok(())
	}
|
|
|
|
|
2017-09-28 02:46:32 +03:00
|
|
|
/// Force the rollback of this extension, no matter the result.
///
/// Sets the rollback flag so that, when this extension is finalized by its
/// caller, all pending tree changes are discarded rather than committed.
pub fn force_rollback(&mut self) {
	self.rollback = true;
}
|
|
|
|
|
2018-03-13 21:22:34 +03:00
|
|
|
/// Dumps the output MMR.
/// We use this after compacting for visual confirmation that it worked.
pub fn dump_output_pmmr(&self) {
	debug!(LOGGER, "-- outputs --");
	// dump_from_file(false) presumably reads from the backing data file
	// rather than the in-memory view — TODO confirm against PMMR impl
	self.output_pmmr.dump_from_file(false);
	debug!(LOGGER, "-- end of outputs --");
}
|
|
|
|
|
2017-10-28 00:57:04 +03:00
|
|
|
/// Dumps the state of the 3 sum trees to stdout for debugging. Short
|
2018-03-05 22:33:44 +03:00
|
|
|
/// version only prints the Output tree.
|
2017-10-22 10:11:45 +03:00
|
|
|
pub fn dump(&self, short: bool) {
|
|
|
|
debug!(LOGGER, "-- outputs --");
|
2018-03-05 22:33:44 +03:00
|
|
|
self.output_pmmr.dump(short);
|
2017-10-28 00:57:04 +03:00
|
|
|
if !short {
|
|
|
|
debug!(LOGGER, "-- range proofs --");
|
|
|
|
self.rproof_pmmr.dump(short);
|
|
|
|
debug!(LOGGER, "-- kernels --");
|
|
|
|
self.kernel_pmmr.dump(short);
|
|
|
|
}
|
2017-10-12 22:23:58 +03:00
|
|
|
}
|
|
|
|
|
2017-09-28 02:46:32 +03:00
|
|
|
// Sizes of the sum trees, used by `extending` on rollback.
|
|
|
|
fn sizes(&self) -> (u64, u64, u64) {
|
2017-10-17 00:23:10 +03:00
|
|
|
(
|
2018-03-05 22:33:44 +03:00
|
|
|
self.output_pmmr.unpruned_size(),
|
2017-10-17 00:23:10 +03:00
|
|
|
self.rproof_pmmr.unpruned_size(),
|
|
|
|
self.kernel_pmmr.unpruned_size(),
|
|
|
|
)
|
2017-09-28 02:46:32 +03:00
|
|
|
}
|
2018-02-10 01:32:16 +03:00
|
|
|
|
2018-03-14 18:22:09 +03:00
|
|
|
// We maintain the total accumulated kernel offset in each block header.
|
|
|
|
// So "summing" is just a case of taking the total kernel offset
|
|
|
|
// directly from the current block header.
|
2018-03-13 21:22:34 +03:00
|
|
|
fn sum_kernel_offsets(&self, header: &BlockHeader) -> Result<Option<Commitment>, Error> {
|
2018-03-14 18:22:09 +03:00
|
|
|
let offset = if header.total_kernel_offset == BlindingFactor::zero() {
|
2018-03-13 21:22:34 +03:00
|
|
|
None
|
|
|
|
} else {
|
2018-03-14 18:22:09 +03:00
|
|
|
let secp = static_secp_instance();
|
|
|
|
let secp = secp.lock().unwrap();
|
|
|
|
let skey = header.total_kernel_offset.secret_key(&secp)?;
|
2018-03-13 21:22:34 +03:00
|
|
|
Some(secp.commit(0, skey)?)
|
|
|
|
};
|
|
|
|
Ok(offset)
|
|
|
|
}
|
|
|
|
|
2018-03-04 03:19:54 +03:00
|
|
|
/// Sums the excess of all our kernels, validating their signatures on the
|
|
|
|
/// way
|
2018-03-13 21:22:34 +03:00
|
|
|
fn sum_kernels(&self, kernel_offset: Option<Commitment>) -> Result<Commitment, Error> {
|
2018-02-10 01:32:16 +03:00
|
|
|
// make sure we have the right count of kernels using the MMR, the storage
|
|
|
|
// file may have a few more
|
|
|
|
let mmr_sz = self.kernel_pmmr.unpruned_size();
|
2018-02-20 02:20:32 +03:00
|
|
|
let count = pmmr::n_leaves(mmr_sz);
|
2018-02-10 01:32:16 +03:00
|
|
|
|
2018-02-22 16:45:13 +03:00
|
|
|
let mut kernel_file = File::open(self.kernel_pmmr.data_file_path())?;
|
2018-02-10 01:32:16 +03:00
|
|
|
let first: TxKernel = ser::deserialize(&mut kernel_file)?;
|
|
|
|
first.verify()?;
|
|
|
|
let mut sum_kernel = first.excess;
|
|
|
|
|
|
|
|
let secp = static_secp_instance();
|
|
|
|
let mut kern_count = 1;
|
|
|
|
loop {
|
|
|
|
match ser::deserialize::<TxKernel>(&mut kernel_file) {
|
|
|
|
Ok(kernel) => {
|
|
|
|
kernel.verify()?;
|
|
|
|
let secp = secp.lock().unwrap();
|
|
|
|
sum_kernel = secp.commit_sum(vec![sum_kernel, kernel.excess], vec![])?;
|
|
|
|
kern_count += 1;
|
|
|
|
if kern_count == count {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Err(_) => break,
|
|
|
|
}
|
|
|
|
}
|
2018-03-13 21:22:34 +03:00
|
|
|
|
|
|
|
// now apply the kernel offset of we have one
|
|
|
|
{
|
|
|
|
let secp = secp.lock().unwrap();
|
|
|
|
if let Some(kernel_offset) = kernel_offset {
|
|
|
|
sum_kernel = secp.commit_sum(vec![sum_kernel, kernel_offset], vec![])?;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"Validated, summed (and offset) {} kernels", kern_count
|
|
|
|
);
|
|
|
|
Ok(sum_kernel)
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
/// Sums all our Output commitments, checking range proofs at the same time
|
|
|
|
fn sum_outputs(&self) -> Result<Commitment, Error> {
|
|
|
|
let mut sum_output = None;
|
|
|
|
let mut output_count = 0;
|
2018-02-10 01:32:16 +03:00
|
|
|
let secp = static_secp_instance();
|
2018-03-05 22:33:44 +03:00
|
|
|
for n in 1..self.output_pmmr.unpruned_size() + 1 {
|
2018-03-13 21:22:34 +03:00
|
|
|
if pmmr::is_leaf(n) {
|
2018-03-05 22:33:44 +03:00
|
|
|
if let Some((_, output)) = self.output_pmmr.get(n, true) {
|
2018-02-25 02:39:26 +03:00
|
|
|
let out = output.expect("not a leaf node");
|
|
|
|
let commit = out.commit.clone();
|
|
|
|
match self.rproof_pmmr.get(n, true) {
|
|
|
|
Some((_, Some(rp))) => out.to_output(rp).verify_proof()?,
|
2018-02-28 00:11:55 +03:00
|
|
|
_res => {
|
2018-02-25 02:39:26 +03:00
|
|
|
return Err(Error::OutputNotFound);
|
|
|
|
}
|
|
|
|
}
|
2018-03-05 22:33:44 +03:00
|
|
|
if let None = sum_output {
|
|
|
|
sum_output = Some(commit);
|
2018-02-10 01:32:16 +03:00
|
|
|
} else {
|
|
|
|
let secp = secp.lock().unwrap();
|
2018-03-06 20:58:33 +03:00
|
|
|
sum_output =
|
|
|
|
Some(secp.commit_sum(vec![sum_output.unwrap(), commit], vec![])?);
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
2018-03-05 22:33:44 +03:00
|
|
|
output_count += 1;
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-03-05 22:33:44 +03:00
|
|
|
debug!(LOGGER, "Summed {} Outputs", output_count);
|
|
|
|
Ok(sum_output.unwrap())
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
/// Packages the txhashset data files into a zip and returns a Read to the
|
2018-02-10 01:32:16 +03:00
|
|
|
/// resulting file
|
|
|
|
pub fn zip_read(root_dir: String) -> Result<File, Error> {
|
2018-03-05 22:33:44 +03:00
|
|
|
let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
|
|
|
|
let zip_path = Path::new(&root_dir).join(TXHASHSET_ZIP);
|
2018-02-10 01:32:16 +03:00
|
|
|
|
|
|
|
// create the zip archive
|
|
|
|
{
|
2018-03-05 22:33:44 +03:00
|
|
|
zip::compress(&txhashset_path, &File::create(zip_path.clone())?)
|
2018-02-10 01:32:16 +03:00
|
|
|
.map_err(|ze| Error::Other(ze.to_string()))?;
|
|
|
|
}
|
|
|
|
|
|
|
|
// open it again to read it back
|
|
|
|
let zip_file = File::open(zip_path)?;
|
|
|
|
Ok(zip_file)
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
/// Extract the txhashset data from a zip file and writes the content into the
|
|
|
|
/// txhashset storage dir
|
|
|
|
pub fn zip_write(root_dir: String, txhashset_data: File) -> Result<(), Error> {
|
|
|
|
let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
|
2018-02-10 01:32:16 +03:00
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
fs::create_dir_all(txhashset_path.clone())?;
|
|
|
|
zip::decompress(txhashset_data, &txhashset_path).map_err(|ze| Error::Other(ze.to_string()))
|
2017-09-28 02:46:32 +03:00
|
|
|
}
|