Mirror of https://github.com/mimblewimble/grin.git
Block input bitmap rework (#3236)
* first pass at rewind_single_block and reworking rewind to simply iterate over blocks, rewinding each incrementally
* commit
* commit
* cleanup
* add test coverage for output_pos index transactional semantics during rewind
* commit
* do not store commitments in spent_index, just use the order of the inputs in the block
* compare key with commitment when cleaning output_pos index
* remove unused OutputPos struct
This commit is contained in:
parent ef853ae469
commit cb2b909090

7 changed files with 385 additions and 190 deletions
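The heart of the rework: instead of persisting a croaring input bitmap per block, each block now persists a "spent index", an ordered Vec<CommitPos> recording the output MMR pos and height of every output it spends. Because the vec preserves the order of the block's inputs, rewind can simply zip inputs with saved positions, so no commitments need to be stored in the index itself. Below is a minimal, std-only sketch of that idea; the HashMap store and fixed-size byte arrays are hypothetical stand-ins for the chain db and hash/commitment types, not the actual grin APIs.

use std::collections::HashMap;

/// MMR position and block height of a spent output (mirrors the CommitPos added below).
#[derive(Clone, Copy, Debug)]
struct CommitPos {
    pos: u64,
    height: u64,
}

/// Stand-in for the db: block hash -> spent index, in input order.
struct SpentIndex {
    by_block: HashMap<[u8; 32], Vec<CommitPos>>,
}

impl SpentIndex {
    /// Save the spent index when a block is applied. The vec is ordered to match
    /// the block's inputs, so no commitment needs to be stored alongside it.
    fn save(&mut self, block_hash: [u8; 32], spent: Vec<CommitPos>) {
        self.by_block.insert(block_hash, spent);
    }

    /// On rewind, zip the block's input commitments with the saved positions
    /// to "unspend" each output at its original pos/height.
    fn unspend<'a>(
        &'a self,
        block_hash: &[u8; 32],
        inputs: &'a [[u8; 33]], // pedersen commitments of the block's inputs
    ) -> impl Iterator<Item = (&'a [u8; 33], CommitPos)> {
        let spent = self
            .by_block
            .get(block_hash)
            .map(|v| v.as_slice())
            .unwrap_or(&[]);
        inputs.iter().zip(spent.iter().copied())
    }
}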
@@ -56,7 +56,7 @@ pub fn get_output(
 	match res {
 		Ok(output_pos) => {
 			return Ok((
-				Output::new(&commit, output_pos.height, output_pos.position),
+				Output::new(&commit, output_pos.height, output_pos.pos),
 				x.clone(),
 			));
 		}
@@ -100,7 +100,7 @@ pub fn get_output_v2(
 	for x in outputs.iter() {
 		let res = chain.is_unspent(x);
 		match res {
-			Ok(output_pos) => match chain.get_unspent_output_at(output_pos.position) {
+			Ok(output_pos) => match chain.get_unspent_output_at(output_pos.pos) {
 				Ok(output) => {
 					let header = if include_merkle_proof && output.is_coinbase() {
 						chain.get_header_by_height(output_pos.height).ok()
@@ -30,7 +30,7 @@ use crate::store;
 use crate::txhashset;
 use crate::txhashset::{PMMRHandle, TxHashSet};
 use crate::types::{
-	BlockStatus, ChainAdapter, NoStatus, Options, OutputMMRPosition, Tip, TxHashsetWriteStatus,
+	BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus,
 };
 use crate::util::secp::pedersen::{Commitment, RangeProof};
 use crate::util::RwLock;
@@ -199,6 +199,15 @@ impl Chain {
 			&mut txhashset,
 		)?;

+		// Initialize the output_pos index based on UTXO set.
+		// This is fast as we only look for stale and missing entries
+		// and do not need to rebuild the entire index.
+		{
+			let batch = store.batch()?;
+			txhashset.init_output_pos_index(&header_pmmr, &batch)?;
+			batch.commit()?;
+		}
+
 		let chain = Chain {
 			db_root,
 			store,
@@ -495,9 +504,8 @@ impl Chain {
 	/// spent. This querying is done in a way that is consistent with the
 	/// current chain state, specifically the current winning (valid, most
 	/// work) fork.
-	pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<OutputMMRPosition, Error> {
-		let txhashset = self.txhashset.read();
-		txhashset.is_unspent(output_ref)
+	pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<CommitPos, Error> {
+		self.txhashset.read().is_unspent(output_ref)
 	}

 	/// Retrieves an unspent output using its PMMR position
@@ -973,7 +981,7 @@ impl Chain {
 		}

 		// Rebuild our output_pos index in the db based on fresh UTXO set.
-		txhashset.init_output_pos_index(&header_pmmr, &mut batch)?;
+		txhashset.init_output_pos_index(&header_pmmr, &batch)?;

 		// Commit all the changes to the db.
 		batch.commit()?;
@@ -1015,7 +1023,7 @@ impl Chain {
 	fn remove_historical_blocks(
 		&self,
 		header_pmmr: &txhashset::PMMRHandle<BlockHeader>,
-		batch: &mut store::Batch<'_>,
+		batch: &store::Batch<'_>,
 	) -> Result<(), Error> {
 		if self.archive_mode {
 			return Ok(());
@@ -1089,7 +1097,7 @@ impl Chain {
 		// Take a write lock on the txhashet and start a new writeable db batch.
 		let header_pmmr = self.header_pmmr.read();
 		let mut txhashset = self.txhashset.write();
-		let mut batch = self.store.batch()?;
+		let batch = self.store.batch()?;

 		// Compact the txhashset itself (rewriting the pruned backend files).
 		{
@@ -1100,14 +1108,17 @@ impl Chain {
 			let horizon_hash = header_pmmr.get_header_hash_by_height(horizon_height)?;
 			let horizon_header = batch.get_block_header(&horizon_hash)?;

-			txhashset.compact(&horizon_header, &mut batch)?;
+			txhashset.compact(&horizon_header, &batch)?;
 		}

 		// If we are not in archival mode remove historical blocks from the db.
 		if !self.archive_mode {
-			self.remove_historical_blocks(&header_pmmr, &mut batch)?;
+			self.remove_historical_blocks(&header_pmmr, &batch)?;
 		}

+		// Make sure our output_pos index is consistent with the UTXO set.
+		txhashset.init_output_pos_index(&header_pmmr, &batch)?;
+
 		// Commit all the above db changes.
 		batch.commit()?;

@@ -1510,6 +1521,7 @@ fn setup_head(
 			// We will update this later once we have the correct header_root.
 			batch.save_block_header(&genesis.header)?;
 			batch.save_block(&genesis)?;
+			batch.save_spent_index(&genesis.hash(), &vec![])?;
 			batch.save_body_head(&Tip::from_header(&genesis.header))?;

 			if !genesis.kernels().is_empty() {
@@ -23,7 +23,7 @@ use crate::core::pow;
 use crate::error::{Error, ErrorKind};
 use crate::store;
 use crate::txhashset;
-use crate::types::{Options, Tip};
+use crate::types::{CommitPos, Options, Tip};
 use crate::util::RwLock;
 use grin_store;
 use std::sync::Arc;
@@ -121,7 +121,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
 	let ref mut header_pmmr = &mut ctx.header_pmmr;
 	let ref mut txhashset = &mut ctx.txhashset;
 	let ref mut batch = &mut ctx.batch;
-	let block_sums = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
+	let (block_sums, spent) = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
 		rewind_and_apply_fork(&prev, ext, batch)?;

 		// Check any coinbase being spent have matured sufficiently.
@@ -143,22 +143,24 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
 		// Apply the block to the txhashset state.
 		// Validate the txhashset roots and sizes against the block header.
 		// Block is invalid if there are any discrepencies.
-		apply_block_to_txhashset(b, ext, batch)?;
+		let spent = apply_block_to_txhashset(b, ext, batch)?;

 		// If applying this block does not increase the work on the chain then
 		// we know we have not yet updated the chain to produce a new chain head.
+		// We discard the "child" batch used in this extension (original ctx batch still active).
+		// We discard any MMR modifications applied in this extension.
 		let head = batch.head()?;
 		if !has_more_work(&b.header, &head) {
 			ext.extension.force_rollback();
 		}

-		Ok(block_sums)
+		Ok((block_sums, spent))
 	})?;

 	// Add the validated block to the db along with the corresponding block_sums.
 	// We do this even if we have not increased the total cumulative work
 	// so we can maintain multiple (in progress) forks.
-	add_block(b, &block_sums, &ctx.batch)?;
+	add_block(b, &block_sums, &spent, &ctx.batch)?;

 	// If we have no "tail" then set it now.
 	if ctx.batch.tail().is_err() {
@@ -429,20 +431,25 @@ fn apply_block_to_txhashset(
 	block: &Block,
 	ext: &mut txhashset::ExtensionPair<'_>,
 	batch: &store::Batch<'_>,
-) -> Result<(), Error> {
-	ext.extension.apply_block(block, batch)?;
+) -> Result<Vec<CommitPos>, Error> {
+	let spent = ext.extension.apply_block(block, batch)?;
 	ext.extension.validate_roots(&block.header)?;
 	ext.extension.validate_sizes(&block.header)?;
-	Ok(())
+	Ok(spent)
 }

-/// Officially adds the block to our chain.
+/// Officially adds the block to our chain (possibly on a losing fork).
+/// Adds the associated block_sums and spent_index as well.
 /// Header must be added separately (assume this has been done previously).
-fn add_block(b: &Block, block_sums: &BlockSums, batch: &store::Batch<'_>) -> Result<(), Error> {
-	batch
-		.save_block(b)
-		.map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?;
+fn add_block(
+	b: &Block,
+	block_sums: &BlockSums,
+	spent: &Vec<CommitPos>,
+	batch: &store::Batch<'_>,
+) -> Result<(), Error> {
+	batch.save_block(b)?;
 	batch.save_block_sums(&b.hash(), block_sums)?;
+	batch.save_spent_index(&b.hash(), spent)?;
 	Ok(())
 }

@@ -19,11 +19,12 @@ use crate::core::core::hash::{Hash, Hashed};
 use crate::core::core::{Block, BlockHeader, BlockSums};
 use crate::core::pow::Difficulty;
 use crate::core::ser::ProtocolVersion;
-use crate::types::Tip;
+use crate::types::{CommitPos, Tip};
 use crate::util::secp::pedersen::Commitment;
 use croaring::Bitmap;
 use grin_store as store;
 use grin_store::{option_to_not_found, to_key, Error, SerIterator};
+use std::convert::TryInto;
 use std::sync::Arc;

 const STORE_SUBPATH: &str = "chain";
@@ -35,6 +36,7 @@ const TAIL_PREFIX: u8 = b'T';
 const OUTPUT_POS_PREFIX: u8 = b'p';
 const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B';
 const BLOCK_SUMS_PREFIX: u8 = b'M';
+const BLOCK_SPENT_PREFIX: u8 = b'S';

 /// All chain-related database operations
 pub struct ChainStore {
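The single-byte prefixes above namespace every index inside the same key-value store. A sketch of how such prefixed keys might be built and compared; it assumes to_key joins a one-byte prefix, a separator, and the raw bytes, as grin_store did at the time, so treat the exact byte layout as illustrative rather than authoritative:

const SEP: u8 = b':';

// Illustrative re-implementation of a to_key-style helper.
fn to_key(prefix: u8, bytes: &[u8]) -> Vec<u8> {
    let mut key = Vec::with_capacity(bytes.len() + 2);
    key.push(prefix);
    key.push(SEP);
    key.extend_from_slice(bytes);
    key
}

fn main() {
    const OUTPUT_POS_PREFIX: u8 = b'p';
    const BLOCK_SPENT_PREFIX: u8 = b'S';

    let commit = [0x08u8; 33]; // hypothetical 33-byte pedersen commitment
    let block_hash = [0xabu8; 32]; // hypothetical block hash

    // Every index lives in the same kv store, namespaced by its prefix byte.
    let output_pos_key = to_key(OUTPUT_POS_PREFIX, &commit);
    let spent_index_key = to_key(BLOCK_SPENT_PREFIX, &block_hash);

    // This is also why is_match_output_pos_key (added below) can compare an
    // iterator key against a commitment: rebuild the candidate key, compare bytes.
    assert_eq!(output_pos_key, to_key(OUTPUT_POS_PREFIX, &commit));
    assert!(output_pos_key != spent_index_key);
}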
@@ -178,16 +180,19 @@ impl<'a> Batch<'a> {
 		self.db.exists(&to_key(BLOCK_PREFIX, &mut h.to_vec()))
 	}

-	/// Save the block and the associated input bitmap.
+	/// Save the block to the db.
 	/// Note: the block header is not saved to the db here, assumes this has already been done.
 	pub fn save_block(&self, b: &Block) -> Result<(), Error> {
-		// Build the "input bitmap" for this new block and store it in the db.
-		self.build_and_store_block_input_bitmap(&b)?;
-
-		// Save the block itself to the db.
 		self.db
 			.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?;
+		Ok(())
+	}
+
+	/// We maintain a "spent" index for each full block to allow the output_pos
+	/// to be easily reverted during rewind.
+	pub fn save_spent_index(&self, h: &Hash, spent: &Vec<CommitPos>) -> Result<(), Error> {
+		self.db
+			.put_ser(&to_key(BLOCK_SPENT_PREFIX, &mut h.to_vec())[..], spent)?;
 		Ok(())
 	}

@@ -217,7 +222,7 @@ impl<'a> Batch<'a> {
 		// Not an error if these fail.
 		{
 			let _ = self.delete_block_sums(bh);
-			let _ = self.delete_block_input_bitmap(bh);
+			let _ = self.delete_spent_index(bh);
 		}

 		Ok(())
@@ -247,6 +252,20 @@ impl<'a> Batch<'a> {
 		)
 	}

+	/// Delete the output_pos index entry for a spent output.
+	pub fn delete_output_pos_height(&self, commit: &Commitment) -> Result<(), Error> {
+		self.db
+			.delete(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()))
+	}
+
+	/// When using the output_pos iterator we have access to the index keys but not the
+	/// original commitment that the key is constructed from. So we need a way of comparing
+	/// a key with another commitment without reconstructing the commitment from the key bytes.
+	pub fn is_match_output_pos_key(&self, key: &[u8], commit: &Commitment) -> bool {
+		let commit_key = to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec());
+		commit_key == key
+	}
+
 	/// Iterator over the output_pos index.
 	pub fn output_pos_iter(&self) -> Result<SerIterator<(u64, u64)>, Error> {
 		let key = to_key(OUTPUT_POS_PREFIX, &mut "".to_string().into_bytes());
@@ -281,18 +300,15 @@ impl<'a> Batch<'a> {
 		)
 	}

-	/// Save the input bitmap for the block.
-	fn save_block_input_bitmap(&self, bh: &Hash, bm: &Bitmap) -> Result<(), Error> {
-		self.db.put(
-			&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec())[..],
-			&bm.serialize(),
-		)
-	}
-
-	/// Delete the block input bitmap.
-	fn delete_block_input_bitmap(&self, bh: &Hash) -> Result<(), Error> {
+	/// Delete the block spent index.
+	fn delete_spent_index(&self, bh: &Hash) -> Result<(), Error> {
+		// Clean up the legacy input bitmap as well.
+		let _ = self
+			.db
+			.delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()));
+
 		self.db
-			.delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()))
+			.delete(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec()))
 	}

 	/// Save block_sums for the block.
@@ -314,47 +330,41 @@ impl<'a> Batch<'a> {
 		self.db.delete(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec()))
 	}

-	/// Build the input bitmap for the given block.
-	fn build_block_input_bitmap(&self, block: &Block) -> Result<Bitmap, Error> {
-		let bitmap = block
-			.inputs()
-			.iter()
-			.filter_map(|x| self.get_output_pos(&x.commitment()).ok())
-			.map(|x| x as u32)
-			.collect();
-		Ok(bitmap)
-	}
-
-	/// Build and store the input bitmap for the given block.
-	fn build_and_store_block_input_bitmap(&self, block: &Block) -> Result<Bitmap, Error> {
-		// Build the bitmap.
-		let bitmap = self.build_block_input_bitmap(block)?;
-
-		// Save the bitmap to the db (via the batch).
-		self.save_block_input_bitmap(&block.hash(), &bitmap)?;
-
-		Ok(bitmap)
-	}
-
-	/// Get the block input bitmap from the db or build the bitmap from
-	/// the full block from the db (if the block is found).
+	/// Get the block input bitmap based on our spent index.
+	/// Fallback to legacy block input bitmap from the db.
 	pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
+		if let Ok(spent) = self.get_spent_index(bh) {
+			let bitmap = spent
+				.into_iter()
+				.map(|x| x.pos.try_into().unwrap())
+				.collect();
+			Ok(bitmap)
+		} else {
+			self.get_legacy_input_bitmap(bh)
+		}
+	}
+
+	fn get_legacy_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
 		if let Ok(Some(bytes)) = self
 			.db
 			.get(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()))
 		{
 			Ok(Bitmap::deserialize(&bytes))
 		} else {
-			match self.get_block(bh) {
-				Ok(block) => {
-					let bitmap = self.build_and_store_block_input_bitmap(&block)?;
-					Ok(bitmap)
-				}
-				Err(e) => Err(e),
-			}
+			Err(Error::NotFoundErr("legacy block input bitmap".to_string()).into())
 		}
 	}

+	/// Get the "spent index" from the db for the specified block.
+	/// If we need to rewind a block then we use this to "unspend" the spent outputs.
+	pub fn get_spent_index(&self, bh: &Hash) -> Result<Vec<CommitPos>, Error> {
+		option_to_not_found(
+			self.db
+				.get_ser(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())),
+			|| format!("spent index: {}", bh),
+		)
+	}
+
 	/// Commits this batch. If it's a child batch, it will be merged with the
 	/// parent, otherwise the batch is written to db.
 	pub fn commit(self) -> Result<(), Error> {
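Note how the legacy "input bitmap" becomes a pure projection of the new spent index: the bitmap is just the set of spent output MMR positions, with heights and input ordering discarded. A small sketch of that projection, using a BTreeSet<u32> as a stand-in for croaring::Bitmap and a local CommitPos mirroring the one in this commit:

use std::collections::BTreeSet;
use std::convert::TryInto;

struct CommitPos {
    pos: u64,
    height: u64,
}

fn input_bitmap_from_spent_index(spent: &[CommitPos]) -> BTreeSet<u32> {
    // Only the positions matter for the bitmap view; heights and the
    // input ordering that rewind relies on are simply dropped.
    spent
        .iter()
        .map(|x| x.pos.try_into().expect("pos fits in u32"))
        .collect()
}

fn main() {
    let spent = vec![
        CommitPos { pos: 7, height: 10 },
        CommitPos { pos: 3, height: 9 },
    ];
    let bitmap = input_bitmap_from_spent_index(&spent);
    assert!(bitmap.contains(&3) && bitmap.contains(&7));
}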
@@ -25,7 +25,7 @@ use crate::error::{Error, ErrorKind};
 use crate::store::{Batch, ChainStore};
 use crate::txhashset::bitmap_accumulator::BitmapAccumulator;
 use crate::txhashset::{RewindableKernelView, UTXOView};
-use crate::types::{OutputMMRPosition, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
+use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
 use crate::util::secp::pedersen::{Commitment, RangeProof};
 use crate::util::{file, secp_static, zip};
 use croaring::Bitmap;
@@ -223,18 +223,15 @@ impl TxHashSet {
 	/// Check if an output is unspent.
 	/// We look in the index to find the output MMR pos.
 	/// Then we check the entry in the output MMR and confirm the hash matches.
-	pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result<OutputMMRPosition, Error> {
-		match self.commit_index.get_output_pos_height(&output_id.commit) {
-			Ok((pos, block_height)) => {
+	pub fn is_unspent(&self, output_id: &OutputIdentifier) -> Result<CommitPos, Error> {
+		let commit = output_id.commit;
+		match self.commit_index.get_output_pos_height(&commit) {
+			Ok((pos, height)) => {
 				let output_pmmr: ReadonlyPMMR<'_, Output, _> =
 					ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
 				if let Some(hash) = output_pmmr.get_hash(pos) {
 					if hash == output_id.hash_with_index(pos - 1) {
-						Ok(OutputMMRPosition {
-							output_mmr_hash: hash,
-							position: pos,
-							height: block_height,
-						})
+						Ok(CommitPos { pos, height })
 					} else {
 						Err(ErrorKind::TxHashSetErr("txhashset hash mismatch".to_string()).into())
 					}
@@ -361,11 +358,12 @@ impl TxHashSet {
 	pub fn compact(
 		&mut self,
 		horizon_header: &BlockHeader,
-		batch: &mut Batch<'_>,
+		batch: &Batch<'_>,
 	) -> Result<(), Error> {
 		debug!("txhashset: starting compaction...");

 		let head_header = batch.head_header()?;
+
 		let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, batch)?;

 		debug!("txhashset: check_compact output mmr backend...");
@@ -378,35 +376,66 @@ impl TxHashSet {
 			.backend
 			.check_compact(horizon_header.output_mmr_size, &rewind_rm_pos)?;

-		debug!("txhashset: compact height pos index...");
-		self.compact_height_pos_index(batch)?;
-
 		debug!("txhashset: ... compaction finished");

 		Ok(())
 	}

-	/// Initialize the output pos index based on current UTXO set.
-	/// This is a costly operation performed only when we receive a full new chain state.
+	/// (Re)build the output_pos index to be consistent with the current UTXO set.
+	/// Remove any "stale" index entries that do not correspond to outputs in the UTXO set.
+	/// Add any missing index entries based on UTXO set.
 	pub fn init_output_pos_index(
 		&self,
 		header_pmmr: &PMMRHandle<BlockHeader>,
-		batch: &mut Batch<'_>,
+		batch: &Batch<'_>,
 	) -> Result<(), Error> {
 		let now = Instant::now();

 		let output_pmmr =
 			ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);

+		// Iterate over the current output_pos index, removing any entries that
+		// do not point to to the expected output.
+		let mut removed_count = 0;
+		for (key, (pos, _)) in batch.output_pos_iter()? {
+			if let Some(out) = output_pmmr.get_data(pos) {
+				if let Ok(pos_via_mmr) = batch.get_output_pos(&out.commitment()) {
+					// If the pos matches and the index key matches the commitment
+					// then keep the entry, other we want to clean it up.
+					if pos == pos_via_mmr && batch.is_match_output_pos_key(&key, &out.commitment())
+					{
+						continue;
+					}
+				}
+			}
+			batch.delete(&key)?;
+			removed_count += 1;
+		}
+		debug!(
+			"init_output_pos_index: removed {} stale index entries",
+			removed_count
+		);
+
 		let mut outputs_pos: Vec<(Commitment, u64)> = vec![];
 		for pos in output_pmmr.leaf_pos_iter() {
 			if let Some(out) = output_pmmr.get_data(pos) {
 				outputs_pos.push((out.commit, pos));
 			}
 		}

+		debug!("init_output_pos_index: {} utxos", outputs_pos.len());
+
+		outputs_pos.retain(|x| batch.get_output_pos_height(&x.0).is_err());
+
+		debug!(
+			"init_output_pos_index: {} utxos with missing index entries",
+			outputs_pos.len()
+		);
+
 		if outputs_pos.is_empty() {
 			return Ok(());
 		}

 		let total_outputs = outputs_pos.len();
 		let max_height = batch.head()?.height;

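init_output_pos_index is now a two-phase repair rather than a full rebuild: drop stale entries, then add entries only for UTXOs missing from the index, which is why it is cheap enough to run on every startup and compaction. A std-only sketch of the shape of that repair, with HashMaps as hypothetical stand-ins for the db index and the output PMMR leaf set:

use std::collections::HashMap;

type Commit = [u8; 33];

fn repair_index(
    index: &mut HashMap<Commit, (u64, u64)>, // commit -> (pos, height)
    utxo_by_pos: &HashMap<u64, Commit>,      // output PMMR leaves: pos -> commit
    height_for_pos: impl Fn(u64) -> u64,     // e.g. a search over header MMR sizes
) {
    // Phase 1: remove entries that no longer point at the expected output.
    index.retain(|commit, v| utxo_by_pos.get(&v.0) == Some(commit));

    // Phase 2: add entries for UTXOs that have no index entry at all.
    // Untouched entries are never rewritten, which keeps the common case fast.
    for (&pos, commit) in utxo_by_pos {
        index
            .entry(*commit)
            .or_insert_with(|| (pos, height_for_pos(pos)));
    }
}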
@@ -425,38 +454,12 @@ impl TxHashSet {
 			}
 		}
 		debug!(
-			"init_height_pos_index: {} UTXOs, took {}s",
+			"init_height_pos_index: added entries for {} utxos, took {}s",
 			total_outputs,
 			now.elapsed().as_secs(),
 		);
 		Ok(())
 	}

-	fn compact_height_pos_index(&self, batch: &Batch<'_>) -> Result<(), Error> {
-		let now = Instant::now();
-		let output_pmmr =
-			ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
-		let last_pos = output_pmmr.unpruned_size();
-
-		let deleted = batch
-			.output_pos_iter()?
-			.filter(|(_, (pos, _))| {
-				// Note we use get_from_file() here as we want to ensure we have an entry
-				// in the index for *every* output still in the file, not just the "unspent"
-				// outputs. This is because we need to support rewind to handle fork/reorg.
-				// Rewind may "unspend" recently spent, but not yet pruned outputs, and the
-				// index must be consistent in this situation.
-				*pos <= last_pos && output_pmmr.get_from_file(*pos).is_none()
-			})
-			.map(|(key, _)| batch.delete(&key))
-			.count();
-		debug!(
-			"compact_output_pos_index: deleted {} entries from the index, took {}s",
-			deleted,
-			now.elapsed().as_secs(),
-		);
-		Ok(())
-	}
 }

 /// Starts a new unit of work to extend (or rewind) the chain with additional
@@ -923,18 +926,29 @@ impl<'a> Extension<'a> {
 	}

 	/// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs).
-	pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> {
+	/// Returns a vec of commit_pos representing the pos and height of the outputs spent
+	/// by this block.
+	pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<Vec<CommitPos>, Error> {
 		let mut affected_pos = vec![];
+		let mut spent = vec![];
+
+		// Apply the output to the output and rangeproof MMRs.
+		// Add pos to affected_pos to update the accumulator later on.
+		// Add the new output to the output_pos index.
 		for out in b.outputs() {
 			let pos = self.apply_output(out, batch)?;
 			affected_pos.push(pos);
 			batch.save_output_pos_height(&out.commitment(), pos, b.header.height)?;
 		}

+		// Remove the output from the output and rangeproof MMRs.
+		// Add spent_pos to affected_pos to update the accumulator later on.
+		// Remove the spent output from the output_pos index.
 		for input in b.inputs() {
-			let pos = self.apply_input(input, batch)?;
-			affected_pos.push(pos);
+			let spent_pos = self.apply_input(input, batch)?;
+			affected_pos.push(spent_pos.pos);
+			batch.delete_output_pos_height(&input.commitment())?;
+			spent.push(spent_pos);
 		}

 		for kernel in b.kernels() {
@@ -947,13 +961,10 @@ impl<'a> Extension<'a> {
 		// Update the head of the extension to reflect the block we just applied.
 		self.head = Tip::from_header(&b.header);

-		Ok(())
+		Ok(spent)
 	}

 	fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> {
-		// if self.output_pmmr.is_empty() || output_pos.is_empty() {
-		// 	return Ok(());
-		// }
 		let mut output_idx: Vec<_> = output_pos
 			.iter()
 			.map(|x| pmmr::n_leaves(*x).saturating_sub(1))
@@ -969,10 +980,9 @@ impl<'a> Extension<'a> {
 		)
 	}

-	fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result<u64, Error> {
+	fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result<CommitPos, Error> {
 		let commit = input.commitment();
-		let pos_res = batch.get_output_pos(&commit);
-		if let Ok(pos) = pos_res {
+		if let Ok((pos, height)) = batch.get_output_pos_height(&commit) {
 			// First check this input corresponds to an existing entry in the output MMR.
 			if let Some(hash) = self.output_pmmr.get_hash(pos) {
 				if hash != input.hash_with_index(pos - 1) {
@@ -990,7 +1000,7 @@ impl<'a> Extension<'a> {
 					self.rproof_pmmr
 						.prune(pos)
 						.map_err(ErrorKind::TxHashSetErr)?;
-					Ok(pos)
+					Ok(CommitPos { pos, height })
 				}
 				Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()),
 				Err(e) => Err(ErrorKind::TxHashSetErr(e).into()),
@@ -1103,13 +1113,18 @@ impl<'a> Extension<'a> {
 		// Rewound output pos will be removed from the MMR.
 		// Rewound input (spent) pos will be added back to the MMR.
 		let head_header = batch.get_block_header(&self.head.hash())?;
-		let rewind_rm_pos = input_pos_to_rewind(header, &head_header, batch)?;
-
-		self.rewind_to_pos(
-			header.output_mmr_size,
-			header.kernel_mmr_size,
-			&rewind_rm_pos,
-		)?;
+
+		if head_header.height <= header.height {
+			// Nothing to rewind but we do want to truncate the MMRs at header for consistency.
+			self.rewind_mmrs_to_pos(header.output_mmr_size, header.kernel_mmr_size, &vec![])?;
+			self.apply_to_bitmap_accumulator(&[header.output_mmr_size])?;
+		} else {
+			let mut current = head_header;
+			while header.height < current.height {
+				self.rewind_single_block(&current, batch)?;
+				current = batch.get_previous_header(&current)?;
+			}
+		}

 		// Update our head to reflect the header we rewound to.
 		self.head = Tip::from_header(header);
@@ -1117,30 +1132,78 @@ impl<'a> Extension<'a> {
 		Ok(())
 	}

+	// Rewind the MMRs, the bitmap accumulator and the output_pos index.
+	fn rewind_single_block(
+		&mut self,
+		header: &BlockHeader,
+		batch: &Batch<'_>,
+	) -> Result<(), Error> {
+		// The spent index allows us to conveniently "unspend" everything in a block.
+		let spent = batch.get_spent_index(&header.hash());
+
+		let spent_pos: Vec<_> = if let Ok(ref spent) = spent {
+			spent.iter().map(|x| x.pos).collect()
+		} else {
+			warn!(
+				"rewind_single_block: fallback to legacy input bitmap for block {} at {}",
+				header.hash(),
+				header.height
+			);
+			let bitmap = batch.get_block_input_bitmap(&header.hash())?;
+			bitmap.iter().map(|x| x.into()).collect()
+		};
+
+		if header.height == 0 {
+			self.rewind_mmrs_to_pos(0, 0, &spent_pos)?;
+		} else {
+			let prev = batch.get_previous_header(&header)?;
+			self.rewind_mmrs_to_pos(prev.output_mmr_size, prev.kernel_mmr_size, &spent_pos)?;
+		}
+
+		// Update our BitmapAccumulator based on affected outputs.
+		// We want to "unspend" every rewound spent output.
+		// Treat last_pos as an affected output to ensure we rebuild far enough back.
+		let mut affected_pos = spent_pos.clone();
+		affected_pos.push(self.output_pmmr.last_pos);
+		self.apply_to_bitmap_accumulator(&affected_pos)?;
+
+		// Remove any entries from the output_pos created by the block being rewound.
+		let block = batch.get_block(&header.hash())?;
+		for out in block.outputs() {
+			batch.delete_output_pos_height(&out.commitment())?;
+		}
+
+		// Update output_pos based on "unspending" all spent pos from this block.
+		// This is necessary to ensure the output_pos index correclty reflects a
+		// reused output commitment. For example an output at pos 1, spent, reused at pos 2.
+		// The output_pos index should be updated to reflect the old pos 1 when unspent.
+		if let Ok(spent) = spent {
+			for (x, y) in block.inputs().into_iter().zip(spent) {
+				batch.save_output_pos_height(&x.commitment(), y.pos, y.height)?;
+			}
+		}
+
+		Ok(())
+	}
+
 	/// Rewinds the MMRs to the provided positions, given the output and
-	/// kernel we want to rewind to.
-	fn rewind_to_pos(
+	/// kernel pos we want to rewind to.
+	fn rewind_mmrs_to_pos(
 		&mut self,
 		output_pos: u64,
 		kernel_pos: u64,
-		rewind_rm_pos: &Bitmap,
+		spent_pos: &[u64],
 	) -> Result<(), Error> {
+		let bitmap: Bitmap = spent_pos.into_iter().map(|x| *x as u32).collect();
 		self.output_pmmr
-			.rewind(output_pos, rewind_rm_pos)
+			.rewind(output_pos, &bitmap)
 			.map_err(&ErrorKind::TxHashSetErr)?;
 		self.rproof_pmmr
-			.rewind(output_pos, rewind_rm_pos)
+			.rewind(output_pos, &bitmap)
 			.map_err(&ErrorKind::TxHashSetErr)?;
 		self.kernel_pmmr
			.rewind(kernel_pos, &Bitmap::create())
 			.map_err(&ErrorKind::TxHashSetErr)?;
-
-		// Update our BitmapAccumulator based on affected outputs.
-		// We want to "unspend" every rewound spent output.
-		// Treat output_pos as an affected output to ensure we rebuild far enough back.
-		let mut affected_pos: Vec<_> = rewind_rm_pos.iter().map(|x| x as u64).collect();
-		affected_pos.push(output_pos);
-		self.apply_to_bitmap_accumulator(&affected_pos)?;
-
 		Ok(())
 	}

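This is the central behavioural change of the PR: rather than computing one aggregate bitmap of everything spent since the fork point and rewinding in a single step, the extension now walks back one block at a time, undoing each block against its own spent index (falling back to the legacy bitmap for blocks written before this commit). A schematic sketch of that loop under simplified stand-in types; it is not the actual Extension API:

// Simplified stand-in for a block header's rewind-relevant fields.
struct Header {
    height: u64,
    prev_output_mmr_size: u64,
    prev_kernel_mmr_size: u64,
}

trait RewindableState {
    fn spent_index(&self, h: &Header) -> Option<Vec<u64>>; // spent output pos for this block
    fn legacy_input_bitmap(&self, h: &Header) -> Vec<u64>; // fallback for old blocks
    fn rewind_mmrs_to(&mut self, output_size: u64, kernel_size: u64, unspend: &[u64]);
    fn prev_header(&self, h: &Header) -> Header;
}

fn rewind(state: &mut impl RewindableState, mut current: Header, target_height: u64) {
    while current.height > target_height {
        // Prefer the new per-block spent index; fall back to the legacy bitmap.
        let spent = state
            .spent_index(&current)
            .unwrap_or_else(|| state.legacy_input_bitmap(&current));
        // Truncate the MMRs to the previous block and "unspend" this block's inputs.
        state.rewind_mmrs_to(
            current.prev_output_mmr_size,
            current.prev_kernel_mmr_size,
            &spent,
        );
        current = state.prev_header(&current);
    }
}

Rewinding incrementally is what makes the per-block output_pos restoration possible: each step has the exact spent list for exactly one block in hand.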
@@ -1573,40 +1636,13 @@ fn input_pos_to_rewind(
 	head_header: &BlockHeader,
 	batch: &Batch<'_>,
 ) -> Result<Bitmap, Error> {
-	if head_header.height <= block_header.height {
-		return Ok(Bitmap::create());
-	}
-
-	// Batching up the block input bitmaps, and running fast_or() on every batch of 256 bitmaps.
-	// so to avoid maintaining a huge vec of bitmaps.
-	let bitmap_fast_or = |b_res, block_input_bitmaps: &mut Vec<Bitmap>| -> Option<Bitmap> {
-		if let Some(b) = b_res {
-			block_input_bitmaps.push(b);
-			if block_input_bitmaps.len() < 256 {
-				return None;
-			}
-		}
-		let bitmap = Bitmap::fast_or(&block_input_bitmaps.iter().collect::<Vec<&Bitmap>>());
-		block_input_bitmaps.clear();
-		block_input_bitmaps.push(bitmap.clone());
-		Some(bitmap)
-	};
-
-	let mut block_input_bitmaps: Vec<Bitmap> = vec![];
-
+	let mut bitmap = Bitmap::create();
 	let mut current = head_header.clone();
-	while current.hash() != block_header.hash() {
-		if current.height < 1 {
-			break;
-		}
-
-		// I/O should be minimized or eliminated here for most
-		// rewind scenarios.
-		if let Ok(b_res) = batch.get_block_input_bitmap(&current.hash()) {
-			bitmap_fast_or(Some(b_res), &mut block_input_bitmaps);
+	while current.height > block_header.height {
+		if let Ok(block_bitmap) = batch.get_block_input_bitmap(&current.hash()) {
+			bitmap.or_inplace(&block_bitmap);
 		}
 		current = batch.get_previous_header(&current)?;
 	}
-
-	Ok(bitmap)
+	Ok(bitmap)
 }

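input_pos_to_rewind survives only for compaction, and drops the batched fast_or machinery in favour of a straightforward in-place union while walking from head down to the target header. A sketch of the same accumulation pattern, with HashSet<u32> standing in for croaring::Bitmap and or_inplace, and the header walk reduced to a height counter:

use std::collections::HashSet;

fn union_spent_since(
    head_height: u64,
    target_height: u64,
    bitmap_for_height: impl Fn(u64) -> Option<HashSet<u32>>, // per-block input bitmap lookup
) -> HashSet<u32> {
    let mut bitmap = HashSet::new();
    let mut height = head_height;
    while height > target_height {
        if let Some(block_bitmap) = bitmap_for_height(height) {
            bitmap.extend(block_bitmap); // the or_inplace step
        }
        height -= 1; // the get_previous_header step
    }
    bitmap
}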
@@ -20,8 +20,9 @@ use std::sync::Arc;
 use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
 use crate::core::core::{Block, BlockHeader, HeaderVersion};
 use crate::core::pow::Difficulty;
-use crate::core::ser::{self, PMMRIndexHashable};
+use crate::core::ser::{self, PMMRIndexHashable, Readable, Reader, Writeable, Writer};
 use crate::error::{Error, ErrorKind};
+use crate::util::secp::pedersen::Commitment;
 use crate::util::RwLock;

 bitflags! {
@@ -258,18 +259,31 @@ impl OutputRoots {
 	}
 }

-/// A helper to hold the output pmmr position of the txhashset in order to keep them
-/// readable.
+/// Minimal struct representing a known MMR position and associated block height.
 #[derive(Debug)]
-pub struct OutputMMRPosition {
-	/// The hash at the output position in the MMR.
-	pub output_mmr_hash: Hash,
+pub struct CommitPos {
 	/// MMR position
-	pub position: u64,
+	pub pos: u64,
 	/// Block height
 	pub height: u64,
 }

+impl Readable for CommitPos {
+	fn read(reader: &mut dyn Reader) -> Result<CommitPos, ser::Error> {
+		let pos = reader.read_u64()?;
+		let height = reader.read_u64()?;
+		Ok(CommitPos { pos, height })
+	}
+}
+
+impl Writeable for CommitPos {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		writer.write_u64(self.pos)?;
+		writer.write_u64(self.height)?;
+		Ok(())
+	}
+}
+
 /// The tip of a fork. A handle to the fork ancestry from its leaf in the
 /// blockchain tree. References the max height and the latest and previous
 /// blocks
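The Readable/Writeable impls above give CommitPos a fixed wire format: two u64s, pos then height, 16 bytes per entry, so a block's spent index costs 16 bytes per input. A round-trip sketch of that layout using only std; big-endian is assumed here to match grin's ser conventions, so treat the byte order as an assumption rather than a guarantee:

use std::convert::TryInto;

fn write_commit_pos(pos: u64, height: u64) -> [u8; 16] {
    let mut buf = [0u8; 16];
    buf[..8].copy_from_slice(&pos.to_be_bytes());
    buf[8..].copy_from_slice(&height.to_be_bytes());
    buf
}

fn read_commit_pos(buf: &[u8; 16]) -> (u64, u64) {
    let pos = u64::from_be_bytes(buf[..8].try_into().unwrap());
    let height = u64::from_be_bytes(buf[8..].try_into().unwrap());
    (pos, height)
}

fn main() {
    let encoded = write_commit_pos(42, 1_000);
    assert_eq!(read_commit_pos(&encoded), (42, 1_000));
}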
@@ -398,6 +398,7 @@ fn mine_reorg() {

 #[test]
 fn mine_forks() {
+	clean_output_dir(".grin2");
 	global::set_mining_mode(ChainTypes::AutomatedTesting);
 	{
 		let chain = init_chain(".grin2", pow::mine_genesis_block().unwrap());
@@ -445,6 +446,7 @@ fn mine_forks() {

 #[test]
 fn mine_losing_fork() {
+	clean_output_dir(".grin3");
 	global::set_mining_mode(ChainTypes::AutomatedTesting);
 	let kc = ExtKeychain::from_random_seed(false).unwrap();
 	{
@@ -481,6 +483,7 @@ fn mine_losing_fork() {

 #[test]
 fn longer_fork() {
+	clean_output_dir(".grin4");
 	global::set_mining_mode(ChainTypes::AutomatedTesting);
 	let kc = ExtKeychain::from_random_seed(false).unwrap();
 	// to make it easier to compute the txhashset roots in the test, we
@@ -524,12 +527,88 @@ fn longer_fork() {
 }

 #[test]
-fn spend_in_fork_and_compact() {
+fn spend_rewind_spend() {
 	global::set_mining_mode(ChainTypes::AutomatedTesting);
 	util::init_test_logger();
-	// Cleanup chain directory
-	clean_output_dir(".grin6");
+	clean_output_dir(".grin_spend_rewind_spend");
+
+	{
+		let chain = init_chain(
+			".grin_spend_rewind_spend",
+			pow::mine_genesis_block().unwrap(),
+		);
+		let prev = chain.head_header().unwrap();
+		let kc = ExtKeychain::from_random_seed(false).unwrap();
+		let pb = ProofBuilder::new(&kc);
+
+		let mut head = prev;
+
+		// mine the first block and keep track of the block_hash
+		// so we can spend the coinbase later
+		let b = prepare_block_key_idx(&kc, &head, &chain, 2, 1);
+		let out_id = OutputIdentifier::from_output(&b.outputs()[0]);
+		assert!(out_id.features.is_coinbase());
+		head = b.header.clone();
+		chain
+			.process_block(b.clone(), chain::Options::SKIP_POW)
+			.unwrap();
+
+		// now mine three further blocks
+		for n in 3..6 {
+			let b = prepare_block(&kc, &head, &chain, n);
+			head = b.header.clone();
+			chain.process_block(b, chain::Options::SKIP_POW).unwrap();
+		}
+
+		// Make a note of this header as we will rewind back to here later.
+		let rewind_to = head.clone();
+
+		let key_id_coinbase = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+		let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier();
+
+		let tx1 = build::transaction(
+			KernelFeatures::Plain { fee: 20000 },
+			vec![
+				build::coinbase_input(consensus::REWARD, key_id_coinbase.clone()),
+				build::output(consensus::REWARD - 20000, key_id30.clone()),
+			],
+			&kc,
+			&pb,
+		)
+		.unwrap();
+
+		let b = prepare_block_tx(&kc, &head, &chain, 6, vec![&tx1]);
+		head = b.header.clone();
+		chain
+			.process_block(b.clone(), chain::Options::SKIP_POW)
+			.unwrap();
+		chain.validate(false).unwrap();
+
+		// Now mine another block, reusing the private key for the coinbase we just spent.
+		{
+			let b = prepare_block_key_idx(&kc, &head, &chain, 7, 1);
+			chain.process_block(b, chain::Options::SKIP_POW).unwrap();
+		}
+
+		// Now mine a competing block also spending the same coinbase output from earlier.
+		// Rewind back prior to the tx that spends it to "unspend" it.
+		{
+			let b = prepare_block_tx(&kc, &rewind_to, &chain, 6, vec![&tx1]);
+			chain
+				.process_block(b.clone(), chain::Options::SKIP_POW)
+				.unwrap();
+			chain.validate(false).unwrap();
+		}
+	}
+
+	clean_output_dir(".grin_spend_rewind_spend");
+}
+
+#[test]
+fn spend_in_fork_and_compact() {
+	clean_output_dir(".grin6");
+	global::set_mining_mode(ChainTypes::AutomatedTesting);
+	util::init_test_logger();
 	{
 		let chain = init_chain(".grin6", pow::mine_genesis_block().unwrap());
 		let prev = chain.head_header().unwrap();
@@ -730,15 +809,31 @@ fn output_header_mappings() {
 	clean_output_dir(".grin_header_for_output");
 }

+// Use diff as both diff *and* key_idx for convenience (deterministic private key for test blocks)
 fn prepare_block<K>(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block
 where
 	K: Keychain,
 {
-	let mut b = prepare_block_nosum(kc, prev, diff, vec![]);
+	let key_idx = diff as u32;
+	prepare_block_key_idx(kc, prev, chain, diff, key_idx)
+}
+
+fn prepare_block_key_idx<K>(
+	kc: &K,
+	prev: &BlockHeader,
+	chain: &Chain,
+	diff: u64,
+	key_idx: u32,
+) -> Block
+where
+	K: Keychain,
+{
+	let mut b = prepare_block_nosum(kc, prev, diff, key_idx, vec![]);
 	chain.set_txhashset_roots(&mut b).unwrap();
 	b
 }

+// Use diff as both diff *and* key_idx for convenience (deterministic private key for test blocks)
 fn prepare_block_tx<K>(
 	kc: &K,
 	prev: &BlockHeader,
@@ -749,17 +844,38 @@ fn prepare_block_tx<K>(
 where
 	K: Keychain,
 {
-	let mut b = prepare_block_nosum(kc, prev, diff, txs);
+	let key_idx = diff as u32;
+	prepare_block_tx_key_idx(kc, prev, chain, diff, key_idx, txs)
+}
+
+fn prepare_block_tx_key_idx<K>(
+	kc: &K,
+	prev: &BlockHeader,
+	chain: &Chain,
+	diff: u64,
+	key_idx: u32,
+	txs: Vec<&Transaction>,
+) -> Block
+where
+	K: Keychain,
+{
+	let mut b = prepare_block_nosum(kc, prev, diff, key_idx, txs);
 	chain.set_txhashset_roots(&mut b).unwrap();
 	b
 }

-fn prepare_block_nosum<K>(kc: &K, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block
+fn prepare_block_nosum<K>(
+	kc: &K,
+	prev: &BlockHeader,
+	diff: u64,
+	key_idx: u32,
+	txs: Vec<&Transaction>,
+) -> Block
 where
 	K: Keychain,
 {
 	let proof_size = global::proofsize();
-	let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier();
+	let key_id = ExtKeychainPath::new(1, key_idx, 0, 0, 0).to_identifier();

 	let fees = txs.iter().map(|tx| tx.fee()).sum();
 	let reward =