Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-20 19:11:08 +03:00)
Rewind head and header_head consistently. (#2918)
* maintain header_head as distinctly separate from head
* cleanup corrupted storage log msg
* simplify process_header and check_header_known
* remember to commit the batch when successfully processing a header...
* rework sync_block_headers for consistency with process_block_header
* cleanup unrelated code
* fix pool tests
* cleanup chain tests
* cleanup chain tests (reuse helpers more)
* cleanup - head not header on an extension
* shortcircuit "rewind and apply fork" for headers if next header
Parent: 45cf1d96df
Commit: 515fa54614
11 changed files with 390 additions and 706 deletions
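
For orientation, the refactor below keeps three chain tips as distinct values rather than coupling them in storage. A minimal illustrative sketch, not code from this commit (grin's actual Tip type also carries total difficulty and the previous block hash):

// Illustrative only: the three tips the diff below keeps distinct.
struct Tip {
	height: u64,
	last_block_h: [u8; 32], // hash of the block or header at the tip
}

struct ChainHeads {
	head: Tip,        // tip of the full-block chain (the "body head")
	header_head: Tip, // tip of the header chain; may run ahead of head
	sync_head: Tip,   // tip tracked while a batch of headers is being synced
}
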
@@ -350,13 +350,14 @@ impl Chain {
}

/// Process a block header received during "header first" propagation.
/// Note: This will update header MMR and corresponding header_head
/// if total work increases (on the header chain).
pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
// We take a write lock on the txhashset and create a new batch
// but this is strictly readonly so we do not commit the batch.
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
pipe::process_block_header(bh, &mut ctx)?;
ctx.batch.commit()?;
Ok(())
}

@@ -368,9 +369,15 @@ impl Chain {
let batch = self.store.batch()?;
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;

// Sync the chunk of block headers, updating sync_head as necessary.
pipe::sync_block_headers(headers, &mut ctx)?;
ctx.batch.commit()?;

// Now "process" the last block header, updating header_head to match sync_head.
if let Some(header) = headers.last() {
pipe::process_block_header(header, &mut ctx)?;
}

ctx.batch.commit()?;
Ok(())
}

@@ -520,64 +527,23 @@ impl Chain {
// latest block header. Rewind the extension to the specified header to
// ensure the view is consistent.
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&header)?;
let header_head = extension.batch.header_head()?;
pipe::rewind_and_apply_fork(&header, &header_head, extension)?;
extension.validate(fast_validation, &NoStatus)?;
Ok(())
})
}

/// *** Only used in tests. ***
/// Convenience for setting roots on a block header when
/// creating a chain fork during tests.
pub fn set_txhashset_roots_forked(
&self,
b: &mut Block,
prev: &BlockHeader,
) -> Result<(), Error> {
let prev_block = self.get_block(&prev.hash())?;
let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut txhashset, |extension| {
// Put the txhashset in the correct state as of the previous block.
// We cannot use the new block to do this because we have no
// explicit previous linkage (and prev_root not yet setup).
pipe::rewind_and_apply_fork(&prev_block, extension)?;
extension.apply_block(&prev_block)?;

// Retrieve the header root before we apply the new block
let prev_root = extension.header_root();

// Apply the latest block to the chain state via the extension.
extension.apply_block(b)?;

Ok((prev_root, extension.roots(), extension.sizes()))
})?;

// Set the prev_root on the header.
b.header.prev_root = prev_root;

// Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root;
b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root;

// Set the output and kernel MMR sizes.
{
// Carefully destructure these correctly...
let (_, output_mmr_size, _, kernel_mmr_size) = sizes;
b.header.output_mmr_size = output_mmr_size;
b.header.kernel_mmr_size = kernel_mmr_size;
}

Ok(())
}

/// Sets the txhashset roots on a brand new block by applying the block on
/// the current txhashset state.
pub fn set_txhashset_roots(&self, b: &mut Block) -> Result<(), Error> {
let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut txhashset, |extension| {
let previous_header = extension.batch.get_previous_header(&b.header)?;
let header_head = extension.batch.header_head()?;
pipe::rewind_and_apply_fork(&previous_header, &header_head, extension)?;

// Retrieve the header root before we apply the new block
let prev_root = extension.header_root();

@@ -610,12 +576,12 @@ impl Chain {
pub fn get_merkle_proof(
&self,
output: &OutputIdentifier,
block_header: &BlockHeader,
header: &BlockHeader,
) -> Result<MerkleProof, Error> {
let mut txhashset = self.txhashset.write();

let merkle_proof = txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&block_header)?;
let header_head = extension.batch.header_head()?;
pipe::rewind_and_apply_fork(&header, &header_head, extension)?;
extension.merkle_proof(output)
})?;

@@ -669,7 +635,9 @@ impl Chain {
{
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&header)?;
let header_head = extension.batch.header_head()?;
pipe::rewind_and_apply_fork(&header, &header_head, extension)?;

extension.snapshot()?;
Ok(())
})?;

@@ -1349,7 +1317,8 @@ fn setup_head(
})?;

let res = txhashset::extending(txhashset, &mut batch, |extension| {
extension.rewind(&header)?;
let header_head = extension.batch.header_head()?;
pipe::rewind_and_apply_fork(&header, &header_head, extension)?;
extension.validate_roots()?;

// now check we have the "block sums" for the block in question

@@ -1394,7 +1363,7 @@ fn setup_head(
let prev_header = batch.get_block_header(&head.prev_block_h)?;
let _ = batch.delete_block(&header.hash());
head = Tip::from_header(&prev_header);
batch.save_head(&head)?;
batch.save_body_head(&head)?;
}
}
}

@@ -1407,7 +1376,11 @@ fn setup_head(
batch.save_block(&genesis)?;

let tip = Tip::from_header(&genesis.header);
batch.save_head(&tip)?;

// Save these ahead of time as we need head and header_head to be initialized
// with *something* when creating a txhashset extension below.
batch.save_body_head(&tip)?;
batch.save_header_head(&tip)?;

if genesis.kernels().len() > 0 {
let (utxo_sum, kernel_sum) = (sums, genesis as &Committed).verify_kernel_sums(

@@ -1419,6 +1392,10 @@ fn setup_head(
kernel_sum,
};
}
txhashset::header_extending(txhashset, &mut batch, |extension| {
extension.apply_header(&genesis.header)?;
Ok(())
})?;
txhashset::extending(txhashset, &mut batch, |extension| {
extension.apply_block(&genesis)?;
extension.validate_roots()?;

@@ -45,30 +45,6 @@ pub struct BlockContext<'a> {
pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
}

/// Process a block header as part of processing a full block.
/// We want to be sure the header is valid before processing the full block.
fn process_header_for_block(
header: &BlockHeader,
is_fork: bool,
ctx: &mut BlockContext<'_>,
) -> Result<(), Error> {
txhashset::header_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
extension.force_rollback();
if is_fork {
rewind_and_apply_header_fork(header, extension)?;
}
extension.validate_root(header)?;
extension.apply_header(header)?;
Ok(())
})?;

validate_header(header, ctx)?;
add_block_header(header, &ctx.batch)?;
update_header_head(header, ctx)?;

Ok(())
}

// Check if we already know about this block for various reasons
// from cheapest to most expensive (delay hitting the db until last).
fn check_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {

@@ -81,9 +57,6 @@ fn check_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), E
/// place for the new block in the chain.
/// Returns new head if chain head updated.
pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip>, Error> {
// TODO should just take a promise for a block with a full header so we don't
// spend resources reading the full block when its header is invalid

debug!(
"pipe: process_block {} at {} [in/out/kern: {}/{}/{}]",
b.hash(),

@@ -96,26 +69,23 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
// Check if we have already processed this block previously.
check_known(&b.header, ctx)?;

// Delay hitting the db for current chain head until we know
// this block is not already known.
let head = ctx.batch.head()?;
let is_next = b.header.prev_hash == head.last_block_h;
let header_head = ctx.batch.header_head()?;

let prev = prev_header_store(&b.header, &mut ctx.batch)?;
let is_next = b.header.prev_hash == head.last_block_h;

// Block is an orphan if we do not know about the previous full block.
// Skip this check if we have just processed the previous block
// or the full txhashset state (fast sync) at the previous block height.
let prev = prev_header_store(&b.header, &mut ctx.batch)?;
if !is_next && !ctx.batch.block_exists(&prev.hash())? {
return Err(ErrorKind::Orphan.into());
}

// This is a fork in the context of both header and block processing
// if this block does not immediately follow the chain head.
let is_fork = !is_next;

// Check the header is valid before we proceed with the full block.
process_header_for_block(&b.header, is_fork, ctx)?;
// Process the header for the block.
// Note: We still want to process the full block if we have seen this header before
// as we may have processed it "header first" and not yet processed the full block.
process_block_header(&b.header, ctx)?;

// Validate the block itself, make sure it is internally consistent.
// Use the verifier_cache for verifying rangeproofs and kernel signatures.

@@ -123,10 +93,8 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip

// Start a chain extension unit of work dependent on the success of the
// internal validation and saving operations
txhashset::extending(&mut ctx.txhashset, &mut ctx.batch, |mut extension| {
if is_fork {
rewind_and_apply_fork(b, extension)?;
}
let block_sums = txhashset::extending(&mut ctx.txhashset, &mut ctx.batch, |mut extension| {
rewind_and_apply_fork(&prev, &header_head, extension)?;

// Check any coinbase being spent have matured sufficiently.
// This needs to be done within the context of a potentially

@@ -141,7 +109,8 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
// we can verify_kernel_sums across the full UTXO sum and full kernel sum
// accounting for inputs/outputs/kernels in this new block.
// We know there are no double-spends etc. if this verifies successfully.
verify_block_sums(b, &mut extension)?;
// Remember to save these to the db later on (regardless of extension rollback)
let block_sums = verify_block_sums(b, &extension.batch)?;

// Apply the block to the txhashset state.
// Validate the txhashset roots and sizes against the block header.

@@ -155,111 +124,122 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
extension.force_rollback();
}

Ok(())
Ok(block_sums)
})?;

// Add the validated block to the db.
// Add the validated block to the db along with the corresponding block_sums.
// We do this even if we have not increased the total cumulative work
// so we can maintain multiple (in progress) forks.
add_block(b, &ctx.batch)?;
add_block(b, &block_sums, &ctx.batch)?;

// If we have no "tail" then set it now.
if ctx.batch.tail().is_err() {
update_body_tail(&b.header, &ctx.batch)?;
}

// Update the chain head if total work is increased.
let res = update_head(b, ctx)?;
Ok(res)
}

/// Process the block header.
/// This is only ever used during sync and uses a context based on sync_head.
pub fn sync_block_headers(
headers: &[BlockHeader],
ctx: &mut BlockContext<'_>,
) -> Result<Option<Tip>, Error> {
let first_header = match headers.first() {
Some(header) => {
debug!(
"pipe: sync_block_headers: {} headers from {} at {}",
headers.len(),
header.hash(),
header.height,
);
header
}
None => {
error!("failed to get the first header");
return Ok(None);
}
};

let all_known = if let Some(last_header) = headers.last() {
ctx.batch.get_block_header(&last_header.hash()).is_ok()
} else {
false
};

if !all_known {
let prev_header = ctx.batch.get_previous_header(&first_header)?;
txhashset::sync_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
extension.rewind(&prev_header)?;

for header in headers {
// Check the current root is correct.
extension.validate_root(header)?;

// Apply the header to the header MMR.
extension.apply_header(header)?;

// Save the header to the db.
add_block_header(header, &extension.batch)?;
}

Ok(())
})?;

// Validate all our headers now that we have added each "previous"
// header to the db in this batch above.
for header in headers {
validate_header(header, ctx)?;
}
}

// Update header_head (if most work) and sync_head (regardless) in all cases,
// even if we already know all the headers.
// This avoids the case of us getting into an infinite loop with sync_head never
// progressing.
// We only need to do this once at the end of this batch of headers.
if let Some(header) = headers.last() {
// Update sync_head regardless of total work.
update_sync_head(header, &mut ctx.batch)?;

// Update header_head (but only if this header increases our total known work).
// i.e. Only if this header is now the head of the current "most work" chain.
let res = update_header_head(header, ctx)?;
Ok(res)
if has_more_work(&b.header, &head) {
let head = Tip::from_header(&b.header);
update_head(&head, &mut ctx.batch)?;
Ok(Some(head))
} else {
Ok(None)
}
}

/// Process block header as part of "header first" block propagation.
/// We validate the header but we do not store it or update header head based
/// on this. We will update these once we get the block back after requesting
/// it.
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
debug!(
"pipe: process_block_header: {} at {}",
header.hash(),
header.height,
); // keep this
/// Sync a chunk of block headers.
/// This is only used during header sync.
pub fn sync_block_headers(
headers: &[BlockHeader],
ctx: &mut BlockContext<'_>,
) -> Result<(), Error> {
if headers.is_empty() {
return Ok(());
}

// Check if this header is already "known" from processing a previous block.
// Note: We are looking for a full block based on this header, not just the header itself.
check_known(header, ctx)?;
let first_header = headers.first().expect("first header");
let last_header = headers.last().expect("last header");
let prev_header = ctx.batch.get_previous_header(&first_header)?;

// Check if we know about all these headers. If so we can accept them quickly.
// If they *do not* increase total work on the sync chain we are done.
// If they *do* increase total work then we should process them to update sync_head.
let sync_head = ctx.batch.get_sync_head()?;
if let Ok(existing) = ctx.batch.get_block_header(&last_header.hash()) {
if !has_more_work(&existing, &sync_head) {
return Ok(());
}
}

txhashset::sync_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
rewind_and_apply_header_fork(&prev_header, extension)?;
for header in headers {
extension.validate_root(header)?;
extension.apply_header(header)?;
add_block_header(header, &extension.batch)?;
}
Ok(())
})?;

// Validate all our headers now that we have added each "previous"
// header to the db in this batch above.
for header in headers {
validate_header(header, ctx)?;
}

if has_more_work(&last_header, &sync_head) {
update_sync_head(&Tip::from_header(&last_header), &mut ctx.batch)?;
}

Ok(())
}

/// Process a block header. Update the header MMR and corresponding header_head if this header
/// increases the total work relative to header_head.
/// Note: In contrast to processing a full block we treat "already known" as success
/// to allow processing to continue (for header itself).
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
// Check this header is not an orphan, we must know about the previous header to continue.
let prev_header = ctx.batch.get_previous_header(&header)?;

// If this header is "known" then stop processing the header.
// Do not stop processing with an error though.
if check_known(header, ctx).is_err() {
return Ok(());
}

// If we have not yet seen the full block then check if we have seen this header.
// If it does not increase total_difficulty beyond our current header_head
// then we can (re)accept this header and process the full block (or request it).
// This header is on a fork and we should still accept it as the fork may eventually win.
let header_head = ctx.batch.header_head()?;
if let Ok(existing) = ctx.batch.get_block_header(&header.hash()) {
if !has_more_work(&existing, &header_head) {
return Ok(());
}
}

txhashset::header_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
rewind_and_apply_header_fork(&prev_header, extension)?;
extension.validate_root(header)?;
extension.apply_header(header)?;
if !has_more_work(&header, &header_head) {
extension.force_rollback();
}
Ok(())
})?;

validate_header(header, ctx)?;
add_block_header(header, &ctx.batch)?;

// Update header_head independently of chain head (full blocks).
// If/when we process the corresponding full block we will update the
// chain head to match. This allows our header chain to extend safely beyond
// the full chain in a fork scenario without needing excessive rewinds to handle
// the temporarily divergent chains.
if has_more_work(&header, &header_head) {
update_header_head(&Tip::from_header(&header), &mut ctx.batch)?;
}

Ok(())
}

@@ -418,14 +398,11 @@ fn verify_coinbase_maturity(block: &Block, ext: &txhashset::Extension<'_>) -> Re
.verify_coinbase_maturity(&block.inputs(), block.header.height)
}

/// Some "real magick" verification logic.
/// The (BlockSums, Block) tuple implements Committed...
/// This allows us to verify kernel sums across the full utxo and kernel sets
/// based on block_sums of previous block, accounting for the inputs|outputs|kernels
/// of the new block.
fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension<'_>) -> Result<(), Error> {
/// Verify kernel sums across the full utxo and kernel sets based on block_sums
/// of previous block accounting for the inputs|outputs|kernels of the new block.
fn verify_block_sums(b: &Block, batch: &store::Batch<'_>) -> Result<BlockSums, Error> {
// Retrieve the block_sums for the previous block.
let block_sums = ext.batch.get_block_sums(&b.header.prev_hash)?;
let block_sums = batch.get_block_sums(&b.header.prev_hash)?;

// Overage is based purely on the new block.
// Previous block_sums have taken all previous overage into account.

@@ -438,16 +415,10 @@ fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension<'_>) -> Result<()
let (utxo_sum, kernel_sum) =
(block_sums, b as &dyn Committed).verify_kernel_sums(overage, offset)?;

// Save the new block_sums for the new block to the db via the batch.
ext.batch.save_block_sums(
&b.header.hash(),
&BlockSums {
utxo_sum,
kernel_sum,
},
)?;

Ok(())
Ok(BlockSums {
utxo_sum,
kernel_sum,
})
}
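
For readers unfamiliar with Mimblewimble accounting, the balance this delegates to verify_kernel_sums (on the Committed trait) can be written out. A hedged sketch of the check, in our own notation rather than the codebase's:

// With Pedersen commitments C = r*G + v*H, verify_kernel_sums(overage, offset)
// checks, roughly:
//
//   sum(output commitments) - sum(input commitments) - overage*H
//       == sum(kernel excesses) + offset*G
//
// where overage is the net new coinbase supply in the block. Folding in the
// previous block_sums extends the check to the full UTXO and kernel sets,
// which is why success here rules out double spends and hidden inflation.
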

/// Fully validate the block by applying it to the txhashset extension.

@@ -465,10 +436,11 @@ fn apply_block_to_txhashset(

/// Officially adds the block to our chain.
/// Header must be added separately (assume this has been done previously).
fn add_block(b: &Block, batch: &store::Batch<'_>) -> Result<(), Error> {
fn add_block(b: &Block, block_sums: &BlockSums, batch: &store::Batch<'_>) -> Result<(), Error> {
batch
.save_block(b)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?;
batch.save_block_sums(&b.hash(), block_sums)?;
Ok(())
}

@@ -490,29 +462,14 @@ fn add_block_header(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Er
Ok(())
}

/// Directly updates the head if we've just appended a new block to it or handle
/// the situation where we've just added enough work to have a fork with more
/// work than the head.
fn update_head(b: &Block, ctx: &BlockContext<'_>) -> Result<Option<Tip>, Error> {
// if we made a fork with more work than the head (which should also be true
// when extending the head), update it
let head = ctx.batch.head()?;
if has_more_work(&b.header, &head) {
let tip = Tip::from_header(&b.header);
fn update_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
batch
.save_body_head(&head)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;

ctx.batch
.save_body_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
debug!("head updated to {} at {}", head.last_block_h, head.height);

debug!(
"pipe: head updated to {} at {}",
tip.last_block_h, tip.height
);

Ok(Some(tip))
} else {
Ok(None)
}
Ok(())
}

// Whether the provided block totals more work than the chain tip

@@ -521,33 +478,27 @@ fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
}
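
The hunk above elides the body of has_more_work. In the grin codebase it is essentially a one-line cumulative-difficulty comparison; a sketch under that assumption:

// Approximate body (elided by the hunk above): compare cumulative
// proof-of-work carried by the header against the tip, never height.
fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
	header.total_difficulty() > head.total_difficulty
}
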

/// Update the sync head so we can keep syncing from where we left off.
fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch<'_>) -> Result<(), Error> {
let tip = Tip::from_header(bh);
fn update_sync_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
batch
.save_sync_head(&tip)
.save_sync_head(&head)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
debug!("sync head {} @ {}", bh.hash(), bh.height);
debug!(
"sync_head updated to {} at {}",
head.last_block_h, head.height
);
Ok(())
}

/// Update the header head if this header has most work.
fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<Option<Tip>, Error> {
let header_head = ctx.batch.header_head()?;
if has_more_work(&bh, &header_head) {
let tip = Tip::from_header(bh);
ctx.batch
.save_header_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;

debug!(
"pipe: header_head updated to {} at {}",
tip.last_block_h, tip.height
);

Ok(Some(tip))
} else {
Ok(None)
}
/// Update the header_head.
fn update_header_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
batch
.save_header_head(&head)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
debug!(
"header_head updated to {} at {}",
head.last_block_h, head.height
);
Ok(())
}

/// Rewind the header chain and reapply headers on a fork.

@@ -555,8 +506,14 @@ pub fn rewind_and_apply_header_fork(
header: &BlockHeader,
ext: &mut txhashset::HeaderExtension<'_>,
) -> Result<(), Error> {
let head = ext.head();
if header.hash() == head.last_block_h {
// Nothing to rewind and nothing to reapply. Done.
return Ok(());
}

let mut fork_hashes = vec![];
let mut current = ext.batch.get_previous_header(header)?;
let mut current = header.clone();
while current.height > 0 && !ext.is_on_current_chain(&current).is_ok() {
fork_hashes.push(current.hash());
current = ext.batch.get_previous_header(&current)?;

@@ -576,25 +533,50 @@ pub fn rewind_and_apply_header_fork(
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?;
ext.apply_header(&header)?;
}

Ok(())
}

/// Utility function to handle forks. From the forked block, jump backward
/// to find the fork root. Rewind the txhashset to the root and apply all the
/// forked blocks prior to the one being processed to set the txhashset in
/// to find the fork point. Rewind the txhashset to the fork point and apply all
/// necessary blocks prior to the one being processed to set the txhashset in
/// the expected state.
pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension<'_>) -> Result<(), Error> {
// extending a fork, first identify the block where forking occurred
// keeping the hashes of blocks along the fork
let mut fork_hashes = vec![];
let mut current = ext.batch.get_previous_header(&b.header)?;
while current.height > 0 && !ext.is_on_current_chain(&current).is_ok() {
fork_hashes.push(current.hash());
current = ext.batch.get_previous_header(&current)?;
}
fork_hashes.reverse();
pub fn rewind_and_apply_fork(
header: &BlockHeader,
header_head: &Tip,
ext: &mut txhashset::Extension<'_>,
) -> Result<(), Error> {
// TODO - Skip the "rewind and reapply" if everything is aligned and this is the "next" block.
// This will be significantly easier once we break out the header extension.

let forked_header = current;
// Find the fork point where head and header_head diverge.
// We may need to rewind back to this fork point if they diverged
// prior to the fork point for the provided header.
let header_forked_header = {
let mut current = ext.batch.get_block_header(&header_head.last_block_h)?;
while current.height > 0 && !ext.is_on_current_chain(&current).is_ok() {
current = ext.batch.get_previous_header(&current)?;
}
current
};

// Find the fork point where the provided header diverges from our main chain.
// Account for the header fork point. Use the earliest fork point to determine
// where we need to rewind to. We need to do this
let (forked_header, fork_hashes) = {
let mut fork_hashes = vec![];
let mut current = header.clone();
while current.height > 0
&& (!ext.is_on_current_chain(&current).is_ok()
|| current.height > header_forked_header.height)
{
fork_hashes.push(current.hash());
current = ext.batch.get_previous_header(&current)?;
}
fork_hashes.reverse();

(current, fork_hashes)
};

// Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?;

@@ -611,10 +593,11 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension<'_>) -> R
// Validate the block against the UTXO set.
validate_utxo(&fb, ext)?;
// Re-verify block_sums to set the block_sums up on this fork correctly.
verify_block_sums(&fb, ext)?;
verify_block_sums(&fb, &ext.batch)?;
// Re-apply the blocks.
apply_block_to_txhashset(&fb, ext)?;
}

Ok(())
}
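
Both fork handlers above reduce to the same parent-pointer walk: step back from a header until reaching one on the current chain, then rewind there and re-apply the collected hashes. A self-contained toy version of that walk (u64 standing in for a hash and a HashMap for the header index are assumptions for illustration only):

use std::collections::HashMap;

// Toy version of the fork-point walk used by rewind_and_apply_fork and
// rewind_and_apply_header_fork: collect hashes while stepping back through
// parent links until we land on a header that is on the current chain.
fn fork_point(
	mut current: u64,                   // stand-in for a header hash
	parent: &HashMap<u64, u64>,         // child -> parent links
	on_current_chain: impl Fn(u64) -> bool,
) -> (u64, Vec<u64>) {
	let mut fork_hashes = vec![];
	while !on_current_chain(current) {
		fork_hashes.push(current);
		current = parent[&current];
	}
	fork_hashes.reverse(); // oldest first, ready to re-apply in order
	(current, fork_hashes)
}
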
@@ -160,12 +160,6 @@ impl<'a> Batch<'a> {
option_to_not_found(self.db.get_ser(&vec![SYNC_HEAD_PREFIX]), "SYNC_HEAD")
}

/// Save head to db.
pub fn save_head(&self, t: &Tip) -> Result<(), Error> {
self.db.put_ser(&vec![HEAD_PREFIX], t)?;
self.db.put_ser(&vec![HEADER_HEAD_PREFIX], t)
}

/// Save body head to db.
pub fn save_body_head(&self, t: &Tip) -> Result<(), Error> {
self.db.put_ser(&vec![HEAD_PREFIX], t)

@@ -337,17 +337,13 @@

// We want to use the current head of the most work chain unless
// we explicitly rewind the extension.
let header = batch.head_header()?;
let head = batch.head()?;

trace!("Starting new txhashset (readonly) extension.");

let res = {
let mut extension = Extension::new(trees, &batch, header);
let mut extension = Extension::new(trees, &batch, head);
extension.force_rollback();

// TODO - header_mmr may be out ahead via the header_head
// TODO - do we need to handle this via an explicit rewind on the header_mmr?

inner(&mut extension)
};

@@ -430,7 +426,7 @@ where

// We want to use the current head of the most work chain unless
// we explicitly rewind the extension.
let header = batch.head_header()?;
let head = batch.head()?;

// create a child transaction so if the state is rolled back by itself, all
// index saving can be undone

@@ -438,9 +434,7 @@ where
{
trace!("Starting new txhashset extension.");

// TODO - header_mmr may be out ahead via the header_head
// TODO - do we need to handle this via an explicit rewind on the header_mmr?
let mut extension = Extension::new(trees, &child_batch, header);
let mut extension = Extension::new(trees, &child_batch, head);
res = inner(&mut extension);

rollback = extension.rollback;

@@ -466,11 +460,11 @@ where
} else {
trace!("Committing txhashset extension. sizes {:?}", sizes);
child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?;
// NOTE: The header MMR is readonly for a txhashset extension.
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.sync()?;
trees.rproof_pmmr_h.backend.sync()?;
trees.kernel_pmmr_h.backend.sync()?;
trees.header_pmmr_h.last_pos = sizes.0;
trees.output_pmmr_h.last_pos = sizes.1;
trees.rproof_pmmr_h.last_pos = sizes.2;
trees.kernel_pmmr_h.last_pos = sizes.3;

@@ -501,7 +495,6 @@ where
// We want to use the current sync_head unless
// we explicitly rewind the extension.
let head = batch.get_sync_head()?;
let header = batch.get_block_header(&head.last_block_h)?;

// create a child transaction so if the state is rolled back by itself, all
// index saving can be undone

@@ -509,7 +502,7 @@ where
{
trace!("Starting new txhashset sync_head extension.");
let pmmr = PMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
let mut extension = HeaderExtension::new(pmmr, &child_batch, head);

res = inner(&mut extension);

@@ -560,7 +553,6 @@ where
// We want to use the current head of the most work chain unless
// we explicitly rewind the extension.
let head = batch.head()?;
let header = batch.get_block_header(&head.last_block_h)?;

// create a child transaction so if the state is rolled back by itself, all
// index saving can be undone

@@ -571,7 +563,7 @@ where
&mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos,
);
let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
let mut extension = HeaderExtension::new(pmmr, &child_batch, head);
res = inner(&mut extension);

rollback = extension.rollback;

@@ -606,7 +598,7 @@ where
/// A header extension to allow the header MMR to extend beyond the other MMRs individually.
/// This is to allow headers to be validated against the MMR before we have the full block data.
pub struct HeaderExtension<'a> {
header: BlockHeader,
head: Tip,

pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,

@@ -623,10 +615,10 @@ impl<'a> HeaderExtension<'a> {
fn new(
pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
batch: &'a Batch<'_>,
header: BlockHeader,
head: Tip,
) -> HeaderExtension<'a> {
HeaderExtension {
header,
head,
pmmr,
rollback: false,
batch,

@@ -638,6 +630,11 @@ impl<'a> HeaderExtension<'a> {
self.pmmr.get_data(pos).map(|x| x.hash())
}

/// The head representing the furthest extent of the current extension.
pub fn head(&self) -> Tip {
self.head.clone()
}

/// Get the header at the specified height based on the current state of the header extension.
/// Derives the MMR pos from the height (insertion index) and retrieves the header hash.
/// Looks the header up in the db by hash.

@@ -672,7 +669,7 @@ impl<'a> HeaderExtension<'a> {
/// extension.
pub fn apply_header(&mut self, header: &BlockHeader) -> Result<Hash, Error> {
self.pmmr.push(header).map_err(&ErrorKind::TxHashSetErr)?;
self.header = header.clone();
self.head = Tip::from_header(header);
Ok(self.root())
}

@@ -690,8 +687,8 @@ impl<'a> HeaderExtension<'a> {
.rewind(header_pos, &Bitmap::create())
.map_err(&ErrorKind::TxHashSetErr)?;

// Update our header to reflect the one we rewound to.
self.header = header.clone();
// Update our head to reflect the header we rewound to.
self.head = Tip::from_header(header);

Ok(())
}

@@ -763,7 +760,6 @@ impl<'a> HeaderExtension<'a> {
if header.height == 0 {
return Ok(());
}

if self.root() != header.prev_root {
Err(ErrorKind::InvalidRoot.into())
} else {

@@ -776,7 +772,7 @@ impl<'a> HeaderExtension<'a> {
/// reversible manner within a unit of work provided by the `extending`
/// function.
pub struct Extension<'a> {
header: BlockHeader,
head: Tip,

header_pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
output_pmmr: PMMR<'a, Output, PMMRBackend<Output>>,

@@ -821,9 +817,9 @@ impl<'a> Committed for Extension<'a> {
}

impl<'a> Extension<'a> {
fn new(trees: &'a mut TxHashSet, batch: &'a Batch<'_>, header: BlockHeader) -> Extension<'a> {
fn new(trees: &'a mut TxHashSet, batch: &'a Batch<'_>, head: Tip) -> Extension<'a> {
Extension {
header,
head,
header_pmmr: PMMR::at(
&mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos,

@@ -845,6 +841,11 @@ impl<'a> Extension<'a> {
}
}

/// The head representing the furthest extent of the current extension.
pub fn head(&self) -> Tip {
self.head.clone()
}

/// Build a view of the current UTXO set based on the output PMMR.
pub fn utxo_view(&'a self) -> UTXOView<'a> {
UTXOView::new(

@@ -879,8 +880,8 @@ impl<'a> Extension<'a> {
self.apply_kernel(kernel)?;
}

// Update the header on the extension to reflect the block we just applied.
self.header = b.header.clone();
// Update the head of the extension to reflect the block we just applied.
self.head = Tip::from_header(&b.header);

Ok(())
}

@@ -1024,11 +1025,12 @@ impl<'a> Extension<'a> {
/// Needed for fast-sync (utxo file needs to be rewound before sending
/// across).
pub fn snapshot(&mut self) -> Result<(), Error> {
let header = self.batch.get_block_header(&self.head.last_block_h)?;
self.output_pmmr
.snapshot(&self.header)
.snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?;
self.rproof_pmmr
.snapshot(&self.header)
.snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?;
Ok(())
}

@@ -1036,7 +1038,7 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided block, rewinding to the last output pos
/// and last kernel pos of that block.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!("Rewind to header {} at {}", header.hash(), header.height,);
debug!("Rewind extension to {} at {}", header.hash(), header.height,);

// We need to build bitmaps of added and removed output positions
// so we can correctly rewind all operations applied to the output MMR

@@ -1044,7 +1046,8 @@ impl<'a> Extension<'a> {
// undone during rewind).
// Rewound output pos will be removed from the MMR.
// Rewound input (spent) pos will be added back to the MMR.
let rewind_rm_pos = input_pos_to_rewind(header, &self.header, &self.batch)?;
let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
let rewind_rm_pos = input_pos_to_rewind(header, &head_header, &self.batch)?;

let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1);

@@ -1055,8 +1058,8 @@ impl<'a> Extension<'a> {
&rewind_rm_pos,
)?;

// Update our header to reflect the one we rewound to.
self.header = header.clone();
// Update our head to reflect the header we rewound to.
self.head = Tip::from_header(header);

Ok(())
}

@@ -1119,15 +1122,14 @@ impl<'a> Extension<'a> {
pub fn validate_roots(&self) -> Result<(), Error> {
// If we are validating the genesis block then we have no outputs or
// kernels. So we are done here.
if self.header.height == 0 {
if self.head.height == 0 {
return Ok(());
}

let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
let roots = self.roots();

if roots.output_root != self.header.output_root
|| roots.rproof_root != self.header.range_proof_root
|| roots.kernel_root != self.header.kernel_root
if roots.output_root != head_header.output_root
|| roots.rproof_root != head_header.range_proof_root
|| roots.kernel_root != head_header.kernel_root
{
Err(ErrorKind::InvalidRoot.into())
} else {

@@ -1141,7 +1143,6 @@ impl<'a> Extension<'a> {
if header.height == 0 {
return Ok(());
}

let roots = self.roots();
if roots.header_root != header.prev_root {
Err(ErrorKind::InvalidRoot.into())

@@ -1154,18 +1155,20 @@ impl<'a> Extension<'a> {
pub fn validate_sizes(&self) -> Result<(), Error> {
// If we are validating the genesis block then we have no outputs or
// kernels. So we are done here.
if self.header.height == 0 {
if self.head.height == 0 {
return Ok(());
}

let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
let (header_mmr_size, output_mmr_size, rproof_mmr_size, kernel_mmr_size) = self.sizes();
let expected_header_mmr_size = pmmr::insertion_to_pmmr_index(self.header.height + 2) - 1;
let expected_header_mmr_size =
pmmr::insertion_to_pmmr_index(self.head.height + 2).saturating_sub(1);

if header_mmr_size != expected_header_mmr_size {
Err(ErrorKind::InvalidMMRSize.into())
} else if output_mmr_size != self.header.output_mmr_size {
} else if output_mmr_size != head_header.output_mmr_size {
Err(ErrorKind::InvalidMMRSize.into())
} else if kernel_mmr_size != self.header.kernel_mmr_size {
} else if kernel_mmr_size != head_header.kernel_mmr_size {
Err(ErrorKind::InvalidMMRSize.into())
} else if output_mmr_size != rproof_mmr_size {
Err(ErrorKind::InvalidMMRSize.into())

@@ -1210,10 +1213,11 @@ impl<'a> Extension<'a> {
pub fn validate_kernel_sums(&self) -> Result<((Commitment, Commitment)), Error> {
let now = Instant::now();

let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
let genesis = self.get_header_by_height(0)?;
let (utxo_sum, kernel_sum) = self.verify_kernel_sums(
self.header.total_overage(genesis.kernel_mmr_size > 0),
self.header.total_kernel_offset(),
head_header.total_overage(genesis.kernel_mmr_size > 0),
head_header.total_kernel_offset(),
)?;

debug!(

@@ -1235,7 +1239,7 @@ impl<'a> Extension<'a> {
self.validate_roots()?;
self.validate_sizes()?;

if self.header.height == 0 {
if self.head.height == 0 {
let zero_commit = secp_static::commit_to_zero_value();
return Ok((zero_commit.clone(), zero_commit.clone()));
}

@@ -15,12 +15,12 @@
use self::chain::types::NoopAdapter;
use self::chain::types::Options;
use self::chain::Chain;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::Block;
use self::core::genesis;
use self::core::global::ChainTypes;
use self::core::libtx::{self, reward};
use self::core::pow::Difficulty;
use self::core::{consensus, global, pow};
use self::keychain::{ExtKeychainPath, Keychain};
use self::util::RwLock;

@@ -36,9 +36,7 @@ pub fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);
}

pub fn setup(dir_name: &str, genesis: Block) -> Chain {
util::init_test_logger();
clean_output_dir(dir_name);
pub fn init_chain(dir_name: &str, genesis: Block) -> Chain {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
Chain::init(
dir_name.to_string(),

@@ -51,39 +49,31 @@ pub fn setup(dir_name: &str, genesis: Block) -> Chain {
.unwrap()
}

/// Mine a chain of specified length to assist with automated tests.
/// Must call clean_output_dir at the end of your test.
pub fn mine_chain(dir_name: &str, chain_length: u64) -> Chain {
global::set_mining_mode(ChainTypes::AutomatedTesting);

// add coinbase data from the dev genesis block
let mut genesis = genesis::genesis_dev();
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
/// Build genesis block with reward (non-empty, like we have in mainnet).
fn genesis_block<K>(keychain: &K) -> Block
where
K: Keychain,
{
let key_id = keychain::ExtKeychain::derive_key_id(0, 1, 0, 0, 0);
let reward = reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
keychain,
&libtx::ProofBuilder::new(keychain),
&key_id,
0,
false,
)
.unwrap();
genesis = genesis.with_reward(reward.0, reward.1);

let mut chain = setup(dir_name, pow::mine_genesis_block().unwrap());
chain.set_txhashset_roots(&mut genesis).unwrap();
genesis.header.output_mmr_size = 1;
genesis.header.kernel_mmr_size = 1;

// get a valid PoW
pow::pow_size(
&mut genesis.header,
Difficulty::unit(),
global::proofsize(),
global::min_edge_bits(),
)
.unwrap();
genesis::genesis_dev().with_reward(reward.0, reward.1)
}

/// Mine a chain of specified length to assist with automated tests.
/// Probably a good idea to call clean_output_dir at the beginning and end of each test.
pub fn mine_chain(dir_name: &str, chain_length: u64) -> Chain {
global::set_mining_mode(ChainTypes::AutomatedTesting);
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let genesis = genesis_block(&keychain);
let mut chain = init_chain(dir_name, genesis.clone());
mine_some_on_top(&mut chain, chain_length, &keychain);
chain
}

@@ -122,6 +112,29 @@
.unwrap();
b.header.pow.proof.edge_bits = edge_bits;

let bhash = b.hash();
chain.process_block(b, Options::MINE).unwrap();

// checking our new head
let head = chain.head().unwrap();
assert_eq!(head.height, n);
assert_eq!(head.last_block_h, bhash);

// now check the block_header of the head
let header = chain.head_header().unwrap();
assert_eq!(header.height, n);
assert_eq!(header.hash(), bhash);

// now check the block itself
let block = chain.get_block(&header.hash()).unwrap();
assert_eq!(block.header.height, n);
assert_eq!(block.hash(), bhash);
assert_eq!(block.outputs().len(), 1);

// now check the block height index
let header_by_height = chain.get_header_by_height(n).unwrap();
assert_eq!(header_by_height.hash(), bhash);

chain.validate(false).unwrap();
}
}

@@ -12,167 +12,35 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use self::chain::types::NoopAdapter;
use self::chain::Chain;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, Transaction};
use self::core::global::{self, ChainTypes};
use self::core::libtx;
use self::core::pow::{self, Difficulty};
use self::core::{consensus, genesis};
use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use self::util::RwLock;
use chrono::Duration;
use grin_chain as chain;
use self::core::genesis;
use grin_core as core;
use grin_keychain as keychain;
use grin_util as util;
use std::fs;
use std::sync::Arc;

fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);
}
mod chain_test_helper;

fn setup(dir_name: &str) -> Chain {
util::init_test_logger();
clean_output_dir(dir_name);
global::set_mining_mode(ChainTypes::AutomatedTesting);
let genesis_block = pow::mine_genesis_block().unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
chain::Chain::init(
dir_name.to_string(),
Arc::new(NoopAdapter {}),
genesis_block,
pow::verify_size,
verifier_cache,
false,
)
.unwrap()
}

fn reload_chain(dir_name: &str) -> Chain {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
chain::Chain::init(
dir_name.to_string(),
Arc::new(NoopAdapter {}),
genesis::genesis_dev(),
pow::verify_size,
verifier_cache,
false,
)
.unwrap()
}
use self::chain_test_helper::{clean_output_dir, init_chain, mine_chain};

#[test]
fn data_files() {
util::init_test_logger();

let chain_dir = ".grin_df";
//new block so chain references should be freed
clean_output_dir(chain_dir);

// Mine a few blocks on a new chain.
{
let chain = setup(chain_dir);
let keychain = ExtKeychain::from_random_seed(false).unwrap();

for n in 1..4 {
let prev = chain.head_header().unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&pk,
0,
false,
)
.unwrap();
let mut b =
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
.unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;

chain.set_txhashset_roots(&mut b).unwrap();

pow::pow_size(
&mut b.header,
next_header_info.difficulty,
global::proofsize(),
global::min_edge_bits(),
)
.unwrap();

chain
.process_block(b.clone(), chain::Options::MINE)
.unwrap();

chain.validate(false).unwrap();
}
}
// Now reload the chain, should have valid indices
{
let chain = reload_chain(chain_dir);
let chain = mine_chain(chain_dir, 4);
chain.validate(false).unwrap();
assert_eq!(chain.head().unwrap().height, 3);
};

// Now reload the chain from existing data files and check it is valid.
{
let chain = init_chain(chain_dir, genesis::genesis_dev());
chain.validate(false).unwrap();
assert_eq!(chain.head().unwrap().height, 3);
}

// Cleanup chain directory
clean_output_dir(chain_dir);
}

fn _prepare_block(kc: &ExtKeychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, vec![]);
chain.set_txhashset_roots(&mut b).unwrap();
b
}

fn _prepare_block_tx(
kc: &ExtKeychain,
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, txs);
chain.set_txhashset_roots(&mut b).unwrap();
b
}

fn _prepare_fork_block(kc: &ExtKeychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, vec![]);
chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
b
}

fn _prepare_fork_block_tx(
kc: &ExtKeychain,
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, txs);
chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
b
}

fn _prepare_block_nosum(
kc: &ExtKeychain,
prev: &BlockHeader,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier();

let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward =
libtx::reward::output(kc, &libtx::ProofBuilder::new(kc), &key_id, fees, false).unwrap();
let mut b = match core::core::Block::new(
prev,
txs.into_iter().cloned().collect(),
Difficulty::from_num(diff),
reward,
) {
Err(e) => panic!("{:?}", e),
Ok(b) => b,
};
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.total_difficulty = Difficulty::from_num(diff);
b
}

@@ -17,9 +17,8 @@ use self::chain::Chain;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, OutputIdentifier, Transaction};
use self::core::genesis;
use self::core::global::ChainTypes;
use self::core::libtx::{self, build, reward, ProofBuilder};
use self::core::libtx::{self, build, ProofBuilder};
use self::core::pow::Difficulty;
use self::core::{consensus, global, pow};
use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};

@@ -30,27 +29,11 @@ use grin_chain::{BlockStatus, ChainAdapter, Options};
use grin_core as core;
use grin_keychain as keychain;
use grin_util as util;
use std::fs;
use std::sync::Arc;

fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);
}
mod chain_test_helper;

fn setup(dir_name: &str, genesis: Block) -> Chain {
util::init_test_logger();
clean_output_dir(dir_name);
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
chain::Chain::init(
dir_name.to_string(),
Arc::new(NoopAdapter {}),
genesis,
pow::verify_size,
verifier_cache,
false,
)
.unwrap()
}
use self::chain_test_helper::{clean_output_dir, init_chain, mine_chain};

/// Adapter to retrieve last status
pub struct StatusAdapter {

@@ -89,118 +72,20 @@ fn setup_with_status_adapter(dir_name: &str, genesis: Block, adapter: Arc<Status

#[test]
fn mine_empty_chain() {
global::set_mining_mode(ChainTypes::AutomatedTesting);
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
{
mine_some_on_top(".grin", pow::mine_genesis_block().unwrap(), &keychain);
}
// Cleanup chain directory
clean_output_dir(".grin");
let chain_dir = ".grin.empty";
clean_output_dir(chain_dir);
let chain = mine_chain(chain_dir, 1);
assert_eq!(chain.head().unwrap().height, 0);
clean_output_dir(chain_dir);
}

#[test]
fn mine_genesis_reward_chain() {
global::set_mining_mode(ChainTypes::AutomatedTesting);

// add coinbase data from the dev genesis block
let mut genesis = genesis::genesis_dev();
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let key_id = keychain::ExtKeychain::derive_key_id(0, 1, 0, 0, 0);
let reward = reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
0,
false,
)
.unwrap();
genesis = genesis.with_reward(reward.0, reward.1);

let tmp_chain_dir = ".grin.tmp";
{
// setup a tmp chain to handle tx hashsets
let tmp_chain = setup(tmp_chain_dir, pow::mine_genesis_block().unwrap());
tmp_chain.set_txhashset_roots(&mut genesis).unwrap();
genesis.header.output_mmr_size = 1;
genesis.header.kernel_mmr_size = 1;
}

// get a valid PoW
pow::pow_size(
&mut genesis.header,
Difficulty::unit(),
global::proofsize(),
global::min_edge_bits(),
)
.unwrap();

mine_some_on_top(".grin.genesis", genesis, &keychain);
// Cleanup chain directories
clean_output_dir(tmp_chain_dir);
clean_output_dir(".grin.genesis");
}


fn mine_some_on_top<K>(dir: &str, genesis: Block, keychain: &K)
where
	K: Keychain,
{
	let chain = setup(dir, genesis);

	for n in 1..4 {
		let prev = chain.head_header().unwrap();
		let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
		let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
		let reward =
			libtx::reward::output(keychain, &libtx::ProofBuilder::new(keychain), &pk, 0, false)
				.unwrap();
		let mut b =
			core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
				.unwrap();
		b.header.timestamp = prev.timestamp + Duration::seconds(60);
		b.header.pow.secondary_scaling = next_header_info.secondary_scaling;

		chain.set_txhashset_roots(&mut b).unwrap();

		let edge_bits = if n == 2 {
			global::min_edge_bits() + 1
		} else {
			global::min_edge_bits()
		};
		b.header.pow.proof.edge_bits = edge_bits;
		pow::pow_size(
			&mut b.header,
			next_header_info.difficulty,
			global::proofsize(),
			edge_bits,
		)
		.unwrap();
		b.header.pow.proof.edge_bits = edge_bits;

		let bhash = b.hash();
		chain.process_block(b, chain::Options::MINE).unwrap();

		// checking our new head
		let head = chain.head().unwrap();
		assert_eq!(head.height, n);
		assert_eq!(head.last_block_h, bhash);

		// now check the block_header of the head
		let header = chain.head_header().unwrap();
		assert_eq!(header.height, n);
		assert_eq!(header.hash(), bhash);

		// now check the block itself
		let block = chain.get_block(&header.hash()).unwrap();
		assert_eq!(block.header.height, n);
		assert_eq!(block.hash(), bhash);
		assert_eq!(block.outputs().len(), 1);

		// now check the block height index
		let header_by_height = chain.get_header_by_height(n).unwrap();
		assert_eq!(header_by_height.hash(), bhash);

		chain.validate(false).unwrap();
	}
}

#[test]
fn mine_short_chain() {
	let chain_dir = ".grin.genesis";
	clean_output_dir(chain_dir);
	let chain = mine_chain(chain_dir, 4);
	assert_eq!(chain.head().unwrap().height, 3);
	clean_output_dir(chain_dir);
}

#[test]

@ -279,7 +164,7 @@ fn mine_reorg() {
fn mine_forks() {
	global::set_mining_mode(ChainTypes::AutomatedTesting);
	{
		let chain = setup(".grin2", pow::mine_genesis_block().unwrap());
		let chain = init_chain(".grin2", pow::mine_genesis_block().unwrap());
		let kc = ExtKeychain::from_random_seed(false).unwrap();

		// add a first block to not fork genesis

@ -294,9 +179,6 @@ fn mine_forks() {
		let prev = chain.head_header().unwrap();
		let b1 = prepare_block(&kc, &prev, &chain, 3 * n);

		// 2nd block with higher difficulty for other branch
		let b2 = prepare_block(&kc, &prev, &chain, 3 * n + 1);

		// process the first block to extend the chain
		let bhash = b1.hash();
		chain.process_block(b1, chain::Options::SKIP_POW).unwrap();

@ -307,6 +189,9 @@ fn mine_forks() {
		assert_eq!(head.last_block_h, bhash);
		assert_eq!(head.prev_block_h, prev.hash());

		// 2nd block with higher difficulty for other branch
		let b2 = prepare_block(&kc, &prev, &chain, 3 * n + 1);

		// process the 2nd block to build a fork with more work
		let bhash = b2.hash();
		chain.process_block(b2, chain::Options::SKIP_POW).unwrap();

@ -327,7 +212,7 @@ fn mine_losing_fork() {
	global::set_mining_mode(ChainTypes::AutomatedTesting);
	let kc = ExtKeychain::from_random_seed(false).unwrap();
	{
		let chain = setup(".grin3", pow::mine_genesis_block().unwrap());
		let chain = init_chain(".grin3", pow::mine_genesis_block().unwrap());

		// add a first block we'll be forking from
		let prev = chain.head_header().unwrap();

@ -367,7 +252,7 @@ fn longer_fork() {
	// then send back on the 1st
	let genesis = pow::mine_genesis_block().unwrap();
	{
		let chain = setup(".grin4", genesis.clone());
		let chain = init_chain(".grin4", genesis.clone());

		// add blocks to both chains, 20 on the main one, only the first 5
		// for the forked chain

@ -406,8 +291,11 @@ fn longer_fork() {
fn spend_in_fork_and_compact() {
	global::set_mining_mode(ChainTypes::AutomatedTesting);
	util::init_test_logger();
	// Cleanup chain directory
	clean_output_dir(".grin6");
	{
		let chain = setup(".grin6", pow::mine_genesis_block().unwrap());
		let chain = init_chain(".grin6", pow::mine_genesis_block().unwrap());
		let prev = chain.head_header().unwrap();
		let kc = ExtKeychain::from_random_seed(false).unwrap();
		let pb = ProofBuilder::new(&kc);

@ -541,7 +429,7 @@ fn spend_in_fork_and_compact() {
fn output_header_mappings() {
	global::set_mining_mode(ChainTypes::AutomatedTesting);
	{
		let chain = setup(
		let chain = init_chain(
			".grin_header_for_output",
			pow::mine_genesis_block().unwrap(),
		);

@ -635,7 +523,7 @@ where
	K: Keychain,
{
	let mut b = prepare_block_nosum(kc, prev, diff, vec![]);
	chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
	chain.set_txhashset_roots(&mut b).unwrap();
	b
}

@ -650,7 +538,7 @@ where
	K: Keychain,
{
	let mut b = prepare_block_nosum(kc, prev, diff, txs);
	chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
	chain.set_txhashset_roots(&mut b).unwrap();
	b
}
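
Both helpers now go through the plain set_txhashset_roots, so preparing a block on a fork no longer needs the explicit prev header that set_txhashset_roots_forked took. A short sketch of how the fork tests drive them (hedged: names and difficulties mirror the mine_forks hunks above):

	// Two sibling blocks on the same parent; the second carries more work,
	// so processing it reorgs the head onto the fork.
	let prev = chain.head_header().unwrap();
	let b1 = prepare_block(&kc, &prev, &chain, 3);
	let b2 = prepare_block(&kc, &prev, &chain, 4);
	chain.process_block(b1, chain::Options::SKIP_POW).unwrap();
	chain.process_block(b2, chain::Options::SKIP_POW).unwrap();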

@ -12,87 +12,46 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use self::chain::{Error, Tip};
use self::core::core::hash::Hashed;
use self::core::core::Block;
use self::core::global::{self, ChainTypes};
use self::core::libtx;
use self::core::pow::{self, Difficulty};
use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use env_logger;
use grin_chain as chain;
use grin_core as core;
use grin_keychain as keychain;
use std::fs;
use std::sync::Arc;
use grin_util as util;

fn clean_output_dir(dir_name: &str) {
	let _ = fs::remove_dir_all(dir_name);
}
mod chain_test_helper;

fn setup_chain(genesis: &Block, chain_store: Arc<chain::store::ChainStore>) -> Result<(), Error> {
	let batch = chain_store.batch()?;
	batch.save_block_header(&genesis.header)?;
	batch.save_block(&genesis)?;
	let head = Tip::from_header(&genesis.header);
	batch.save_head(&head)?;
	batch.save_block_header(&genesis.header)?;
	batch.commit()?;
	Ok(())
}
use self::chain_test_helper::{clean_output_dir, mine_chain};

#[test]
fn test_various_store_indices() {
	match env_logger::try_init() {
		Ok(_) => println!("Initializing env logger"),
		Err(e) => println!("env logger already initialized: {:?}", e),
	};
fn test_store_indices() {
	util::init_test_logger();

	let chain_dir = ".grin_idx_1";
	clean_output_dir(chain_dir);

	let keychain = ExtKeychain::from_random_seed(false).unwrap();
	let key_id = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
	let chain = mine_chain(chain_dir, 4);

	let chain_store = Arc::new(chain::store::ChainStore::new(chain_dir).unwrap());
	// Check head exists in the db.
	assert_eq!(chain.head().unwrap().height, 3);

	global::set_mining_mode(ChainTypes::AutomatedTesting);
	let genesis = pow::mine_genesis_block().unwrap();
	// Check the header exists in the db.
	assert_eq!(chain.head_header().unwrap().height, 3);

	setup_chain(&genesis, chain_store.clone()).unwrap();
	// Check header_by_height index.
	let block_header = chain.get_header_by_height(3).unwrap();
	let block_hash = block_header.hash();
	assert_eq!(block_hash, chain.head().unwrap().last_block_h);

	let reward = libtx::reward::output(
		&keychain,
		&libtx::ProofBuilder::new(&keychain),
		&key_id,
		0,
		false,
	)
	.unwrap();
	let block = Block::new(&genesis.header, vec![], Difficulty::min(), reward).unwrap();
	let block_hash = block.hash();

	{
		let batch = chain_store.batch().unwrap();
		batch.save_block_header(&block.header).unwrap();
		batch.save_block(&block).unwrap();
		batch.commit().unwrap();
	}

	let block_header = chain_store.get_block_header(&block_hash).unwrap();
	assert_eq!(block_header.hash(), block_hash);

	// Test we can retrieve the block from the db and that we can safely delete the
	// block from the db even though the block_sums are missing.
	{
		// Block exists in the db.
		assert!(chain_store.get_block(&block_hash).is_ok());
		assert_eq!(chain.get_block(&block_hash).unwrap().hash(), block_hash);

		// Block sums do not exist (we never set them up).
		assert!(chain_store.get_block_sums(&block_hash).is_err());
		// Check we have block_sums in the db.
		assert!(chain.get_block_sums(&block_hash).is_ok());

		{
			// Start a new batch and delete the block.
			let batch = chain_store.batch().unwrap();
			let store = chain.store();
			let batch = store.batch().unwrap();
			assert!(batch.delete_block(&block_hash).is_ok());

			// Block is deleted within this batch.

@ -100,8 +59,9 @@ fn test_various_store_indices() {
		}

		// Check the batch did not commit any changes to the store.
		assert!(chain_store.get_block(&block_hash).is_ok());
		assert!(chain.get_block(&block_hash).is_ok());
	}

	// Cleanup chain directory
	clean_output_dir(chain_dir);
}
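
The tail of the test pins down batch isolation: a delete performed inside a batch that is dropped without commit() must leave the underlying store untouched. The pattern it exercises, condensed (a hedged restatement of the asserts above):

	{
		let store = chain.store();
		let batch = store.batch().unwrap();
		assert!(batch.delete_block(&block_hash).is_ok());
		// batch dropped here without commit()
	}
	// The block is still in the db.
	assert!(chain.get_block(&block_hash).is_ok());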

@ -18,8 +18,10 @@ use self::chain_test_helper::{clean_output_dir, mine_chain};

#[test]
fn test() {
	let chain = mine_chain(".txhashset_archive_test", 35);
	let chain_dir = ".txhashset_archive_test";
	clean_output_dir(chain_dir);
	let chain = mine_chain(chain_dir, 35);
	let header = chain.txhashset_archive_header().unwrap();
	assert_eq!(10, header.height);
	clean_output_dir(".txhashset_archive_test");
	clean_output_dir(chain_dir);
}

@ -58,7 +58,8 @@ impl ChainAdapter {
		let batch = s.batch().unwrap();

		batch.save_block_header(header).unwrap();
		batch.save_head(&tip).unwrap();
		batch.save_body_head(&tip).unwrap();
		batch.save_header_head(&tip).unwrap();

		// Retrieve previous block_sums from the db.
		let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) {
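
Since head and header_head are now stored under separate keys, a test adapter that fakes block acceptance has to update both so the two chain views stay consistent. A sketch gathering the calls above into one place (the helper name and the Batch type path are assumptions, not part of this diff):

	fn save_fake_head(batch: &grin_chain::store::Batch<'_>, header: &BlockHeader) {
		// Hypothetical helper: persist the header, then move both heads to it.
		let tip = Tip::from_header(header);
		batch.save_block_header(header).unwrap();
		batch.save_body_head(&tip).unwrap();
		batch.save_header_head(&tip).unwrap();
	}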

@ -109,13 +109,7 @@ where
	pub fn read(&self, position: u64) -> Option<T> {
		match self.file.read_as_elmt(position - 1) {
			Ok(x) => Some(x),
			Err(e) => {
				error!(
					"Corrupted storage, could not read an entry from data file: {:?}",
					e
				);
				None
			}
			Err(_) => None,
		}
	}
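
The read path now folds any storage error into a plain None instead of logging "corrupted storage" on every failed read, leaving it to the caller to decide whether a missing entry is fatal. A sketch of the calling pattern (caller-side names here are hypothetical):

	// `data_file` is a wrapper exposing the read() above.
	match data_file.read(pos) {
		Some(elmt) => {
			// use the element
		}
		None => {
			// entry absent or unreadable; the caller can log, skip, or rewind
			// as appropriate for its context
		}
	}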