mirror of https://github.com/mimblewimble/grin.git
synced 2025-01-21 03:21:08 +03:00

run rustfmt against chain crate

suspect it has not been running successfully for a while...
This commit is contained in:
parent ed88ad8bbc
commit bcebe7b5b3
5 changed files with 64 additions and 37 deletions
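For context, a minimal sketch of the guard the commit message implies was missing, using stock cargo-fmt and assuming the chain crate keeps its grin_chain package name (none of this is set up by the commit itself):

    # Reformat only the chain crate, run from the repository root.
    cargo fmt --package grin_chain

    # CI-style check: exits non-zero if any file is not rustfmt-clean,
    # which would have caught the formatting drift cleaned up here.
    cargo fmt --package grin_chain -- --check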
chain/src/chain.rs
@@ -250,7 +250,7 @@ impl Chain {
 				// block got accepted but we did not extend the head
 				// so its on a fork (or is the start of a new fork)
 				// broadcast the block out so everyone knows about the fork
-				// broadcast the block
+				// broadcast the block
 				self.adapter.block_accepted(&b, opts);
 
 				Ok((None, Some(b)))
@@ -379,7 +379,7 @@ impl Chain {
 		trace!(
 			LOGGER,
 			"chain: done check_orphans at {}. # remaining orphans {}",
-			height-1,
+			height - 1,
 			self.orphans.len(),
 		);
 	}
@@ -541,12 +541,7 @@ impl Chain {
 	/// If we're willing to accept that new state, the data stream will be
 	/// read as a zip file, unzipped and the resulting state files should be
 	/// rewound to the provided indexes.
-	pub fn txhashset_write<T>(
-		&self,
-		h: Hash,
-		txhashset_data: File,
-		status: &T,
-	) -> Result<(), Error>
+	pub fn txhashset_write<T>(&self, h: Hash, txhashset_data: File, status: &T) -> Result<(), Error>
 	where
 		T: TxHashsetWriteStatus,
 	{
@@ -566,7 +561,10 @@ impl Chain {
 		// Validate against a read-only extension first.
 		// The kernel history validation requires a read-only extension
 		// due to the internal rewind behavior.
-		debug!(LOGGER, "chain: txhashset_write: rewinding and validating (read-only)");
+		debug!(
+			LOGGER,
+			"chain: txhashset_write: rewinding and validating (read-only)"
+		);
 		txhashset::extending_readonly(&mut txhashset, |extension| {
 			extension.rewind(&header, &header)?;
 			extension.validate(&header, false, status)?;
@@ -579,7 +577,10 @@ impl Chain {
 		})?;
 
 		// all good, prepare a new batch and update all the required records
-		debug!(LOGGER, "chain: txhashset_write: rewinding a 2nd time (writeable)");
+		debug!(
+			LOGGER,
+			"chain: txhashset_write: rewinding a 2nd time (writeable)"
+		);
 		let mut batch = self.store.batch()?;
 		txhashset::extending(&mut txhashset, &mut batch, |extension| {
 			extension.rewind(&header, &header)?;
@@ -587,7 +588,10 @@ impl Chain {
 			Ok(())
 		})?;
 
-		debug!(LOGGER, "chain: txhashset_write: finished validating and rebuilding");
+		debug!(
+			LOGGER,
+			"chain: txhashset_write: finished validating and rebuilding"
+		);
 
 		status.on_save();
 		// replace the chain txhashset with the newly built one
@@ -605,7 +609,10 @@ impl Chain {
 		}
 		batch.commit()?;
 
-		debug!(LOGGER, "chain: txhashset_write: finished committing the batch (head etc.)");
+		debug!(
+			LOGGER,
+			"chain: txhashset_write: finished committing the batch (head etc.)"
+		);
 
 		self.check_orphans(header.height + 1);
 

chain/src/lib.rs
@@ -31,8 +31,8 @@ extern crate serde;
 extern crate serde_derive;
 #[macro_use]
 extern crate slog;
-extern crate failure;
 extern crate chrono;
+extern crate failure;
 #[macro_use]
 extern crate failure_derive;
 

chain/src/pipe.rs
@@ -17,9 +17,10 @@
 use std::collections::VecDeque;
 use std::sync::{Arc, RwLock};
 
-use chrono::prelude::{Utc};
+use chrono::prelude::Utc;
 use chrono::Duration;
 
+use chain::OrphanBlockPool;
 use core::consensus;
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
@@ -29,7 +30,6 @@ use error::{Error, ErrorKind};
 use grin_store;
 use store;
 use txhashset;
-use chain::{OrphanBlockPool};
 use types::{Options, Tip};
 use util::LOGGER;
 
@@ -218,8 +218,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	}
 
 	// TODO: remove CI check from here somehow
-	if header.timestamp
-		> Utc::now() + Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
+	if header.timestamp > Utc::now() + Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
 		&& !global::is_automated_testing_mode()
 	{
 		// refuse blocks more than 12 blocks intervals in future (as in bitcoin)

chain/src/store.rs
@@ -197,7 +197,8 @@ impl ChainStore {
 	// the full block from the db (if the block is found).
 	// (bool, Bitmap) : (false if bitmap was built and not found in db)
 	fn get_block_input_bitmap_db(&self, bh: &Hash) -> Result<(bool, Bitmap), Error> {
-		if let Ok(Some(bytes)) = self.db
+		if let Ok(Some(bytes)) = self
+			.db
 			.get(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()))
 		{
 			Ok((true, Bitmap::deserialize(&bytes)))
@@ -442,7 +443,8 @@ impl Iterator for DifficultyIter {
 		if let Some(header) = self.header.clone() {
 			self.prev_header = self.store.get_block_header(&header.previous).ok();
 
-			let prev_difficulty = self.prev_header
+			let prev_difficulty = self
+				.prev_header
 				.clone()
 				.map_or(Difficulty::zero(), |x| x.total_difficulty);
 			let difficulty = header.total_difficulty - prev_difficulty;

chain/src/txhashset.rs
@@ -449,11 +449,7 @@ impl<'a> Extension<'a> {
 		kernel_pos: u64,
 		rewind_rm_pos: &Bitmap,
 	) -> Result<(), Error> {
-		self.rewind_to_pos(
-			output_pos,
-			kernel_pos,
-			rewind_rm_pos,
-		)?;
+		self.rewind_to_pos(output_pos, kernel_pos, rewind_rm_pos)?;
 		Ok(())
 	}
 
@@ -466,7 +462,10 @@ impl<'a> Extension<'a> {
 	/// new tx).
 	pub fn apply_raw_tx(&mut self, tx: &Transaction) -> Result<(), Error> {
 		// This should *never* be called on a writeable extension...
-		assert!(self.rollback, "applied raw_tx to writeable txhashset extension");
+		assert!(
+			self.rollback,
+			"applied raw_tx to writeable txhashset extension"
+		);
 
 		// Checkpoint the MMR positions before we apply the new tx,
 		// anything goes wrong we will rewind to these positions.
@@ -474,7 +473,8 @@ impl<'a> Extension<'a> {
 		let kernel_pos = self.kernel_pmmr.unpruned_size();
 
 		// Build bitmap of output pos spent (as inputs) by this tx for rewind.
-		let rewind_rm_pos = tx.inputs
+		let rewind_rm_pos = tx
+			.inputs
 			.iter()
 			.filter_map(|x| self.get_output_pos(&x.commitment()).ok())
 			.map(|x| x as u32)
@@ -671,7 +671,8 @@ impl<'a> Extension<'a> {
 			}
 		}
 		// push new outputs in their MMR and save them in the index
-		let pos = self.output_pmmr
+		let pos = self
+			.output_pmmr
 			.push(OutputIdentifier::from_output(out))
 			.map_err(&ErrorKind::TxHashSetErr)?;
 		self.batch.save_output_pos(&out.commitment(), pos)?;
@@ -716,7 +717,8 @@ impl<'a> Extension<'a> {
 
 		// then calculate the Merkle Proof based on the known pos
 		let pos = self.batch.get_output_pos(&output.commit)?;
-		let merkle_proof = self.output_pmmr
+		let merkle_proof = self
+			.output_pmmr
 			.merkle_proof(pos)
 			.map_err(&ErrorKind::TxHashSetErr)?;
 
@@ -1028,7 +1030,10 @@ impl<'a> Extension<'a> {
 	/// fast sync where a reorg past the horizon could allow a whole rewrite of
 	/// the kernel set.
 	pub fn validate_kernel_history(&mut self, header: &BlockHeader) -> Result<(), Error> {
-		assert!(self.rollback, "verified kernel history on writeable txhashset extension");
+		assert!(
+			self.rollback,
+			"verified kernel history on writeable txhashset extension"
+		);
 
 		let mut current = header.clone();
 		loop {
@@ -1049,7 +1054,6 @@ impl<'a> Extension<'a> {
 		}
 		Ok(())
 	}
-
 }
 
 /// Packages the txhashset data files into a zip and returns a Read to the
@@ -1066,7 +1070,7 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
 		fs::remove_dir_all(&temp_txhashset_path)?;
 	}
 	// Copy file to another dir
-	file::copy_dir_to(&txhashset_path,&temp_txhashset_path)?;
+	file::copy_dir_to(&txhashset_path, &temp_txhashset_path)?;
 	// Check and remove file that are not supposed to be there
 	check_and_remove_files(&temp_txhashset_path, header)?;
 	// Compress zip
@@ -1081,7 +1085,11 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
 
 /// Extract the txhashset data from a zip file and writes the content into the
 /// txhashset storage dir
-pub fn zip_write(root_dir: String, txhashset_data: File, header: &BlockHeader) -> Result<(), Error> {
+pub fn zip_write(
+	root_dir: String,
+	txhashset_data: File,
+	header: &BlockHeader,
+) -> Result<(), Error> {
 	let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
 	fs::create_dir_all(txhashset_path.clone())?;
 	zip::decompress(txhashset_data, &txhashset_path)
@@ -1115,7 +1123,10 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 
	// Removing unexpected directories if needed
 	if !dir_difference.is_empty() {
-		debug!(LOGGER, "Unexpected folder(s) found in txhashset folder, removing.");
+		debug!(
+			LOGGER,
+			"Unexpected folder(s) found in txhashset folder, removing."
+		);
 		for diff in dir_difference {
 			let diff_path = txhashset_path.join(diff);
 			file::delete(diff_path)?;
@@ -1126,9 +1137,13 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 	let pmmr_files_expected: HashSet<_> = PMMR_FILES
 		.iter()
 		.cloned()
-		.map(|s| if s.contains("pmmr_leaf.bin") {
-			format!("{}.{}", s, header.hash())}
-		else {String::from(s) })
+		.map(|s| {
+			if s.contains("pmmr_leaf.bin") {
+				format!("{}.{}", s, header.hash())
+			} else {
+				String::from(s)
+			}
+		})
 		.collect();
 
 	let subdirectories = fs::read_dir(txhashset_path)?;
@@ -1149,7 +1164,11 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 			.cloned()
 			.collect();
 		if !difference.is_empty() {
-			debug!(LOGGER, "Unexpected file(s) found in txhashset subfolder {:?}, removing.", &subdirectory_path);
+			debug!(
+				LOGGER,
+				"Unexpected file(s) found in txhashset subfolder {:?}, removing.",
+				&subdirectory_path
+			);
 			for diff in difference {
 				let diff_path = subdirectory_path.join(diff);
 				file::delete(diff_path)?;