Merge branch master into milestone/2.x.x
Cargo.lock (generated)
@ -927,7 +927,7 @@ dependencies = [
|
|||
"serde_derive 1.0.93 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"walkdir 2.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"zeroize 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"zip 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"zip 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -1322,15 +1322,6 @@ dependencies = [
|
|||
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "msdos_time"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ncurses"
|
||||
version = "5.99.0"
|
||||
|
@ -2693,12 +2684,11 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zip"
|
||||
version = "0.4.2"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"msdos_time 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"podio 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[metadata]
|
||||
|
@ -2822,7 +2812,6 @@ dependencies = [
|
|||
"checksum mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)" = "83f51996a3ed004ef184e16818edc51fadffe8e7ca68be67f9dee67d84d0ff23"
|
||||
"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125"
|
||||
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
|
||||
"checksum msdos_time 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aad9dfe950c057b1bfe9c1f2aa51583a8468ef2a5baba2ebbe06d775efeb7729"
|
||||
"checksum ncurses 5.99.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15699bee2f37e9f8828c7b35b2bc70d13846db453f2d507713b758fabe536b82"
|
||||
"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
|
||||
"checksum nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce"
|
||||
|
@ -2980,4 +2969,4 @@ dependencies = [
|
|||
"checksum yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d"
|
||||
"checksum zeroize 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e2ea4afc22e9497e26b42bf047083c30f7e3ca566f3bcd7187f83d18b327043"
|
||||
"checksum zeroize_derive 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afd1469e4bbca3b96606d26ba6e9bd6d3aed3b1299c82b92ec94377d22d78dbc"
|
||||
"checksum zip 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "36b9e08fb518a65cf7e08a1e482573eb87a2f4f8c6619316612a3c1f162fe822"
|
||||
"checksum zip 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c18fc320faf909036e46ac785ea827f72e485304877faf1a3a39538d3714dbc3"
|
||||
|
|
|
@ -34,6 +34,7 @@ failure_derive = "0.1"
|
|||
|
||||
grin_api = { path = "./api", version = "2.0.1-beta.1" }
|
||||
grin_config = { path = "./config", version = "2.0.1-beta.1" }
|
||||
grin_chain = { path = "./chain", version = "2.0.1-beta.1" }
|
||||
grin_core = { path = "./core", version = "2.0.1-beta.1" }
|
||||
grin_keychain = { path = "./keychain", version = "2.0.1-beta.1" }
|
||||
grin_p2p = { path = "./p2p", version = "2.0.1-beta.1" }
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
use super::utils::w;
|
||||
use crate::core::core::hash::Hashed;
|
||||
use crate::core::core::Transaction;
|
||||
use crate::core::ser;
|
||||
use crate::core::ser::{self, ProtocolVersion};
|
||||
use crate::pool;
|
||||
use crate::rest::*;
|
||||
use crate::router::{Handler, ResponseFuture};
|
||||
|
@ -64,7 +64,6 @@ impl PoolPushHandler {
|
|||
|
||||
let fluff = params.get("fluff").is_some();
|
||||
let pool_arc = match w(&self.tx_pool) {
|
||||
//w(&self.tx_pool).clone();
|
||||
Ok(p) => p,
|
||||
Err(e) => return Box::new(err(e)),
|
||||
};
|
||||
|
@ -76,14 +75,14 @@ impl PoolPushHandler {
|
|||
.map_err(|e| ErrorKind::RequestError(format!("Bad request: {}", e)).into())
|
||||
})
|
||||
.and_then(move |tx_bin| {
|
||||
ser::deserialize(&mut &tx_bin[..])
|
||||
// TODO - pass protocol version in via the api call?
|
||||
let version = ProtocolVersion::local();
|
||||
|
||||
ser::deserialize(&mut &tx_bin[..], version)
|
||||
.map_err(|e| ErrorKind::RequestError(format!("Bad request: {}", e)).into())
|
||||
})
|
||||
.and_then(move |tx: Transaction| {
|
||||
let source = pool::TxSource {
|
||||
debug_name: "push-api".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
let source = pool::TxSource::PushApi;
|
||||
info!(
|
||||
"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
|
||||
tx.hash(),
|
||||
|
|
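The handler above now feeds a protocol version into deserialization instead of the old single-argument call. A minimal sketch of the new call shape, assuming a byte buffer holding a serialized transaction (crate paths and the wrapper function are illustrative):

```rust
use grin_core::core::Transaction;
use grin_core::ser::{self, ProtocolVersion};

// Decode a binary transaction pushed via the API.
fn decode_pushed_tx(tx_bin: &[u8]) -> Result<Transaction, ser::Error> {
	// TODO in the diff: pass the peer/API protocol version in explicitly.
	// For now the node's own "local" version is used.
	let version = ProtocolVersion::local();
	ser::deserialize(&mut &tx_bin[..], version)
}
```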
|
@ -54,7 +54,7 @@ impl TxHashSetHandler {
|
|||
Ok(TxHashSetNode::get_last_n_output(w(&self.chain)?, distance))
|
||||
}
|
||||
|
||||
// gets last n outputs inserted in to the tree
|
||||
// gets last n rangeproofs inserted in to the tree
|
||||
fn get_last_n_rangeproof(&self, distance: u64) -> Result<Vec<TxHashSetNode>, Error> {
|
||||
Ok(TxHashSetNode::get_last_n_rangeproof(
|
||||
w(&self.chain)?,
|
||||
|
@ -62,7 +62,7 @@ impl TxHashSetHandler {
|
|||
))
|
||||
}
|
||||
|
||||
// gets last n outputs inserted in to the tree
|
||||
// gets last n kernels inserted in to the tree
|
||||
fn get_last_n_kernel(&self, distance: u64) -> Result<Vec<TxHashSetNode>, Error> {
|
||||
Ok(TxHashSetNode::get_last_n_kernel(w(&self.chain)?, distance))
|
||||
}
|
||||
|
|
|
@ -83,7 +83,7 @@ pub struct Status {
|
|||
impl Status {
|
||||
pub fn from_tip_and_peers(current_tip: chain::Tip, connections: u32) -> Status {
|
||||
Status {
|
||||
protocol_version: p2p::msg::ProtocolVersion::default().into(),
|
||||
protocol_version: ser::ProtocolVersion::local().into(),
|
||||
user_agent: p2p::msg::USER_AGENT.to_string(),
|
||||
connections: connections,
|
||||
tip: Tip::from_tip(current_tip),
|
||||
|
|
|
@ -23,7 +23,7 @@ use crate::core::core::{
|
|||
};
|
||||
use crate::core::global;
|
||||
use crate::core::pow;
|
||||
use crate::core::ser::{Readable, StreamingReader};
|
||||
use crate::core::ser::{ProtocolVersion, Readable, StreamingReader};
|
||||
use crate::error::{Error, ErrorKind};
|
||||
use crate::pipe;
|
||||
use crate::store;
|
||||
|
@ -386,7 +386,6 @@ impl Chain {
|
|||
verifier_cache: self.verifier_cache.clone(),
|
||||
txhashset,
|
||||
batch,
|
||||
orphans: self.orphans.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -647,7 +646,7 @@ impl Chain {
|
|||
/// TODO - Write this data to disk and validate the rebuilt kernel MMR.
|
||||
pub fn kernel_data_write(&self, reader: &mut Read) -> Result<(), Error> {
|
||||
let mut count = 0;
|
||||
let mut stream = StreamingReader::new(reader, Duration::from_secs(1));
|
||||
let mut stream = StreamingReader::new(reader, ProtocolVersion::local());
|
||||
while let Ok(_kernel) = TxKernelEntry::read(&mut stream) {
|
||||
count += 1;
|
||||
}
|
||||
|
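`StreamingReader::new` now takes a protocol version rather than a read timeout. A self-contained sketch of the updated call site in `kernel_data_write` (type paths assumed; a read error simply ends the loop, as in the original):

```rust
use std::io::Read;

use grin_core::core::TxKernelEntry;
use grin_core::ser::{ProtocolVersion, Readable, StreamingReader};

// Count how many kernels can be read off a raw kernel data stream.
fn count_kernels(reader: &mut dyn Read) -> u64 {
	let mut stream = StreamingReader::new(reader, ProtocolVersion::local());
	let mut count = 0;
	while let Ok(_kernel) = TxKernelEntry::read(&mut stream) {
		count += 1;
	}
	count
}
```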
@ -685,6 +684,27 @@ impl Chain {
|
|||
))
|
||||
}
|
||||
|
||||
/// To support the ability to download the txhashset from multiple peers in parallel,
|
||||
/// the peers must all agree on the exact binary representation of the txhashset.
|
||||
/// This means compacting and rewinding to the exact same header.
|
||||
/// Since compaction is a heavy operation, peers can agree to compact every 12 hours,
|
||||
/// and no longer support requesting arbitrary txhashsets.
|
||||
/// Here we return the header of the txhashset we are currently offering to peers.
|
||||
pub fn txhashset_archive_header(&self) -> Result<BlockHeader, Error> {
|
||||
let sync_threshold = global::state_sync_threshold() as u64;
|
||||
let body_head = self.head()?;
|
||||
let archive_interval = global::txhashset_archive_interval();
|
||||
let mut txhashset_height = body_head.height.saturating_sub(sync_threshold);
|
||||
txhashset_height = txhashset_height.saturating_sub(txhashset_height % archive_interval);
|
||||
|
||||
debug!(
|
||||
"txhashset_archive_header: body_head - {}, {}, txhashset height - {}",
|
||||
body_head.last_block_h, body_head.height, txhashset_height,
|
||||
);
|
||||
|
||||
self.get_header_by_height(txhashset_height)
|
||||
}
|
||||
|
||||
// Special handling to make sure the whole kernel set matches each of its
|
||||
// roots in each block header, without truncation. We go back header by
|
||||
// header, rewind and check each root. This fixes a potential weakness in
|
||||
|
|
|
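The archive header height above boils down to a small formula: back the chain head off by the sync threshold, then round down to the nearest archive interval boundary. A sketch of the same arithmetic in isolation (parameter names are illustrative; the real values come from `global::state_sync_threshold()` and `global::txhashset_archive_interval()`):

```rust
/// Height of the txhashset archive currently offered to peers.
fn archive_height(head_height: u64, sync_threshold: u64, archive_interval: u64) -> u64 {
	let h = head_height.saturating_sub(sync_threshold);
	h.saturating_sub(h % archive_interval)
}

// e.g. with the testing constants (threshold 20, interval 10) and a head at
// height 34: (34 - 20) - (14 % 10) = 10.
```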
@ -140,6 +140,9 @@ pub enum ErrorKind {
|
|||
/// Internal Roaring Bitmap error
|
||||
#[fail(display = "Roaring Bitmap error")]
|
||||
Bitmap,
|
||||
/// Error during chain sync
|
||||
#[fail(display = "Sync error")]
|
||||
SyncError(String),
|
||||
}
|
||||
|
||||
impl Display for Error {
|
||||
|
|
|
@ -45,4 +45,6 @@ pub mod types;
|
|||
pub use crate::chain::{Chain, MAX_ORPHAN_SIZE};
|
||||
pub use crate::error::{Error, ErrorKind};
|
||||
pub use crate::store::ChainStore;
|
||||
pub use crate::types::{BlockStatus, ChainAdapter, Options, Tip, TxHashsetWriteStatus};
|
||||
pub use crate::types::{
|
||||
BlockStatus, ChainAdapter, Options, SyncState, SyncStatus, Tip, TxHashsetWriteStatus,
|
||||
};
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
|
||||
//! Implementation of the chain block acceptance (or refusal) pipeline.
|
||||
|
||||
use crate::chain::OrphanBlockPool;
|
||||
use crate::core::consensus;
|
||||
use crate::core::core::hash::Hashed;
|
||||
use crate::core::core::verifier_cache::VerifierCache;
|
||||
|
@ -45,8 +44,6 @@ pub struct BlockContext<'a> {
|
|||
pub batch: store::Batch<'a>,
|
||||
/// The verifier cache (caching verifier for rangeproofs and kernel signatures)
|
||||
pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
|
||||
/// Recent orphan blocks to avoid double-processing
|
||||
pub orphans: Arc<OrphanBlockPool>,
|
||||
}
|
||||
|
||||
/// Process a block header as part of processing a full block.
|
||||
|
@ -75,10 +72,9 @@ fn process_header_for_block(
|
|||
|
||||
// Check if we already know about this block for various reasons
|
||||
// from cheapest to most expensive (delay hitting the db until last).
|
||||
fn check_known(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
||||
check_known_head(&block.header, ctx)?;
|
||||
check_known_orphans(&block.header, ctx)?;
|
||||
check_known_store(&block.header, ctx)?;
|
||||
fn check_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
||||
check_known_head(header, ctx)?;
|
||||
check_known_store(header, ctx)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -99,7 +95,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
|
|||
);
|
||||
|
||||
// Check if we have already processed this block previously.
|
||||
check_known(b, ctx)?;
|
||||
check_known(&b.header, ctx)?;
|
||||
|
||||
// Delay hitting the db for current chain head until we know
|
||||
// this block is not already known.
|
||||
|
@ -260,19 +256,11 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) ->
|
|||
header.height,
|
||||
); // keep this
|
||||
|
||||
check_header_known(header, ctx)?;
|
||||
validate_header(header, ctx)?;
|
||||
Ok(())
|
||||
}
|
||||
// Check if this header is already "known" from processing a previous block.
|
||||
// Note: We are looking for a full block based on this header, not just the header itself.
|
||||
check_known(header, ctx)?;
|
||||
|
||||
/// Quick in-memory check to fast-reject any block header we've already handled
|
||||
/// recently. Keeps duplicates from the network in check.
|
||||
/// ctx here is specific to the header_head (tip of the header chain)
|
||||
fn check_header_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
||||
let header_head = ctx.batch.header_head()?;
|
||||
if header.hash() == header_head.last_block_h || header.hash() == header_head.prev_block_h {
|
||||
return Err(ErrorKind::Unfit("header already known".to_string()).into());
|
||||
}
|
||||
validate_header(header, ctx)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -288,15 +276,6 @@ fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if this block is in the set of known orphans.
|
||||
fn check_known_orphans(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
||||
if ctx.orphans.contains(&header.hash()) {
|
||||
Err(ErrorKind::Unfit("already known in orphans".to_string()).into())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this block is in the store already.
|
||||
fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
||||
match ctx.batch.block_exists(&header.hash()) {
|
||||
|
@ -450,10 +429,8 @@ fn verify_coinbase_maturity(block: &Block, ext: &txhashset::Extension<'_>) -> Re
|
|||
/// based on block_sums of previous block, accounting for the inputs|outputs|kernels
|
||||
/// of the new block.
|
||||
fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension<'_>) -> Result<(), Error> {
|
||||
// TODO - this is 2 db calls, can we optimize this?
|
||||
// Retrieve the block_sums for the previous block.
|
||||
let prev = ext.batch.get_previous_header(&b.header)?;
|
||||
let block_sums = ext.batch.get_block_sums(&prev.hash())?;
|
||||
let block_sums = ext.batch.get_block_sums(&b.header.prev_hash)?;
|
||||
|
||||
// Overage is based purely on the new block.
|
||||
// Previous block_sums have taken all previous overage into account.
|
||||
|
|
|
@ -32,8 +32,7 @@ use crate::util::secp::pedersen::{Commitment, RangeProof};
|
|||
use crate::util::{file, secp_static, zip};
|
||||
use croaring::Bitmap;
|
||||
use grin_store;
|
||||
use grin_store::pmmr::{clean_files_by_prefix, PMMRBackend, PMMR_FILES};
|
||||
use std::collections::HashSet;
|
||||
use grin_store::pmmr::{clean_files_by_prefix, PMMRBackend};
|
||||
use std::fs::{self, File};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
@ -1432,10 +1431,10 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
|
|||
} else {
|
||||
// clean up old zips.
|
||||
// Theoretically, we only need clean-up those zip files older than STATE_SYNC_THRESHOLD.
|
||||
// But practically, these zip files are not small ones, we just keep the zips in last one hour
|
||||
// But practically, these zip files are not small, so we just keep the zips from the last 24 hours
|
||||
let data_dir = Path::new(&root_dir);
|
||||
let pattern = format!("{}_", TXHASHSET_ZIP);
|
||||
if let Ok(n) = clean_files_by_prefix(data_dir.clone(), &pattern, 60 * 60) {
|
||||
if let Ok(n) = clean_files_by_prefix(data_dir.clone(), &pattern, 24 * 60 * 60) {
|
||||
debug!(
|
||||
"{} zip files have been clean up in folder: {:?}",
|
||||
n, data_dir
|
||||
|
@ -1457,11 +1456,13 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
|
|||
}
|
||||
// Copy file to another dir
|
||||
file::copy_dir_to(&txhashset_path, &temp_txhashset_path)?;
|
||||
// Check and remove file that are not supposed to be there
|
||||
check_and_remove_files(&temp_txhashset_path, header)?;
|
||||
// Compress zip
|
||||
zip::compress(&temp_txhashset_path, &File::create(zip_path.clone())?)
|
||||
.map_err(|ze| ErrorKind::Other(ze.to_string()))?;
|
||||
|
||||
let zip_file = File::create(zip_path.clone())?;
|
||||
|
||||
// Explicit list of files to add to our zip archive.
|
||||
let files = file_list(header);
|
||||
|
||||
zip::create_zip(&zip_file, &temp_txhashset_path, files)?;
|
||||
|
||||
temp_txhashset_path
|
||||
};
|
||||
|
@ -1480,6 +1481,30 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
|
|||
Ok(zip_file)
|
||||
}
|
||||
|
||||
// Explicit list of files to extract from our zip archive.
|
||||
// We include *only* these files when building the txhashset zip.
|
||||
// We extract *only* these files when receiving a txhashset zip.
|
||||
// Everything else will be safely ignored.
|
||||
// Return Vec<PathBuf> as some of these are dynamic (specifically the "rewound" leaf files).
|
||||
fn file_list(header: &BlockHeader) -> Vec<PathBuf> {
|
||||
vec![
|
||||
// kernel MMR
|
||||
PathBuf::from("kernel/pmmr_data.bin"),
|
||||
PathBuf::from("kernel/pmmr_hash.bin"),
|
||||
// output MMR
|
||||
PathBuf::from("output/pmmr_data.bin"),
|
||||
PathBuf::from("output/pmmr_hash.bin"),
|
||||
PathBuf::from("output/pmmr_prun.bin"),
|
||||
// rangeproof MMR
|
||||
PathBuf::from("rangeproof/pmmr_data.bin"),
|
||||
PathBuf::from("rangeproof/pmmr_hash.bin"),
|
||||
PathBuf::from("rangeproof/pmmr_prun.bin"),
|
||||
// Header specific "rewound" leaf files for output and rangeproof MMR.
|
||||
PathBuf::from(format!("output/pmmr_leaf.bin.{}", header.hash())),
|
||||
PathBuf::from(format!("rangeproof/pmmr_leaf.bin.{}", header.hash())),
|
||||
]
|
||||
}
|
||||
|
||||
/// Extract the txhashset data from a zip file and writes the content into the
|
||||
/// txhashset storage dir
|
||||
pub fn zip_write(
|
||||
|
@ -1489,10 +1514,17 @@ pub fn zip_write(
|
|||
) -> Result<(), Error> {
|
||||
debug!("zip_write on path: {:?}", root_dir);
|
||||
let txhashset_path = root_dir.clone().join(TXHASHSET_SUBDIR);
|
||||
fs::create_dir_all(txhashset_path.clone())?;
|
||||
zip::decompress(txhashset_data, &txhashset_path, expected_file)
|
||||
.map_err(|ze| ErrorKind::Other(ze.to_string()))?;
|
||||
check_and_remove_files(&txhashset_path, header)
|
||||
fs::create_dir_all(&txhashset_path)?;
|
||||
|
||||
// Explicit list of files to extract from our zip archive.
|
||||
let files = file_list(header);
|
||||
|
||||
// We expect to see *exactly* the paths listed above.
|
||||
// No attempt is made to be permissive or forgiving with "alternative" paths.
|
||||
// These are the *only* files we will attempt to extract from the zip file.
|
||||
// If any of these are missing we will attempt to continue as some are potentially optional.
|
||||
zip::extract_files(txhashset_data, &txhashset_path, files)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Overwrite txhashset folders in "to" folder with "from" folder
|
||||
|
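From a caller's perspective the public surface is unchanged: `zip_read` builds the archive and `zip_write` applies one, both keyed by the header whose "rewound" leaf files should be included. A rough usage sketch in the context of this module (the wrapper function is illustrative; `Error` and `BlockHeader` are the chain crate's types):

```rust
use std::fs::File;
use std::path::PathBuf;

// Build a txhashset zip for the given header, then extract it again under db_root.
fn zip_roundtrip(db_root: String, header: &BlockHeader) -> Result<(), Error> {
	// Creates (or reuses) a zip under db_root containing only the files
	// named by file_list(header).
	let zip_file: File = zip_read(db_root.clone(), header)?;

	// Extracts only those same expected files into <db_root>/txhashset;
	// anything else in the archive is ignored.
	zip_write(PathBuf::from(db_root), zip_file, header)?;
	Ok(())
}
```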
@ -1537,112 +1569,6 @@ pub fn clean_header_folder(root_dir: &PathBuf) {
|
|||
}
|
||||
}
|
||||
|
||||
fn expected_file(path: &Path) -> bool {
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
let s_path = path.to_str().unwrap_or_else(|| "");
|
||||
lazy_static! {
|
||||
static ref RE: Regex = Regex::new(
|
||||
format!(
|
||||
r#"^({}|{}|{})((/|\\)pmmr_(hash|data|leaf|prun)\.bin(\.\w*)?)?$"#,
|
||||
OUTPUT_SUBDIR, KERNEL_SUBDIR, RANGE_PROOF_SUBDIR
|
||||
)
|
||||
.as_str()
|
||||
)
|
||||
.expect("invalid txhashset regular expression");
|
||||
}
|
||||
RE.is_match(&s_path)
|
||||
}
|
||||
|
||||
/// Check a txhashset directory and remove any unexpected
|
||||
fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Result<(), Error> {
|
||||
// First compare the subdirectories
|
||||
let subdirectories_expected: HashSet<_> = [OUTPUT_SUBDIR, KERNEL_SUBDIR, RANGE_PROOF_SUBDIR]
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|s| String::from(s))
|
||||
.collect();
|
||||
|
||||
let subdirectories_found: HashSet<_> = fs::read_dir(txhashset_path)?
|
||||
.filter_map(|entry| {
|
||||
entry.ok().and_then(|e| {
|
||||
e.path()
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str().map(|s| String::from(s)))
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let dir_difference: Vec<String> = subdirectories_found
|
||||
.difference(&subdirectories_expected)
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
// Removing unexpected directories if needed
|
||||
if !dir_difference.is_empty() {
|
||||
debug!("Unexpected folder(s) found in txhashset folder, removing.");
|
||||
for diff in dir_difference {
|
||||
let diff_path = txhashset_path.join(diff);
|
||||
file::delete(diff_path)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Then compare the files found in the subdirectories
|
||||
let pmmr_files_expected: HashSet<_> = PMMR_FILES
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|s| {
|
||||
if s.contains("pmmr_leaf.bin") {
|
||||
format!("{}.{}", s, header.hash())
|
||||
} else {
|
||||
String::from(s)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
let subdirectories = fs::read_dir(txhashset_path)?;
|
||||
for subdirectory in subdirectories {
|
||||
let subdirectory_path = subdirectory?.path();
|
||||
let pmmr_files = fs::read_dir(&subdirectory_path)?;
|
||||
let pmmr_files_found: HashSet<_> = pmmr_files
|
||||
.filter_map(|entry| {
|
||||
entry.ok().and_then(|e| {
|
||||
e.path()
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str().map(|s| String::from(s)))
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let difference: Vec<String> = pmmr_files_found
|
||||
.difference(&pmmr_files_expected)
|
||||
.cloned()
|
||||
.collect();
|
||||
let mut removed = 0;
|
||||
if !difference.is_empty() {
|
||||
for diff in &difference {
|
||||
let diff_path = subdirectory_path.join(diff);
|
||||
match file::delete(diff_path.clone()) {
|
||||
Err(e) => error!(
|
||||
"check_and_remove_files: fail to remove file '{:?}', Err: {:?}",
|
||||
diff_path, e,
|
||||
),
|
||||
Ok(_) => {
|
||||
removed += 1;
|
||||
trace!("check_and_remove_files: file '{:?}' removed", diff_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
"{} tmp file(s) found in txhashset subfolder {:?}, {} removed.",
|
||||
difference.len(),
|
||||
&subdirectory_path,
|
||||
removed,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Given a block header to rewind to and the block header at the
|
||||
/// head of the current chain state, we need to calculate the positions
|
||||
/// of all inputs (spent outputs) we need to "undo" during a rewind.
|
||||
|
@ -1694,23 +1620,3 @@ pub fn input_pos_to_rewind(
|
|||
|
||||
bitmap_fast_or(None, &mut block_input_bitmaps).ok_or_else(|| ErrorKind::Bitmap.into())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_expected_files() {
|
||||
assert!(!expected_file(Path::new("kernels")));
|
||||
assert!(!expected_file(Path::new("xkernel")));
|
||||
assert!(expected_file(Path::new("kernel")));
|
||||
assert!(expected_file(Path::new("kernel\\pmmr_data.bin")));
|
||||
assert!(expected_file(Path::new("kernel/pmmr_hash.bin")));
|
||||
assert!(expected_file(Path::new("kernel/pmmr_leaf.bin")));
|
||||
assert!(expected_file(Path::new("kernel/pmmr_prun.bin")));
|
||||
assert!(expected_file(Path::new("kernel/pmmr_leaf.bin.deadbeef")));
|
||||
assert!(!expected_file(Path::new("xkernel/pmmr_data.bin")));
|
||||
assert!(!expected_file(Path::new("kernel/pmmrx_data.bin")));
|
||||
assert!(!expected_file(Path::new("kernel/pmmr_data.binx")));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,10 +14,15 @@
|
|||
|
||||
//! Base types that the block chain pipeline requires.
|
||||
|
||||
use chrono::prelude::{DateTime, Utc};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
|
||||
use crate::core::core::{Block, BlockHeader};
|
||||
use crate::core::pow::Difficulty;
|
||||
use crate::core::ser;
|
||||
use crate::error::Error;
|
||||
use crate::util::RwLock;
|
||||
|
||||
bitflags! {
|
||||
/// Options for block validation
|
||||
|
@ -33,6 +38,171 @@ bitflags! {
|
|||
}
|
||||
}
|
||||
|
||||
/// Various status sync can be in, whether it's fast sync or archival.
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
|
||||
#[allow(missing_docs)]
|
||||
pub enum SyncStatus {
|
||||
/// Initial State (we do not yet know if we are/should be syncing)
|
||||
Initial,
|
||||
/// Not syncing
|
||||
NoSync,
|
||||
/// Not enough peers to do anything yet, boolean indicates whether
|
||||
/// we should wait at all or ignore and start ASAP
|
||||
AwaitingPeers(bool),
|
||||
/// Downloading block headers
|
||||
HeaderSync {
|
||||
current_height: u64,
|
||||
highest_height: u64,
|
||||
},
|
||||
/// Downloading the various txhashsets
|
||||
TxHashsetDownload {
|
||||
start_time: DateTime<Utc>,
|
||||
prev_update_time: DateTime<Utc>,
|
||||
update_time: DateTime<Utc>,
|
||||
prev_downloaded_size: u64,
|
||||
downloaded_size: u64,
|
||||
total_size: u64,
|
||||
},
|
||||
/// Setting up before validation
|
||||
TxHashsetSetup,
|
||||
/// Validating the full state
|
||||
TxHashsetValidation {
|
||||
kernels: u64,
|
||||
kernel_total: u64,
|
||||
rproofs: u64,
|
||||
rproof_total: u64,
|
||||
},
|
||||
/// Finalizing the new state
|
||||
TxHashsetSave,
|
||||
/// State sync finalized
|
||||
TxHashsetDone,
|
||||
/// Downloading blocks
|
||||
BodySync {
|
||||
current_height: u64,
|
||||
highest_height: u64,
|
||||
},
|
||||
Shutdown,
|
||||
}
|
||||
|
||||
/// Current sync state. Encapsulates the current SyncStatus.
|
||||
pub struct SyncState {
|
||||
current: RwLock<SyncStatus>,
|
||||
sync_error: Arc<RwLock<Option<Error>>>,
|
||||
}
|
||||
|
||||
impl SyncState {
|
||||
/// Return a new SyncState, initialized to the Initial status.
|
||||
pub fn new() -> SyncState {
|
||||
SyncState {
|
||||
current: RwLock::new(SyncStatus::Initial),
|
||||
sync_error: Arc::new(RwLock::new(None)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the current state matches any active syncing operation.
|
||||
/// Note: This includes our "initial" state.
|
||||
pub fn is_syncing(&self) -> bool {
|
||||
*self.current.read() != SyncStatus::NoSync
|
||||
}
|
||||
|
||||
/// Current syncing status
|
||||
pub fn status(&self) -> SyncStatus {
|
||||
*self.current.read()
|
||||
}
|
||||
|
||||
/// Update the syncing status
|
||||
pub fn update(&self, new_status: SyncStatus) {
|
||||
if self.status() == new_status {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut status = self.current.write();
|
||||
|
||||
debug!("sync_state: sync_status: {:?} -> {:?}", *status, new_status,);
|
||||
|
||||
*status = new_status;
|
||||
}
|
||||
|
||||
/// Update txhashset downloading progress
|
||||
pub fn update_txhashset_download(&self, new_status: SyncStatus) -> bool {
|
||||
if let SyncStatus::TxHashsetDownload { .. } = new_status {
|
||||
let mut status = self.current.write();
|
||||
*status = new_status;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Communicate sync error
|
||||
pub fn set_sync_error(&self, error: Error) {
|
||||
*self.sync_error.write() = Some(error);
|
||||
}
|
||||
|
||||
/// Get sync error
|
||||
pub fn sync_error(&self) -> Arc<RwLock<Option<Error>>> {
|
||||
Arc::clone(&self.sync_error)
|
||||
}
|
||||
|
||||
/// Clear sync error
|
||||
pub fn clear_sync_error(&self) {
|
||||
*self.sync_error.write() = None;
|
||||
}
|
||||
}
|
||||
|
||||
impl TxHashsetWriteStatus for SyncState {
|
||||
fn on_setup(&self) {
|
||||
self.update(SyncStatus::TxHashsetSetup);
|
||||
}
|
||||
|
||||
fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
|
||||
let mut status = self.current.write();
|
||||
match *status {
|
||||
SyncStatus::TxHashsetValidation {
|
||||
kernels,
|
||||
kernel_total,
|
||||
rproofs,
|
||||
rproof_total,
|
||||
} => {
|
||||
let ks = if vkernels > 0 { vkernels } else { kernels };
|
||||
let kt = if vkernel_total > 0 {
|
||||
vkernel_total
|
||||
} else {
|
||||
kernel_total
|
||||
};
|
||||
let rps = if vrproofs > 0 { vrproofs } else { rproofs };
|
||||
let rpt = if vrproof_total > 0 {
|
||||
vrproof_total
|
||||
} else {
|
||||
rproof_total
|
||||
};
|
||||
*status = SyncStatus::TxHashsetValidation {
|
||||
kernels: ks,
|
||||
kernel_total: kt,
|
||||
rproofs: rps,
|
||||
rproof_total: rpt,
|
||||
};
|
||||
}
|
||||
_ => {
|
||||
*status = SyncStatus::TxHashsetValidation {
|
||||
kernels: 0,
|
||||
kernel_total: 0,
|
||||
rproofs: 0,
|
||||
rproof_total: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn on_save(&self) {
|
||||
self.update(SyncStatus::TxHashsetSave);
|
||||
}
|
||||
|
||||
fn on_done(&self) {
|
||||
self.update(SyncStatus::TxHashsetDone);
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper to hold the roots of the txhashset in order to keep them
|
||||
/// readable.
|
||||
#[derive(Debug)]
|
||||
|
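A brief usage sketch of the `SyncState` type that now lives in this module (heights are made up; `RwLock` is the crate's `util::RwLock`):

```rust
fn sync_state_example() {
	// SyncState shares the current SyncStatus between sync threads and the API.
	let state = SyncState::new();
	assert_eq!(state.status(), SyncStatus::Initial);
	assert!(state.is_syncing()); // Initial still counts as syncing

	state.update(SyncStatus::HeaderSync {
		current_height: 0,
		highest_height: 100_000,
	});

	// ...once fully caught up...
	state.update(SyncStatus::NoSync);
	assert!(!state.is_syncing());
}
```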
|
chain/tests/chain_test_helper.rs (new file)
@ -0,0 +1,127 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use self::chain::types::NoopAdapter;
|
||||
use self::chain::types::Options;
|
||||
use self::chain::Chain;
|
||||
use self::core::core::verifier_cache::LruVerifierCache;
|
||||
use self::core::core::Block;
|
||||
use self::core::genesis;
|
||||
use self::core::global::ChainTypes;
|
||||
use self::core::libtx::{self, reward};
|
||||
use self::core::pow::Difficulty;
|
||||
use self::core::{consensus, global, pow};
|
||||
use self::keychain::{ExtKeychainPath, Keychain};
|
||||
use self::util::RwLock;
|
||||
use chrono::Duration;
|
||||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_util as util;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn clean_output_dir(dir_name: &str) {
|
||||
let _ = fs::remove_dir_all(dir_name);
|
||||
}
|
||||
|
||||
pub fn setup(dir_name: &str, genesis: Block) -> Chain {
|
||||
util::init_test_logger();
|
||||
clean_output_dir(dir_name);
|
||||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
Chain::init(
|
||||
dir_name.to_string(),
|
||||
Arc::new(NoopAdapter {}),
|
||||
genesis,
|
||||
pow::verify_size,
|
||||
verifier_cache,
|
||||
false,
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Mine a chain of specified length to assist with automated tests.
|
||||
/// Must call clean_output_dir at the end of your test.
|
||||
pub fn mine_chain(dir_name: &str, chain_length: u64) -> Chain {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
|
||||
// add coinbase data from the dev genesis block
|
||||
let mut genesis = genesis::genesis_dev();
|
||||
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
|
||||
let key_id = keychain::ExtKeychain::derive_key_id(0, 1, 0, 0, 0);
|
||||
let reward = reward::output(
|
||||
&keychain,
|
||||
&libtx::ProofBuilder::new(&keychain),
|
||||
&key_id,
|
||||
0,
|
||||
false,
|
||||
)
|
||||
.unwrap();
|
||||
genesis = genesis.with_reward(reward.0, reward.1);
|
||||
|
||||
let mut chain = setup(dir_name, pow::mine_genesis_block().unwrap());
|
||||
chain.set_txhashset_roots(&mut genesis).unwrap();
|
||||
genesis.header.output_mmr_size = 1;
|
||||
genesis.header.kernel_mmr_size = 1;
|
||||
|
||||
// get a valid PoW
|
||||
pow::pow_size(
|
||||
&mut genesis.header,
|
||||
Difficulty::unit(),
|
||||
global::proofsize(),
|
||||
global::min_edge_bits(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
mine_some_on_top(&mut chain, chain_length, &keychain);
|
||||
chain
|
||||
}
|
||||
|
||||
fn mine_some_on_top<K>(chain: &mut Chain, chain_length: u64, keychain: &K)
|
||||
where
|
||||
K: Keychain,
|
||||
{
|
||||
for n in 1..chain_length {
|
||||
let prev = chain.head_header().unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
|
||||
let reward =
|
||||
libtx::reward::output(keychain, &libtx::ProofBuilder::new(keychain), &pk, 0, false)
|
||||
.unwrap();
|
||||
let mut b =
|
||||
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
|
||||
.unwrap();
|
||||
b.header.timestamp = prev.timestamp + Duration::seconds(160);
|
||||
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut b).unwrap();
|
||||
|
||||
let edge_bits = if n == 2 {
|
||||
global::min_edge_bits() + 1
|
||||
} else {
|
||||
global::min_edge_bits()
|
||||
};
|
||||
b.header.pow.proof.edge_bits = edge_bits;
|
||||
pow::pow_size(
|
||||
&mut b.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
edge_bits,
|
||||
)
|
||||
.unwrap();
|
||||
b.header.pow.proof.edge_bits = edge_bits;
|
||||
|
||||
chain.process_block(b, Options::MINE).unwrap();
|
||||
}
|
||||
}
|
|
@ -55,94 +55,65 @@ fn test_unexpected_zip() {
|
|||
Path::new(&db_root).join(format!("txhashset_zip_{}", head.hash().to_string())),
|
||||
);
|
||||
// Then add strange files in the original txhashset folder
|
||||
write_file(db_root.clone());
|
||||
File::create(&Path::new(&db_root).join("txhashset").join("badfile"))
|
||||
.expect("problem creating a file");
|
||||
File::create(
|
||||
&Path::new(&db_root)
|
||||
.join("txhashset")
|
||||
.join("output")
|
||||
.join("badfile"),
|
||||
)
|
||||
.expect("problem creating a file");
|
||||
|
||||
let files = file::list_files(&Path::new(&db_root).join("txhashset"));
|
||||
let expected_files: Vec<_> = vec![
|
||||
"badfile",
|
||||
"kernel/pmmr_data.bin",
|
||||
"kernel/pmmr_hash.bin",
|
||||
"kernel/pmmr_size.bin",
|
||||
"output/badfile",
|
||||
"output/pmmr_data.bin",
|
||||
"output/pmmr_hash.bin",
|
||||
"rangeproof/pmmr_data.bin",
|
||||
"rangeproof/pmmr_hash.bin",
|
||||
];
|
||||
assert_eq!(
|
||||
files,
|
||||
expected_files
|
||||
.iter()
|
||||
.map(|x| PathBuf::from(x))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
assert!(txhashset::zip_read(db_root.clone(), &head).is_ok());
|
||||
// Check that the temp dir does not contain the strange files
|
||||
let txhashset_zip_path =
|
||||
Path::new(&db_root).join(format!("txhashset_zip_{}", head.hash().to_string()));
|
||||
assert!(txhashset_contains_expected_files(
|
||||
format!("txhashset_zip_{}", head.hash().to_string()),
|
||||
txhashset_zip_path.clone()
|
||||
));
|
||||
let _ = fs::remove_dir_all(
|
||||
Path::new(&db_root).join(format!("txhashset_zip_{}", head.hash().to_string())),
|
||||
);
|
||||
|
||||
let zip_file = File::open(zip_path).unwrap();
|
||||
assert!(txhashset::zip_write(PathBuf::from(db_root.clone()), zip_file, &head).is_ok());
|
||||
// Check that the txhashset dir does not contain the strange files
|
||||
let txhashset_path = Path::new(&db_root).join("txhashset");
|
||||
assert!(txhashset_contains_expected_files(
|
||||
"txhashset".to_string(),
|
||||
txhashset_path.clone()
|
||||
));
|
||||
let _ = fs::remove_dir_all(Path::new(&db_root).join("txhashset"));
|
||||
assert!(txhashset::zip_write(PathBuf::from(db_root.clone()), zip_file, &head).is_ok());
|
||||
|
||||
// Check that the new txhashset dir contains *only* the expected files
|
||||
// No "badfiles" and no "size" file.
|
||||
let files = file::list_files(&Path::new(&db_root).join("txhashset"));
|
||||
let expected_files: Vec<_> = vec![
|
||||
"kernel/pmmr_data.bin",
|
||||
"kernel/pmmr_hash.bin",
|
||||
"output/pmmr_data.bin",
|
||||
"output/pmmr_hash.bin",
|
||||
"rangeproof/pmmr_data.bin",
|
||||
"rangeproof/pmmr_hash.bin",
|
||||
];
|
||||
assert_eq!(
|
||||
files,
|
||||
expected_files
|
||||
.iter()
|
||||
.map(|x| PathBuf::from(x))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
// Cleanup chain directory
|
||||
clean_output_dir(&db_root);
|
||||
}
|
||||
|
||||
fn write_file(db_root: String) {
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.open(
|
||||
Path::new(&db_root)
|
||||
.join("txhashset")
|
||||
.join("kernel")
|
||||
.join("strange0"),
|
||||
)
|
||||
.unwrap();
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.open(Path::new(&db_root).join("txhashset").join("strange1"))
|
||||
.unwrap();
|
||||
fs::create_dir(Path::new(&db_root).join("txhashset").join("strange_dir")).unwrap();
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.open(
|
||||
Path::new(&db_root)
|
||||
.join("txhashset")
|
||||
.join("strange_dir")
|
||||
.join("strange2"),
|
||||
)
|
||||
.unwrap();
|
||||
fs::create_dir(
|
||||
Path::new(&db_root)
|
||||
.join("txhashset")
|
||||
.join("strange_dir")
|
||||
.join("strange_subdir"),
|
||||
)
|
||||
.unwrap();
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.open(
|
||||
Path::new(&db_root)
|
||||
.join("txhashset")
|
||||
.join("strange_dir")
|
||||
.join("strange_subdir")
|
||||
.join("strange3"),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
fn txhashset_contains_expected_files(dirname: String, path_buf: PathBuf) -> bool {
|
||||
let list_zip_files = file::list_files(path_buf.into_os_string().into_string().unwrap());
|
||||
let zip_files_hashset: HashSet<_> = HashSet::from_iter(list_zip_files.iter().cloned());
|
||||
let expected_files = vec![
|
||||
dirname,
|
||||
"output".to_string(),
|
||||
"rangeproof".to_string(),
|
||||
"kernel".to_string(),
|
||||
"pmmr_hash.bin".to_string(),
|
||||
"pmmr_data.bin".to_string(),
|
||||
];
|
||||
let expected_files_hashset = HashSet::from_iter(expected_files.iter().cloned());
|
||||
let intersection: HashSet<_> = zip_files_hashset
|
||||
.difference(&expected_files_hashset)
|
||||
.collect();
|
||||
intersection.is_empty()
|
||||
}
|
||||
|
|
chain/tests/test_txhashset_archive.rs (new file)
@ -0,0 +1,25 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod chain_test_helper;

use self::chain_test_helper::{clean_output_dir, mine_chain};

#[test]
fn test() {
	let chain = mine_chain(".txhashset_archive_test", 35);
	let header = chain.txhashset_archive_header().unwrap();
	assert_eq!(10, header.height);
	clean_output_dir(".txhashset_archive_test");
}
|
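Why the test expects height 10: `mine_chain(".txhashset_archive_test", 35)` leaves the body head at height 34 (genesis plus 34 mined blocks), and under the testing constants (`TESTING_STATE_SYNC_THRESHOLD = 20`, `TESTING_TXHASHSET_ARCHIVE_INTERVAL = 10`) the archive height works out to (34 - 20) - ((34 - 20) mod 10) = 14 - 4 = 10.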
|
@ -210,6 +210,14 @@ fn comments() -> HashMap<String, String> {
|
|||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"always_stem_our_txs".to_string(),
|
||||
"
|
||||
#always stem our (pushed via api) txs regardless of stem/fluff epoch (as per Dandelion++ paper)
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"[server.p2p_config]".to_string(),
|
||||
"#test miner wallet URL (burns if this doesn't exist)
|
||||
|
|
|
@ -359,7 +359,7 @@ impl BlockHeader {
|
|||
pub fn pre_pow(&self) -> Vec<u8> {
|
||||
let mut header_buf = vec![];
|
||||
{
|
||||
let mut writer = ser::BinWriter::new(&mut header_buf);
|
||||
let mut writer = ser::BinWriter::default(&mut header_buf);
|
||||
self.write_pre_pow(&mut writer).unwrap();
|
||||
self.pow.write_pre_pow(&mut writer).unwrap();
|
||||
writer.write_u64(self.pow.nonce).unwrap();
|
||||
|
|
|
@ -25,7 +25,9 @@ use std::{fmt, ops};
|
|||
|
||||
use crate::blake2::blake2b::Blake2b;
|
||||
|
||||
use crate::ser::{self, AsFixedBytes, Error, FixedLength, Readable, Reader, Writeable, Writer};
|
||||
use crate::ser::{
|
||||
self, AsFixedBytes, Error, FixedLength, ProtocolVersion, Readable, Reader, Writeable, Writer,
|
||||
};
|
||||
use crate::util;
|
||||
|
||||
/// A hash consisting of all zeroes, used as a sentinel. No known preimage.
|
||||
|
@ -219,6 +221,10 @@ impl ser::Writer for HashWriter {
|
|||
self.state.update(b32.as_ref());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn protocol_version(&self) -> ProtocolVersion {
|
||||
ProtocolVersion::local()
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait for types that have a canonical hash
|
||||
|
|
|
@ -78,14 +78,14 @@ impl MerkleProof {
|
|||
/// Serialize the Merkle proof as a hex string (for api json endpoints)
|
||||
pub fn to_hex(&self) -> String {
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &self).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec, &self).expect("serialization failed");
|
||||
util::to_hex(vec)
|
||||
}
|
||||
|
||||
/// Convert hex string representation back to a Merkle proof instance
|
||||
pub fn from_hex(hex: &str) -> Result<MerkleProof, String> {
|
||||
let bytes = util::from_hex(hex.to_string()).unwrap();
|
||||
let res = ser::deserialize(&mut &bytes[..])
|
||||
let res = ser::deserialize_default(&mut &bytes[..])
|
||||
.map_err(|_| "failed to deserialize a Merkle Proof".to_string())?;
|
||||
Ok(res)
|
||||
}
|
||||
|
|
|
@ -185,13 +185,19 @@ hashable_ord!(TxKernel);
|
|||
impl ::std::hash::Hash for TxKernel {
|
||||
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &self).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec, &self).expect("serialization failed");
|
||||
::std::hash::Hash::hash(&vec, state);
|
||||
}
|
||||
}
|
||||
|
||||
impl Writeable for TxKernel {
|
||||
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
|
||||
// We have access to the protocol version here.
|
||||
// This may be a protocol version based on a peer connection
|
||||
// or the version used locally for db storage.
|
||||
// We can handle version specific serialization here.
|
||||
let _version = writer.protocol_version();
|
||||
|
||||
self.features.write(writer)?;
|
||||
ser_multiwrite!(writer, [write_u64, self.fee], [write_u64, self.lock_height]);
|
||||
self.excess.write(writer)?;
|
||||
|
@ -202,6 +208,12 @@ impl Writeable for TxKernel {
|
|||
|
||||
impl Readable for TxKernel {
|
||||
fn read(reader: &mut dyn Reader) -> Result<TxKernel, ser::Error> {
|
||||
// We have access to the protocol version here.
|
||||
// This may be a protocol version based on a peer connection
|
||||
// or the version used locally for db storage.
|
||||
// We can handle version specific deserialization here.
|
||||
let _version = reader.protocol_version();
|
||||
|
||||
Ok(TxKernel {
|
||||
features: KernelFeatures::read(reader)?,
|
||||
fee: reader.read_u64()?,
|
||||
|
@ -338,7 +350,7 @@ impl Writeable for TxKernelEntry {
|
|||
}
|
||||
|
||||
impl Readable for TxKernelEntry {
|
||||
fn read(reader: &mut Reader) -> Result<TxKernelEntry, ser::Error> {
|
||||
fn read(reader: &mut dyn Reader) -> Result<TxKernelEntry, ser::Error> {
|
||||
let kernel = TxKernel::read(reader)?;
|
||||
Ok(TxKernelEntry { kernel })
|
||||
}
|
||||
|
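The `_version` hooks above are placeholders; nothing is actually gated on the protocol version yet. A hedged sketch of how a future version-specific rule might look inside `impl Writeable for TxKernel` (the cut-off at version 2 and the alternate field order are purely hypothetical, not real grin rules):

```rust
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
	self.features.write(writer)?;
	if writer.protocol_version() >= ProtocolVersion(2) {
		// Hypothetical future layout: lock_height before fee.
		writer.write_u64(self.lock_height)?;
		writer.write_u64(self.fee)?;
	} else {
		// Current layout, exactly as in the impl above.
		ser_multiwrite!(writer, [write_u64, self.fee], [write_u64, self.lock_height]);
	}
	self.excess.write(writer)?;
	// ...remaining fields exactly as in the current impl...
	Ok(())
}
```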
@ -1159,7 +1171,7 @@ hashable_ord!(Input);
|
|||
impl ::std::hash::Hash for Input {
|
||||
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &self).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec, &self).expect("serialization failed");
|
||||
::std::hash::Hash::hash(&vec, state);
|
||||
}
|
||||
}
|
||||
|
@ -1270,7 +1282,7 @@ hashable_ord!(Output);
|
|||
impl ::std::hash::Hash for Output {
|
||||
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &self).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec, &self).expect("serialization failed");
|
||||
::std::hash::Hash::hash(&vec, state);
|
||||
}
|
||||
}
|
||||
|
@ -1522,8 +1534,8 @@ mod test {
|
|||
};
|
||||
|
||||
let mut vec = vec![];
|
||||
ser::serialize(&mut vec, &kernel).expect("serialized failed");
|
||||
let kernel2: TxKernel = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &kernel).expect("serialized failed");
|
||||
let kernel2: TxKernel = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
assert_eq!(kernel2.features, KernelFeatures::Plain);
|
||||
assert_eq!(kernel2.lock_height, 0);
|
||||
assert_eq!(kernel2.excess, commit);
|
||||
|
@ -1540,8 +1552,8 @@ mod test {
|
|||
};
|
||||
|
||||
let mut vec = vec![];
|
||||
ser::serialize(&mut vec, &kernel).expect("serialized failed");
|
||||
let kernel2: TxKernel = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &kernel).expect("serialized failed");
|
||||
let kernel2: TxKernel = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
assert_eq!(kernel2.features, KernelFeatures::HeightLocked);
|
||||
assert_eq!(kernel2.lock_height, 100);
|
||||
assert_eq!(kernel2.excess, commit);
|
||||
|
|
|
@ -288,13 +288,13 @@ pub fn genesis_main() -> core::Block {
|
|||
mod test {
|
||||
use super::*;
|
||||
use crate::core::hash::Hashed;
|
||||
use crate::ser;
|
||||
use crate::ser::{self, ProtocolVersion};
|
||||
|
||||
#[test]
|
||||
fn floonet_genesis_hash() {
|
||||
let gen_hash = genesis_floo().hash();
|
||||
println!("floonet genesis hash: {}", gen_hash.to_hex());
|
||||
let gen_bin = ser::ser_vec(&genesis_floo()).unwrap();
|
||||
let gen_bin = ser::ser_vec(&genesis_floo(), ProtocolVersion(1)).unwrap();
|
||||
println!("floonet genesis full hash: {}\n", gen_bin.hash().to_hex());
|
||||
assert_eq!(
|
||||
gen_hash.to_hex(),
|
||||
|
@ -310,7 +310,7 @@ mod test {
|
|||
fn mainnet_genesis_hash() {
|
||||
let gen_hash = genesis_main().hash();
|
||||
println!("mainnet genesis hash: {}", gen_hash.to_hex());
|
||||
let gen_bin = ser::ser_vec(&genesis_main()).unwrap();
|
||||
let gen_bin = ser::ser_vec(&genesis_main(), ProtocolVersion(1)).unwrap();
|
||||
println!("mainnet genesis full hash: {}\n", gen_bin.hash().to_hex());
|
||||
assert_eq!(
|
||||
gen_hash.to_hex(),
|
||||
|
|
|
@ -35,6 +35,13 @@ use crate::util::RwLock;
|
|||
/// Define these here, as they should be developer-set, not really tweakable
|
||||
/// by users
|
||||
|
||||
/// The default "local" protocol version for this node.
|
||||
/// We negotiate compatible versions with each peer via Hand/Shake.
|
||||
/// Note: We also use a specific (possibly different) protocol version
|
||||
/// for both the backend database and MMR data files.
|
||||
/// This one is p2p layer specific.
|
||||
pub const PROTOCOL_VERSION: u32 = 1;
|
||||
|
||||
/// Automated testing edge_bits
|
||||
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 9;
|
||||
|
||||
|
@ -54,7 +61,10 @@ pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
|
|||
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
|
||||
|
||||
/// Testing cut through horizon in blocks
|
||||
pub const TESTING_CUT_THROUGH_HORIZON: u32 = 70;
|
||||
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
|
||||
|
||||
/// Testing cut through horizon in blocks
|
||||
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
|
||||
|
||||
/// Testing state sync threshold in blocks
|
||||
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
|
||||
|
@ -85,6 +95,12 @@ pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
|
|||
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
|
||||
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
|
||||
|
||||
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
|
||||
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
|
||||
|
||||
/// Number of blocks to reuse a txhashset zip for.
|
||||
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
|
||||
|
||||
/// Types of chain a server can run with, dictates the genesis block and
|
||||
/// and mining parameters used.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
|
@ -254,8 +270,8 @@ pub fn max_block_weight() -> usize {
|
|||
pub fn cut_through_horizon() -> u32 {
|
||||
let param_ref = CHAIN_TYPE.read();
|
||||
match *param_ref {
|
||||
ChainTypes::AutomatedTesting => TESTING_CUT_THROUGH_HORIZON,
|
||||
ChainTypes::UserTesting => TESTING_CUT_THROUGH_HORIZON,
|
||||
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
|
||||
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
|
||||
_ => CUT_THROUGH_HORIZON,
|
||||
}
|
||||
}
|
||||
|
@ -270,6 +286,16 @@ pub fn state_sync_threshold() -> u32 {
|
|||
}
|
||||
}
|
||||
|
||||
/// Number of blocks to reuse a txhashset zip for.
|
||||
pub fn txhashset_archive_interval() -> u64 {
|
||||
let param_ref = CHAIN_TYPE.read();
|
||||
match *param_ref {
|
||||
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
|
||||
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
|
||||
_ => TXHASHSET_ARCHIVE_INTERVAL,
|
||||
}
|
||||
}
|
||||
|
||||
/// Are we in automated testing mode?
|
||||
pub fn is_automated_testing_mode() -> bool {
|
||||
let param_ref = CHAIN_TYPE.read();
|
||||
|
|
|
@ -73,7 +73,6 @@ pub fn verify_size(bh: &BlockHeader) -> Result<(), Error> {
|
|||
pub fn mine_genesis_block() -> Result<Block, Error> {
|
||||
let mut gen = genesis::genesis_dev();
|
||||
if global::is_user_testing_mode() || global::is_automated_testing_mode() {
|
||||
gen = genesis::genesis_dev();
|
||||
gen.header.timestamp = Utc::now();
|
||||
}
|
||||
|
||||
|
|
core/src/ser.rs
@ -20,8 +20,8 @@
|
|||
//! `serialize` or `deserialize` functions on them as appropriate.
|
||||
|
||||
use crate::core::hash::{DefaultHashable, Hash, Hashed};
|
||||
use crate::global::PROTOCOL_VERSION;
|
||||
use crate::keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
|
||||
use crate::util::read_write::read_exact;
|
||||
use crate::util::secp::constants::{
|
||||
AGG_SIGNATURE_SIZE, COMPRESSED_PUBLIC_KEY_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE,
|
||||
SECRET_KEY_SIZE,
|
||||
|
@ -31,11 +31,10 @@ use crate::util::secp::pedersen::{Commitment, RangeProof};
|
|||
use crate::util::secp::Signature;
|
||||
use crate::util::secp::{ContextFlag, Secp256k1};
|
||||
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
|
||||
use std::fmt::Debug;
|
||||
use std::fmt::{self, Debug};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::marker;
|
||||
use std::time::Duration;
|
||||
use std::{cmp, error, fmt};
|
||||
use std::{cmp, error};
|
||||
|
||||
/// Possible errors deriving from serializing or deserializing.
|
||||
#[derive(Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||
|
@ -135,6 +134,9 @@ pub trait Writer {
|
|||
/// The mode this serializer is writing in
|
||||
fn serialization_mode(&self) -> SerializationMode;
|
||||
|
||||
/// Protocol version for version specific serialization rules.
|
||||
fn protocol_version(&self) -> ProtocolVersion;
|
||||
|
||||
/// Writes a u8 as bytes
|
||||
fn write_u8(&mut self, n: u8) -> Result<(), Error> {
|
||||
self.write_fixed_bytes(&[n])
|
||||
|
@ -209,6 +211,9 @@ pub trait Reader {
|
|||
/// Consumes a byte from the reader, producing an error if it doesn't have
|
||||
/// the expected value
|
||||
fn expect_u8(&mut self, val: u8) -> Result<u8, Error>;
|
||||
/// Access to underlying protocol version to support
|
||||
/// version specific deserialization logic.
|
||||
fn protocol_version(&self) -> ProtocolVersion;
|
||||
}
|
||||
|
||||
/// Trait that every type that can be serialized as binary must implement.
|
||||
|
@ -275,6 +280,55 @@ where
|
|||
Ok(res)
|
||||
}
|
||||
|
||||
/// Protocol version for serialization/deserialization.
|
||||
/// Note: This is used in various places including but not limited to
|
||||
/// the p2p layer and our local db storage layer.
|
||||
/// We may speak multiple versions to various peers and a potentially *different*
|
||||
/// version for our local db.
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialOrd, PartialEq, Serialize)]
|
||||
pub struct ProtocolVersion(pub u32);
|
||||
|
||||
impl ProtocolVersion {
|
||||
/// Our default "local" protocol version.
|
||||
pub fn local() -> ProtocolVersion {
|
||||
ProtocolVersion(PROTOCOL_VERSION)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ProtocolVersion {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl ProtocolVersion {
|
||||
/// We need to specify a protocol version for our local database.
|
||||
/// Regardless of specific version used when sending/receiving data between peers
|
||||
/// we need to take care with serialization/deserialization of data locally in the db.
|
||||
pub fn local_db() -> ProtocolVersion {
|
||||
ProtocolVersion(1)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ProtocolVersion> for u32 {
|
||||
fn from(v: ProtocolVersion) -> u32 {
|
||||
v.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Writeable for ProtocolVersion {
|
||||
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
|
||||
writer.write_u32(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Readable for ProtocolVersion {
|
||||
fn read(reader: &mut dyn Reader) -> Result<ProtocolVersion, Error> {
|
||||
let version = reader.read_u32()?;
|
||||
Ok(ProtocolVersion(version))
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait that every type that can be deserialized from binary must implement.
|
||||
/// Reads directly to a Reader, a utility type thinly wrapping an
|
||||
/// underlying Read implementation.
|
||||
|
@ -287,28 +341,53 @@ where
|
|||
}
|
||||
|
||||
/// Deserializes a Readable from any std::io::Read implementation.
|
||||
pub fn deserialize<T: Readable>(source: &mut dyn Read) -> Result<T, Error> {
|
||||
let mut reader = BinReader { source };
|
||||
pub fn deserialize<T: Readable>(
|
||||
source: &mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
) -> Result<T, Error> {
|
||||
let mut reader = BinReader::new(source, version);
|
||||
T::read(&mut reader)
|
||||
}
|
||||
|
||||
/// Deserialize a Readable based on our default "local" protocol version.
|
||||
pub fn deserialize_default<T: Readable>(source: &mut dyn Read) -> Result<T, Error> {
|
||||
deserialize(source, ProtocolVersion::local())
|
||||
}
|
||||
|
||||
/// Serializes a Writeable into any std::io::Write implementation.
|
||||
pub fn serialize<W: Writeable>(sink: &mut dyn Write, thing: &W) -> Result<(), Error> {
|
||||
let mut writer = BinWriter { sink };
|
||||
pub fn serialize<W: Writeable>(
|
||||
sink: &mut dyn Write,
|
||||
version: ProtocolVersion,
|
||||
thing: &W,
|
||||
) -> Result<(), Error> {
|
||||
let mut writer = BinWriter::new(sink, version);
|
||||
thing.write(&mut writer)
|
||||
}
|
||||
|
||||
/// Serialize a Writeable according to our default "local" protocol version.
|
||||
pub fn serialize_default<W: Writeable>(sink: &mut dyn Write, thing: &W) -> Result<(), Error> {
|
||||
serialize(sink, ProtocolVersion::local(), thing)
|
||||
}
|
||||
|
||||
/// Utility function to serialize a writeable directly in memory using a
|
||||
/// Vec<u8>.
|
||||
pub fn ser_vec<W: Writeable>(thing: &W) -> Result<Vec<u8>, Error> {
|
||||
pub fn ser_vec<W: Writeable>(thing: &W, version: ProtocolVersion) -> Result<Vec<u8>, Error> {
|
||||
let mut vec = vec![];
|
||||
serialize(&mut vec, thing)?;
|
||||
serialize(&mut vec, version, thing)?;
|
||||
Ok(vec)
|
||||
}
|
||||
|
||||
/// Utility to read from a binary source
|
||||
pub struct BinReader<'a> {
|
||||
source: &'a mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
}
|
||||
|
||||
impl<'a> BinReader<'a> {
|
||||
/// Constructor for a new BinReader for the provided source and protocol version.
|
||||
pub fn new(source: &'a mut dyn Read, version: ProtocolVersion) -> BinReader<'a> {
|
||||
BinReader { source, version }
|
||||
}
|
||||
}
|
||||
|
||||
fn map_io_err(err: io::Error) -> Error {
|
||||
|
@ -366,24 +445,28 @@ impl<'a> Reader for BinReader<'a> {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn protocol_version(&self) -> ProtocolVersion {
|
||||
self.version
|
||||
}
|
||||
}
|
||||
|
||||
/// A reader that reads straight off a stream.
|
||||
/// Tracks total bytes read so we can verify we read the right number afterwards.
|
||||
pub struct StreamingReader<'a> {
|
||||
total_bytes_read: u64,
|
||||
version: ProtocolVersion,
|
||||
stream: &'a mut dyn Read,
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
impl<'a> StreamingReader<'a> {
|
||||
/// Create a new streaming reader with the provided underlying stream.
|
||||
/// Also takes a duration to be used for each individual read_exact call.
|
||||
pub fn new(stream: &'a mut dyn Read, timeout: Duration) -> StreamingReader<'a> {
|
||||
pub fn new(stream: &'a mut dyn Read, version: ProtocolVersion) -> StreamingReader<'a> {
|
||||
StreamingReader {
|
||||
total_bytes_read: 0,
|
||||
version,
|
||||
stream,
|
||||
timeout,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -393,32 +476,28 @@ impl<'a> StreamingReader<'a> {
|
|||
}
|
||||
}
|
||||
|
||||
/// Note: We use read_fixed_bytes() here to ensure our "async" I/O behaves as expected.
|
||||
impl<'a> Reader for StreamingReader<'a> {
|
||||
fn read_u8(&mut self) -> Result<u8, Error> {
|
||||
let buf = self.read_fixed_bytes(1)?;
|
||||
Ok(buf[0])
|
||||
}
|
||||
|
||||
fn read_u16(&mut self) -> Result<u16, Error> {
|
||||
let buf = self.read_fixed_bytes(2)?;
|
||||
Ok(BigEndian::read_u16(&buf[..]))
|
||||
}
|
||||
|
||||
fn read_u32(&mut self) -> Result<u32, Error> {
|
||||
let buf = self.read_fixed_bytes(4)?;
|
||||
Ok(BigEndian::read_u32(&buf[..]))
|
||||
}
|
||||
|
||||
fn read_i32(&mut self) -> Result<i32, Error> {
|
||||
let buf = self.read_fixed_bytes(4)?;
|
||||
Ok(BigEndian::read_i32(&buf[..]))
|
||||
}
|
||||
|
||||
fn read_u64(&mut self) -> Result<u64, Error> {
|
||||
let buf = self.read_fixed_bytes(8)?;
|
||||
Ok(BigEndian::read_u64(&buf[..]))
|
||||
}
|
||||
|
||||
fn read_i64(&mut self) -> Result<i64, Error> {
|
||||
let buf = self.read_fixed_bytes(8)?;
|
||||
Ok(BigEndian::read_i64(&buf[..]))
|
||||
|
@ -434,7 +513,7 @@ impl<'a> Reader for StreamingReader<'a> {
|
|||
/// Read a fixed number of bytes.
|
||||
fn read_fixed_bytes(&mut self, len: usize) -> Result<Vec<u8>, Error> {
|
||||
let mut buf = vec![0u8; len];
|
||||
read_exact(&mut self.stream, &mut buf, self.timeout, true)?;
|
||||
self.stream.read_exact(&mut buf)?;
|
||||
self.total_bytes_read += len as u64;
|
||||
Ok(buf)
|
||||
}
|
||||
|
@ -450,6 +529,10 @@ impl<'a> Reader for StreamingReader<'a> {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn protocol_version(&self) -> ProtocolVersion {
|
||||
self.version
|
||||
}
|
||||
}
|
||||
|
||||
impl Readable for Commitment {
|
||||
|
@ -597,12 +680,18 @@ impl<T: Hashed> VerifySortedAndUnique<T> for Vec<T> {
|
|||
/// to write numbers, byte vectors, hashes, etc.
|
||||
pub struct BinWriter<'a> {
|
||||
sink: &'a mut dyn Write,
|
||||
version: ProtocolVersion,
|
||||
}
|
||||
|
||||
impl<'a> BinWriter<'a> {
|
||||
/// Wraps a standard Write in a new BinWriter
|
||||
pub fn new(write: &'a mut dyn Write) -> BinWriter<'a> {
|
||||
BinWriter { sink: write }
|
||||
pub fn new(sink: &'a mut dyn Write, version: ProtocolVersion) -> BinWriter<'a> {
|
||||
BinWriter { sink, version }
|
||||
}
|
||||
|
||||
/// Constructor for BinWriter with default "local" protocol version.
|
||||
pub fn default(sink: &'a mut dyn Write) -> BinWriter<'a> {
|
||||
BinWriter::new(sink, ProtocolVersion::local())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -616,6 +705,10 @@ impl<'a> Writer for BinWriter<'a> {
|
|||
self.sink.write_all(bs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn protocol_version(&self) -> ProtocolVersion {
|
||||
self.version
|
||||
}
|
||||
}
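
To illustrate why the protocol version is threaded through `Writer` (and `Reader`), here is a hedged sketch of a hypothetical type whose encoding varies with the version reported by the writer; `MaybeExtended` and its fields are invented purely for illustration:

```rust
// Hypothetical type: its encoding adapts to the writer's protocol version.
struct MaybeExtended {
	base: u64,
	extra: u32,
}

impl Writeable for MaybeExtended {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
		writer.write_u64(self.base)?;
		// Only emit the extra field for peers past protocol version 1.
		if u32::from(writer.protocol_version()) > 1 {
			writer.write_u32(self.extra)?;
		}
		Ok(())
	}
}
```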
|
||||
|
||||
macro_rules! impl_int {
|
||||
|
|
|
@ -32,19 +32,18 @@ use crate::util::secp;
|
|||
use crate::util::RwLock;
|
||||
use chrono::Duration;
|
||||
use grin_core as core;
|
||||
use grin_core::global::ChainTypes;
|
||||
use grin_keychain as keychain;
|
||||
use grin_util as util;
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
|
||||
Arc::new(RwLock::new(LruVerifierCache::new()))
|
||||
}
|
||||
|
||||
// Too slow for now #[test]
|
||||
// TODO: make this fast enough or add similar but faster test?
|
||||
#[allow(dead_code)]
|
||||
#[test]
|
||||
fn too_large_block() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let max_out = global::max_block_weight() / BLOCK_OUTPUT_WEIGHT;
|
||||
|
@ -59,10 +58,8 @@ fn too_large_block() {
|
|||
parts.push(output(5, pks.pop().unwrap()));
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]);
|
||||
let tx = build::transaction(parts, &keychain, &builder).unwrap();
|
||||
println!("Build tx: {}", now.elapsed().as_secs());
|
||||
|
||||
let prev = BlockHeader::default();
|
||||
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
|
||||
|
@ -211,17 +208,17 @@ fn remove_coinbase_kernel_flag() {
|
|||
#[test]
|
||||
fn serialize_deserialize_header_version() {
|
||||
let mut vec1 = Vec::new();
|
||||
ser::serialize(&mut vec1, &1_u16).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec1, &1_u16).expect("serialization failed");
|
||||
|
||||
let mut vec2 = Vec::new();
|
||||
ser::serialize(&mut vec2, &HeaderVersion::default()).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec2, &HeaderVersion::default()).expect("serialization failed");
|
||||
|
||||
// Check that a header_version serializes to a
|
||||
// single u16 value with no extraneous bytes wrapping it.
|
||||
assert_eq!(vec1, vec2);
|
||||
|
||||
// Check we can successfully deserialize a header_version.
|
||||
let version: HeaderVersion = ser::deserialize(&mut &vec2[..]).unwrap();
|
||||
let version: HeaderVersion = ser::deserialize_default(&mut &vec2[..]).unwrap();
|
||||
assert_eq!(version.0, 1)
|
||||
}
|
||||
|
||||
|
@ -235,8 +232,8 @@ fn serialize_deserialize_block_header() {
|
|||
let header1 = b.header;
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &header1).expect("serialization failed");
|
||||
let header2: BlockHeader = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &header1).expect("serialization failed");
|
||||
let header2: BlockHeader = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
|
||||
assert_eq!(header1.hash(), header2.hash());
|
||||
assert_eq!(header1, header2);
|
||||
|
@ -252,8 +249,8 @@ fn serialize_deserialize_block() {
|
|||
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &b).expect("serialization failed");
|
||||
let b2: Block = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &b).expect("serialization failed");
|
||||
let b2: Block = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
|
||||
assert_eq!(b.hash(), b2.hash());
|
||||
assert_eq!(b.header, b2.header);
|
||||
|
@ -264,19 +261,21 @@ fn serialize_deserialize_block() {
|
|||
|
||||
#[test]
|
||||
fn empty_block_serialized_size() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let prev = BlockHeader::default();
|
||||
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
|
||||
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &b).expect("serialization failed");
|
||||
let target_len = 1_265;
|
||||
ser::serialize_default(&mut vec, &b).expect("serialization failed");
|
||||
let target_len = 1_107;
|
||||
assert_eq!(vec.len(), target_len);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn block_single_tx_serialized_size() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let tx1 = tx1i2o();
|
||||
|
@ -284,13 +283,14 @@ fn block_single_tx_serialized_size() {
|
|||
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
|
||||
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &b).expect("serialization failed");
|
||||
let target_len = 2_847;
|
||||
ser::serialize_default(&mut vec, &b).expect("serialization failed");
|
||||
let target_len = 2_689;
|
||||
assert_eq!(vec.len(), target_len);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_compact_block_serialized_size() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let prev = BlockHeader::default();
|
||||
|
@ -298,13 +298,14 @@ fn empty_compact_block_serialized_size() {
|
|||
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
|
||||
let cb: CompactBlock = b.into();
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &cb).expect("serialization failed");
|
||||
let target_len = 1_273;
|
||||
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
|
||||
let target_len = 1_115;
|
||||
assert_eq!(vec.len(), target_len);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compact_block_single_tx_serialized_size() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let tx1 = tx1i2o();
|
||||
|
@ -313,16 +314,16 @@ fn compact_block_single_tx_serialized_size() {
|
|||
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
|
||||
let cb: CompactBlock = b.into();
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &cb).expect("serialization failed");
|
||||
let target_len = 1_279;
|
||||
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
|
||||
let target_len = 1_121;
|
||||
assert_eq!(vec.len(), target_len);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn block_10_tx_serialized_size() {
|
||||
global::set_mining_mode(global::ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
global::set_mining_mode(global::ChainTypes::Mainnet);
|
||||
|
||||
let mut txs = vec![];
|
||||
for _ in 0..10 {
|
||||
|
@ -333,13 +334,14 @@ fn block_10_tx_serialized_size() {
|
|||
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
|
||||
let b = new_block(txs.iter().collect(), &keychain, &builder, &prev, &key_id);
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &b).expect("serialization failed");
|
||||
let target_len = 17_085;
|
||||
ser::serialize_default(&mut vec, &b).expect("serialization failed");
|
||||
let target_len = 16_927;
|
||||
assert_eq!(vec.len(), target_len,);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compact_block_10_tx_serialized_size() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
|
||||
|
@ -353,8 +355,8 @@ fn compact_block_10_tx_serialized_size() {
|
|||
let b = new_block(txs.iter().collect(), &keychain, &builder, &prev, &key_id);
|
||||
let cb: CompactBlock = b.into();
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &cb).expect("serialization failed");
|
||||
let target_len = 1_333;
|
||||
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
|
||||
let target_len = 1_175;
|
||||
assert_eq!(vec.len(), target_len,);
|
||||
}
|
||||
|
||||
|
@ -439,7 +441,7 @@ fn serialize_deserialize_compact_block() {
|
|||
let mut cb1: CompactBlock = b.into();
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &cb1).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec, &cb1).expect("serialization failed");
|
||||
|
||||
// After header serialization, timestamp will lose 'nanos' info, that's the designed behavior.
|
||||
// To suppress 'nanos' difference caused assertion fail, we force b.header also lose 'nanos'.
|
||||
|
@ -447,7 +449,7 @@ fn serialize_deserialize_compact_block() {
|
|||
cb1.header.timestamp =
|
||||
origin_ts - Duration::nanoseconds(origin_ts.timestamp_subsec_nanos() as i64);
|
||||
|
||||
let cb2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
let cb2: CompactBlock = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
|
||||
assert_eq!(cb1.header, cb2.header);
|
||||
assert_eq!(cb1.kern_ids(), cb2.kern_ids());
|
||||
|
|
|
@ -39,7 +39,7 @@ use std::sync::Arc;
|
|||
fn simple_tx_ser() {
|
||||
let tx = tx2i1o();
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &tx).expect("serialization failed");
|
||||
ser::serialize_default(&mut vec, &tx).expect("serialization failed");
|
||||
let target_len = 955;
|
||||
assert_eq!(vec.len(), target_len,);
|
||||
}
|
||||
|
@ -48,8 +48,8 @@ fn simple_tx_ser() {
|
|||
fn simple_tx_ser_deser() {
|
||||
let tx = tx2i1o();
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &tx).expect("serialization failed");
|
||||
let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &tx).expect("serialization failed");
|
||||
let dtx: Transaction = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
assert_eq!(dtx.fee(), 2);
|
||||
assert_eq!(dtx.inputs().len(), 2);
|
||||
assert_eq!(dtx.outputs().len(), 1);
|
||||
|
@ -62,12 +62,12 @@ fn tx_double_ser_deser() {
|
|||
let btx = tx2i1o();
|
||||
|
||||
let mut vec = Vec::new();
|
||||
assert!(ser::serialize(&mut vec, &btx).is_ok());
|
||||
let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
assert!(ser::serialize_default(&mut vec, &btx).is_ok());
|
||||
let dtx: Transaction = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
|
||||
let mut vec2 = Vec::new();
|
||||
assert!(ser::serialize(&mut vec2, &btx).is_ok());
|
||||
let dtx2: Transaction = ser::deserialize(&mut &vec2[..]).unwrap();
|
||||
assert!(ser::serialize_default(&mut vec2, &btx).is_ok());
|
||||
let dtx2: Transaction = ser::deserialize_default(&mut &vec2[..]).unwrap();
|
||||
|
||||
assert_eq!(btx.hash(), dtx.hash());
|
||||
assert_eq!(dtx.hash(), dtx2.hash());
|
||||
|
|
|
@ -16,8 +16,7 @@ mod vec_backend;
|
|||
|
||||
use self::core::core::merkle_proof::MerkleProof;
|
||||
use self::core::core::pmmr::PMMR;
|
||||
use self::core::ser;
|
||||
use self::core::ser::PMMRIndexHashable;
|
||||
use self::core::ser::{self, PMMRIndexHashable};
|
||||
use crate::vec_backend::{TestElem, VecBackend};
|
||||
use grin_core as core;
|
||||
|
||||
|
@ -38,8 +37,8 @@ fn merkle_proof_ser_deser() {
|
|||
let proof = pmmr.merkle_proof(9).unwrap();
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &proof).expect("serialization failed");
|
||||
let proof_2: MerkleProof = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &proof).expect("serialization failed");
|
||||
let proof_2: MerkleProof = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
|
||||
assert_eq!(proof, proof_2);
|
||||
}
|
||||
|
|
|
@ -39,8 +39,8 @@ fn test_output_ser_deser() {
|
|||
};
|
||||
|
||||
let mut vec = vec![];
|
||||
ser::serialize(&mut vec, &out).expect("serialized failed");
|
||||
let dout: Output = ser::deserialize(&mut &vec[..]).unwrap();
|
||||
ser::serialize_default(&mut vec, &out).expect("serialized failed");
|
||||
let dout: Output = ser::deserialize_default(&mut &vec[..]).unwrap();
|
||||
|
||||
assert_eq!(dout.features, OutputFeatures::Plain);
|
||||
assert_eq!(dout.commit, out.commit);
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
# The Coinbase Maturity Rule (aka Output Lock Heights)
|
||||
|
||||
*Read this in other languages: [Korean](coinbase_maturity_KR.md).*
|
||||
|
||||
Coinbase outputs (block rewards & fees) are "locked" and require 1,440 confirmations (i.e. 24 hours' worth of blocks added to the chain) before they mature sufficiently to be spendable. This is to reduce the risk of later txs being reversed if a chain reorganization occurs.
|
||||
|
||||
Bitcoin does something very similar, requiring 100 confirmations (Bitcoin blocks are every 10 minutes, Grin blocks are every 60 seconds) before mining rewards can be spent.
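
As a quick sanity check on those figures (using the nominal block intervals stated above):

```
Grin:    1,440 blocks × 60 s/block  = 86,400 s ≈ 24 hours
Bitcoin:   100 blocks × 600 s/block = 60,000 s ≈ 16.7 hours
```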
|
||||
|
@ -98,7 +100,7 @@ We maintain an index mapping commitment to position in the output MMR.
|
|||
|
||||
If no entry in the index exists or no entry in the output MMR exists for a given commitment then we know the output is not spendable (either it was spent previously or it never existed).
|
||||
|
||||
If we find an entry in the output MMR then we know a spendable output exists in the Output set *but* we do not know if this is the correct one. We do not if it is a coinbase output or not and we do not know the height of the block it originated from.
|
||||
If we find an entry in the output MMR then we know a spendable output exists in the Output set *but* we do not know if this is the correct one. We do not know if it is a coinbase output or not and we do not know the height of the block it originated from.
|
||||
|
||||
If the hash stored in the output MMR covers both the commitment and the output features and we require an input to provide both the commitment and the feature then we can do a further validation step -
|
||||
|
||||
|
|
131
doc/coinbase_maturity_KR.md
Normal file
|
@ -0,0 +1,131 @@
|
|||
# Coinbase 만기 규칙 (A.K.A 출력 )
|
||||
|
||||
Coinbase 산출물 (블록 보상 및 수수료)은 "잠겨"있고 쓰는 것이 가능하기 전에 1,440개의 충분한 확인 (즉, 체인에 추가 된 이후 24 시간정도의 확인, *블록이 추가된 후 24시간 정도의 확인이 필요하다는 뜻 - 역자 주*)이 필요합니다. 이것은 체인 재구성(chain reorganization)이 발생할 경우 나중에 txs가 되돌려질 위험을 줄이기위한 것입니다.
|
||||
|
||||
Bitcoin도 비슷한 과정을 거칩니다. Bitcoin은 마이닝 보상을 쓰기 전에 100 회의 확인 (Bitcoin 블록은 매 10 분, Grin 블록은 매 60초)을 수행합니다.
|
||||
|
||||
Grin은 트랜잭션 풀과 블록입증(block validation) 파이프라인(Pipeline) 둘다 코인베이스 만기 규칙을 강제합니다. 코인베이스 출력을 사용하는 입력을 포함하는 트랜잭션은 현재 체인 높이와 코인베이스 출력을 생성하는 블록의 높이를 기준으로 충분히 만기가 될 때까지 트랜잭션 풀에 추가 될 수 없습니다. 블록에서도 (트랜잭션 과)비슷하게 입력을 포함하는 블록의 높이와 원래 코인베이스 출력을 생성 한 블록의 높이를 기준으로 충분히 만기가 되기 전에 코인베이스 출력을 소비하는 입력을 포함하면 유효하지 않습니다.
|
||||
|
||||
*만기 규칙( Maturity rule)은 코인베이스 출력에만* 적용 됩니다. 일반적인 트랜잭션 출력은 유효한 lock height가 0입니다(Zero confirm 이라는 뜻- 역자 주).
|
||||
|
||||
출력값은 아래와 같은 값으로 이뤄집니다.
|
||||
|
||||
* 기능 (현재의 코인베이스 VS non-코인베이스)
|
||||
* `rG+vH` 실행값(commitment)
|
||||
* rangeproof
|
||||
|
||||
기존 트랜잭션 출력을 사용하려면 두 가지 조건이 충족되어야 합니다. 출력이 이전에 쓰지 않았다는것 을 보여주어야하며 출력의 소유권(ownership of output)을 증명해야합니다.
|
||||
|
||||
|
||||
Grin 트랜잭션은 다음과 같이 구성되어 있습니다.
|
||||
|
||||
* 입력값의 셋, 각 입력값은 이전 소비된 출력값에 대해서 참조합니다.
|
||||
* 새로운 출력값들의 셋은 다음과 같은 요소를 포함합니다.
|
||||
* `v`값과 비밀키이자 Bliding factor인 `r`과 타원곡선 값이 곱해지고 더해진 값인 `rG+vH`(commitment 값이기도 함)
|
||||
* `v`값이 음수가 아님(non-negative)을 보여줄 range proof
|
||||
* 명백한 트래잭션 수수료
|
||||
* 모든 출력값에 수수료를 더하고 입력값을 뺀 초과 Blinding factor 로 계산하고 비밀키로 사용된 서명 값
|
||||
|
||||
현재 출력 셋에서 실행값(`rG+vH`, commitment값 - 역자 주)을 찾음으로써 출력이 미사용 상태임을 나타낼 수 있습니다. 만약 출력이 출력셋에 존재하는 경우 아직 사용되지 않았다는 것을 알 수 있기 때문에 출력셋은 신뢰할 수 있습니다. 출력이 출력 셋에 존재하지 않는다면 출력값이 존재하지 않았거나 이전에는 있었고 소비되었음을 알 수 있습니다 (필연적으로 출력셋이 존재하지 않았던 것이든 이전에 있었던 것이든 어떤 것이든 알지 못할 것입니다).
|
||||
|
||||
소유권을 증명하기 위해 트랜잭션 서명을 입증할 수 있습니다. 트랜잭션의 합이 0이 되고 *그리고* `v`와 `r`을 모두 알고있는 경우에만 *오직* 트랜잭션에 서명 할 수 있습니다.
|
||||
|
||||
`v`와 `r`을 알면 우리는 출력을 실행값(commitment)을 통해 구분 할 수 있습니다. *그리고* 원래 코인베이스 트랜잭션에서 서명의 유효성을 검사하여 출력의 소유권을 증명할 수 있습니다.
|
||||
|
||||
Grin은 동시에 출력 셋에 중복된 실행값(commitment)이 존재하는 것을 허용하지 않습니다. 그러나 일단 출력이 소비되면 출력 셋에서 제거되고 중복된 실행값(commitment)이 출력 셋에 다시 추가 될 수 있습니다.
|
||||
이것은 반드시 권장하지 않지만 Grin은 네트워크를 통해 합의를 깨지 않는 방식으로 이런 상황을 처리해야합니다.
|
||||
|
||||
아래와 같은 몇 가지가 이런 상황을 복잡하게 만듭니다.
|
||||
|
||||
1. 특히 빈 블록의 경우 두 블록이 동일한 보상을 받을 수 있습니다. 뿐만 아니라 거래 수수료가 있는 비어 있지 않은 블록의 경우에도 가능합니다.
|
||||
2. 코인베이스 출력이 아닌 출력값이 코인베이스 출력과 동일한 값을 가질 수 있습니다.
|
||||
3. 권장되진 않지만 마이너가 비밀키(private key)를 재사용 할 수 있습니다.
|
||||
|
||||
Grin은 동시에 출력 셋에 중복된 실행값(commitment)이 존재하는 것을 허용하지 않습니다. 그러나 출력 셋은 특정 체인 분리(fork)의 상태에 따라 다릅니다.
|
||||
같은 순간에 있는 서로 다른 체인에 중복 된 *동일한* 실행값(commitment)가 동시에 *존재할 수 있습니다*. 그리고 이러한 중복된 실행값은 다른 "lock height"를 가질 수 있습니다. 그리고 각각 다른 체인에서 이런 실행값들은 코인베이스 만기가 다 되어서 소비 할 수 있수도 있습니다.
|
||||
|
||||
* block B<sub>1</sub>에서 온 출력값 O<sub>1</sub> 소비 가능한 높이는 h<sub>1</sub> (체인 f<sub>1</sub>)
|
||||
* block B<sub>2</sub>에서 온 출력값 O<sub>1</sub>' 소비 가능한 높이 h<sub>2</sub> (체인 f<sub>2</sub>)
|
||||
|
||||
여기서 복잡한 점은 입력 I<sub>1</sub>을 포함하는 블록이 있는 포크에 따라 O<sub>1</sub> 또는 O<sub>1</sub>'을 사용한다는 것입니다. 그리고 결정적으로 I<sub>1</sub>은 어떤 체인에서는 특정 블록 높이에서 유효하지만 다른 포크에서는 유효하지 않을 수 있다는 것입니다.
|
||||
|
||||
다른 말로 하자면, 실행값은 여러 개의 출력을 의미 할 수 있으며, 모든 출력은 서로 다른 lock height를 가질 수 있습니다.
|
||||
그리고 우리는 어떤 결과가 실제로 소비되고 있는지, 코인베이스 만기 규칙이 현재의 체인 상태(chain state)를 기반으로 정확하게 시행되고 있는지 정확하게 *반드시 확인해야 합니다.*
|
||||
|
||||
특정 lock height에서 coinbase 만기 규칙으로 잠긴 코인베이스 출력은 고유하게 확인될 수 *없습니다*. 그리고 자신의 실행값 만으로는 안전하게 사용할 수 *없습니다.*
|
||||
코인베이스 출력을 사용하려면 아래와 같이 추가적인 요소 하나를 알아야 합니다.
|
||||
|
||||
* 코인베이스 출력값이 어디서 나왔는지 기록되어 있는 블록(The block the coinbase output originated from 라고 원문에 표기되어 있음 -역자 주)
|
||||
|
||||
이 경우 블록의 높이를 검증하고 출력물의 "lock height"(+ 1000 블록)를 알아 낼 수 있습니다.
|
||||
|
||||
## 풀 아카이브 노드
|
||||
|
||||
풀 아카이브 노드가 있으면 출력이 어느 블록에서 시작되었는지 식별하는 것은 간단한 작업입니다. 전체 아카이브 노드는 다음을 저장합니다.
|
||||
|
||||
* 체인 내에 있는 전체 블록의 모든 블록 데이터
|
||||
* 해당 블록들 안에 있는 전체 출력값의 모든 출력값 데이터
|
||||
|
||||
체인의 이전의 모든 블록을 볼 수 있으며 특정 출력이 들어있는 블록을 찾을 수 있습니다.
|
||||
|
||||
문제는 전체 블록 데이터가 없는 노드(Pruning 노드, 아카이브 노드가 아닌 노드)를 고려해야 할 때입니다.
|
||||
|
||||
전체 블록 데이터가 없더라도 코인베이스 만기에 대해서 어떻게 입증 할 수 있을까요??
|
||||
|
||||
## 비 - 아카이브 노드(non - achive node)
|
||||
|
||||
A node may not have full block data.
|
||||
A pruned node may only store the following (refer to pruning doc) -
|
||||
노드에는 전체 블록 데이터가 없을 수 있습니다. pruning 노드는 다음을 저장할 수 있습니다 ([프루닝 문서 참조](pruning_KR.md)).
|
||||
|
||||
* 블록 헤더 체인
|
||||
* All transaction kernels.
|
||||
* 전체 트랜잭션 커널(kernels)
|
||||
* 전체 미사용된 출력값(unspent outputs)
|
||||
* 출력값의 MMR 과 range proof MMR
|
||||
|
||||
이 최소한의 데이터 셋을 가지고 어느 블록에서 시작되었는지 어떻게 알 수 있습니까?
|
||||
|
||||
주어진 데이터로 여러 출력(여러 포크, 잠재적으로 서로 다른 lock height)이 모두 *같은* 실행값을가질 수 있는지, 사용한 출력을 고유하게 식별하기 위해 입력값에 어떤 추가 정보가 필요한가요?
|
||||
|
||||
그리고 한 단계 더 나아가기 간다면 전체 출력 데이터에 액세스하지 않고도 이 모든 작업을 수행 할 수 있을까요? 출력 MMR만 사용할 수 있나요?
|
||||
|
||||
### 제안된 해결법
|
||||
|
||||
출력 MMR 내에서 위치의 인덱스 매핑 실행값을 유지합니다.
|
||||
|
||||
인덱스에 항목이 없거나 주어진 실행값에 대해 출력 MMR내에서 항목이 존재하지 않으면 출력값은 사용할 수 없습니다. (이전에 소비되었거나 아예 존재하지 않았음).
|
||||
|
||||
출력 MMR에서 항목을 찾으면 쓸 수 있는 출력이 출력셋에 있음을 알 수 있습니다. *하지만* 이것이 올바른지 여부는 알 수 없습니다. 코인베이스 출력인지 아닌지와 그 블록이 시작된 블록의 높이를 알지 못합니다.
|
||||
|
||||
출력 MMR에 저장된 해시가 실행값(commitment)및 출력 기능을 모두 포함하고 실행값과 기능을 모두 제공하기 위해 입력이 필요한 경우 아래와 같은 추가 검증 단계를 수행 할 수 있습니다.
|
||||
|
||||
* 실행값 베이스의 출력값 MMR 내에 출력값이 있고
|
||||
* MMR의 해시는 입력에 포함 된 출력 데이터와 일치합니다.
|
||||
|
||||
이 추가 단계를 통해 출력이 코인베이스 출력인지 또는 제공된 기능을 기반으로 하는 일반적인 트랜잭션 출력인지 여부를 알 수 있습니다. 입력값이 원래 출력값과 일치하지 않으면 해시가 일치하지 않습니다.
|
||||
|
||||
일반적인 non-coinbase 출력에 대해서 설명은 이미 끝냈습니다. 출력값은 현재 쓸 수 있으므로 lock height를 확인할 필요가 없습니다.
|
||||
|
||||
코인베이스 출력의 경우 lock height와 블록 만기를 검증 할 수 있습니다. 이를 위해 출력이 발생한 블록을 식별해야합니다.
|
||||
블록 자체를 결정할 수는 없지만 블록 해시을 특정하기 위해서 입력을 필요 할 수 있습니다. 그런 다음 전체 블록 데이터 없이 블록 헤더의 머클 루트를 기반으로 증명할 수 있습니다.
|
||||
|
||||
[추후에 결정될 것 - 머클 proofs의 개요와 블록헤더 안의 머클 루트 (merkle root)기반으로 어떻게 머클 proofs 를 사용하여 포함된 것을 증명 할 것인가 ]
|
||||
|
||||
요약하자면 -
|
||||
|
||||
실행값(commitment) 자체만으로는 충분하지 않기 때문에 출력 MMR은 `commitment | features`을 기반으로 출력 해시를 저장합니다 .
|
||||
출력 해시 생성시 range proof를 포함 할 필요가 없습니다.
|
||||
출력값을 소비하기 위해 아래와 같은 것이 필요합니다. -
|
||||
|
||||
* `r`과 `v`는 실행값(commitment)을 만들고(build) 소유권을 증명합니다.
|
||||
|
||||
입력값은 아래와 같은 값을 반드시 포함해야 합니다.
|
||||
|
||||
* MMR 내에서 찾은 실행값 ( commitment)
|
||||
* 출력값 (features|commitment 에 따른 출력값 MMR 의 해시)
|
||||
* 원래 불록 내에서의 Merkle proof 를 포함하는 출력값
|
||||
* 원래 블록의 블록 해시
|
||||
* [추후에 결정될 것 - Merkle proof 기반 인덱스를 유지할 것인가?]
|
||||
실행값(commitment)과 기능을 통해 올바른 출력이 현재 사용되지 않은지 확인할 수 있습니다.
|
||||
블록과 출력값로부터 lock height(있는 경우)를 결정할 수 있습니다.
|
|
@ -1,23 +1,26 @@
|
|||
# Dandelion in Grin: Privacy-Preserving Transaction Aggregation and Propagation
|
||||
# Dandelion++ in Grin: Privacy-Preserving Transaction Aggregation and Propagation
|
||||
|
||||
*Read this document in other languages: [Korean](dandelion_KR.md).*
|
||||
*Read this document in other languages: [Korean](dandelion_KR.md). [out of date]*
|
||||
|
||||
This document describes the implementation of Dandelion in Grin and its modification to handle transactions aggregation in the P2P protocol.
|
||||
## Introduction
|
||||
|
||||
Dandelion is a new transaction broadcasting mechanism that reduces the risk of eavesdroppers linking transactions to the source IP. Moreover, it allows Grin transactions to be aggregated (removing input-output pairs) before being broadcasted to the entire network giving an additional privacy perk.
|
||||
The Dandelion++ protocol for broadcasting transactions, proposed by Fanti et al. (Sigmetrics 2018)[1], intends to defend against deanonymization attacks during transaction propagation. In Grin, it also provides an opportunity to aggregate transactions before they are broadcasted to the entire network. This document describes the protocol and the simplified version of it that is implemented in Grin.
|
||||
|
||||
Dandelion was introduced in [1] by G. Fanti et al. and presented at ACM Sigmetrics 2017. On June 2017, a BIP [2] was proposed introducing a more practical and robust variant of Dandelion called Dandelion++ [3] published later in 2018. This document is an adaptation of this BIP for Grin.
|
||||
In the following section, past research on the protocol is summarized. This is then followed by describing details of the Grin implementation; the objectives behind its inclusion, how the current implementation differs from the original paper, what some of the known limitations are, and outlining some areas of improvement for future work.
|
||||
|
||||
We first define the original Dandelion propagation then the Grin adaptation of the protocol with transaction aggregation.
|
||||
## Previous research
|
||||
|
||||
## Original Dandelion
|
||||
The original version of Dandelion was introduced by Fanti et al. and presented at ACM Sigmetrics 2017 [2]. On June 2017, a BIP [3] was proposed introducing a more practical and robust variant of Dandelion called Dandelion++, which was formalized into a paper in 2018. [1] The protocols are outlined at a high level here. For a more in-depth presentation with extensive literature references, please refer to the original papers.
|
||||
|
||||
### Mechanism
|
||||
### Motivation
|
||||
|
||||
Dandelion transaction propagation proceeds in two phases: first the “stem” phase, and then “fluff” phase. During the stem phase, each node relays the transaction to a *single* peer. After a random number of hops along the stem, the transaction enters the fluff phase, which behaves just like ordinary flooding/diffusion. Even when an attacker can identify the location of the fluff phase, it is much more difficult to identify the source of the stem.
|
||||
Dandelion was conceived as a way to mitigate against large scale deanonymization attacks on the network layer of Bitcoin, made possible by the diffusion method for propagating transactions on the network. By deploying "super-nodes" that connect to a large number of honest nodes on the network, adversaries can listen to the transactions relayed by the honest nodes as they get diffused symmetrically on the network using epidemic flooding or diffusion. By observing the spreading dynamic of a transaction, it has been proven possible to link it (and therefore also the sender's Bitcoin address) to the originating IP address with a high degree of accuracy, and as a result deanonymize users.
|
||||
|
||||
Illustration:
|
||||
### Original Dandelion
|
||||
|
||||
In the original paper [2], a **dandelion spreading protocol** is introduced. Dandelion spreading propagation consists of two phases: first the anonymity phase, or the **“stem”** phase, and second the spreading phase, or the **“fluff”** phase, as illustrated in Figure 1.
|
||||
|
||||
**Figure 1.** Dandelion phase illustration.
|
||||
|
||||
```
|
||||
┌-> F ...
|
||||
|
@ -29,59 +32,168 @@ Illustration:
|
|||
└-> I ...
|
||||
```
|
||||
|
||||
### Specifications
|
||||
In the initial **stem-phase**, each node relays the transaction to a *single randomly selected peer*, constructing a line graph. Users then forward transactions along the *same* path on the graph. After a random number of hops along the single stem, the transaction enters the **fluff-phase**, which behaves like ordinary diffusion. This means that even when an attacker can identify the originator of the fluff phase, it becomes more difficult to identify the source of the stem (and thus the original broadcaster of the transaction). The constructed line graph is periodically re-generated randomly, at the expiry of each _epoch_, limiting an adversary's ability to build up knowledge of the graph. Epochs are asynchronous, with each individual node keeping its own internal clock and starting a new epoch once a certain threshold has been reached.
|
||||
|
||||
The Dandelion protocol is based on three mechanisms:
|
||||
The 'dandelion' name is derived from how the protocol resembles the spreading of the seeds of a dandelion.
|
||||
|
||||
1. *Stem/fluff propagation.* Dandelion transactions begin in “stem mode,” during which each node relays the transaction to a single randomly-chosen peer. With some fixed probability, the transaction transitions to “fluff” mode, after which it is relayed according to ordinary flooding/diffusion.
|
||||
### Dandelion++
|
||||
|
||||
2. *Stem Mempool.* During the stem phase, each stem node (Alice) stores the transaction in a transaction pool containing only stem transactions: the stempool. The content of the stempool is specific to each node and is non shareable. A stem transaction is removed from the stempool if:
|
||||
In the Dandelion++ paper[1], the authors build on the original concept further, by defending against stronger adversaries that are allowed to disobey protocol.
|
||||
|
||||
1. Alice receives it "normally" advertising the transaction as being in fluff mode.
|
||||
2. Alice receives a block containing this transaction meaning that the transaction was propagated and included in a block.
|
||||
The original paper makes three idealistic assumptions:
|
||||
1. All nodes obey protocol;
|
||||
2. Each node generates exactly one transaction; and
|
||||
3. All nodes on the network run Dandelion.
|
||||
|
||||
3. *Robust propagation.* Privacy enhancements should not put transactions at risk of not propagating. To protect against failures (either malicious or accidental) where a stem node fails to relay a transaction (thereby precluding the fluff phase), each node starts a random timer upon receiving a transaction in stem phase. If the node does not receive any transaction message or block for that transaction before the timer expires, then the node diffuses the transaction normally.
|
||||
An adversary can violate these rules, and by doing so break some of the anonymity properties.
|
||||
|
||||
Dandelion stem mode transactions are indicated by a new type of relay message type.
|
||||
The modified Dandelion++ protocol makes small changes to most of the Dandelion choices, resulting in an exponentially more complex information space. This in turn makes it harder for an adversary to deanonymize the network.
|
||||
|
||||
Stem transaction relay message type:
|
||||
The paper describes five types of attacks, and proposes specific updates to the original Dandelion protocol to mitigate against these, presented in Table A (here in summarized form).
|
||||
|
||||
```rust
|
||||
Type::StemTransaction;
|
||||
**Table A.** Summary of Dandelion++ changes
|
||||
|
||||
| Attack | Solution |
|
||||
|---|---|
|
||||
| Graph-learning | 4-regular anonymity graph |
|
||||
| Intersection | Pseudorandom forwarding |
|
||||
| Graph-construction | Non-interactive construction |
|
||||
| Black-hole | Random stem timers |
|
||||
| Partial deployment | Blind stem selection |
|
||||
|
||||
#### The Dandelion++ algorithm
|
||||
|
||||
As with the original Dandelion protocol, epochs are asynchronous, with each node keeping track of its own epoch; the suggested duration is on the order of 10 minutes.
|
||||
|
||||
##### 1. Anonymity Graph
|
||||
Rather than a line graph as per the original paper (which is 2-regular), a *quasi-4-regular graph* (Figure 2) is constructed by a node at the beginning of each epoch: the node chooses (up to) two of its outbound edges uniformly at random as its _dandelion++ relays_. As a node enters into a new epoch, new dandelion++ relays are chosen.
|
||||
|
||||
**Figure 2.** A 4-regular graph.
|
||||
```
|
||||
in1 out1
|
||||
\ /
|
||||
\ /
|
||||
NodeX
|
||||
/ \
|
||||
/ \
|
||||
in2 out2
|
||||
```
|
||||
*`NodeX` has four connections to other nodes, input nodes `in1` and `in2`, and output nodes `out1` and `out2`.*
|
||||
|
||||
After receiving a stem transaction, the node flips a biased coin to determine whether to propagate it in “stem mode”, or to switch to “fluff mode.” The bias is controlled by a parameter exposed to the configuration file, initially 90% chance of staying in stem mode (meaning the expected stem length would be 10 hops).
|
||||
***Note on using 4-regular vs 2-regular graphs***
|
||||
|
||||
Nodes that receives stem transactions are called stem relays. This relay is chosen from among the outgoing (or whitelisted) connections, which prevents an adversary from easily inserting itself into the stem graph. Each node periodically randomly choose its stem relay every 10 minutes.
|
||||
The choice between using 4-regular or 2-regular (line) graphs is not obvious. The authors note that it is difficult to construct an exact 4-regular graph within a fully-distributed network in practice. They outline a method to construct an approximate 4-regular graph in the paper. They also write:
|
||||
|
||||
### Considerations
|
||||
> [...] We recommend making the design decision between 4-regular graphs and line graphs based on the priorities of the system builders. **If linkability of transactions is a first-order concern, then line graphs may be a better choice.** Otherwise, we find that 4-regular graphs can give constant- order privacy benefits against adversaries with knowledge of the graph.
|
||||
|
||||
The main implementation challenges are: (1) identifying a satisfactory tradeoff between Dandelion’s privacy guarantees and its latency/overhead, and (2) ensuring that privacy cannot be degraded through abuse of existing mechanisms. In particular, the implementation should prevent an attacker from identifying stem nodes without interfering too much with the various existing mechanisms for efficient and DoS-resistant propagation.
|
||||
##### 2. Transaction forwarding (own)
|
||||
|
||||
At the beginning of each epoch, `NodeX` picks one of `out1` and `out2` to use as a route to broadcast its own transactions through as a stem-phase transaction. The _same route_ is used throughout the duration of the epoch, and `NodeX` _always_ forwards (stems) its own transaction.
|
||||
|
||||
##### 3. Transaction forwarding (relay)
|
||||
|
||||
At the start of each epoch, `NodeX` makes a choice to be either in fluff-mode or in stem-mode. This choice is made in pseudorandom fashion, with the paper suggesting it be computed from a hash of the node's own identity and epoch number. The probability of choosing to be in fluff-mode (or, as the paper calls it, *the path length parameter `q`*) is recommended to be q ≤ 0.2.
|
||||
|
||||
Once the choice has been made whether to stem or to fluff, it applies to *all relayed transactions* during the epoch.
|
||||
|
||||
If `NodeX` is in **fluff-mode**, it will broadcast any received transactions to the network using diffusion.
|
||||
|
||||
If `NodeX` is in **stem-mode**, then at the beginning of each epoch it will map `in1` to either `out1` or `out2` pseudorandomly, and similarly map `in2` to either `out1` or `out2` in the same fashion. Based on this mapping, it will then forward *all* txs from `in1` along the chosen route, and similarly forward all transactions from `in2` along that route. The mapping persists throughout the duration of the epoch.
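
A minimal sketch of what such a pseudorandom, per-epoch decision and mapping could look like (purely illustrative, not Grin's actual implementation; node identity and relay indexes are simplified to plain integers):

```rust
// Illustrative sketch only: per-epoch pseudorandom decisions derived
// from the node identity and the epoch number.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn epoch_hash(node_id: u64, epoch: u64, salt: u64) -> u64 {
	let mut h = DefaultHasher::new();
	(node_id, epoch, salt).hash(&mut h);
	h.finish()
}

/// Decide fluff vs stem for the whole epoch (q = 0.2 here).
fn is_fluff_epoch(node_id: u64, epoch: u64) -> bool {
	epoch_hash(node_id, epoch, 0) % 100 < 20
}

/// Map an inbound relay (0 = in1, 1 = in2) to one of the two outbound
/// relays (0 = out1, 1 = out2) for the duration of the epoch.
fn outbound_for(node_id: u64, epoch: u64, inbound: u8) -> u8 {
	(epoch_hash(node_id, epoch, inbound as u64) % 2) as u8
}
```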
|
||||
|
||||
##### 4. Fail-safe mechanism
|
||||
|
||||
For each stem-phase transaction that was sent or relayed, `NodeX` tracks whether it is seen again as a fluff-phase transaction within some random amount of time. If not, the node fluffs the transaction itself.
|
||||
|
||||
This expiration timer is set by each stem-node upon receiving a transaction to forward, and is chosen randomly. Nodes are initialized with a timeout parameter T<sub>base</sub>. As per equation (7) in the paper, when a stem-node *v* receives a transaction, it sets an expiration time T<sub>out</sub>(v):
|
||||
|
||||
T<sub>out</sub>(v) ~ current_time + exp(1/T<sub>base</sub>)
|
||||
|
||||
If the transaction is not received again by relay v before the expiry of T<sub>out</sub>(v), it broadcasts the message using diffusion. This approach means that the first stem-node to broadcast is approximately uniformly selected among all stem-nodes who have seen the message, rather than the originating node.
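
A small sketch of how such an expiry could be sampled, assuming the timer is exponentially distributed with mean T<sub>base</sub> and that a uniform random draw `u` in (0, 1) is supplied by the caller (inverse-CDF method; not Grin's actual code):

```rust
use std::time::{Duration, Instant};

/// Sample T_out(v) = now + Exp(1/T_base), given a uniform u in (0, 1).
fn embargo_expiry(now: Instant, t_base: Duration, u: f64) -> Instant {
	let mean = t_base.as_secs_f64(); // Exp(1/T_base) has mean T_base
	let delay = -mean * (1.0 - u).ln(); // inverse-CDF of the exponential
	now + Duration::from_secs_f64(delay)
}
```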
|
||||
|
||||
The paper also proceeds to specify the size of the initiating time out parameter T<sub>base</sub> as part of `Proposition 3` in the paper:
|
||||
|
||||
> Proposition 3. For a timeout parameter
>
> T<sub>base</sub> ≥ (−k(k−1)δ<sub>hop</sub>) / (2 log(1−ε)),
>
> where `k`, `ε` are parameters and δ<sub>hop</sub> is the time between each hop (e.g., network and/or internal node latency), transactions travel for `k` hops without any peer initiating diffusion with a probability of at least `1 − ε`.
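
As an illustrative (not prescriptive) plug-in of numbers, taking `log` as the natural logarithm and assuming k = 10 hops, ε = 0.1 and δ<sub>hop</sub> = 0.5 s:

```
T_base ≥ (−10 · 9 · 0.5) / (2 · ln(0.9)) ≈ (−45) / (−0.2107) ≈ 214 s
```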
|
||||
|
||||
* The privacy afforded by Dandelion depends on 3 parameters: the stem probability, the number of outbound peers that can serve as dandelion relay, and the time between re-randomizations of the stem relay. These parameters define a tradeoff between privacy and broadcast latency/processing overhead. Lowering the stem probability harms privacy but helps reduce latency by shortening the mean stem length; based on theory, simulations, and experiments, we have chosen a default of 90%. Reducing the time between each node’s re-randomization of its stem relay reduces the chance of an adversary learning the stem relay for each node, at the expense of increased overhead.
|
||||
* When receiving a Dandelion stem transaction, we avoid placing that transaction in `tracking_adapter`. This way, transactions can also travel back “up” the stem in the fluff phase.
|
||||
* Like ordinary transactions, Dandelion stem transactions are only relayed after being successfully accepted to mempool. This ensures that nodes will never be punished for relaying Dandelion stem transactions.
|
||||
* If a stem orphan transaction is received, it is added to the `orphan` pool, and also marked as stem-mode. If the transaction is later accepted to mempool, then it is relayed as a stem transaction or regular transaction (either stem mode or fluff mode, depending on a coin flip).
|
||||
* If a node receives a child transaction that depends on one or more currently-embargoed Dandelion transactions, then the transaction is also relayed in stem mode, and the embargo timer is set to the maximum of the embargo times of its parents. This helps ensure that parent transactions enter fluff mode before child transactions. Later on, this two transaction will be aggregated in one unique transaction removing the need for the timer.
|
||||
* Transaction propagation latency should be minimally affected by opting-in to this privacy feature; in particular, a transaction should never be prevented from propagating at all because of Dandelion. The random timer guarantees that the embargo mechanism is temporary, and every transaction is relayed according to the ordinary diffusion mechanism after some maximum (random) delay on the order of 30-60 seconds.
|
||||
|
||||
## Dandelion in Grin
|
||||
|
||||
Dandelion also allows Grin transactions to be aggregated during the stem phase and then broadcasted to all the nodes on the network. This result in transaction aggregation and possibly cut-through (thus removing spent outputs) giving a significant privacy gain similar to a non-interactive coinjoin with cut-through. This section details this mechanism.
|
||||
### Objectives
|
||||
|
||||
### Aggregation Mechanism
|
||||
There are two main motives behind why Dandelion is included in Grin:
|
||||
|
||||
In order to aggregate transactions, Grin implements a modified version of the Dandelion protocol [4].
|
||||
1. **Act as a countermeasure against mass de-anonymization attacks.** Similar to Bitcoin, the Grin P2P network would be vulnerable to attackers deploying malicious "super-nodes" connecting to most peers on the network and monitoring transactions as they become diffused by their honest peers. This would allow a motivated actor to infer with a high degree of probability which peer (IP address) transactions originate from, with negative privacy consequences.
|
||||
2. **Aggregate transactions before they are broadcast to the entire network.** This is a benefit to blockchains that enable non-interactive CoinJoins on the protocol level, such as Mimblewimble. Despite its good privacy features, some input and output linking is still possible in Mimblewimble and Grin.[4] If you know which input spends to which output, it is possible to construct a (very limited) transaction graph and follow a chain of transaction outputs (TXOs) as they are being spent. Aggregating transactions makes this more difficult to carry out, as it becomes less clear which input spends to which output (Figure 3). In order for this to be effective, there needs to be a large anonymity set, i.e. many transactions to aggregate a transaction with. Dandelion enables this aggregation to occur before transactions are fluffed and diffused to the entire network. This adds obfuscation to the transaction graph, as a malicious observer who is not participating in the stemming or fluffing would not only need to figure out from where a transaction originated, but also which TXOs out of a larger group should be attributed to the originating transaction.
|
||||
|
||||
By default, when a node sends a transaction on the network it will be broadcasted with the Dandelion protocol as a stem transaction to its Dandelion relay. The Dandelion relay will then wait a period of time (the patience timer), in order to get more stem transactions to aggregate. At the end of the timer, the relay does a coin flip for each new stem transaction and determines if it will stem it (send to the next Dandelion relay) or fluff it (broadcast normally). Then the relay will take all the transactions to stem, aggregate them, and broadcast them to the next Dandelion relay. It will do the same for the transactions to fluff, except that it will broadcast the aggregated transactions “normally” (to a random subset of the peers).
|
||||
**Figure 3.** Aggregating transactions
|
||||
```
|
||||
3.1 Transactions (not aggregated)
|
||||
---------------------------------------------
|
||||
TX1 INPUT_A ______________ OUTPUT_X
|
||||
|_____ OUTPUT_Y
|
||||
|
||||
This gives us a P2P protocol that can handle transaction merging.
|
||||
KERNEL 1
|
||||
---------------------------------------------
|
||||
TX2 INPUT_B ______________ OUTPUT_Z
|
||||
INPUT_C ________|
|
||||
|
||||
A simulation of this scenario is available [here](simulation.md).
|
||||
KERNEL 2
|
||||
---------------------------------------------
|
||||
|
||||
3.2 Transactions (aggregated)
|
||||
---------------------------------------------
|
||||
TX1+2 INPUT_A ______________ OUTPUT_X
|
||||
INPUT_B ________|_____ OUTPUT_Y
|
||||
INPUT_C ________|_____ OUTPUT_Z
|
||||
|
||||
KERNEL 1
|
||||
KERNEL 2
|
||||
---------------------------------------------
|
||||
```
|
||||
|
||||
### Current implementation
|
||||
|
||||
Grin implements a simplified version of the Dandelion++ protocol. It's been improved several times, most recently in version 1.1.0 [5].
|
||||
|
||||
1. `DandelionEpoch` tracks a node's current epoch. This is configurable via `epoch_secs` with default epoch set to last for 10 minutes. Epochs are set and tracked by nodes individually.
|
||||
2. At the beginning of an epoch, the node chooses a single connected peer at random to use as its outbound relay.
|
||||
3. At the beginning of an epoch, the node makes a decision whether to be in stem mode or in fluff mode. This decision lasts for the duration of the epoch. By default, this is a random choice, with the probability to be in stem mode set to 90%, which implies a fluff mode probability `q` of 10%. The probability is configurable via `DANDELION_STEM_PROBABILITY`. The number of expected stem hops a transaction does before arriving at a fluff node is `1/q = 1/0.1 = 10` (a minimal sketch of this per-epoch decision is shown after this list).
|
||||
4. Any transactions received from inbound connected nodes or transactions originated from the node itself are first added to the node's `stempool`, which is a list of stem transactions, that each node keeps track of individually. Transactions are removed from the stempool if:
|
||||
* The node fluffs the transaction itself.
|
||||
* The node sees the transaction in question propagated through regular diffusion, i.e. from a different peer having "fluffed" it.
|
||||
* The node receives a block containing this transaction, meaning that the transaction was propagated and included in a block.
|
||||
5. For each transaction added to the stempool, the node sets an *embargo timer*. This is set by default to 180 seconds, and is configurable via `DANDELION_EMBARGO_SECS`.
|
||||
6. Regardless of whether the node is in fluff or stem mode, any transactions generated by the node itself are forwarded onwards to its relay node as a stem transaction.[6]
|
||||
7. A `dandelion_monitor` runs every 10 seconds and handles tasks.
|
||||
8. If the node is in **stem mode**, then:
|
||||
1. After being added to the stempool, received stem transactions are forwarded on to the node's relay node as stem transactions.
|
||||
2. As peers connect at random, it is possible they create a circular loop of connected stem mode nodes (i.e. `A -> B -> C -> A`). Therefore, if a node receives from an inbound peer a stem transaction that already exists in its own stempool, it will fluff it, broadcasting it using regular diffusion.
|
||||
3. `dandelion_monitor` checks for transactions in the node's stempool with an expired embargo timer, and broadcasts those individually.
|
||||
9. If the node is in **fluff mode**, then:
|
||||
1. Transactions received from inbound nodes are kept in the stempool.
|
||||
2. `dandelion_monitor` checks in the stempool whether any transactions are older than 30 seconds (configurable as `DANDELION_AGGREGATION_SECS`). If so, these are aggregated and then fluffed. Otherwise no action is taken, allowing for more stem transactions to aggregate in the stempool in time for the next triggering of `dandelion_monitor`.
|
||||
3. At the expiry of an epoch, all stem transactions remaining in the stem pool are aggregated and fluffed.
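
The per-epoch decision referenced in step 3 can be pictured with the following hedged sketch (illustrative only, not the actual pool code; the uniform random draw is assumed to be supplied by the caller):

```rust
// Illustrative sketch of the per-epoch stem/fluff decision and the
// expected stem length implied by the configured probability.
const DANDELION_STEM_PROBABILITY: u64 = 90; // percent; configurable

/// Decide once per epoch whether this node stems relayed transactions,
/// given a uniform random draw in 0..100.
fn is_stem_epoch(draw: u64) -> bool {
	draw < DANDELION_STEM_PROBABILITY
}

/// Expected number of stem hops before a transaction reaches a fluff node.
fn expected_stem_hops() -> f64 {
	let q = 1.0 - DANDELION_STEM_PROBABILITY as f64 / 100.0; // fluff probability
	1.0 / q // 1 / 0.1 = 10
}
```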
|
||||
|
||||
### Known limitations
|
||||
|
||||
* 2-regular graphs are used rather than 4-regular graphs as proposed by the paper. It's not clear what impact this has; the paper suggests a trade-off between general linkability of transactions and protection against adversaries who know the entire network graph.
|
||||
* Unlike in the Dandelion++ paper, the embargo timer is by default identical across all nodes. This means that during a black-hole attack where a malicious node withholds transactions, the node most likely to have its embargo timer expire and fluff the transaction will be the originating node, thereby exposing itself.
|
||||
|
||||
### Future work
|
||||
|
||||
* Randomized embargo timer according to the recommendations of the paper to make it more random which node fluffs an expired transaction.
|
||||
* Evaluation of whether 4-regular graphs are preferred over 2-regular line graphs.
|
||||
* Simulation of the current implementation to understand performance.
|
||||
* Improved understanding of the benefits of transaction aggregation prior to fluffing.
|
||||
|
||||
## References
|
||||
|
||||
* [1] (Sigmetrics 2017) [Dandelion: Redesigning the Bitcoin Network for Anonymity](https://arxiv.org/abs/1701.04439)
|
||||
* [2] [Dandelion BIP](https://github.com/dandelion-org/bips/blob/master/bip-dandelion.mediawiki)
|
||||
* [3] (Sigmetrics 2018) [Dandelion++: Lightweight Cryptocurrency Networking with Formal Anonymity Guarantees](https://arxiv.org/abs/1805.11060)
|
||||
* [4] [Dandelion Grin Pull Request #1067](https://github.com/mimblewimble/grin/pull/1067)
|
||||
* [1] (Sigmetrics 2018) [Dandelion++: Lightweight Cryptocurrency Networking with Formal Anonymity Guarantees](https://arxiv.org/abs/1805.11060)
|
||||
* [2] (Sigmetrics 2017) [Dandelion: Redesigning the Bitcoin Network for Anonymity](https://arxiv.org/abs/1701.04439)
|
||||
* [3] [Dandelion BIP](https://github.com/dandelion-org/bips/blob/master/bip-dandelion.mediawiki)
|
||||
* [4] [Grin Privacy Primer](https://github.com/mimblewimble/docs/wiki/Grin-Privacy-Primer)
|
||||
* [5] [#2628: Dandelion++ Rewrite](https://github.com/mimblewimble/grin/pull/2628)
|
||||
* [6] [#2876: Always stem local txs if configured that way (unless explicitly fluffed)](https://github.com/mimblewimble/grin/pull/2876)
|
|
@ -1,79 +0,0 @@
|
|||
# Dandelion Simulation
|
||||
|
||||
*Read this document in other languages: [Korean](simulation_KR.md).*
|
||||
|
||||
This document describes a network of node using the Dandelion protocol with transaction aggregation.
|
||||
|
||||
In this scenario, we simulate a successful aggregation.
|
||||
|
||||
This document also helps visualizing all the timers in a simple way.
|
||||
|
||||
## T = 0 - Initial Situation
|
||||
|
||||
![t = 0](images/t0.png)
|
||||
|
||||
## T = 5
|
||||
|
||||
A sends grins to B. A adds the transaction to its stempool and starts the embargo timer for this transaction.
|
||||
|
||||
![t = 5](images/t5.png)
|
||||
|
||||
## T = 10
|
||||
|
||||
A waits until he runs out of patience.
|
||||
|
||||
![t = 10](images/t10.png)
|
||||
|
||||
## T = 30
|
||||
|
||||
A runs out of patience, flips a coin and broadcasts the stem transaction to its Dandelion relay G.
|
||||
G receives the stem transaction, add it to its stempool and starts the embargo timer for this transaction.
|
||||
|
||||
![t = 30](images/t30.png)
|
||||
|
||||
## T = 40
|
||||
|
||||
G sends grins to E.
|
||||
G adds the transaction it to its stempool and starts the embargo timer for this transaction.
|
||||
|
||||
![t = 40](images/t40.png)
|
||||
|
||||
## T = 45
|
||||
|
||||
G runs out of patience, flips a coin and broadcasts the stem transaction to its Dandelion relay D.
|
||||
|
||||
![t = 45](images/t45.png)
|
||||
|
||||
## T = 50
|
||||
|
||||
B spends B1 to D.
|
||||
B add it to its stempool and starts the embargo timer for this transaction.
|
||||
|
||||
![t = 55](images/t55.png)
|
||||
|
||||
## T = 55
|
||||
|
||||
B runs out of patience, flips a coin and broadcasts the stem transaction to its Dandelion relay H.
|
||||
D runs out of patience, flips a coin and broadcasts the aggregated stem transaction to its Dandelion relay E.
|
||||
E receives the stem transaction, add it to its stempool and starts the embargo timer for this transaction.
|
||||
|
||||
![t = 55](images/t55.png)
|
||||
|
||||
## T = 60
|
||||
|
||||
H runs out of patience, flips a coin broadcasts the stem transaction to its Dandelion relay E.
|
||||
E receives the stem transaction, add it to its stempool and starts the embargo timer for this transaction.
|
||||
|
||||
![t = 60](images/t60.png)
|
||||
|
||||
## T = 70 - Step 1
|
||||
|
||||
E runs out of patience, flips a coin and decide to broadcast the transaction to all its peers (fluff in the mempool).
|
||||
|
||||
![t = 70_1](images/t70_1.png)
|
||||
|
||||
## T = 70 - Step 2
|
||||
|
||||
All the nodes add this transaction to their mempool and remove the related transactions from their stempool.
|
||||
|
||||
![t = 70_2](images/t70_2.png)
|
|
@ -1,73 +0,0 @@
|
|||
# Dandelion 시뮬레이션
|
||||
|
||||
이 문서는 노드의 네트워크가 Dandelion 프로토콜을 트랜잭션 통합(Transaction aggregation)과 함께 사용하는 것에 대해서 설명합니다. 이 시나리오에서 성공적인 (트랜잭션)통합을 시뮬레이션 할 것입니다.
|
||||
이 문서는 (트랜잭션의) 모든 순간 순간에 대해서 간단히 시각화 하는것을 도와줄것입니다.
|
||||
|
||||
## T = 0 - Initial Situation
|
||||
|
||||
![t = 0](images/t0.png)
|
||||
|
||||
## T = 5
|
||||
|
||||
A는 B에게 grin를 보냅니다. A는 거래를 스템풀(stem pool)에 추가하고 이 트랜잭션에 대한 엠바고 타이머를 시작합니다.
|
||||
|
||||
![t = 5](images/t5.png)
|
||||
|
||||
## T = 10
|
||||
|
||||
A는 인내심이 바닥날때까지 기다립니다. ( 아마도 엠바고 타이머가 끝나는 때를 의미하는 듯 - 역자 주)
|
||||
|
||||
![t = 10](images/t10.png)
|
||||
|
||||
## T = 30
|
||||
|
||||
A는 인내심이 바닥나면 동전을 뒤집고 Stem transaction을 G에게 Dandelion을 중계(Relay)합니다. G는 Stem transaction을 받은뒤 Stem pool에 Transaction을 추가하고 이 Transaction의 엠바고 타이머를 시작합니다.
|
||||
|
||||
![t = 30](images/t30.png)
|
||||
|
||||
## T = 40
|
||||
|
||||
G는 E에게 Grin을 보냅니다ㅏ.
|
||||
G는 이 Transaction을 Stem pool에 Transaction을 추가하고 이 Transaction의 엠바고 타이머를 시작합니다.
|
||||
|
||||
![t = 40](images/t40.png)
|
||||
|
||||
## T = 45
|
||||
|
||||
G는 인내심이 바닥나면 동전을 뒤집고 Stem transaction을 D에게 Dandelion을 중계(Relay)합니다.
|
||||
|
||||
![t = 45](images/t45.png)
|
||||
|
||||
## T = 50
|
||||
|
||||
B는 B1을 D에게 씁니다.
|
||||
B는 B1을 Stem pool에 추가하고 이 Transaction의 엠바고 타이머를 시작합니다.
|
||||
|
||||
![t = 55](images/t55.png)
|
||||
|
||||
## T = 55
|
||||
|
||||
B는 인내심이 바닥나면 동전을 뒤집고 Stem transaction을 H에게 Dandelion을 중계(Relay)합니다.
|
||||
D는 인내심이 바닥나면 동전을 뒤집고 통합된(aggregated) Stem transaction을 E에게 Dandelion을 중계(Relay)합니다.
|
||||
E는 Stem transaction을 받은뒤 Stem pool에 Transaction을 추가하고 이 Transaction의 엠바고 타이머를 시작합니다.
|
||||
|
||||
![t = 55](images/t55.png)
|
||||
|
||||
## T = 60
|
||||
|
||||
H는 인내심이 바닥나면 동전을 뒤집고 Stem transaction을 E에게 Dandelion을 중계(Relay)합니다.
|
||||
E는 Stem transaction을 받은뒤 Stem pool에 Transaction을 추가하고 이 Transaction의 엠바고 타이머를 시작합니다.
|
||||
|
||||
![t = 60](images/t60.png)
|
||||
|
||||
## T = 70 - Step 1
|
||||
|
||||
E는 인내심이 바닥나면 동전을 뒤집고 transaction을 모든 피어에게 전송하기로 합니다.(mempool안의 fluff 상태)
|
||||
|
||||
![t = 70_1](images/t70_1.png)
|
||||
|
||||
## T = 70 - Step 2
|
||||
|
||||
All the nodes add this transaction to their mempool and remove the related transactions from their stempool.
|
||||
모든 노드는 이 transaction을 자신의 mempool에 넣고 자신의 stempool 에서 이 transaction과 관련된 transaction을 제거합니다.
|
||||
![t = 70_2](images/t70_2.png)
|
|
@ -30,7 +30,7 @@ Grin 项目的主要目的和特性如下:
|
|||
[了解更多](http://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/).
|
||||
|
||||
用于密码学目的的椭圆曲线只是一大组我们称之为 _C_ 的点。这些点可以被加、减或乘以整数(也称为标量)。 给定一个整数 _k_ 并使用标量乘法运算,我们可以计算`k * H`,这也是曲线 _C_ 上的一个点。
|
||||
给定另一个整数 _j_,我们也可以计算`(k + j)* H`,它等于`k * H + j * H`。 椭圆曲线上的加法和标量乘法运算保持加法和乘法的交换率和结合律:
|
||||
给定另一个整数 _j_,我们也可以计算`(k + j)* H`,它等于`k * H + j * H`。 椭圆曲线上的加法和标量乘法运算保持加法和乘法的交换律和结合律:
|
||||
|
||||
(k+j)*H = k*H + j*H
|
||||
|
||||
|
|
198
p2p/src/conn.rs
|
@ -20,25 +20,26 @@
|
|||
//! forces us to go through some additional gymnastics to loop over the async
|
||||
//! stream and make sure we get the right number of bytes out.
|
||||
|
||||
use std::fs::File;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::net::{Shutdown, TcpStream};
|
||||
use std::sync::{mpsc, Arc};
|
||||
use std::{
|
||||
cmp,
|
||||
thread::{self, JoinHandle},
|
||||
time,
|
||||
};
|
||||
|
||||
use crate::core::ser;
|
||||
use crate::core::ser::FixedLength;
|
||||
use crate::core::ser::{FixedLength, ProtocolVersion};
|
||||
use crate::msg::{
|
||||
read_body, read_discard, read_header, read_item, write_to_buf, MsgHeader, MsgHeaderWrapper,
|
||||
Type,
|
||||
};
|
||||
use crate::types::Error;
|
||||
use crate::util::read_write::{read_exact, write_all};
|
||||
use crate::util::{RateCounter, RwLock};
|
||||
use std::fs::File;
|
||||
use std::io::{self, Read, Write};
|
||||
use std::net::{Shutdown, TcpStream};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{mpsc, Arc};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
cmp,
|
||||
thread::{self, JoinHandle},
|
||||
};
|
||||
|
||||
const IO_TIMEOUT: Duration = Duration::from_millis(1000);
|
||||
|
||||
/// A trait to be implemented in order to receive messages from the
|
||||
/// connection. Allows providing an optional response.
|
||||
|
@ -57,7 +58,11 @@ macro_rules! try_break {
|
|||
($inner:expr) => {
|
||||
match $inner {
|
||||
Ok(v) => Some(v),
|
||||
Err(Error::Connection(ref e)) if e.kind() == io::ErrorKind::WouldBlock => None,
|
||||
Err(Error::Connection(ref e))
|
||||
if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut =>
|
||||
{
|
||||
None
|
||||
}
|
||||
Err(Error::Store(_))
|
||||
| Err(Error::Chain(_))
|
||||
| Err(Error::Internal)
|
||||
|
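The extra match arm added here is what lets the reader and writer loops keep using blocking socket calls with a short timeout: a `WouldBlock` or `TimedOut` error only means no data arrived within `IO_TIMEOUT`, not that the peer is gone. Below is a standalone sketch of that classification with illustrative names; it is not the `try_break!` macro itself.

```rust
use std::io;

// Classify an IO result the way try_break! does: Ok -> value, a timeout-ish
// error -> "nothing this iteration, poll again", anything else -> fatal.
fn classify<T>(res: io::Result<T>) -> Result<Option<T>, io::Error> {
    match res {
        Ok(v) => Ok(Some(v)),
        Err(ref e)
            if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut =>
        {
            Ok(None) // retry on the next loop iteration
        }
        Err(e) => Err(e), // fatal: break out of the read/write loop
    }
}
```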
@ -75,22 +80,31 @@ macro_rules! try_break {
|
|||
pub struct Message<'a> {
|
||||
pub header: MsgHeader,
|
||||
stream: &'a mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
}
|
||||
|
||||
impl<'a> Message<'a> {
|
||||
fn from_header(header: MsgHeader, stream: &'a mut dyn Read) -> Message<'a> {
|
||||
Message { header, stream }
|
||||
fn from_header(
|
||||
header: MsgHeader,
|
||||
stream: &'a mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
) -> Message<'a> {
|
||||
Message {
|
||||
header,
|
||||
stream,
|
||||
version,
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the message body from the underlying connection
|
||||
pub fn body<T: ser::Readable>(&mut self) -> Result<T, Error> {
|
||||
read_body(&self.header, self.stream)
|
||||
read_body(&self.header, self.stream, self.version)
|
||||
}
|
||||
|
||||
/// Read a single "thing" from the underlying connection.
|
||||
/// Return the thing and the total bytes read.
|
||||
pub fn streaming_read<T: ser::Readable>(&mut self) -> Result<(T, u64), Error> {
|
||||
read_item(self.stream)
|
||||
read_item(self.stream, self.version)
|
||||
}
|
||||
|
||||
pub fn copy_attachment(&mut self, len: usize, writer: &mut dyn Write) -> Result<usize, Error> {
|
||||
|
@ -98,12 +112,7 @@ impl<'a> Message<'a> {
|
|||
while written < len {
|
||||
let read_len = cmp::min(8000, len - written);
|
||||
let mut buf = vec![0u8; read_len];
|
||||
read_exact(
|
||||
&mut self.stream,
|
||||
&mut buf[..],
|
||||
time::Duration::from_secs(10),
|
||||
true,
|
||||
)?;
|
||||
self.stream.read_exact(&mut buf[..])?;
|
||||
writer.write_all(&mut buf)?;
|
||||
written += read_len;
|
||||
}
|
||||
|
@ -115,6 +124,7 @@ impl<'a> Message<'a> {
|
|||
pub struct Response<'a> {
|
||||
resp_type: Type,
|
||||
body: Vec<u8>,
|
||||
version: ProtocolVersion,
|
||||
stream: &'a mut dyn Write,
|
||||
attachment: Option<File>,
|
||||
}
|
||||
|
@ -122,22 +132,27 @@ pub struct Response<'a> {
|
|||
impl<'a> Response<'a> {
|
||||
pub fn new<T: ser::Writeable>(
|
||||
resp_type: Type,
|
||||
version: ProtocolVersion,
|
||||
body: T,
|
||||
stream: &'a mut dyn Write,
|
||||
) -> Result<Response<'a>, Error> {
|
||||
let body = ser::ser_vec(&body)?;
|
||||
let body = ser::ser_vec(&body, version)?;
|
||||
Ok(Response {
|
||||
resp_type,
|
||||
body,
|
||||
version,
|
||||
stream,
|
||||
attachment: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn write(mut self, tracker: Arc<Tracker>) -> Result<(), Error> {
|
||||
let mut msg = ser::ser_vec(&MsgHeader::new(self.resp_type, self.body.len() as u64))?;
|
||||
let mut msg = ser::ser_vec(
|
||||
&MsgHeader::new(self.resp_type, self.body.len() as u64),
|
||||
self.version,
|
||||
)?;
|
||||
msg.append(&mut self.body);
|
||||
write_all(&mut self.stream, &msg[..], time::Duration::from_secs(10))?;
|
||||
self.stream.write_all(&msg[..])?;
|
||||
tracker.inc_sent(msg.len() as u64);
|
||||
|
||||
if let Some(mut file) = self.attachment {
|
||||
|
@ -146,7 +161,7 @@ impl<'a> Response<'a> {
|
|||
match file.read(&mut buf[..]) {
|
||||
Ok(0) => break,
|
||||
Ok(n) => {
|
||||
write_all(&mut self.stream, &buf[..n], time::Duration::from_secs(10))?;
|
||||
self.stream.write_all(&buf[..n])?;
|
||||
// Increase sent bytes "quietly" without incrementing the counter.
|
||||
// (In a loop here for the single attachment).
|
||||
tracker.inc_quiet_sent(n as u64);
|
||||
|
@ -167,34 +182,39 @@ pub const SEND_CHANNEL_CAP: usize = 100;
|
|||
|
||||
pub struct StopHandle {
|
||||
/// Channel to close the connection
|
||||
pub close_channel: mpsc::Sender<()>,
|
||||
stopped: Arc<AtomicBool>,
|
||||
// we need Option to take ownership of the handle in stop()
|
||||
peer_thread: Option<JoinHandle<()>>,
|
||||
reader_thread: Option<JoinHandle<()>>,
|
||||
writer_thread: Option<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl StopHandle {
|
||||
/// Schedule this connection to safely close via the async close_channel.
|
||||
pub fn stop(&self) {
|
||||
if self.close_channel.send(()).is_err() {
|
||||
debug!("peer's close_channel is disconnected, must be stopped already");
|
||||
return;
|
||||
}
|
||||
self.stopped.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn wait(&mut self) {
|
||||
if let Some(peer_thread) = self.peer_thread.take() {
|
||||
// wait only if other thread is calling us, eg shutdown
|
||||
if thread::current().id() != peer_thread.thread().id() {
|
||||
debug!("waiting for thread {:?} exit", peer_thread.thread().id());
|
||||
if let Err(e) = peer_thread.join() {
|
||||
error!("failed to wait for peer thread to stop: {:?}", e);
|
||||
}
|
||||
} else {
|
||||
debug!(
|
||||
"attempt to wait for thread {:?} from itself",
|
||||
peer_thread.thread().id()
|
||||
);
|
||||
if let Some(reader_thread) = self.reader_thread.take() {
|
||||
self.join_thread(reader_thread);
|
||||
}
|
||||
if let Some(writer_thread) = self.writer_thread.take() {
|
||||
self.join_thread(writer_thread);
|
||||
}
|
||||
}
|
||||
|
||||
fn join_thread(&self, peer_thread: JoinHandle<()>) {
|
||||
// wait only if other thread is calling us, eg shutdown
|
||||
if thread::current().id() != peer_thread.thread().id() {
|
||||
debug!("waiting for thread {:?} exit", peer_thread.thread().id());
|
||||
if let Err(e) = peer_thread.join() {
|
||||
error!("failed to stop peer thread: {:?}", e);
|
||||
}
|
||||
} else {
|
||||
debug!(
|
||||
"attempt to stop thread {:?} from itself",
|
||||
peer_thread.thread().id()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -205,11 +225,11 @@ pub struct ConnHandle {
|
|||
}
|
||||
|
||||
impl ConnHandle {
|
||||
pub fn send<T>(&self, body: T, msg_type: Type) -> Result<u64, Error>
|
||||
pub fn send<T>(&self, body: T, msg_type: Type, version: ProtocolVersion) -> Result<u64, Error>
|
||||
where
|
||||
T: ser::Writeable,
|
||||
{
|
||||
let buf = write_to_buf(body, msg_type)?;
|
||||
let buf = write_to_buf(body, msg_type, version)?;
|
||||
let buf_len = buf.len();
|
||||
self.send_channel.try_send(buf)?;
|
||||
Ok(buf_len as u64)
|
||||
|
@ -255,6 +275,7 @@ impl Tracker {
|
|||
/// itself.
|
||||
pub fn listen<H>(
|
||||
stream: TcpStream,
|
||||
version: ProtocolVersion,
|
||||
tracker: Arc<Tracker>,
|
||||
handler: H,
|
||||
) -> io::Result<(ConnHandle, StopHandle)>
|
||||
|
@ -262,48 +283,56 @@ where
|
|||
H: MessageHandler,
|
||||
{
|
||||
let (send_tx, send_rx) = mpsc::sync_channel(SEND_CHANNEL_CAP);
|
||||
let (close_tx, close_rx) = mpsc::channel();
|
||||
|
||||
stream
|
||||
.set_nonblocking(true)
|
||||
.expect("Non-blocking IO not available.");
|
||||
let peer_thread = poll(stream, handler, send_rx, close_rx, tracker)?;
|
||||
.set_read_timeout(Some(IO_TIMEOUT))
|
||||
.expect("can't set read timeout");
|
||||
stream
|
||||
.set_write_timeout(Some(IO_TIMEOUT))
|
||||
.expect("can't set read timeout");
|
||||
|
||||
let stopped = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let (reader_thread, writer_thread) =
|
||||
poll(stream, version, handler, send_rx, stopped.clone(), tracker)?;
|
||||
|
||||
Ok((
|
||||
ConnHandle {
|
||||
send_channel: send_tx,
|
||||
},
|
||||
StopHandle {
|
||||
close_channel: close_tx,
|
||||
peer_thread: Some(peer_thread),
|
||||
stopped,
|
||||
reader_thread: Some(reader_thread),
|
||||
writer_thread: Some(writer_thread),
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
fn poll<H>(
|
||||
conn: TcpStream,
|
||||
version: ProtocolVersion,
|
||||
handler: H,
|
||||
send_rx: mpsc::Receiver<Vec<u8>>,
|
||||
close_rx: mpsc::Receiver<()>,
|
||||
stopped: Arc<AtomicBool>,
|
||||
tracker: Arc<Tracker>,
|
||||
) -> io::Result<JoinHandle<()>>
|
||||
) -> io::Result<(JoinHandle<()>, JoinHandle<()>)>
|
||||
where
|
||||
H: MessageHandler,
|
||||
{
|
||||
// Split out tcp stream out into separate reader/writer halves.
|
||||
let mut reader = conn.try_clone().expect("clone conn for reader failed");
|
||||
let mut writer = conn.try_clone().expect("clone conn for writer failed");
|
||||
let mut responder = conn.try_clone().expect("clone conn for responder failed");
|
||||
let reader_stopped = stopped.clone();
|
||||
|
||||
thread::Builder::new()
|
||||
.name("peer".to_string())
|
||||
let reader_thread = thread::Builder::new()
|
||||
.name("peer_read".to_string())
|
||||
.spawn(move || {
|
||||
let sleep_time = time::Duration::from_millis(5);
|
||||
let mut retry_send = Err(());
|
||||
loop {
|
||||
// check the read end
|
||||
match try_break!(read_header(&mut reader, None)) {
|
||||
match try_break!(read_header(&mut reader, version)) {
|
||||
Some(MsgHeaderWrapper::Known(header)) => {
|
||||
let msg = Message::from_header(header, &mut reader);
|
||||
let msg = Message::from_header(header, &mut reader, version);
|
||||
|
||||
trace!(
|
||||
"Received message header, type {:?}, len {}.",
|
||||
|
@ -315,7 +344,7 @@ where
|
|||
tracker.inc_received(MsgHeader::LEN as u64 + msg.header.msg_len);
|
||||
|
||||
if let Some(Some(resp)) =
|
||||
try_break!(handler.consume(msg, &mut writer, tracker.clone()))
|
||||
try_break!(handler.consume(msg, &mut responder, tracker.clone()))
|
||||
{
|
||||
try_break!(resp.write(tracker.clone()));
|
||||
}
|
||||
|
@ -329,35 +358,48 @@ where
|
|||
None => {}
|
||||
}
|
||||
|
||||
// check the write end, use or_else so try_recv is lazily eval'd
|
||||
let maybe_data = retry_send.or_else(|_| send_rx.try_recv());
|
||||
// check the close channel
|
||||
if reader_stopped.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
debug!(
|
||||
"Shutting down reader connection with {}",
|
||||
reader
|
||||
.peer_addr()
|
||||
.map(|a| a.to_string())
|
||||
.unwrap_or("?".to_owned())
|
||||
);
|
||||
let _ = reader.shutdown(Shutdown::Both);
|
||||
})?;
|
||||
|
||||
let writer_thread = thread::Builder::new()
|
||||
.name("peer_read".to_string())
|
||||
.spawn(move || {
|
||||
let mut retry_send = Err(());
|
||||
loop {
|
||||
let maybe_data = retry_send.or_else(|_| send_rx.recv_timeout(IO_TIMEOUT));
|
||||
retry_send = Err(());
|
||||
if let Ok(data) = maybe_data {
|
||||
let written = try_break!(write_all(
|
||||
&mut writer,
|
||||
&data[..],
|
||||
std::time::Duration::from_secs(10)
|
||||
)
|
||||
.map_err(&From::from));
|
||||
let written = try_break!(writer.write_all(&data[..]).map_err(&From::from));
|
||||
if written.is_none() {
|
||||
retry_send = Ok(data);
|
||||
}
|
||||
}
|
||||
|
||||
// check the close channel
|
||||
if let Ok(_) = close_rx.try_recv() {
|
||||
if stopped.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
thread::sleep(sleep_time);
|
||||
}
|
||||
|
||||
debug!(
|
||||
"Shutting down connection with {}",
|
||||
conn.peer_addr()
|
||||
"Shutting down reader connection with {}",
|
||||
writer
|
||||
.peer_addr()
|
||||
.map(|a| a.to_string())
|
||||
.unwrap_or("?".to_owned())
|
||||
);
|
||||
let _ = conn.shutdown(Shutdown::Both);
|
||||
})
|
||||
})?;
|
||||
Ok((reader_thread, writer_thread))
|
||||
}
|
||||
|
|
|
@ -14,7 +14,8 @@
|
|||
|
||||
use crate::core::core::hash::Hash;
|
||||
use crate::core::pow::Difficulty;
|
||||
use crate::msg::{read_message, write_message, Hand, ProtocolVersion, Shake, Type, USER_AGENT};
|
||||
use crate::core::ser::ProtocolVersion;
|
||||
use crate::msg::{read_message, write_message, Hand, Shake, Type, USER_AGENT};
|
||||
use crate::peer::Peer;
|
||||
use crate::types::{Capabilities, Direction, Error, P2PConfig, PeerAddr, PeerInfo, PeerLiveInfo};
|
||||
use crate::util::RwLock;
|
||||
|
@ -60,7 +61,7 @@ impl Handshake {
|
|||
|
||||
pub fn initiate(
|
||||
&self,
|
||||
capab: Capabilities,
|
||||
capabilities: Capabilities,
|
||||
total_difficulty: Difficulty,
|
||||
self_addr: PeerAddr,
|
||||
conn: &mut TcpStream,
|
||||
|
@ -72,20 +73,26 @@ impl Handshake {
|
|||
Err(e) => return Err(Error::Connection(e)),
|
||||
};
|
||||
|
||||
// Using our default "local" protocol version.
|
||||
let version = ProtocolVersion::local();
|
||||
|
||||
let hand = Hand {
|
||||
version: ProtocolVersion::default(),
|
||||
capabilities: capab,
|
||||
nonce: nonce,
|
||||
version,
|
||||
capabilities,
|
||||
nonce,
|
||||
genesis: self.genesis,
|
||||
total_difficulty: total_difficulty,
|
||||
total_difficulty,
|
||||
sender_addr: self_addr,
|
||||
receiver_addr: peer_addr,
|
||||
user_agent: USER_AGENT.to_string(),
|
||||
};
|
||||
|
||||
// write and read the handshake response
|
||||
write_message(conn, hand, Type::Hand)?;
|
||||
let shake: Shake = read_message(conn, Type::Shake)?;
|
||||
write_message(conn, hand, Type::Hand, version)?;
|
||||
|
||||
// Note: We have to read the Shake message *before* we know which protocol
|
||||
// version our peer supports (it is in the shake message itself).
|
||||
let shake: Shake = read_message(conn, version, Type::Shake)?;
|
||||
if shake.genesis != self.genesis {
|
||||
return Err(Error::GenesisMismatch {
|
||||
us: self.genesis,
|
||||
|
@ -124,7 +131,11 @@ impl Handshake {
|
|||
total_difficulty: Difficulty,
|
||||
conn: &mut TcpStream,
|
||||
) -> Result<PeerInfo, Error> {
|
||||
let hand: Hand = read_message(conn, Type::Hand)?;
|
||||
// Note: We read the Hand message *before* we know which protocol version
|
||||
// is supported by our peer (in the Hand message).
|
||||
let version = ProtocolVersion::local();
|
||||
|
||||
let hand: Hand = read_message(conn, version, Type::Hand)?;
|
||||
|
||||
// all the reasons we could refuse this connection for
|
||||
if hand.genesis != self.genesis {
|
||||
|
@ -167,17 +178,16 @@ impl Handshake {
|
|||
|
||||
// send our reply with our info
|
||||
let shake = Shake {
|
||||
version: ProtocolVersion::default(),
|
||||
version,
|
||||
capabilities: capab,
|
||||
genesis: self.genesis,
|
||||
total_difficulty: total_difficulty,
|
||||
user_agent: USER_AGENT.to_string(),
|
||||
};
|
||||
|
||||
write_message(conn, shake, Type::Shake)?;
|
||||
write_message(conn, shake, Type::Shake, version)?;
|
||||
trace!("Success handshake with {}.", peer_info.addr);
|
||||
|
||||
// when more than one protocol version is supported, choosing should go here
|
||||
Ok(peer_info)
|
||||
}
|
||||
|
||||
|
|
111
p2p/src/msg.rs
|
@ -14,30 +14,18 @@
|
|||
|
||||
//! Message types that transit over the network and related serialization code.
|
||||
|
||||
use num::FromPrimitive;
|
||||
use std::fmt;
|
||||
use std::io::{Read, Write};
|
||||
use std::time;
|
||||
|
||||
use crate::core::core::hash::Hash;
|
||||
use crate::core::core::BlockHeader;
|
||||
use crate::core::pow::Difficulty;
|
||||
use crate::core::ser::{self, FixedLength, Readable, Reader, StreamingReader, Writeable, Writer};
|
||||
use crate::core::ser::{
|
||||
self, FixedLength, ProtocolVersion, Readable, Reader, StreamingReader, Writeable, Writer,
|
||||
};
|
||||
use crate::core::{consensus, global};
|
||||
use crate::types::{
|
||||
Capabilities, Error, PeerAddr, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS,
|
||||
};
|
||||
use crate::util::read_write::read_exact;
|
||||
|
||||
/// Our local node protocol version.
|
||||
/// We will increment the protocol version with every change to p2p msg serialization
|
||||
/// so we will likely connect with peers with both higher and lower protocol versions.
|
||||
/// We need to be aware that some msg formats will be potentially incompatible and handle
|
||||
/// this for each individual peer connection.
|
||||
/// Note: A peer may disconnect and reconnect with an updated protocol version. Normally
|
||||
/// the protocol version will increase but we need to handle decreasing values also
|
||||
/// as a peer may roll back to a previous version of the code.
|
||||
const PROTOCOL_VERSION: u32 = 1;
|
||||
use num::FromPrimitive;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
/// Grin's user agent with current version
|
||||
pub const USER_AGENT: &'static str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));
|
||||
|
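The comments above describe the intent: each peer advertises a protocol version in the handshake, and the connection must then cope with a peer speaking an older or newer version. A minimal sketch of such a version type follows; the `negotiate` helper is an illustrative assumption (this change still leaves the actual "choosing" step as a TODO in the handshake), not the grin API.

```rust
/// Sketch of a protocol version newtype, assuming negotiation picks the
/// lowest version both sides understand.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProtocolVersion(pub u32);

impl ProtocolVersion {
    /// The version this node speaks by default.
    pub fn local() -> ProtocolVersion {
        ProtocolVersion(1)
    }

    /// Hypothetical helper: fall back to the lower of the two versions so
    /// both peers serialize messages the other can read.
    pub fn negotiate(self, peer: ProtocolVersion) -> ProtocolVersion {
        std::cmp::min(self, peer)
    }
}
```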
@ -134,49 +122,55 @@ fn magic() -> [u8; 2] {
|
|||
///
|
||||
pub fn read_header(
|
||||
stream: &mut dyn Read,
|
||||
msg_type: Option<Type>,
|
||||
version: ProtocolVersion,
|
||||
) -> Result<MsgHeaderWrapper, Error> {
|
||||
let mut head = vec![0u8; MsgHeader::LEN];
|
||||
if Some(Type::Hand) == msg_type {
|
||||
read_exact(stream, &mut head, time::Duration::from_millis(10), true)?;
|
||||
} else {
|
||||
read_exact(stream, &mut head, time::Duration::from_secs(10), false)?;
|
||||
}
|
||||
let header = ser::deserialize::<MsgHeaderWrapper>(&mut &head[..])?;
|
||||
stream.read_exact(&mut head)?;
|
||||
let header = ser::deserialize::<MsgHeaderWrapper>(&mut &head[..], version)?;
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
/// Read a single item from the provided stream, always blocking until we
|
||||
/// have a result (or timeout).
|
||||
/// Returns the item and the total bytes read.
|
||||
pub fn read_item<T: Readable>(stream: &mut dyn Read) -> Result<(T, u64), Error> {
|
||||
let timeout = time::Duration::from_secs(20);
|
||||
let mut reader = StreamingReader::new(stream, timeout);
|
||||
pub fn read_item<T: Readable>(
|
||||
stream: &mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
) -> Result<(T, u64), Error> {
|
||||
let mut reader = StreamingReader::new(stream, version);
|
||||
let res = T::read(&mut reader)?;
|
||||
Ok((res, reader.total_bytes_read()))
|
||||
}
|
||||
|
||||
/// Read a message body from the provided stream, always blocking
|
||||
/// until we have a result (or timeout).
|
||||
pub fn read_body<T: Readable>(h: &MsgHeader, stream: &mut dyn Read) -> Result<T, Error> {
|
||||
pub fn read_body<T: Readable>(
|
||||
h: &MsgHeader,
|
||||
stream: &mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
) -> Result<T, Error> {
|
||||
let mut body = vec![0u8; h.msg_len as usize];
|
||||
read_exact(stream, &mut body, time::Duration::from_secs(20), true)?;
|
||||
ser::deserialize(&mut &body[..]).map_err(From::from)
|
||||
stream.read_exact(&mut body)?;
|
||||
ser::deserialize(&mut &body[..], version).map_err(From::from)
|
||||
}
|
||||
|
||||
/// Read (an unknown) message from the provided stream and discard it.
|
||||
pub fn read_discard(msg_len: u64, stream: &mut dyn Read) -> Result<(), Error> {
|
||||
let mut buffer = vec![0u8; msg_len as usize];
|
||||
read_exact(stream, &mut buffer, time::Duration::from_secs(20), true)?;
|
||||
stream.read_exact(&mut buffer)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reads a full message from the underlying stream.
|
||||
pub fn read_message<T: Readable>(stream: &mut dyn Read, msg_type: Type) -> Result<T, Error> {
|
||||
match read_header(stream, Some(msg_type))? {
|
||||
pub fn read_message<T: Readable>(
|
||||
stream: &mut dyn Read,
|
||||
version: ProtocolVersion,
|
||||
msg_type: Type,
|
||||
) -> Result<T, Error> {
|
||||
match read_header(stream, version)? {
|
||||
MsgHeaderWrapper::Known(header) => {
|
||||
if header.msg_type == msg_type {
|
||||
read_body(&header, stream)
|
||||
read_body(&header, stream, version)
|
||||
} else {
|
||||
Err(Error::BadMessage)
|
||||
}
|
||||
|
@ -188,15 +182,19 @@ pub fn read_message<T: Readable>(stream: &mut dyn Read, msg_type: Type) -> Resul
|
|||
}
|
||||
}
|
||||
|
||||
pub fn write_to_buf<T: Writeable>(msg: T, msg_type: Type) -> Result<Vec<u8>, Error> {
|
||||
pub fn write_to_buf<T: Writeable>(
|
||||
msg: T,
|
||||
msg_type: Type,
|
||||
version: ProtocolVersion,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
// prepare the body first so we know its serialized length
|
||||
let mut body_buf = vec![];
|
||||
ser::serialize(&mut body_buf, &msg)?;
|
||||
ser::serialize(&mut body_buf, version, &msg)?;
|
||||
|
||||
// build and serialize the header using the body size
|
||||
let mut msg_buf = vec![];
|
||||
let blen = body_buf.len() as u64;
|
||||
ser::serialize(&mut msg_buf, &MsgHeader::new(msg_type, blen))?;
|
||||
ser::serialize(&mut msg_buf, version, &MsgHeader::new(msg_type, blen))?;
|
||||
msg_buf.append(&mut body_buf);
|
||||
|
||||
Ok(msg_buf)
|
||||
|
@ -206,8 +204,9 @@ pub fn write_message<T: Writeable>(
|
|||
stream: &mut dyn Write,
|
||||
msg: T,
|
||||
msg_type: Type,
|
||||
version: ProtocolVersion,
|
||||
) -> Result<(), Error> {
|
||||
let buf = write_to_buf(msg, msg_type)?;
|
||||
let buf = write_to_buf(msg, msg_type, version)?;
|
||||
stream.write_all(&buf[..])?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -309,40 +308,6 @@ impl Readable for MsgHeaderWrapper {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialOrd, PartialEq, Serialize)]
|
||||
pub struct ProtocolVersion(pub u32);
|
||||
|
||||
impl Default for ProtocolVersion {
|
||||
fn default() -> ProtocolVersion {
|
||||
ProtocolVersion(PROTOCOL_VERSION)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ProtocolVersion> for u32 {
|
||||
fn from(v: ProtocolVersion) -> u32 {
|
||||
v.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ProtocolVersion {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Writeable for ProtocolVersion {
|
||||
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
|
||||
writer.write_u32(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Readable for ProtocolVersion {
|
||||
fn read(reader: &mut dyn Reader) -> Result<ProtocolVersion, ser::Error> {
|
||||
let version = reader.read_u32()?;
|
||||
Ok(ProtocolVersion(version))
|
||||
}
|
||||
}
|
||||
|
||||
/// First part of a handshake, sender advertises its version and
|
||||
/// characteristics.
|
||||
pub struct Hand {
|
||||
|
@ -436,10 +401,8 @@ impl Writeable for Shake {
|
|||
impl Readable for Shake {
|
||||
fn read(reader: &mut dyn Reader) -> Result<Shake, ser::Error> {
|
||||
let version = ProtocolVersion::read(reader)?;
|
||||
|
||||
let capab = reader.read_u32()?;
|
||||
let capabilities = Capabilities::from_bits_truncate(capab);
|
||||
|
||||
let total_difficulty = Difficulty::read(reader)?;
|
||||
let ua = reader.read_bytes_len_prefix()?;
|
||||
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
|
||||
|
|
|
@ -75,7 +75,7 @@ impl Peer {
|
|||
let tracking_adapter = TrackingAdapter::new(adapter);
|
||||
let handler = Protocol::new(Arc::new(tracking_adapter.clone()), info.clone());
|
||||
let tracker = Arc::new(conn::Tracker::new());
|
||||
let (sendh, stoph) = conn::listen(conn, tracker.clone(), handler)?;
|
||||
let (sendh, stoph) = conn::listen(conn, info.version, tracker.clone(), handler)?;
|
||||
let send_handle = Mutex::new(sendh);
|
||||
let stop_handle = Mutex::new(stoph);
|
||||
Ok(Peer {
|
||||
|
@ -224,7 +224,10 @@ impl Peer {
|
|||
|
||||
/// Send a msg with given msg_type to our peer via the connection.
|
||||
fn send<T: Writeable>(&self, msg: T, msg_type: Type) -> Result<(), Error> {
|
||||
let bytes = self.send_handle.lock().send(msg, msg_type)?;
|
||||
let bytes = self
|
||||
.send_handle
|
||||
.lock()
|
||||
.send(msg, msg_type, self.info.version)?;
|
||||
self.tracker.inc_sent(bytes);
|
||||
Ok(())
|
||||
}
|
||||
|
@ -562,6 +565,10 @@ impl ChainAdapter for TrackingAdapter {
|
|||
self.adapter.txhashset_read(h)
|
||||
}
|
||||
|
||||
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
|
||||
self.adapter.txhashset_archive_header()
|
||||
}
|
||||
|
||||
fn txhashset_receive_ready(&self) -> bool {
|
||||
self.adapter.txhashset_receive_ready()
|
||||
}
|
||||
|
|
|
@ -674,6 +674,10 @@ impl ChainAdapter for Peers {
|
|||
self.adapter.txhashset_read(h)
|
||||
}
|
||||
|
||||
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
|
||||
self.adapter.txhashset_archive_header()
|
||||
}
|
||||
|
||||
fn txhashset_receive_ready(&self) -> bool {
|
||||
self.adapter.txhashset_receive_ready()
|
||||
}
|
||||
|
|
|
@ -12,8 +12,9 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
|
||||
use crate::conn::{Message, MessageHandler, Response, Tracker};
|
||||
use crate::core::core::{self, hash::Hash, CompactBlock};
|
||||
use crate::core::core::{self, hash::Hash, hash::Hashed, CompactBlock};
|
||||
|
||||
use crate::msg::{
|
||||
BanReason, GetPeerAddrs, Headers, KernelDataResponse, Locator, PeerAddrs, Ping, Pong,
|
||||
|
@ -26,6 +27,7 @@ use std::cmp;
|
|||
use std::fs::{self, File, OpenOptions};
|
||||
use std::io::{BufWriter, Seek, SeekFrom, Write};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tempfile::tempfile;
|
||||
|
||||
pub struct Protocol {
|
||||
|
@ -66,6 +68,7 @@ impl MessageHandler for Protocol {
|
|||
|
||||
Ok(Some(Response::new(
|
||||
Type::Pong,
|
||||
self.peer_info.version,
|
||||
Pong {
|
||||
total_difficulty: adapter.total_difficulty()?,
|
||||
height: adapter.total_height()?,
|
||||
|
@ -104,7 +107,12 @@ impl MessageHandler for Protocol {
|
|||
);
|
||||
let tx = adapter.get_transaction(h);
|
||||
if let Some(tx) = tx {
|
||||
Ok(Some(Response::new(Type::Transaction, tx, writer)?))
|
||||
Ok(Some(Response::new(
|
||||
Type::Transaction,
|
||||
self.peer_info.version,
|
||||
tx,
|
||||
writer,
|
||||
)?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
@ -140,7 +148,12 @@ impl MessageHandler for Protocol {
|
|||
|
||||
let bo = adapter.get_block(h);
|
||||
if let Some(b) = bo {
|
||||
return Ok(Some(Response::new(Type::Block, b, writer)?));
|
||||
return Ok(Some(Response::new(
|
||||
Type::Block,
|
||||
self.peer_info.version,
|
||||
b,
|
||||
writer,
|
||||
)?));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
@ -162,7 +175,12 @@ impl MessageHandler for Protocol {
|
|||
let h: Hash = msg.body()?;
|
||||
if let Some(b) = adapter.get_block(h) {
|
||||
let cb: CompactBlock = b.into();
|
||||
Ok(Some(Response::new(Type::CompactBlock, cb, writer)?))
|
||||
Ok(Some(Response::new(
|
||||
Type::CompactBlock,
|
||||
self.peer_info.version,
|
||||
cb,
|
||||
writer,
|
||||
)?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
@ -187,6 +205,7 @@ impl MessageHandler for Protocol {
|
|||
// serialize and send all the headers over
|
||||
Ok(Some(Response::new(
|
||||
Type::Headers,
|
||||
self.peer_info.version,
|
||||
Headers { headers },
|
||||
writer,
|
||||
)?))
|
||||
|
@ -232,6 +251,7 @@ impl MessageHandler for Protocol {
|
|||
let peers = adapter.find_peer_addrs(get_peers.capabilities);
|
||||
Ok(Some(Response::new(
|
||||
Type::PeerAddrs,
|
||||
self.peer_info.version,
|
||||
PeerAddrs { peers },
|
||||
writer,
|
||||
)?))
|
||||
|
@ -248,8 +268,12 @@ impl MessageHandler for Protocol {
|
|||
let kernel_data = self.adapter.kernel_data_read()?;
|
||||
let bytes = kernel_data.metadata()?.len();
|
||||
let kernel_data_response = KernelDataResponse { bytes };
|
||||
let mut response =
|
||||
Response::new(Type::KernelDataResponse, &kernel_data_response, writer)?;
|
||||
let mut response = Response::new(
|
||||
Type::KernelDataResponse,
|
||||
self.peer_info.version,
|
||||
&kernel_data_response,
|
||||
writer,
|
||||
)?;
|
||||
response.add_attachment(kernel_data);
|
||||
Ok(Some(response))
|
||||
}
|
||||
|
@ -298,15 +322,18 @@ impl MessageHandler for Protocol {
|
|||
sm_req.hash, sm_req.height
|
||||
);
|
||||
|
||||
let txhashset = self.adapter.txhashset_read(sm_req.hash);
|
||||
let txhashset_header = self.adapter.txhashset_archive_header()?;
|
||||
let txhashset_header_hash = txhashset_header.hash();
|
||||
let txhashset = self.adapter.txhashset_read(txhashset_header_hash);
|
||||
|
||||
if let Some(txhashset) = txhashset {
|
||||
let file_sz = txhashset.reader.metadata()?.len();
|
||||
let mut resp = Response::new(
|
||||
Type::TxHashSetArchive,
|
||||
self.peer_info.version,
|
||||
&TxHashSetArchive {
|
||||
height: sm_req.height as u64,
|
||||
hash: sm_req.hash,
|
||||
height: txhashset_header.height as u64,
|
||||
hash: txhashset_header_hash,
|
||||
bytes: file_sz,
|
||||
},
|
||||
writer,
|
||||
|
@ -341,6 +368,7 @@ impl MessageHandler for Protocol {
|
|||
download_start_time.timestamp(),
|
||||
nonce
|
||||
));
|
||||
let mut now = Instant::now();
|
||||
let mut save_txhashset_to_file = |file| -> Result<(), Error> {
|
||||
let mut tmp_zip =
|
||||
BufWriter::new(OpenOptions::new().write(true).create_new(true).open(file)?);
|
||||
|
@ -356,11 +384,21 @@ impl MessageHandler for Protocol {
|
|||
downloaded_size as u64,
|
||||
total_size as u64,
|
||||
);
|
||||
|
||||
if now.elapsed().as_secs() > 10 {
|
||||
now = Instant::now();
|
||||
debug!(
|
||||
"handle_payload: txhashset archive: {}/{}",
|
||||
downloaded_size, total_size
|
||||
);
|
||||
}
|
||||
// Increase received bytes quietly (without affecting the counters).
|
||||
// Otherwise we risk banning a peer as "abusive".
|
||||
tracker.inc_quiet_received(size as u64)
|
||||
}
|
||||
debug!(
|
||||
"handle_payload: txhashset archive: {}/{} ... DONE",
|
||||
downloaded_size, total_size
|
||||
);
|
||||
tmp_zip
|
||||
.into_inner()
|
||||
.map_err(|_| Error::Internal)?
|
||||
|
|
|
@ -302,6 +302,10 @@ impl ChainAdapter for DummyAdapter {
|
|||
unimplemented!()
|
||||
}
|
||||
|
||||
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn txhashset_receive_ready(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
|
|
@ -29,8 +29,7 @@ use crate::core::core;
|
|||
use crate::core::core::hash::Hash;
|
||||
use crate::core::global;
|
||||
use crate::core::pow::Difficulty;
|
||||
use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
|
||||
use crate::msg::ProtocolVersion;
|
||||
use crate::core::ser::{self, ProtocolVersion, Readable, Reader, Writeable, Writer};
|
||||
use grin_store;
|
||||
|
||||
/// Maximum number of block headers a peer should ever send
|
||||
|
@ -536,6 +535,9 @@ pub trait ChainAdapter: Sync + Send {
|
|||
/// at the provided block hash.
|
||||
fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead>;
|
||||
|
||||
/// Header of the txhashset archive currently being served to peers.
|
||||
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error>;
|
||||
|
||||
/// Whether the node is ready to accept a new txhashset. If this isn't the
|
||||
/// case, the archive is provided without being requested and likely an
|
||||
/// attack attempt. This should be checked *before* downloading the whole
|
||||
|
|
|
@ -202,10 +202,10 @@ impl Pool {
|
|||
|
||||
fn log_pool_add(&self, entry: &PoolEntry, header: &BlockHeader) {
|
||||
debug!(
|
||||
"add_to_pool [{}]: {} ({}) [in/out/kern: {}/{}/{}] pool: {} (at block {})",
|
||||
"add_to_pool [{}]: {} ({:?}) [in/out/kern: {}/{}/{}] pool: {} (at block {})",
|
||||
self.name,
|
||||
entry.tx.hash(),
|
||||
entry.src.debug_name,
|
||||
entry.src,
|
||||
entry.tx.inputs().len(),
|
||||
entry.tx.outputs().len(),
|
||||
entry.tx.kernels().len(),
|
||||
|
@ -355,7 +355,7 @@ impl Pool {
|
|||
// This is the common case for non 0-conf txs in the txpool.
|
||||
// We assume the tx is valid here as we validated it on the way into the txpool.
|
||||
insert_pos = Some(tx_buckets.len());
|
||||
tx_buckets.push(Bucket::new(entry.tx.clone()));
|
||||
tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len()));
|
||||
}
|
||||
Some(pos) => {
|
||||
// We found a single parent tx, so aggregate in the bucket
|
||||
|
@ -375,7 +375,7 @@ impl Pool {
|
|||
// Otherwise put it in its own bucket at the end.
|
||||
// Note: This bucket will have a lower fee_to_weight
|
||||
// than the bucket it depends on.
|
||||
tx_buckets.push(Bucket::new(entry.tx.clone()));
|
||||
tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len()));
|
||||
}
|
||||
} else {
|
||||
// Aggregation failed so discard this new tx.
|
||||
|
@ -397,10 +397,11 @@ impl Pool {
|
|||
}
|
||||
}
|
||||
|
||||
// Sort them by fee_to_weight (descending).
|
||||
// Txs with no dependencies will be toward the start of the vec.
|
||||
// Txs with a big chain of dependencies will be toward the end of the vec.
|
||||
tx_buckets.sort_unstable_by_key(|x| Reverse(x.fee_to_weight));
|
||||
// Sort buckets by fee_to_weight (descending) and age (oldest first).
|
||||
// Txs with highest fee_to_weight will be prioritized.
|
||||
// Aggregation that increases the fee_to_weight of a bucket will prioritize the bucket.
|
||||
// Oldest (based on pool insertion time) will then be prioritized.
|
||||
tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_to_weight), x.age_idx));
|
||||
|
||||
tx_buckets
|
||||
.into_iter()
|
||||
|
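The new sort key above orders buckets first by `fee_to_weight` (descending, via `Reverse`) and then by `age_idx` (ascending, oldest first). A small self-contained sketch of just that ordering, keeping only the two fields the sort looks at:

```rust
use std::cmp::Reverse;

struct Bucket {
    fee_to_weight: u64,
    age_idx: usize,
}

fn main() {
    let mut tx_buckets = vec![
        Bucket { fee_to_weight: 10, age_idx: 2 },
        Bucket { fee_to_weight: 25, age_idx: 1 },
        Bucket { fee_to_weight: 25, age_idx: 0 },
    ];
    // Highest fee_to_weight first; ties broken by lowest age_idx (oldest bucket).
    tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_to_weight), x.age_idx));
    let order: Vec<usize> = tx_buckets.iter().map(|b| b.age_idx).collect();
    assert_eq!(order, vec![0, 1, 2]);
}
```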
@ -454,13 +455,19 @@ impl Pool {
|
|||
struct Bucket {
|
||||
raw_txs: Vec<Transaction>,
|
||||
fee_to_weight: u64,
|
||||
age_idx: usize,
|
||||
}
|
||||
|
||||
impl Bucket {
|
||||
fn new(tx: Transaction) -> Bucket {
|
||||
/// Construct a new bucket with the given tx.
|
||||
/// Also specifies an "age_idx" so we can sort buckets by age
|
||||
/// as well as fee_to_weight. Txs are maintained in the pool in insert order
|
||||
/// so buckets with low age_idx contain oldest txs.
|
||||
fn new(tx: Transaction, age_idx: usize) -> Bucket {
|
||||
Bucket {
|
||||
fee_to_weight: tx.fee_to_weight(),
|
||||
raw_txs: vec![tx.clone()],
|
||||
age_idx,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -477,6 +484,7 @@ impl Bucket {
|
|||
Ok(Bucket {
|
||||
fee_to_weight: agg_tx.fee_to_weight(),
|
||||
raw_txs: raw_txs,
|
||||
age_idx: self.age_idx,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ impl TransactionPool {
|
|||
tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?;
|
||||
|
||||
entry.tx = tx;
|
||||
entry.src.debug_name = "deagg".to_string();
|
||||
entry.src = TxSource::Deaggregate;
|
||||
}
|
||||
}
|
||||
self.txpool.add_to_pool(entry.clone(), vec![], header)?;
|
||||
|
@ -169,12 +169,12 @@ impl TransactionPool {
|
|||
if !stem
|
||||
|| self
|
||||
.add_to_stempool(entry.clone(), header)
|
||||
.and_then(|_| self.adapter.stem_tx_accepted(&entry.tx))
|
||||
.and_then(|_| self.adapter.stem_tx_accepted(&entry))
|
||||
.is_err()
|
||||
{
|
||||
self.add_to_txpool(entry.clone(), header)?;
|
||||
self.add_to_reorg_cache(entry.clone());
|
||||
self.adapter.tx_accepted(&entry.tx);
|
||||
self.adapter.tx_accepted(&entry);
|
||||
}
|
||||
|
||||
// Transaction passed all the checks but we have to make space for it
|
||||
|
|
|
@ -39,23 +39,32 @@ const DANDELION_AGGREGATION_SECS: u16 = 30;
|
|||
/// Dandelion stem probability (stem 90% of the time, fluff 10%).
|
||||
const DANDELION_STEM_PROBABILITY: u8 = 90;
|
||||
|
||||
/// Always stem our (pushed via api) txs?
|
||||
/// Defaults to true to match the Dandelion++ paper.
|
||||
/// But can be overridden to allow a node to fluff our txs if desired.
|
||||
/// If set to false we will stem/fluff our txs as per current epoch.
|
||||
const DANDELION_ALWAYS_STEM_OUR_TXS: bool = true;
|
||||
|
||||
/// Configuration for "Dandelion".
|
||||
/// Note: shared between p2p and pool.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct DandelionConfig {
|
||||
/// Length of each "epoch".
|
||||
#[serde(default = "default_dandelion_epoch_secs")]
|
||||
pub epoch_secs: Option<u16>,
|
||||
pub epoch_secs: u16,
|
||||
/// Dandelion embargo timer. Fluff and broadcast individual txs if not seen
|
||||
/// on network before embargo expires.
|
||||
#[serde(default = "default_dandelion_embargo_secs")]
|
||||
pub embargo_secs: Option<u16>,
|
||||
pub embargo_secs: u16,
|
||||
/// Dandelion aggregation timer.
|
||||
#[serde(default = "default_dandelion_aggregation_secs")]
|
||||
pub aggregation_secs: Option<u16>,
|
||||
pub aggregation_secs: u16,
|
||||
/// Dandelion stem probability (stem 90% of the time, fluff 10% etc.)
|
||||
#[serde(default = "default_dandelion_stem_probability")]
|
||||
pub stem_probability: Option<u8>,
|
||||
pub stem_probability: u8,
|
||||
/// Default to always stem our txs as described in Dandelion++ paper.
|
||||
#[serde(default = "default_dandelion_always_stem_our_txs")]
|
||||
pub always_stem_our_txs: bool,
|
||||
}
|
||||
|
||||
impl Default for DandelionConfig {
|
||||
|
@ -65,24 +74,29 @@ impl Default for DandelionConfig {
|
|||
embargo_secs: default_dandelion_embargo_secs(),
|
||||
aggregation_secs: default_dandelion_aggregation_secs(),
|
||||
stem_probability: default_dandelion_stem_probability(),
|
||||
always_stem_our_txs: default_dandelion_always_stem_our_txs(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_dandelion_epoch_secs() -> Option<u16> {
|
||||
Some(DANDELION_EPOCH_SECS)
|
||||
fn default_dandelion_epoch_secs() -> u16 {
|
||||
DANDELION_EPOCH_SECS
|
||||
}
|
||||
|
||||
fn default_dandelion_embargo_secs() -> Option<u16> {
|
||||
Some(DANDELION_EMBARGO_SECS)
|
||||
fn default_dandelion_embargo_secs() -> u16 {
|
||||
DANDELION_EMBARGO_SECS
|
||||
}
|
||||
|
||||
fn default_dandelion_aggregation_secs() -> Option<u16> {
|
||||
Some(DANDELION_AGGREGATION_SECS)
|
||||
fn default_dandelion_aggregation_secs() -> u16 {
|
||||
DANDELION_AGGREGATION_SECS
|
||||
}
|
||||
|
||||
fn default_dandelion_stem_probability() -> Option<u8> {
|
||||
Some(DANDELION_STEM_PROBABILITY)
|
||||
fn default_dandelion_stem_probability() -> u8 {
|
||||
DANDELION_STEM_PROBABILITY
|
||||
}
|
||||
|
||||
fn default_dandelion_always_stem_our_txs() -> bool {
|
||||
DANDELION_ALWAYS_STEM_OUR_TXS
|
||||
}
|
||||
|
||||
/// Transaction pool configuration
|
||||
|
@ -145,20 +159,29 @@ pub struct PoolEntry {
|
|||
pub tx: Transaction,
|
||||
}
|
||||
|
||||
/// Placeholder: the data representing where we heard about a tx from.
|
||||
///
|
||||
/// Used to make decisions based on transaction acceptance priority from
|
||||
/// various sources. For example, a node may want to bypass pool size
|
||||
/// restrictions when accepting a transaction from a local wallet.
|
||||
///
|
||||
/// Most likely this will evolve to contain some sort of network identifier,
|
||||
/// once we get a better sense of what transaction building might look like.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TxSource {
|
||||
/// Human-readable name used for logging and errors.
|
||||
pub debug_name: String,
|
||||
/// Unique identifier used to distinguish this peer from others.
|
||||
pub identifier: String,
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum TxSource {
|
||||
PushApi,
|
||||
Broadcast,
|
||||
Fluff,
|
||||
EmbargoExpired,
|
||||
Deaggregate,
|
||||
}
|
||||
|
||||
impl TxSource {
|
||||
/// Convenience fn for checking if this tx was sourced via the push api.
|
||||
pub fn is_pushed(&self) -> bool {
|
||||
match self {
|
||||
TxSource::PushApi => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Possible errors when interacting with the transaction pool.
|
||||
|
@ -250,10 +273,10 @@ pub trait BlockChain: Sync + Send {
|
|||
/// importantly the broadcasting of transactions to our peers.
|
||||
pub trait PoolAdapter: Send + Sync {
|
||||
/// The transaction pool has accepted this transaction as valid.
|
||||
fn tx_accepted(&self, tx: &transaction::Transaction);
|
||||
fn tx_accepted(&self, entry: &PoolEntry);
|
||||
|
||||
/// The stem transaction pool has accepted this transaction as valid.
|
||||
fn stem_tx_accepted(&self, tx: &transaction::Transaction) -> Result<(), PoolError>;
|
||||
fn stem_tx_accepted(&self, entry: &PoolEntry) -> Result<(), PoolError>;
|
||||
}
|
||||
|
||||
/// Dummy adapter used as a placeholder for real implementations
|
||||
|
@ -261,8 +284,8 @@ pub trait PoolAdapter: Send + Sync {
|
|||
pub struct NoopAdapter {}
|
||||
|
||||
impl PoolAdapter for NoopAdapter {
|
||||
fn tx_accepted(&self, _tx: &transaction::Transaction) {}
|
||||
fn stem_tx_accepted(&self, _tx: &transaction::Transaction) -> Result<(), PoolError> {
|
||||
fn tx_accepted(&self, _entry: &PoolEntry) {}
|
||||
fn stem_tx_accepted(&self, _entry: &PoolEntry) -> Result<(), PoolError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -229,10 +229,7 @@ where
|
|||
}
|
||||
|
||||
pub fn test_source() -> TxSource {
|
||||
TxSource {
|
||||
debug_name: format!("test"),
|
||||
identifier: format!("127.0.0.1"),
|
||||
}
|
||||
TxSource::Broadcast
|
||||
}
|
||||
|
||||
pub fn clean_output_dir(db_root: String) {
|
||||
|
|
|
@ -19,10 +19,12 @@ use self::core::core::{transaction, Block, BlockHeader, Weighting};
|
|||
use self::core::libtx;
|
||||
use self::core::pow::Difficulty;
|
||||
use self::keychain::{ExtKeychain, Keychain};
|
||||
use self::pool::TxSource;
|
||||
use self::util::RwLock;
|
||||
use crate::common::*;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_pool as pool;
|
||||
use grin_util as util;
|
||||
use std::sync::Arc;
|
||||
|
||||
|
@ -237,7 +239,7 @@ fn test_the_transaction_pool() {
|
|||
assert_eq!(write_pool.total_size(), 6);
|
||||
let entry = write_pool.txpool.entries.last().unwrap();
|
||||
assert_eq!(entry.tx.kernels().len(), 1);
|
||||
assert_eq!(entry.src.debug_name, "deagg");
|
||||
assert_eq!(entry.src, TxSource::Deaggregate);
|
||||
}
|
||||
|
||||
// Check we cannot "double spend" an output spent in a previous block.
|
||||
|
@ -447,7 +449,7 @@ fn test_the_transaction_pool() {
|
|||
assert_eq!(write_pool.total_size(), 6);
|
||||
let entry = write_pool.txpool.entries.last().unwrap();
|
||||
assert_eq!(entry.tx.kernels().len(), 1);
|
||||
assert_eq!(entry.src.debug_name, "deagg");
|
||||
assert_eq!(entry.src, TxSource::Deaggregate);
|
||||
}
|
||||
|
||||
// Check we cannot "double spend" an output spent in a previous block.
|
||||
|
|
|
@ -23,11 +23,9 @@ use std::sync::{Arc, Weak};
|
|||
use std::thread;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::chain::{self, BlockStatus, ChainAdapter, Options};
|
||||
use crate::chain::{self, BlockStatus, ChainAdapter, Options, SyncState, SyncStatus};
|
||||
use crate::common::hooks::{ChainEvents, NetEvents};
|
||||
use crate::common::types::{
|
||||
self, ChainValidationMode, DandelionEpoch, ServerConfig, SyncState, SyncStatus,
|
||||
};
|
||||
use crate::common::types::{ChainValidationMode, DandelionEpoch, ServerConfig};
|
||||
use crate::core::core::hash::{Hash, Hashed};
|
||||
use crate::core::core::transaction::Transaction;
|
||||
use crate::core::core::verifier_cache::VerifierCache;
|
||||
|
@ -37,7 +35,6 @@ use crate::core::{core, global};
|
|||
use crate::p2p;
|
||||
use crate::p2p::types::PeerInfo;
|
||||
use crate::pool;
|
||||
use crate::pool::types::DandelionConfig;
|
||||
use crate::util::OneTime;
|
||||
use chrono::prelude::*;
|
||||
use chrono::Duration;
|
||||
|
@ -97,10 +94,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
return Ok(true);
|
||||
}
|
||||
|
||||
let source = pool::TxSource {
|
||||
debug_name: "p2p".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
let source = pool::TxSource::Broadcast;
|
||||
|
||||
let header = self.chain().head_header()?;
|
||||
|
||||
|
@ -242,11 +236,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
bh: core::BlockHeader,
|
||||
peer_info: &PeerInfo,
|
||||
) -> Result<bool, chain::Error> {
|
||||
let bhash = bh.hash();
|
||||
debug!(
|
||||
"Received block header {} at {} from {}, going to process.",
|
||||
bhash, bh.height, peer_info.addr,
|
||||
);
|
||||
if !self.sync_state.is_syncing() {
|
||||
for hook in &self.hooks {
|
||||
hook.on_header_received(&bh, &peer_info.addr);
|
||||
}
|
||||
}
|
||||
|
||||
// pushing the new block header through the header chain pipeline
|
||||
// we will go ask for the block if this is a new header
|
||||
|
@ -255,7 +249,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
.process_block_header(&bh, self.chain_opts(false));
|
||||
|
||||
if let Err(e) = res {
|
||||
debug!("Block header {} refused by chain: {:?}", bhash, e.kind());
|
||||
debug!(
|
||||
"Block header {} refused by chain: {:?}",
|
||||
bh.hash(),
|
||||
e.kind()
|
||||
);
|
||||
if e.is_bad_data() {
|
||||
return Ok(false);
|
||||
} else {
|
||||
|
@ -372,6 +370,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
}
|
||||
}
|
||||
|
||||
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
|
||||
self.chain().txhashset_archive_header()
|
||||
}
|
||||
|
||||
fn txhashset_receive_ready(&self) -> bool {
|
||||
match self.sync_state.status() {
|
||||
SyncStatus::TxHashsetDownload { .. } => true,
|
||||
|
@ -428,7 +430,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
error!("Failed to save txhashset archive: {}", e);
|
||||
|
||||
let is_good_data = !e.is_bad_data();
|
||||
self.sync_state.set_sync_error(types::Error::Chain(e));
|
||||
self.sync_state.set_sync_error(e);
|
||||
Ok(is_good_data)
|
||||
} else {
|
||||
info!("Received valid txhashset data for {}.", h);
|
||||
|
@ -804,11 +806,11 @@ impl DandelionAdapter for PoolToNetAdapter {
|
|||
}
|
||||
|
||||
impl pool::PoolAdapter for PoolToNetAdapter {
|
||||
fn tx_accepted(&self, tx: &core::Transaction) {
|
||||
self.peers().broadcast_transaction(tx);
|
||||
fn tx_accepted(&self, entry: &pool::PoolEntry) {
|
||||
self.peers().broadcast_transaction(&entry.tx);
|
||||
}
|
||||
|
||||
fn stem_tx_accepted(&self, tx: &core::Transaction) -> Result<(), pool::PoolError> {
|
||||
fn stem_tx_accepted(&self, entry: &pool::PoolEntry) -> Result<(), pool::PoolError> {
|
||||
// Take write lock on the current epoch.
|
||||
// We need to be able to update the current relay peer if not currently connected.
|
||||
let mut epoch = self.dandelion_epoch.write();
|
||||
|
@ -816,9 +818,10 @@ impl pool::PoolAdapter for PoolToNetAdapter {
|
|||
// If "stem" epoch attempt to relay the tx to the next Dandelion relay.
|
||||
// Fallback to immediately fluffing the tx if we cannot stem for any reason.
|
||||
// If "fluff" epoch then nothing to do right now (fluff via Dandelion monitor).
|
||||
if epoch.is_stem() {
|
||||
// If node is configured to always stem our (pushed via api) txs then do so.
|
||||
if epoch.is_stem() || (entry.src.is_pushed() && epoch.always_stem_our_txs()) {
|
||||
if let Some(peer) = epoch.relay_peer(&self.peers()) {
|
||||
match peer.send_stem_transaction(tx) {
|
||||
match peer.send_stem_transaction(&entry.tx) {
|
||||
Ok(_) => {
|
||||
info!("Stemming this epoch, relaying to next peer.");
|
||||
Ok(())
|
||||
|
@ -841,7 +844,7 @@ impl pool::PoolAdapter for PoolToNetAdapter {
|
|||
|
||||
impl PoolToNetAdapter {
|
||||
/// Create a new pool to net adapter
|
||||
pub fn new(config: DandelionConfig) -> PoolToNetAdapter {
|
||||
pub fn new(config: pool::DandelionConfig) -> PoolToNetAdapter {
|
||||
PoolToNetAdapter {
|
||||
peers: OneTime::new(),
|
||||
dandelion_epoch: Arc::new(RwLock::new(DandelionEpoch::new(config))),
|
||||
|
|
|
@ -21,11 +21,12 @@ use std::time::SystemTime;
|
|||
|
||||
use crate::core::consensus::graph_weight;
|
||||
use crate::core::core::hash::Hash;
|
||||
use crate::core::ser::ProtocolVersion;
|
||||
|
||||
use chrono::prelude::*;
|
||||
|
||||
use crate::chain;
|
||||
use crate::common::types::SyncStatus;
|
||||
use crate::chain::SyncStatus;
|
||||
use crate::p2p;
|
||||
|
||||
/// Server state info collection struct, to be passed around into internals
|
||||
|
@ -147,7 +148,7 @@ pub struct PeerStats {
|
|||
/// Address
|
||||
pub addr: String,
|
||||
/// version running
|
||||
pub version: p2p::msg::ProtocolVersion,
|
||||
pub version: ProtocolVersion,
|
||||
/// Peer user agent string.
|
||||
pub user_agent: String,
|
||||
/// difficulty reported by peer
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
use std::convert::From;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::prelude::{DateTime, Utc};
|
||||
use chrono::prelude::Utc;
|
||||
use rand::prelude::*;
|
||||
|
||||
use crate::api;
|
||||
|
@ -28,7 +28,6 @@ use crate::p2p;
|
|||
use crate::pool;
|
||||
use crate::pool::types::DandelionConfig;
|
||||
use crate::store;
|
||||
use crate::util::RwLock;
|
||||
|
||||
/// Error type wrapping underlying module errors.
|
||||
#[derive(Debug)]
|
||||
|
@ -301,171 +300,6 @@ impl Default for WebHooksConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Various status sync can be in, whether it's fast sync or archival.
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
|
||||
#[allow(missing_docs)]
|
||||
pub enum SyncStatus {
|
||||
/// Initial State (we do not yet know if we are/should be syncing)
|
||||
Initial,
|
||||
/// Not syncing
|
||||
NoSync,
|
||||
/// Not enough peers to do anything yet, boolean indicates whether
|
||||
/// we should wait at all or ignore and start ASAP
|
||||
AwaitingPeers(bool),
|
||||
/// Downloading block headers
|
||||
HeaderSync {
|
||||
current_height: u64,
|
||||
highest_height: u64,
|
||||
},
|
||||
/// Downloading the various txhashsets
|
||||
TxHashsetDownload {
|
||||
start_time: DateTime<Utc>,
|
||||
prev_update_time: DateTime<Utc>,
|
||||
update_time: DateTime<Utc>,
|
||||
prev_downloaded_size: u64,
|
||||
downloaded_size: u64,
|
||||
total_size: u64,
|
||||
},
|
||||
/// Setting up before validation
|
||||
TxHashsetSetup,
|
||||
/// Validating the full state
|
||||
TxHashsetValidation {
|
||||
kernels: u64,
|
||||
kernel_total: u64,
|
||||
rproofs: u64,
|
||||
rproof_total: u64,
|
||||
},
|
||||
/// Finalizing the new state
|
||||
TxHashsetSave,
|
||||
/// State sync finalized
|
||||
TxHashsetDone,
|
||||
/// Downloading blocks
|
||||
BodySync {
|
||||
current_height: u64,
|
||||
highest_height: u64,
|
||||
},
|
||||
Shutdown,
|
||||
}
|
||||
|
||||
/// Current sync state. Encapsulates the current SyncStatus.
|
||||
pub struct SyncState {
|
||||
current: RwLock<SyncStatus>,
|
||||
sync_error: Arc<RwLock<Option<Error>>>,
|
||||
}
|
||||
|
||||
impl SyncState {
|
||||
/// Return a new SyncState initialize to NoSync
|
||||
pub fn new() -> SyncState {
|
||||
SyncState {
|
||||
current: RwLock::new(SyncStatus::Initial),
|
||||
sync_error: Arc::new(RwLock::new(None)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the current state matches any active syncing operation.
|
||||
/// Note: This includes our "initial" state.
|
||||
pub fn is_syncing(&self) -> bool {
|
||||
*self.current.read() != SyncStatus::NoSync
|
||||
}
|
||||
|
||||
/// Current syncing status
|
||||
pub fn status(&self) -> SyncStatus {
|
||||
*self.current.read()
|
||||
}
|
||||
|
||||
/// Update the syncing status
|
||||
pub fn update(&self, new_status: SyncStatus) {
|
||||
if self.status() == new_status {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut status = self.current.write();
|
||||
|
||||
debug!("sync_state: sync_status: {:?} -> {:?}", *status, new_status,);
|
||||
|
||||
*status = new_status;
|
||||
}
|
||||
|
||||
/// Update txhashset downloading progress
|
||||
pub fn update_txhashset_download(&self, new_status: SyncStatus) -> bool {
|
||||
if let SyncStatus::TxHashsetDownload { .. } = new_status {
|
||||
let mut status = self.current.write();
|
||||
*status = new_status;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Communicate sync error
|
||||
pub fn set_sync_error(&self, error: Error) {
|
||||
*self.sync_error.write() = Some(error);
|
||||
}
|
||||
|
||||
/// Get sync error
|
||||
pub fn sync_error(&self) -> Arc<RwLock<Option<Error>>> {
|
||||
Arc::clone(&self.sync_error)
|
||||
}
|
||||
|
||||
/// Clear sync error
|
||||
pub fn clear_sync_error(&self) {
|
||||
*self.sync_error.write() = None;
|
||||
}
|
||||
}
|
||||
|
||||
impl chain::TxHashsetWriteStatus for SyncState {
|
||||
fn on_setup(&self) {
|
||||
self.update(SyncStatus::TxHashsetSetup);
|
||||
}
|
||||
|
||||
fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
|
||||
let mut status = self.current.write();
|
||||
match *status {
|
||||
SyncStatus::TxHashsetValidation {
|
||||
kernels,
|
||||
kernel_total,
|
||||
rproofs,
|
||||
rproof_total,
|
||||
} => {
|
||||
let ks = if vkernels > 0 { vkernels } else { kernels };
|
||||
let kt = if vkernel_total > 0 {
|
||||
vkernel_total
|
||||
} else {
|
||||
kernel_total
|
||||
};
|
||||
let rps = if vrproofs > 0 { vrproofs } else { rproofs };
|
||||
let rpt = if vrproof_total > 0 {
|
||||
vrproof_total
|
||||
} else {
|
||||
rproof_total
|
||||
};
|
||||
*status = SyncStatus::TxHashsetValidation {
|
||||
kernels: ks,
|
||||
kernel_total: kt,
|
||||
rproofs: rps,
|
||||
rproof_total: rpt,
|
||||
};
|
||||
}
|
||||
_ => {
|
||||
*status = SyncStatus::TxHashsetValidation {
|
||||
kernels: 0,
|
||||
kernel_total: 0,
|
||||
rproofs: 0,
|
||||
rproof_total: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn on_save(&self) {
|
||||
self.update(SyncStatus::TxHashsetSave);
|
||||
}
|
||||
|
||||
fn on_done(&self) {
|
||||
self.update(SyncStatus::TxHashsetDone);
|
||||
}
|
||||
}
|
||||
|
||||
/// A node is either "stem" or "fluff" for the duration of a single epoch.
|
||||
/// A node also maintains an outbound relay peer for the epoch.
|
||||
#[derive(Debug)]
|
||||
|
@ -496,8 +330,8 @@ impl DandelionEpoch {
|
|||
match self.start_time {
|
||||
None => true,
|
||||
Some(start_time) => {
|
||||
let epoch_secs = self.config.epoch_secs.expect("epoch_secs config missing") as i64;
|
||||
Utc::now().timestamp().saturating_sub(start_time) > epoch_secs
|
||||
let epoch_secs = self.config.epoch_secs;
|
||||
Utc::now().timestamp().saturating_sub(start_time) > epoch_secs as i64
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -511,10 +345,7 @@ impl DandelionEpoch {
|
|||
|
||||
// If stem_probability == 90 then we stem 90% of the time.
|
||||
let mut rng = rand::thread_rng();
|
||||
let stem_probability = self
|
||||
.config
|
||||
.stem_probability
|
||||
.expect("stem_probability config missing");
|
||||
let stem_probability = self.config.stem_probability;
|
||||
self.is_stem = rng.gen_range(0, 100) < stem_probability;
|
||||
|
||||
let addr = self.relay_peer.clone().map(|p| p.info.addr);
|
||||
|
@ -529,6 +360,11 @@ impl DandelionEpoch {
|
|||
self.is_stem
|
||||
}
|
||||
|
||||
/// Always stem our (pushed via api) txs regardless of stem/fluff epoch?
|
||||
pub fn always_stem_our_txs(&self) -> bool {
|
||||
self.config.always_stem_our_txs
|
||||
}
|
||||
|
||||
/// What is our current relay peer?
|
||||
/// If it is not connected then choose a new one.
|
||||
pub fn relay_peer(&mut self, peers: &Arc<p2p::Peers>) -> Option<Arc<p2p::Peer>> {
|
||||
|
|
|
@ -113,9 +113,7 @@ fn process_fluff_phase(
|
|||
return Ok(());
|
||||
}
|
||||
|
||||
let cutoff_secs = dandelion_config
|
||||
.aggregation_secs
|
||||
.expect("aggregation secs config missing");
|
||||
let cutoff_secs = dandelion_config.aggregation_secs;
|
||||
let cutoff_entries = select_txs_cutoff(&tx_pool.stempool, cutoff_secs);
|
||||
|
||||
// If epoch is expired, fluff *all* outstanding entries in stempool.
|
||||
|
@ -149,12 +147,7 @@ fn process_fluff_phase(
|
|||
verifier_cache.clone(),
|
||||
)?;
|
||||
|
||||
let src = TxSource {
|
||||
debug_name: "fluff".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
|
||||
tx_pool.add_to_pool(src, agg_tx, false, &header)?;
|
||||
tx_pool.add_to_pool(TxSource::Fluff, agg_tx, false, &header)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@@ -165,10 +158,7 @@ fn process_expired_entries(
// Take a write lock on the txpool for the duration of this processing.
let mut tx_pool = tx_pool.write();

let embargo_secs = dandelion_config
.embargo_secs
.expect("embargo_secs config missing")
+ thread_rng().gen_range(0, 31);
let embargo_secs = dandelion_config.embargo_secs + thread_rng().gen_range(0, 31);
let expired_entries = select_txs_cutoff(&tx_pool.stempool, embargo_secs);

if expired_entries.is_empty() {

@@ -179,14 +169,9 @@ fn process_expired_entries(

let header = tx_pool.chain_head()?;

let src = TxSource {
debug_name: "embargo_expired".to_string(),
identifier: "?.?.?.?".to_string(),
};

for entry in expired_entries {
let txhash = entry.tx.hash();
match tx_pool.add_to_pool(src.clone(), entry.tx, false, &header) {
match tx_pool.add_to_pool(TxSource::EmbargoExpired, entry.tx, false, &header) {
Ok(_) => info!(
"dand_mon: embargo expired for {}, fluffed successfully.",
txhash

@@ -30,15 +30,16 @@ use fs2::FileExt;
use crate::api;
use crate::api::TLSConfig;
use crate::chain;
use crate::chain::{self, SyncState, SyncStatus};
use crate::common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use crate::common::hooks::{init_chain_hooks, init_net_hooks};
use crate::common::stats::{DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats};
use crate::common::types::{Error, ServerConfig, StratumServerConfig, SyncState, SyncStatus};
use crate::common::types::{Error, ServerConfig, StratumServerConfig};
use crate::core::core::hash::{Hashed, ZERO_HASH};
use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use crate::core::ser::ProtocolVersion;
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
use crate::mining::stratumserver;

@@ -412,9 +413,9 @@ impl Server {
self.chain.header_head().map_err(|e| e.into())
}

/// Current p2p layer protocol version.
pub fn protocol_version() -> p2p::msg::ProtocolVersion {
p2p::msg::ProtocolVersion::default()
/// The p2p layer protocol version for this node.
pub fn protocol_version() -> ProtocolVersion {
ProtocolVersion::local()
}

/// Returns a set of stats about this server. This and the ServerStats
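Example (not part of the diff): `protocol_version()` now returns the core serialization `ProtocolVersion` via a `local()` constructor rather than a p2p message default. A minimal sketch of such a newtype; the concrete value returned by `local()` is an assumption (the diff only shows `ProtocolVersion(1)` used elsewhere for on-disk files):

    // Illustrative newtype; only `ProtocolVersion(1)` and
    // `ProtocolVersion::local()` appear in this diff.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub struct ProtocolVersion(pub u32);

    impl ProtocolVersion {
        /// The protocol version this node speaks by default (value assumed).
        pub fn local() -> ProtocolVersion {
            ProtocolVersion(1)
        }
    }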
@@ -17,8 +17,7 @@ use chrono::Duration;
use std::cmp;
use std::sync::Arc;

use crate::chain;
use crate::common::types::{SyncState, SyncStatus};
use crate::chain::{self, SyncState, SyncStatus};
use crate::core::core::hash::Hash;
use crate::p2p;

@@ -16,8 +16,8 @@ use chrono::prelude::{DateTime, Utc};
use chrono::Duration;
use std::sync::Arc;

use crate::chain;
use crate::common::types::{Error, SyncState, SyncStatus};
use crate::chain::{self, SyncState, SyncStatus};
use crate::common::types::Error;
use crate::core::core::hash::{Hash, Hashed};
use crate::p2p::{self, types::ReasonForBan, Peer};

@@ -16,8 +16,7 @@ use chrono::prelude::{DateTime, Utc};
use chrono::Duration;
use std::sync::Arc;

use crate::chain;
use crate::common::types::{Error, SyncState, SyncStatus};
use crate::chain::{self, SyncState, SyncStatus};
use crate::core::core::hash::Hashed;
use crate::core::global;
use crate::p2p::{self, Peer};

@@ -119,8 +118,9 @@ impl StateSync {
if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() {
if download_timeout {
error!("state_sync: TxHashsetDownload status timeout in 10 minutes!");
self.sync_state
.set_sync_error(Error::P2P(p2p::Error::Timeout));
self.sync_state.set_sync_error(
chain::ErrorKind::SyncError(format!("{:?}", p2p::Error::Timeout)).into(),
);
}
}

@@ -130,7 +130,9 @@ impl StateSync {
Ok(peer) => {
self.state_sync_peer = Some(peer);
}
Err(e) => self.sync_state.set_sync_error(Error::P2P(e)),
Err(e) => self
.sync_state
.set_sync_error(chain::ErrorKind::SyncError(format!("{:?}", e)).into()),
}

// to avoid the confusing log,

@@ -160,6 +162,9 @@ impl StateSync {

fn request_state(&self, header_head: &chain::Tip) -> Result<Arc<Peer>, p2p::Error> {
let threshold = global::state_sync_threshold() as u64;
let archive_interval = global::txhashset_archive_interval();
let mut txhashset_height = header_head.height.saturating_sub(threshold);
txhashset_height = txhashset_height.saturating_sub(txhashset_height % archive_interval);

if let Some(peer) = self.peers.most_work_peer() {
// ask for txhashset at state_sync_threshold
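Example (not part of the diff): the two lines computing `txhashset_height` back off from the header head by the sync threshold and then round down to a multiple of the archive interval. A small, self-contained sketch with illustrative numbers (the threshold and interval values below are examples, not grin's consensus parameters):

    // Mirrors the two lines above; numbers in the assert are illustrative.
    fn requested_txhashset_height(header_height: u64, threshold: u64, archive_interval: u64) -> u64 {
        let h = header_height.saturating_sub(threshold);
        // Round down to a multiple of the archive interval.
        h.saturating_sub(h % archive_interval)
    }

    fn main() {
        // 10_000 - 90 = 9_910; 9_910 % 720 = 550; 9_910 - 550 = 9_360.
        assert_eq!(requested_txhashset_height(10_000, 90, 720), 9_360);
    }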
@@ -173,7 +178,7 @@
);
p2p::Error::Internal
})?;
for _ in 0..threshold {
while txhashset_head.height > txhashset_height {
txhashset_head = self
.chain
.get_previous_header(&txhashset_head)

@@ -16,8 +16,7 @@ use std::sync::Arc;
use std::thread;
use std::time;

use crate::chain;
use crate::common::types::{SyncState, SyncStatus};
use crate::chain::{self, SyncState, SyncStatus};
use crate::core::global;
use crate::core::pow::Difficulty;
use crate::grin::sync::body_sync::BodySync;

@@ -32,9 +32,9 @@ use std::sync::Arc;
use std::time::{Duration, SystemTime};
use std::{cmp, thread};

use crate::chain;
use crate::chain::{self, SyncState};
use crate::common::stats::{StratumStats, WorkerStats};
use crate::common::types::{StratumServerConfig, SyncState};
use crate::common::types::StratumServerConfig;
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::Block;

@@ -331,7 +331,7 @@ impl Handler {
// Serialize the block header into pre and post nonce strings
let mut header_buf = vec![];
{
let mut writer = ser::BinWriter::new(&mut header_buf);
let mut writer = ser::BinWriter::default(&mut header_buf);
bh.write_pre_pow(&mut writer).unwrap();
bh.pow.write_pre_pow(&mut writer).unwrap();
}
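Example (not part of the diff): here `BinWriter::new` is replaced by a `default` constructor, while the storage changes later in this diff call `BinWriter::new(&mut buf_writer, self.version)`. A sketch of the constructor pair those call sites imply, assuming `default` simply picks the node's local protocol version (the struct layout is illustrative, not copied from grin's `ser` module):

    // Assumed shape implied by the call sites in this diff.
    use std::io::Write;

    pub struct BinWriter<'a> {
        sink: &'a mut dyn Write,
        protocol_version: ProtocolVersion,
    }

    impl<'a> BinWriter<'a> {
        pub fn new(sink: &'a mut dyn Write, protocol_version: ProtocolVersion) -> BinWriter<'a> {
            BinWriter { sink, protocol_version }
        }

        /// Convenience constructor using the node's local protocol version.
        pub fn default(sink: &'a mut dyn Write) -> BinWriter<'a> {
            BinWriter::new(sink, ProtocolVersion::local())
        }
    }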
@@ -24,6 +24,7 @@ use crate::core::global;
use crate::util::init_logger;
use clap::App;
use grin_api as api;
use grin_chain as chain;
use grin_config as config;
use grin_core as core;
use grin_p2p as p2p;

@@ -24,7 +24,7 @@ use cursive::Cursive;
use crate::tui::constants::VIEW_BASIC_STATUS;
use crate::tui::types::TUIStatusListener;

use crate::servers::common::types::SyncStatus;
use crate::chain::SyncStatus;
use crate::servers::ServerStats;

const NANO_TO_MILLIS: f64 = 1.0 / 1_000_000.0;

@@ -22,7 +22,7 @@ use lmdb_zero as lmdb;
use lmdb_zero::traits::CreateCursor;
use lmdb_zero::LmdbResultExt;

use crate::core::ser;
use crate::core::ser::{self, ProtocolVersion};
use crate::util::{RwLock, RwLockReadGuard};

/// number of bytes to grow the database by when needed

@@ -68,6 +68,7 @@ pub struct Store {
env: Arc<lmdb::Environment>,
db: RwLock<Option<Arc<lmdb::Database<'static>>>>,
name: String,
version: ProtocolVersion,
}

impl Store {

@@ -111,6 +112,7 @@ impl Store {
env: Arc::new(env),
db: RwLock::new(None),
name: db_name,
version: ProtocolVersion(1),
};

{

@@ -230,7 +232,7 @@ impl Store {
) -> Result<Option<T>, Error> {
let res: lmdb::error::Result<&[u8]> = access.get(&db.as_ref().unwrap(), key);
match res.to_opt() {
Ok(Some(mut res)) => match ser::deserialize(&mut res) {
Ok(Some(mut res)) => match ser::deserialize(&mut res, self.version) {
Ok(res) => Ok(Some(res)),
Err(e) => Err(Error::SerErr(format!("{}", e))),
},

@@ -259,6 +261,7 @@ impl Store {
cursor,
seek: false,
prefix: from.to_vec(),
version: self.version,
_marker: marker::PhantomData,
})
}

@@ -296,7 +299,7 @@ impl<'a> Batch<'a> {
/// Writes a single key and its `Writeable` value to the db. Encapsulates
/// serialization.
pub fn put_ser<W: ser::Writeable>(&self, key: &[u8], value: &W) -> Result<(), Error> {
let ser_value = ser::ser_vec(value);
let ser_value = ser::ser_vec(value, self.store.version);
match ser_value {
Ok(data) => self.put(key, &data),
Err(err) => Err(Error::SerErr(format!("{}", err))),
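Example (not part of the diff): the store now threads a `ProtocolVersion` through `ser_vec` on write and `deserialize` on read. A minimal sketch of that round trip against the signatures shown in this diff; the helper names and the `ser::Error` error type name are assumptions, and the `Writeable`/`Readable` traits are grin's:

    // Sketch only: mirrors the `put_ser`/`get_ser` changes above.
    fn put_versioned<W: ser::Writeable>(
        value: &W,
        version: ProtocolVersion,
    ) -> Result<Vec<u8>, ser::Error> {
        // Serialize with the store's pinned protocol version.
        ser::ser_vec(value, version)
    }

    fn get_versioned<T: ser::Readable>(
        mut bytes: &[u8],
        version: ProtocolVersion,
    ) -> Result<T, ser::Error> {
        // Deserialize with the same version the bytes were written with.
        ser::deserialize(&mut bytes, version)
    }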
@@ -360,6 +363,7 @@ where
cursor: Arc<lmdb::Cursor<'static, 'static>>,
seek: bool,
prefix: Vec<u8>,
version: ProtocolVersion,
_marker: marker::PhantomData<T>,
}

@@ -393,7 +397,7 @@ where
fn deser_if_prefix_match(&self, key: &[u8], value: &[u8]) -> Option<(Vec<u8>, T)> {
let plen = self.prefix.len();
if plen == 0 || key[0..plen] == self.prefix[..] {
if let Ok(value) = ser::deserialize(&mut &value[..]) {
if let Ok(value) = ser::deserialize(&mut &value[..], self.version) {
Some((key.to_vec(), value))
} else {
None

@@ -19,7 +19,7 @@ use std::{io, time};
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::pmmr::{self, family, Backend};
use crate::core::core::BlockHeader;
use crate::core::ser::{FixedLength, PMMRable};
use crate::core::ser::{FixedLength, PMMRable, ProtocolVersion};
use crate::leaf_set::LeafSet;
use crate::prune_list::PruneList;
use crate::types::{AppendOnlyFile, DataFile, SizeEntry, SizeInfo};

@@ -206,6 +206,11 @@ impl<T: PMMRable> PMMRBackend<T> {
fixed_size: bool,
header: Option<&BlockHeader>,
) -> io::Result<PMMRBackend<T>> {
// Note: Explicit protocol version here.
// Regardless of our "default" protocol version we have existing MMR files
// and we need to be able to support these across upgrades.
let version = ProtocolVersion(1);

let data_dir = data_dir.as_ref();

// Are we dealing with "fixed size" data elements or "variable size" data elements
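Example (not part of the diff): the comment above draws a line between the version used for files already on disk and whatever the node speaks by default. A tiny sketch of that distinction; the helper names and the value behind `local()` are assumptions, as noted earlier:

    // Illustrative: on-disk MMR files keep a pinned version so old files
    // remain readable across upgrades, while peer-facing serialization
    // may move on independently.
    fn file_version() -> ProtocolVersion {
        // Pinned, stable across upgrades.
        ProtocolVersion(1)
    }

    fn wire_version() -> ProtocolVersion {
        // Whatever this build of the node speaks by default.
        ProtocolVersion::local()
    }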
@@ -216,14 +221,15 @@ impl<T: PMMRable> PMMRBackend<T> {
SizeInfo::VariableSize(Box::new(AppendOnlyFile::open(
data_dir.join(PMMR_SIZE_FILE),
SizeInfo::FixedSize(SizeEntry::LEN as u16),
version,
)?))
};

// Hash file is always "fixed size" and we use 32 bytes per hash.
let hash_size_info = SizeInfo::FixedSize(Hash::LEN as u16);

let hash_file = DataFile::open(&data_dir.join(PMMR_HASH_FILE), hash_size_info)?;
let data_file = DataFile::open(&data_dir.join(PMMR_DATA_FILE), size_info)?;
let hash_file = DataFile::open(&data_dir.join(PMMR_HASH_FILE), hash_size_info, version)?;
let data_file = DataFile::open(&data_dir.join(PMMR_DATA_FILE), size_info, version)?;

let leaf_set_path = data_dir.join(PMMR_LEAF_FILE);

@@ -16,14 +16,14 @@ use memmap;
use tempfile::tempfile;

use crate::core::ser::{
self, BinWriter, FixedLength, Readable, Reader, StreamingReader, Writeable, Writer,
self, BinWriter, FixedLength, ProtocolVersion, Readable, Reader, StreamingReader, Writeable,
Writer,
};
use std::fmt::Debug;
use std::fs::{self, File, OpenOptions};
use std::io::{self, BufReader, BufWriter, Seek, SeekFrom, Write};
use std::marker;
use std::path::{Path, PathBuf};
use std::time;

/// Represents a single entry in the size_file.
/// Offset (in bytes) and size (in bytes) of a variable sized entry

@@ -78,12 +78,16 @@ where
T: Readable + Writeable + Debug,
{
/// Open (or create) a file at the provided path on disk.
pub fn open<P>(path: P, size_info: SizeInfo) -> io::Result<DataFile<T>>
pub fn open<P>(
path: P,
size_info: SizeInfo,
version: ProtocolVersion,
) -> io::Result<DataFile<T>>
where
P: AsRef<Path> + Debug,
{
Ok(DataFile {
file: AppendOnlyFile::open(path, size_info)?,
file: AppendOnlyFile::open(path, size_info, version)?,
})
}

@@ -177,6 +181,7 @@ pub struct AppendOnlyFile<T> {
path: PathBuf,
file: Option<File>,
size_info: SizeInfo,
version: ProtocolVersion,
mmap: Option<memmap::Mmap>,

// Buffer of unsync'd bytes. These bytes will be appended to the file when flushed.

@@ -191,7 +196,11 @@ where
T: Debug + Readable + Writeable,
{
/// Open a file (existing or not) as append-only, backed by a mmap.
pub fn open<P>(path: P, size_info: SizeInfo) -> io::Result<AppendOnlyFile<T>>
pub fn open<P>(
path: P,
size_info: SizeInfo,
version: ProtocolVersion,
) -> io::Result<AppendOnlyFile<T>>
where
P: AsRef<Path> + Debug,
{

@@ -199,6 +208,7 @@ where
file: None,
path: path.as_ref().to_path_buf(),
size_info,
version,
mmap: None,
buffer: vec![],
buffer_start_pos: 0,
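Example (not part of the diff): a small usage sketch of the new three-argument `open`, mirroring how `PMMRBackend::new` calls it above. `DataFile`, `SizeInfo`, `Hash`, `ProtocolVersion` and `PMMR_HASH_FILE` are the names appearing in this diff; the wrapping function itself is illustrative:

    use std::io;
    use std::path::Path;

    // Illustrative helper: open the hash file of an MMR backend with the
    // explicit, pinned on-disk protocol version.
    fn open_hash_file(data_dir: &Path) -> io::Result<DataFile<Hash>> {
        let version = ProtocolVersion(1);
        let size_info = SizeInfo::FixedSize(Hash::LEN as u16);
        DataFile::open(&data_dir.join(PMMR_HASH_FILE), size_info, version)
    }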
@@ -268,7 +278,8 @@ where

/// Append element to append-only file by serializing it to bytes and appending the bytes.
fn append_elmt(&mut self, data: &T) -> io::Result<()> {
let mut bytes = ser::ser_vec(data).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let mut bytes = ser::ser_vec(data, self.version)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
self.append(&mut bytes)?;
Ok(())
}

@@ -415,7 +426,8 @@ where

fn read_as_elmt(&self, pos: u64) -> io::Result<T> {
let data = self.read(pos)?;
ser::deserialize(&mut &data[..]).map_err(|e| io::Error::new(io::ErrorKind::Other, e))
ser::deserialize(&mut &data[..], self.version)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}

// Read length bytes starting at offset from the buffer.

@@ -470,11 +482,10 @@ where
{
let reader = File::open(&self.path)?;
let mut buf_reader = BufReader::new(reader);
let mut streaming_reader =
StreamingReader::new(&mut buf_reader, time::Duration::from_secs(1));
let mut streaming_reader = StreamingReader::new(&mut buf_reader, self.version);

let mut buf_writer = BufWriter::new(File::create(&tmp_path)?);
let mut bin_writer = BinWriter::new(&mut buf_writer);
let mut bin_writer = BinWriter::new(&mut buf_writer, self.version);

let mut current_pos = 0;
let mut prune_pos = prune_pos;

@@ -517,11 +528,10 @@ where
{
let reader = File::open(&self.path)?;
let mut buf_reader = BufReader::new(reader);
let mut streaming_reader =
StreamingReader::new(&mut buf_reader, time::Duration::from_secs(1));
let mut streaming_reader = StreamingReader::new(&mut buf_reader, self.version);

let mut buf_writer = BufWriter::new(File::create(&tmp_path)?);
let mut bin_writer = BinWriter::new(&mut buf_writer);
let mut bin_writer = BinWriter::new(&mut buf_writer, self.version);

let mut current_offset = 0;
while let Ok(_) = T::read(&mut streaming_reader) {

@@ -20,7 +20,7 @@ serde_derive = "1"
log4rs = { version = "0.8.1", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] }
log = "0.4"
walkdir = "2"
zip = { version = "0.4", default-features = false }
zip = { version = "0.5", default-features = false }
parking_lot = {version = "0.6"}
zeroize = "0.9"

@@ -44,18 +44,15 @@ pub fn copy_dir_to(src: &Path, dst: &Path) -> io::Result<u64> {
}

/// List directory
pub fn list_files(path: String) -> Vec<String> {
let mut files_vec: Vec<String> = vec![];
for entry in WalkDir::new(Path::new(&path))
pub fn list_files(path: &Path) -> Vec<PathBuf> {
WalkDir::new(path)
.sort_by(|a, b| a.path().cmp(b.path()))
.min_depth(1)
.into_iter()
.filter_map(|e| e.ok())
{
match entry.file_name().to_str() {
Some(path_str) => files_vec.push(path_str.to_string()),
None => println!("Could not read optional type"),
}
}
return files_vec;
.filter_map(|x| x.ok())
.filter(|x| x.file_type().is_file())
.filter_map(|x| x.path().strip_prefix(path).map(|x| x.to_path_buf()).ok())
.collect()
}

fn copy_to(src: &Path, src_type: &fs::FileType, dst: &Path) -> io::Result<u64> {
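Example (not part of the diff): `list_files` now takes a `&Path` and returns sorted paths of regular files, relative to that root. A small usage sketch (the directory name is illustrative):

    use std::path::Path;

    fn example() {
        let root = Path::new("target/tmp/some_dir");
        // Relative, sorted paths of the regular files under `root`,
        // e.g. "foo.txt", "sub/lorem.txt".
        let files = list_files(root);
        for f in &files {
            println!("{}", f.display());
        }
    }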
@@ -47,9 +47,6 @@ pub use crate::types::{LogLevel, LoggingConfig, ZeroingString};

pub mod macros;

// read_exact and write_all impls
pub mod read_write;

// other utils
#[allow(unused_imports)]
use std::ops::Deref;

@@ -1,110 +0,0 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Custom impls of read_exact and write_all to work around async stream restrictions.

use std::io;
use std::io::prelude::*;
use std::thread;
use std::time::Duration;

/// The default implementation of read_exact is useless with an async stream (TcpStream) as
/// it will return as soon as something has been read, regardless of
/// whether the buffer has been filled (and then errors). This implementation
/// will block until it has read exactly `len` bytes and returns them as a
/// `vec<u8>`. Except for a timeout, this implementation will never return a
/// partially filled buffer.
///
/// The timeout in milliseconds aborts the read when it's met. Note that the
/// time is not guaranteed to be exact. To support cases where we want to poll
/// instead of blocking, a `block_on_empty` boolean, when false, ensures
/// `read_exact` returns early with a `io::ErrorKind::WouldBlock` if nothing
/// has been read from the socket.
pub fn read_exact(
stream: &mut dyn Read,
mut buf: &mut [u8],
timeout: Duration,
block_on_empty: bool,
) -> io::Result<()> {
let sleep_time = Duration::from_micros(10);
let mut count = Duration::new(0, 0);

let mut read = 0;
loop {
match stream.read(buf) {
Ok(0) => {
return Err(io::Error::new(
io::ErrorKind::ConnectionAborted,
"read_exact",
));
}
Ok(n) => {
let tmp = buf;
buf = &mut tmp[n..];
read += n;
}
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
if read == 0 && !block_on_empty {
return Err(io::Error::new(io::ErrorKind::WouldBlock, "read_exact"));
}
}
Err(e) => return Err(e),
}
if !buf.is_empty() {
thread::sleep(sleep_time);
count += sleep_time;
} else {
break;
}
if count > timeout {
return Err(io::Error::new(
io::ErrorKind::TimedOut,
"reading from stream",
));
}
}
Ok(())
}

/// Same as `read_exact` but for writing.
pub fn write_all(stream: &mut dyn Write, mut buf: &[u8], timeout: Duration) -> io::Result<()> {
let sleep_time = Duration::from_micros(10);
let mut count = Duration::new(0, 0);

while !buf.is_empty() {
match stream.write(buf) {
Ok(0) => {
return Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write whole buffer",
));
}
Ok(n) => buf = &buf[n..],
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
Err(e) => return Err(e),
}
if !buf.is_empty() {
thread::sleep(sleep_time);
count += sleep_time;
} else {
break;
}
if count > timeout {
return Err(io::Error::new(io::ErrorKind::TimedOut, "writing to stream"));
}
}
Ok(())
}
157
util/src/zip.rs
@@ -12,133 +12,76 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fs::{self, File};
/// Wrappers around the `zip-rs` library to compress and decompress zip archives.
use std::io;
use std::panic;
use std::path::Path;
use walkdir::WalkDir;
use std::fs::{self, File};
use std::io::{self, BufReader, BufWriter, Write};
use std::path::{Path, PathBuf};
use std::thread;

use self::zip_rs::result::{ZipError, ZipResult};
use self::zip_rs::write::FileOptions;
use zip as zip_rs;

/// Compress a source directory recursively into a zip file.
/// Permissions are set to 644 by default to avoid any
/// unwanted execution bits.
pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> {
if !Path::new(src_dir).is_dir() {
return Err(ZipError::Io(io::Error::new(
io::ErrorKind::Other,
"Source must be a directory.",
)));
}
/// Create a zip archive from source dir and list of relative file paths.
/// Permissions are set to 644 by default.
pub fn create_zip(dst_file: &File, src_dir: &Path, files: Vec<PathBuf>) -> io::Result<()> {
let mut writer = {
let zip = zip_rs::ZipWriter::new(dst_file);
BufWriter::new(zip)
};

let options = FileOptions::default()
.compression_method(zip_rs::CompressionMethod::Stored)
.unix_permissions(0o644);

let mut zip = zip_rs::ZipWriter::new(dst_file);
let walkdir = WalkDir::new(src_dir.to_str().unwrap());
let it = walkdir.into_iter();

for dent in it.filter_map(|e| e.ok()) {
let path = dent.path();
let name = path
.strip_prefix(Path::new(src_dir))
.unwrap()
.to_str()
.unwrap();

if path.is_file() {
zip.start_file(name, options)?;
let mut f = File::open(path)?;
io::copy(&mut f, &mut zip)?;
for x in &files {
let file_path = src_dir.join(x);
if let Ok(file) = File::open(file_path.clone()) {
info!("compress: {:?} -> {:?}", file_path, x);
writer.get_mut().start_file_from_path(x, options)?;
io::copy(&mut BufReader::new(file), &mut writer)?;
// Flush the BufWriter after each file so we start then next one correctly.
writer.flush()?;
}
}

zip.finish()?;
writer.get_mut().finish()?;
dst_file.sync_all()?;
Ok(())
}

/// Decompress a source file into the provided destination path.
pub fn decompress<R, F>(src_file: R, dest: &Path, expected: F) -> ZipResult<usize>
where
R: io::Read + io::Seek + panic::UnwindSafe,
F: Fn(&Path) -> bool + panic::UnwindSafe,
{
let mut decompressed = 0;
/// Extract a set of files from the provided zip archive.
pub fn extract_files(from_archive: File, dest: &Path, files: Vec<PathBuf>) -> io::Result<()> {
let dest: PathBuf = PathBuf::from(dest);
let files: Vec<_> = files.iter().cloned().collect();
let res = thread::spawn(move || {
let mut archive = zip_rs::ZipArchive::new(from_archive).expect("archive file exists");
for x in files {
if let Ok(file) = archive.by_name(x.to_str().expect("valid path")) {
let path = dest.join(file.sanitized_name());
let parent_dir = path.parent().expect("valid parent dir");
fs::create_dir_all(&parent_dir).expect("create parent dir");
let outfile = fs::File::create(&path).expect("file created");
io::copy(&mut BufReader::new(file), &mut BufWriter::new(outfile))
.expect("write to file");

// catch the panic to avoid the thread quit
panic::set_hook(Box::new(|panic_info| {
error!(
"panic occurred: {:?}",
panic_info.payload().downcast_ref::<&str>().unwrap()
);
}));
let result = panic::catch_unwind(move || {
let mut archive = zip_rs::ZipArchive::new(src_file)?;
info!("extract_files: {:?} -> {:?}", x, path);

for i in 0..archive.len() {
let mut file = archive.by_index(i)?;
let san_name = file.sanitized_name();
if san_name.to_str().unwrap_or("").replace("\\", "/") != file.name().replace("\\", "/")
|| !expected(&san_name)
{
info!(
"ignoring a suspicious file: {}, got {:?}",
file.name(),
san_name.to_str()
);
continue;
}
let file_path = dest.join(san_name);

if (&*file.name()).ends_with('/') {
fs::create_dir_all(&file_path)?;
} else {
if let Some(p) = file_path.parent() {
if !p.exists() {
fs::create_dir_all(&p)?;
}
}
let res = fs::File::create(&file_path);
let mut outfile = match res {
Err(e) => {
error!("{:?}", e);
return Err(zip::result::ZipError::Io(e));
}
Ok(r) => r,
};
io::copy(&mut file, &mut outfile)?;
decompressed += 1;
}

// Get and Set permissions
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
if let Some(mode) = file.unix_mode() {
fs::set_permissions(
&file_path.to_str().unwrap(),
PermissionsExt::from_mode(mode),
)?;
// Set file permissions to "644" (Unix only).
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mode = PermissionsExt::from_mode(0o644);
fs::set_permissions(&path, mode).expect("set file permissions");
}
}
}
Ok(decompressed)
});
match result {
Ok(res) => match res {
Err(e) => Err(e.into()),
Ok(_) => res,
},
Err(_) => {
error!("panic occurred on zip::decompress!");
Err(zip::result::ZipError::InvalidArchive(
"panic occurred on zip::decompress",
))
}
}
})
.join();

// If join() above is Ok then we successfully extracted the files.
// If the result is Err then we failed to extract the files.
res.map_err(|e| {
error!("failed to extract files from zip: {:?}", e);
io::Error::new(io::ErrorKind::Other, "failed to extract files from zip")
})
}
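Example (not part of the diff): a short usage sketch of the new explicit-file-list API introduced above, using `create_zip` and `extract_files` as defined in this diff; the directory and archive paths are illustrative and mirror the test further down:

    // Illustrative round trip: only the listed relative paths are written
    // to, and later extracted from, the archive.
    use std::fs::File;
    use std::path::{Path, PathBuf};

    fn roundtrip() -> std::io::Result<()> {
        let src_dir = Path::new("target/tmp/to_zip");
        let files = vec![PathBuf::from("foo.txt"), PathBuf::from("sub/lorem.txt")];

        let zip_file = File::create("target/tmp/zipped.zip")?;
        create_zip(&zip_file, src_dir, files.clone())?;
        zip_file.sync_all()?;

        let dest_dir = Path::new("target/tmp/unzipped");
        std::fs::create_dir_all(dest_dir)?;
        extract_files(File::open("target/tmp/zipped.zip")?, dest_dir, files)?;
        Ok(())
    }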
@@ -28,11 +28,9 @@ fn copy_dir() {
let original_path = Path::new("./target/tmp2/original");
let copy_path = Path::new("./target/tmp2/copy");
file::copy_dir_to(original_path, copy_path).unwrap();
let original_files = file::list_files("./target/tmp2/original".to_string());
let copied_files = file::list_files("./target/tmp2/copy".to_string());
for i in 1..5 {
assert_eq!(copied_files[i], original_files[i]);
}
let original_files = file::list_files(&Path::new("./target/tmp2/original"));
let copied_files = file::list_files(&Path::new("./target/tmp2/copy"));
assert_eq!(original_files, copied_files);
fs::remove_dir_all(root).unwrap();
}
@@ -16,52 +16,80 @@ use grin_util as util;

use crate::util::zip;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::Path;
use std::io::Write;
use std::path::{Path, PathBuf};

#[test]
fn zip_unzip() {
let root = Path::new("./target/tmp");
let zip_name = "./target/tmp/zipped.zip";
let root = Path::new("target/tmp");
let zip_path = root.join("zipped.zip");
let path = root.join("to_zip");

fs::create_dir_all(root.join("./to_zip/sub")).unwrap();
write_files("to_zip".to_string(), &root).unwrap();
// Some files we want to use for testing our zip file.
{
fs::create_dir_all(&path).unwrap();

let zip_file = File::create(zip_name).unwrap();
zip::compress(&root.join("./to_zip"), &zip_file).unwrap();
zip_file.sync_all().unwrap();
let mut file = File::create(path.join("foo.txt")).unwrap();
file.write_all(b"Hello, world!").unwrap();

let mut file = File::create(path.join("bar.txt")).unwrap();
file.write_all(b"This, was unexpected!").unwrap();

let mut file = File::create(path.join("wat.txt")).unwrap();
file.write_all(b"Goodbye, world!").unwrap();

let sub_path = path.join("sub");
fs::create_dir_all(&sub_path).unwrap();
let mut file = File::create(sub_path.join("lorem.txt")).unwrap();
file.write_all(b"Lorem ipsum dolor sit amet, consectetur adipiscing elit")
.unwrap();
}

// Create our zip file using an explicit (sub)set of the above files.
{
// List of files to be accepted when creating the zip and extracting from the zip.
// Note: "wat.txt" is not included in the list of files (hence it is excluded).
let files = vec![
PathBuf::from("foo.txt"),
PathBuf::from("bar.txt"),
PathBuf::from("sub/lorem.txt"),
];

let zip_file = File::create(&zip_path).unwrap();
zip::create_zip(&zip_file, &path, files).unwrap();
zip_file.sync_all().unwrap();
}

let zip_path = Path::new(zip_name);
assert!(zip_path.exists());
assert!(zip_path.is_file());
assert!(zip_path.metadata().unwrap().len() > 300);

fs::create_dir_all(root.join("./dezipped")).unwrap();
let zip_file = File::open(zip_name).unwrap();
zip::decompress(zip_file, &root.join("./dezipped"), |_| true).unwrap();
let zip_file = File::open(zip_path).unwrap();

assert!(root.join("to_zip/foo.txt").is_file());
assert!(root.join("to_zip/bar.txt").is_file());
assert!(root.join("to_zip/sub").is_dir());
let lorem = root.join("to_zip/sub/lorem");
assert!(lorem.is_file());
assert!(lorem.metadata().unwrap().len() == 55);
{
let dest_dir = root.join("unzipped");
fs::create_dir_all(&dest_dir).unwrap();

let decompressed = zip::decompress(
File::open("tests/test.zip").unwrap(),
&root.join("./dezipped"),
|_| true,
)
.unwrap();
assert_eq!(decompressed, 1);
}

fn write_files(dir_name: String, root: &Path) -> io::Result<()> {
let mut file = File::create(root.join(dir_name.clone() + "/foo.txt"))?;
file.write_all(b"Hello, world!")?;
let mut file = File::create(root.join(dir_name.clone() + "/bar.txt"))?;
file.write_all(b"Goodbye, world!")?;
let mut file = File::create(root.join(dir_name.clone() + "/sub/lorem"))?;
file.write_all(b"Lorem ipsum dolor sit amet, consectetur adipiscing elit")?;
Ok(())
// List of files to extract from the zip.
// Note: we do not extract "wat.txt" here, even if present in the zip.
let files = vec![PathBuf::from("foo.txt"), PathBuf::from("sub/lorem.txt")];

zip::extract_files(zip_file, &dest_dir, files).unwrap();

assert!(dest_dir.join("foo.txt").is_file());

// Check we did not extract "bar.txt" from the zip file.
// We should *only* extract the files explicitly listed.
assert!(!dest_dir.join("bar.txt").exists());

let sub_path = dest_dir.join("sub");
assert!(sub_path.is_dir());

let lorem = sub_path.join("lorem.txt");
assert!(lorem.is_file());
assert_eq!(
fs::read_to_string(lorem).unwrap(),
"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
);
}
}