merge T4 into master

yeastplume 2018-10-18 11:23:04 +01:00
commit f94ede9af3
109 changed files with 3081 additions and 2420 deletions

Cargo.lock (generated, 739 lines changed): file diff suppressed because it is too large


@@ -1,6 +1,6 @@
[package]
name = "grin"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
exclude = ["**/*.grin", "**/*.grin2"]
publish = false


@@ -1,6 +1,6 @@
[package]
name = "grin_api"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false


@@ -491,6 +491,8 @@ pub struct BlockHeaderPrintable {
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: String,
/// Root hash of the header MMR at the previous header.
pub prev_root: String,
/// rfc3339 timestamp at which the block was built.
pub timestamp: String,
/// Merklish root of all the commitments in the TxHashSet
@@ -502,10 +504,13 @@ pub struct BlockHeaderPrintable {
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Size of the cuckoo graph
pub cuckoo_size: u8,
pub edge_bits: u8,
/// Nonces of the cuckoo solution
pub cuckoo_solution: Vec<u64>,
/// Total accumulated difficulty since genesis block
pub total_difficulty: u64,
/// Difficulty scaling factor between the different proofs of work
pub scaling_difficulty: u32,
/// Total kernel offset since genesis block
pub total_kernel_offset: String,
}
@@ -517,14 +522,16 @@ impl BlockHeaderPrintable {
version: h.version,
height: h.height,
previous: util::to_hex(h.previous.to_vec()),
prev_root: util::to_hex(h.prev_root.to_vec()),
timestamp: h.timestamp.to_rfc3339(),
output_root: util::to_hex(h.output_root.to_vec()),
range_proof_root: util::to_hex(h.range_proof_root.to_vec()),
kernel_root: util::to_hex(h.kernel_root.to_vec()),
nonce: h.pow.nonce,
cuckoo_size: h.pow.cuckoo_sizeshift(),
edge_bits: h.pow.edge_bits(),
cuckoo_solution: h.pow.proof.nonces.clone(),
total_difficulty: h.pow.total_difficulty.to_num(),
scaling_difficulty: h.pow.scaling_difficulty,
total_kernel_offset: h.total_kernel_offset.to_hex(),
}
}


@@ -71,9 +71,9 @@ fn test_start_api() {
let addr: SocketAddr = server_addr.parse().expect("unable to parse server address");
assert!(server.start(addr, router, None).is_ok());
let url = format!("http://{}/v1/", server_addr);
let index = api::client::get::<Vec<String>>(url.as_str(), None).unwrap();
// assert_eq!(index.len(), 2);
// assert_eq!(counter.value(), 1);
let index = request_with_retry(url.as_str()).unwrap();
assert_eq!(index.len(), 2);
assert_eq!(counter.value(), 1);
assert!(server.stop());
thread::sleep(time::Duration::from_millis(1_000));
}
@@ -95,7 +95,22 @@ fn test_start_api_tls() {
let server_addr = "0.0.0.0:14444";
let addr: SocketAddr = server_addr.parse().expect("unable to parse server address");
assert!(server.start(addr, router, Some(tls_conf)).is_ok());
let index = api::client::get::<Vec<String>>("https://yourdomain.com:14444/v1/", None).unwrap();
let index = request_with_retry("https://yourdomain.com:14444/v1/").unwrap();
assert_eq!(index.len(), 2);
assert!(!server.stop());
}
fn request_with_retry(url: &str) -> Result<Vec<String>, api::Error> {
let mut tries = 0;
loop {
let res = api::client::get::<Vec<String>>(url, None);
if res.is_ok() {
return res;
}
if tries > 5 {
return res;
}
tries += 1;
thread::sleep(time::Duration::from_millis(500));
}
}
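The retry helper above hard-codes both the return type and the retry budget. A more general sketch of the same pattern, as a hypothetical closure-based helper (an illustration, not part of this commit):

```rust
use std::{thread, time};

/// Hypothetical generalization of `request_with_retry`: retry any fallible
/// call up to `max_retries` extra times, sleeping 500ms between attempts.
fn with_retry<T, E, F>(mut call: F, max_retries: usize) -> Result<T, E>
where
    F: FnMut() -> Result<T, E>,
{
    let mut tries = 0;
    loop {
        let res = call();
        // Return the first success, or the last error once the budget is spent.
        if res.is_ok() || tries >= max_retries {
            return res;
        }
        tries += 1;
        thread::sleep(time::Duration::from_millis(500));
    }
}
```

`request_with_retry` would then reduce to `with_retry(|| api::client::get::<Vec<String>>(url, None), 5)`.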


@@ -1,6 +1,6 @@
[package]
name = "grin_chain"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false


@@ -486,23 +486,37 @@ impl Chain {
/// the current txhashset state.
pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
let mut txhashset = self.txhashset.write().unwrap();
let (roots, sizes) = txhashset::extending_readonly(&mut txhashset, |extension| {
if is_fork {
pipe::rewind_and_apply_fork(b, extension)?;
}
extension.apply_block(b)?;
Ok((extension.roots(), extension.sizes()))
})?;
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut txhashset, |extension| {
if is_fork {
pipe::rewind_and_apply_fork(b, extension)?;
}
// Carefully destructure these correctly...
// TODO - Maybe sizes should be a struct to add some type safety here...
let (_, output_mmr_size, _, kernel_mmr_size) = sizes;
// Retrieve the header root before we apply the new block
let prev_root = extension.header_root();
// Apply the latest block to the chain state via the extension.
extension.apply_block(b)?;
Ok((prev_root, extension.roots(), extension.sizes()))
})?;
// Set the prev_root on the header.
b.header.prev_root = prev_root;
// Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root;
b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root;
b.header.output_mmr_size = output_mmr_size;
b.header.kernel_mmr_size = kernel_mmr_size;
// Set the output and kernel MMR sizes.
{
// Carefully destructure these correctly...
let (_, output_mmr_size, _, kernel_mmr_size) = sizes;
b.header.output_mmr_size = output_mmr_size;
b.header.kernel_mmr_size = kernel_mmr_size;
}
Ok(())
}
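The TODO above suggests giving the `sizes` tuple some type safety. A minimal sketch of what such a struct could look like, assuming the tuple order is (header, output, rangeproof, kernel) as the destructuring implies; this is hypothetical, not part of the commit:

```rust
/// Hypothetical replacement for the (u64, u64, u64, u64) returned by
/// `extension.sizes()`, making each position self-documenting.
pub struct MMRSizes {
    pub header_mmr_size: u64,
    pub output_mmr_size: u64,
    pub rproof_mmr_size: u64,
    pub kernel_mmr_size: u64,
}
```

With that in place the "carefully destructure these correctly" comments become unnecessary: the header would read its sizes straight off named fields.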
@@ -681,13 +695,6 @@ impl Chain {
// Full validation, including rangeproofs and kernel signature verification.
let (utxo_sum, kernel_sum) = extension.validate(false, status)?;
// Now that we have block_sums the total_kernel_sum on the block_header is redundant.
if header.total_kernel_sum != kernel_sum {
return Err(
ErrorKind::Other(format!("total_kernel_sum in header does not match")).into(),
);
}
// Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
extension.batch.save_block_sums(
&header.hash(),


@@ -45,9 +45,9 @@ pub enum ErrorKind {
/// Addition of difficulties on all previous block is wrong
#[fail(display = "Addition of difficulties on all previous blocks is wrong")]
WrongTotalDifficulty,
/// Block header sizeshift is lower than our min
#[fail(display = "Cuckoo Size too Low")]
LowSizeshift,
/// Block header edge_bits is lower than our min
#[fail(display = "Cuckoo Size too small")]
LowEdgebits,
/// Scaling factor between primary and secondary PoW is invalid
#[fail(display = "Wrong scaling factor")]
InvalidScaling,


@@ -28,7 +28,7 @@ use core::core::verifier_cache::VerifierCache;
use core::core::Committed;
use core::core::{Block, BlockHeader, BlockSums};
use core::global;
use core::pow::{self, Difficulty};
use core::pow;
use error::{Error, ErrorKind};
use grin_store;
use store;
@@ -36,8 +36,6 @@ use txhashset;
use types::{Options, Tip};
use util::LOGGER;
use failure::ResultExt;
/// Contextual information required to process a new block and either reject or
/// accept it.
pub struct BlockContext<'a> {
@@ -220,6 +218,7 @@ pub fn sync_block_headers(
extension.rewind(&prev_header)?;
for header in headers {
extension.validate_root(header)?;
extension.apply_header(header)?;
}
@@ -372,20 +371,14 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
if !ctx.opts.contains(Options::SKIP_POW) {
let shift = header.pow.cuckoo_sizeshift();
// size shift can either be larger than the minimum on the primary PoW
// or equal to the seconday PoW size shift
if shift != consensus::SECOND_POW_SIZESHIFT && global::min_sizeshift() > shift {
return Err(ErrorKind::LowSizeshift.into());
if !header.pow.is_primary() && !header.pow.is_secondary() {
return Err(ErrorKind::LowEdgebits.into());
}
// primary PoW must have a scaling factor of 1
if shift != consensus::SECOND_POW_SIZESHIFT && header.pow.scaling_difficulty != 1 {
return Err(ErrorKind::InvalidScaling.into());
}
if !(ctx.pow_verifier)(header, shift).is_ok() {
let edge_bits = header.pow.edge_bits();
if !(ctx.pow_verifier)(header, edge_bits).is_ok() {
error!(
LOGGER,
"pipe: validate_header bad cuckoo shift size {}", shift
"pipe: error validating header with cuckoo edge_bits {}", edge_bits
);
return Err(ErrorKind::InvalidPow.into());
}
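The removed checks spell out what the new `is_primary` / `is_secondary` predicates must capture. A plausible sketch, inferred from the old code in this hunk rather than copied from the real `ProofOfWork` impl elsewhere in this commit:

```rust
// Sketch only: reconstructed from the sizeshift checks this hunk removes.
impl ProofOfWork {
    /// Primary PoW: edge_bits at or above the configured minimum,
    /// and not the dedicated secondary size.
    pub fn is_primary(&self) -> bool {
        self.proof.edge_bits != consensus::SECOND_POW_EDGE_BITS
            && self.proof.edge_bits >= global::min_edge_bits()
    }

    /// Secondary (ASIC-resistant) PoW: exactly SECOND_POW_EDGE_BITS.
    pub fn is_secondary(&self) -> bool {
        self.proof.edge_bits == consensus::SECOND_POW_EDGE_BITS
    }
}
```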
@@ -432,28 +425,25 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
return Err(ErrorKind::DifficultyTooLow.into());
}
// explicit check to ensure we are not below the minimum difficulty
// we will also check difficulty based on next_difficulty later on
if target_difficulty < Difficulty::one() {
return Err(ErrorKind::DifficultyTooLow.into());
}
// explicit check to ensure total_difficulty has increased by exactly
// the _network_ difficulty of the previous block
// (during testnet1 we use _block_ difficulty here)
let child_batch = ctx.batch.child()?;
let diff_iter = store::DifficultyIter::from_batch(header.previous, child_batch);
let network_difficulty = consensus::next_difficulty(diff_iter)
.context(ErrorKind::Other("network difficulty".to_owned()))?;
if target_difficulty != network_difficulty.clone() {
error!(
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
if target_difficulty != next_header_info.difficulty {
info!(
LOGGER,
"validate_header: header target difficulty {} != {}",
target_difficulty.to_num(),
network_difficulty.to_num()
next_header_info.difficulty.to_num()
);
return Err(ErrorKind::WrongTotalDifficulty.into());
}
// check the secondary PoW scaling factor if applicable
if header.pow.scaling_difficulty != next_header_info.secondary_scaling {
return Err(ErrorKind::InvalidScaling.into());
}
}
Ok(())
@@ -462,11 +452,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
fn validate_block(block: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
let prev = ctx.batch.get_block_header(&block.header.previous)?;
block
.validate(
&prev.total_kernel_offset,
&prev.total_kernel_sum,
ctx.verifier_cache.clone(),
).map_err(|e| ErrorKind::InvalidBlockProof(e))?;
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
.map_err(|e| ErrorKind::InvalidBlockProof(e))?;
Ok(())
}
@@ -488,16 +475,6 @@ fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Er
// Retrieve the block_sums for the previous block.
let block_sums = ext.batch.get_block_sums(&b.header.previous)?;
{
// Now that we have block_sums the total_kernel_sum on the block_header is redundant.
let prev = ext.batch.get_block_header(&b.header.previous)?;
if prev.total_kernel_sum != block_sums.kernel_sum {
return Err(
ErrorKind::Other(format!("total_kernel_sum in header does not match")).into(),
);
}
}
// Overage is based purely on the new block.
// Previous block_sums have taken all previous overage into account.
let overage = b.header.overage();
@@ -524,6 +501,7 @@ fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Er
/// Fully validate the block by applying it to the txhashset extension.
/// Check both the txhashset roots and sizes are correct after applying the block.
fn apply_block_to_txhashset(block: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
ext.validate_header_root(&block.header)?;
ext.apply_block(block)?;
ext.validate_roots()?;
ext.validate_sizes()?;


@@ -22,7 +22,7 @@ use lru_cache::LruCache;
use util::secp::pedersen::Commitment;
use core::consensus::TargetError;
use core::consensus::HeaderInfo;
use core::core::hash::{Hash, Hashed};
use core::core::{Block, BlockHeader, BlockSums};
use core::pow::Difficulty;
@@ -613,7 +613,7 @@ impl<'a> DifficultyIter<'a> {
}
impl<'a> Iterator for DifficultyIter<'a> {
type Item = Result<(u64, Difficulty), TargetError>;
type Item = HeaderInfo;
fn next(&mut self) -> Option<Self::Item> {
// Get both header and previous_header if this is the initial iteration.
@@ -650,8 +650,14 @@ impl<'a> Iterator for DifficultyIter<'a> {
.clone()
.map_or(Difficulty::zero(), |x| x.total_difficulty());
let difficulty = header.total_difficulty() - prev_difficulty;
let scaling = header.pow.scaling_difficulty;
Some(Ok((header.timestamp.timestamp() as u64, difficulty)))
Some(HeaderInfo::new(
header.timestamp.timestamp() as u64,
difficulty,
scaling,
header.pow.is_secondary(),
))
} else {
return None;
}
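With the item type changed from `Result<(u64, Difficulty), TargetError>` to `HeaderInfo`, callers read named fields instead of tuple positions. A small consumption sketch, mirroring the updated `actual_diff_iter_output` test further down:

```rust
// Sketch: iterating difficulty data with the new item type.
for info in chain.difficulty_iter() {
    println!(
        "ts: {}, diff: {}, scaling: {}, secondary: {}",
        info.timestamp,
        info.difficulty.to_num(),
        info.secondary_scaling,
        info.is_secondary
    );
}
```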


@@ -701,28 +701,52 @@ impl<'a> HeaderExtension<'a> {
header_hashes.push(current.hash());
current = self.batch.get_block_header(&current.previous)?;
}
// Include the genesis header as we will re-apply it after truncating the extension.
header_hashes.push(genesis.hash());
header_hashes.reverse();
// Truncate the extension (back to pos 0).
self.truncate()?;
debug!(
LOGGER,
"Re-applying {} headers to extension, from {:?} to {:?}.",
header_hashes.len(),
header_hashes.first().unwrap(),
header_hashes.last().unwrap(),
);
// Re-apply the genesis header after truncation.
self.apply_header(&genesis)?;
for h in header_hashes {
let header = self.batch.get_block_header(&h)?;
// self.validate_header_root()?;
self.apply_header(&header)?;
if header_hashes.len() > 0 {
debug!(
LOGGER,
"Re-applying {} headers to extension, from {:?} to {:?}.",
header_hashes.len(),
header_hashes.first().unwrap(),
header_hashes.last().unwrap(),
);
for h in header_hashes {
let header = self.batch.get_block_header(&h)?;
self.validate_root(&header)?;
self.apply_header(&header)?;
}
}
Ok(())
}
/// The root of the header MMR for convenience.
pub fn root(&self) -> Hash {
self.pmmr.root()
}
/// Validate the prev_root of the header against the root of the current header MMR.
pub fn validate_root(&self, header: &BlockHeader) -> Result<(), Error> {
// If we are validating the genesis block then we have no prev_root.
// So we are done here.
if header.height == 1 {
return Ok(());
}
if self.root() != header.prev_root {
Err(ErrorKind::InvalidRoot.into())
} else {
Ok(())
}
}
}
/// Allows the application of new blocks on top of the sum trees in a
@@ -821,14 +845,14 @@ impl<'a> Extension<'a> {
if pos > 0 {
// If we have not yet reached 1,000 / 1,440 blocks then
// we can fail immediately as coinbase cannot be mature.
if height < global::coinbase_maturity(height) {
if height < global::coinbase_maturity() {
return Err(ErrorKind::ImmatureCoinbase.into());
}
// Find the "cutoff" pos in the output MMR based on the
// header from 1,000 blocks ago.
let cutoff_height = height
.checked_sub(global::coinbase_maturity(height))
.checked_sub(global::coinbase_maturity())
.unwrap_or(0);
let cutoff_header = self.batch.get_header_by_height(cutoff_height)?;
let cutoff_pos = cutoff_header.output_mmr_size;
@@ -1078,14 +1102,19 @@ impl<'a> Extension<'a> {
}
}
/// Get the root of the current header MMR.
pub fn header_root(&self) -> Hash {
self.header_pmmr.root()
}
/// Validate the following MMR roots against the latest header applied -
/// * output
/// * rangeproof
/// * kernel
///
/// Note we do not validate the header MMR roots here as we need to validate
/// a header against the state of the MMR *prior* to applying it as
/// each header commits to the root of the MMR of all previous headers,
/// Note we do not validate the header MMR root here as we need to validate
/// a header against the state of the MMR *prior* to applying it.
/// Each header commits to the root of the MMR of all previous headers,
/// not including the header itself.
///
pub fn validate_roots(&self) -> Result<(), Error> {
@@ -1107,23 +1136,19 @@ impl<'a> Extension<'a> {
}
}
/// Validate the provided header by comparing its "prev_root" to the
/// Validate the provided header by comparing its prev_root to the
/// root of the current header MMR.
///
/// TODO - Implement this once we commit to prev_root in block headers.
///
pub fn validate_header_root(&self, _header: &BlockHeader) -> Result<(), Error> {
if self.header.height == 0 {
pub fn validate_header_root(&self, header: &BlockHeader) -> Result<(), Error> {
if header.height == 1 {
return Ok(());
}
let _roots = self.roots();
// TODO - validate once we commit to header MMR root in the header
// (not just previous hash)
// if roots.header_root != header.prev_root
Ok(())
let roots = self.roots();
if roots.header_root != header.prev_root {
Err(ErrorKind::InvalidRoot.into())
} else {
Ok(())
}
}
/// Validate the header, output and kernel MMR sizes against the block header.


@@ -33,7 +33,7 @@ use core::core::{Block, BlockHeader, Transaction};
use core::global::{self, ChainTypes};
use core::pow::{self, Difficulty};
use core::{consensus, genesis};
use keychain::{ExtKeychain, Keychain};
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use wallet::libtx;
fn clean_output_dir(dir_name: &str) {
@@ -82,19 +82,22 @@ fn data_files() {
for n in 1..4 {
let prev = chain.head_header().unwrap();
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
let pk = keychain.derive_key_id(n as u32).unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
let reward = libtx::reward::output(&keychain, &pk, 0, prev.height).unwrap();
let mut b = core::core::Block::new(&prev, vec![], difficulty.clone(), reward).unwrap();
let mut b =
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
.unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
pow::pow_size(
&mut b.header,
difficulty,
next_header_info.difficulty,
global::proofsize(),
global::min_sizeshift(),
global::min_edge_bits(),
).unwrap();
let _bhash = b.hash();
@@ -154,7 +157,7 @@ fn _prepare_block_nosum(
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let key_id = kc.derive_key_id(diff as u32).unwrap();
let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier();
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(kc, &key_id, fees, prev.height).unwrap();


@@ -33,7 +33,7 @@ use core::core::{Block, BlockHeader, OutputFeatures, OutputIdentifier, Transacti
use core::global::ChainTypes;
use core::pow::Difficulty;
use core::{consensus, global, pow};
use keychain::{ExtKeychain, Keychain};
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use wallet::libtx::{self, build};
fn clean_output_dir(dir_name: &str) {
@@ -64,22 +64,30 @@ fn mine_empty_chain() {
for n in 1..4 {
let prev = chain.head_header().unwrap();
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
let pk = keychain.derive_key_id(n as u32).unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
let reward = libtx::reward::output(&keychain, &pk, 0, prev.height).unwrap();
let mut b = core::core::Block::new(&prev, vec![], difficulty.clone(), reward).unwrap();
let mut b =
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
.unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
let sizeshift = if n == 2 {
global::min_sizeshift() + 1
let edge_bits = if n == 2 {
global::min_edge_bits() + 1
} else {
global::min_sizeshift()
global::min_edge_bits()
};
b.header.pow.proof.cuckoo_sizeshift = sizeshift;
pow::pow_size(&mut b.header, difficulty, global::proofsize(), sizeshift).unwrap();
b.header.pow.proof.cuckoo_sizeshift = sizeshift;
b.header.pow.proof.edge_bits = edge_bits;
pow::pow_size(
&mut b.header,
next_header_info.difficulty,
global::proofsize(),
edge_bits,
).unwrap();
b.header.pow.proof.edge_bits = edge_bits;
let bhash = b.hash();
chain.process_block(b, chain::Options::MINE).unwrap();
@@ -262,11 +270,14 @@ fn spend_in_fork_and_compact() {
// Check the height of the "fork block".
assert_eq!(fork_head.height, 4);
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier();
let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier();
let tx1 = build::transaction(
vec![
build::coinbase_input(consensus::REWARD, kc.derive_key_id(2).unwrap()),
build::output(consensus::REWARD - 20000, kc.derive_key_id(30).unwrap()),
build::coinbase_input(consensus::REWARD, key_id2.clone()),
build::output(consensus::REWARD - 20000, key_id30.clone()),
build::with_fee(20000),
],
&kc,
@@ -281,8 +292,8 @@ fn spend_in_fork_and_compact() {
let tx2 = build::transaction(
vec![
build::input(consensus::REWARD - 20000, kc.derive_key_id(30).unwrap()),
build::output(consensus::REWARD - 40000, kc.derive_key_id(31).unwrap()),
build::input(consensus::REWARD - 20000, key_id30.clone()),
build::output(consensus::REWARD - 40000, key_id31.clone()),
build::with_fee(20000),
],
&kc,
@@ -376,23 +387,31 @@ fn output_header_mappings() {
for n in 1..15 {
let prev = chain.head_header().unwrap();
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
let pk = keychain.derive_key_id(n as u32).unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
let reward = libtx::reward::output(&keychain, &pk, 0, prev.height).unwrap();
reward_outputs.push(reward.0.clone());
let mut b = core::core::Block::new(&prev, vec![], difficulty.clone(), reward).unwrap();
let mut b =
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
.unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
let sizeshift = if n == 2 {
global::min_sizeshift() + 1
let edge_bits = if n == 2 {
global::min_edge_bits() + 1
} else {
global::min_sizeshift()
global::min_edge_bits()
};
b.header.pow.proof.cuckoo_sizeshift = sizeshift;
pow::pow_size(&mut b.header, difficulty, global::proofsize(), sizeshift).unwrap();
b.header.pow.proof.cuckoo_sizeshift = sizeshift;
b.header.pow.proof.edge_bits = edge_bits;
pow::pow_size(
&mut b.header,
next_header_info.difficulty,
global::proofsize(),
edge_bits,
).unwrap();
b.header.pow.proof.edge_bits = edge_bits;
chain.process_block(b, chain::Options::MINE).unwrap();
@@ -465,7 +484,7 @@ where
K: Keychain,
{
let proof_size = global::proofsize();
let key_id = kc.derive_key_id(diff as u32).unwrap();
let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier();
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(kc, &key_id, fees, prev.height).unwrap();
@@ -503,18 +522,17 @@ fn actual_diff_iter_output() {
let iter = chain.difficulty_iter();
let mut last_time = 0;
let mut first = true;
for i in iter.into_iter() {
let elem = i.unwrap();
for elem in iter.into_iter() {
if first {
last_time = elem.0;
last_time = elem.timestamp;
first = false;
}
println!(
"next_difficulty time: {}, diff: {}, duration: {} ",
elem.0,
elem.1.to_num(),
last_time - elem.0
elem.timestamp,
elem.difficulty.to_num(),
last_time - elem.timestamp
);
last_time = elem.0;
last_time = elem.timestamp;
}
}


@@ -28,7 +28,7 @@ use core::core::hash::Hashed;
use core::core::{Block, BlockHeader};
use core::global::{self, ChainTypes};
use core::pow::{self, Difficulty};
use keychain::{ExtKeychain, Keychain};
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use wallet::libtx;
fn clean_output_dir(dir_name: &str) {
@@ -45,7 +45,7 @@ fn test_various_store_indices() {
clean_output_dir(chain_dir);
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let db_env = Arc::new(store::new_env(chain_dir.to_string()));
let chain_store = chain::store::ChainStore::new(db_env).unwrap();


@@ -32,7 +32,7 @@ use core::core::verifier_cache::LruVerifierCache;
use core::global::{self, ChainTypes};
use core::pow::Difficulty;
use core::{consensus, pow};
use keychain::{ExtKeychain, Keychain};
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use wallet::libtx::{self, build};
fn clean_output_dir(dir_name: &str) {
@@ -63,24 +63,24 @@ fn test_coinbase_maturity() {
let prev = chain.head_header().unwrap();
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id4 = keychain.derive_key_id(4).unwrap();
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let key_id4 = ExtKeychainPath::new(1, 4, 0, 0, 0).to_identifier();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
let reward = libtx::reward::output(&keychain, &key_id1, 0, prev.height).unwrap();
let mut block = core::core::Block::new(&prev, vec![], Difficulty::one(), reward).unwrap();
block.header.timestamp = prev.timestamp + Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
pow::pow_size(
&mut block.header,
difficulty,
next_header_info.difficulty,
global::proofsize(),
global::min_sizeshift(),
global::min_edge_bits(),
).unwrap();
assert_eq!(block.outputs().len(), 1);
@@ -99,7 +99,7 @@ fn test_coinbase_maturity() {
let amount = consensus::REWARD;
let lock_height = 1 + global::coinbase_maturity(1);
let lock_height = 1 + global::coinbase_maturity();
assert_eq!(lock_height, 4);
// here we build a tx that attempts to spend the earlier coinbase output
@@ -117,9 +117,9 @@ fn test_coinbase_maturity() {
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id3, fees, prev.height).unwrap();
let mut block = core::core::Block::new(&prev, txs, Difficulty::one(), reward).unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
block.header.timestamp = prev.timestamp + Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
@@ -135,9 +135,9 @@ fn test_coinbase_maturity() {
pow::pow_size(
&mut block.header,
difficulty,
next_header_info.difficulty,
global::proofsize(),
global::min_sizeshift(),
global::min_edge_bits(),
).unwrap();
// mine enough blocks to increase the height sufficiently for
@@ -146,21 +146,21 @@ fn test_coinbase_maturity() {
let prev = chain.head_header().unwrap();
let keychain = ExtKeychain::from_random_seed().unwrap();
let pk = keychain.derive_key_id(1).unwrap();
let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let reward = libtx::reward::output(&keychain, &pk, 0, prev.height).unwrap();
let mut block = core::core::Block::new(&prev, vec![], Difficulty::one(), reward).unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
block.header.timestamp = prev.timestamp + Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
pow::pow_size(
&mut block.header,
difficulty,
next_header_info.difficulty,
global::proofsize(),
global::min_sizeshift(),
global::min_edge_bits(),
).unwrap();
chain.process_block(block, chain::Options::MINE).unwrap();
@@ -174,20 +174,20 @@ fn test_coinbase_maturity() {
let txs = vec![coinbase_txn];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter());
let reward = libtx::reward::output(&keychain, &key_id4, fees, prev.height).unwrap();
let mut block = core::core::Block::new(&prev, txs, Difficulty::one(), reward).unwrap();
block.header.timestamp = prev.timestamp + Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.pow.scaling_difficulty = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
pow::pow_size(
&mut block.header,
difficulty,
next_header_info.difficulty,
global::proofsize(),
global::min_sizeshift(),
global::min_edge_bits(),
).unwrap();
let result = chain.process_block(block, chain::Options::MINE);


@@ -26,7 +26,10 @@ use std::sync::Arc;
use chain::store::ChainStore;
use chain::txhashset;
use core::core::BlockHeader;
use chain::types::Tip;
use core::core::{Block, BlockHeader};
use core::pow::Difficulty;
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use util::file;
fn clean_output_dir(dir_name: &str) {
@@ -79,7 +82,8 @@ fn write_file(db_root: String) {
.join("txhashset")
.join("kernel")
.join("strange0"),
).unwrap();
)
.unwrap();
OpenOptions::new()
.create(true)
.write(true)
@@ -94,7 +98,8 @@ fn write_file(db_root: String) {
.join("txhashset")
.join("strange_dir")
.join("strange2"),
).unwrap();
)
.unwrap();
fs::create_dir(
Path::new(&db_root)
.join("txhashset")
@@ -110,7 +115,8 @@ fn write_file(db_root: String) {
.join("strange_dir")
.join("strange_subdir")
.join("strange3"),
).unwrap();
)
.unwrap();
}
fn txhashset_contains_expected_files(dirname: String, path_buf: PathBuf) -> bool {


@@ -1,6 +1,6 @@
[package]
name = "grin_config"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false


@@ -71,6 +71,8 @@ fn comments() -> HashMap<String, String> {
#UserTesting - For regular user testing (cuckoo 16)
#Testnet1 - Testnet1 genesis block (cuckoo 16)
#Testnet2 - Testnet2 genesis block (cuckoo 30)
#Testnet3 - Testnet3 genesis block (cuckoo 30)
#Testnet4 - Testnet4 genesis block (cuckatoo 29+)
".to_string(),
);


@@ -1,6 +1,6 @@
[package]
name = "grin_core"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false


@@ -18,7 +18,7 @@
//! enough, consensus-relevant constants and short functions should be kept
//! here.
use std::cmp::max;
use std::cmp::{max, min};
use std::fmt;
use global;
@@ -33,48 +33,64 @@ pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
/// The block subsidy amount, one grin per second on average
pub const REWARD: u64 = 60 * GRIN_BASE;
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64) -> u64 {
REWARD + fee
}
/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;
/// The block subsidy amount, one grin per second on average
pub const REWARD: u64 = BLOCK_TIME_SEC * GRIN_BASE;
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64) -> u64 {
REWARD + fee
}
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = 3600 / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
/// set to nominal number of block in one day (1440 with 1-minute blocks)
pub const COINBASE_MATURITY: u64 = 24 * 60 * 60 / BLOCK_TIME_SEC;
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// Ratio the secondary proof of work should take over the primary, as a
/// function of block height (time). Starts at 90%, losing a percent
/// approximately every week. Represented as an integer between 0 and 100.
pub fn secondary_pow_ratio(height: u64) -> u64 {
90u64.saturating_sub(height / WEEK_HEIGHT)
}
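A quick sanity check of that schedule, as illustrative asserts (assuming WEEK_HEIGHT = 10_080 as defined above):

```rust
#[test]
fn secondary_pow_ratio_schedule() {
    assert_eq!(secondary_pow_ratio(0), 90); // 90% secondary at launch
    assert_eq!(secondary_pow_ratio(WEEK_HEIGHT), 89); // one percent per week
    assert_eq!(secondary_pow_ratio(45 * WEEK_HEIGHT), 45); // halfway, ~10.5 months in
    assert_eq!(secondary_pow_ratio(90 * WEEK_HEIGHT), 0); // fully primary
    assert_eq!(secondary_pow_ratio(1_000 * WEEK_HEIGHT), 0); // saturating_sub floors at 0
}
```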
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckoo Cycle size shift used for mining and validating.
pub const DEFAULT_MIN_SIZESHIFT: u8 = 30;
/// Default Cuckoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 30;
/// Secondary proof-of-work size shift, meant to be ASIC resistant.
pub const SECOND_POW_SIZESHIFT: u8 = 29;
/// Secondary proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference sizeshift to compute difficulty factors for higher
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const REFERENCE_SIZESHIFT: u8 = 30;
pub const BASE_EDGE_BITS: u8 = 24;
/// Default Cuckoo Cycle easiness, high enough to have good likeliness to find
/// a solution.
pub const EASINESS: u32 = 50;
/// Maximum scaling factor for secondary pow, enforced in diff retargetting
/// increasing scaling factor increases frequency of secondary blocks
/// ONLY IN TESTNET4 LIMITED TO ABOUT 8 TIMES THE NATURAL SCALE
pub const MAX_SECONDARY_SCALING: u64 = 8 << 11;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// Rationale
/// behind the value is that the longest bitcoin fork was about 30 blocks, so 5h. We
/// add an order of magnitude to be safe and round to 48h of blocks to make it
/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = 48 * 3600 / (BLOCK_TIME_SEC as u32);
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const BLOCK_INPUT_WEIGHT: usize = 1;
@@ -97,59 +113,63 @@ pub const BLOCK_KERNEL_WEIGHT: usize = 2;
/// outputs and a single kernel).
///
/// A more "standard" block, filled with transactions of 2 inputs, 2 outputs
/// and one kernel, should be around 2_663_333 bytes.
/// and one kernel, should be around 2.66 MB
pub const MAX_BLOCK_WEIGHT: usize = 40_000;
/// Fork every 250,000 blocks for first 2 years, simple number and just a
/// little less than 6 months.
pub const HARD_FORK_INTERVAL: u64 = 250_000;
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: u16) -> bool {
// uncomment below as we go from hard fork to hard fork
if height < HEADER_V2_HARD_FORK {
if height < HARD_FORK_INTERVAL {
version == 1
} else if height < HARD_FORK_INTERVAL {
/* } else if height < 2 * HARD_FORK_INTERVAL {
version == 2
} else if height < 2 * HARD_FORK_INTERVAL {
} else if height < 3 * HARD_FORK_INTERVAL {
version == 3
/* } else if height < 3 * HARD_FORK_INTERVAL {
version == 4 */
/* } else if height >= 4 * HARD_FORK_INTERVAL {
} else if height < 4 * HARD_FORK_INTERVAL {
version == 4
} else if height >= 5 * HARD_FORK_INTERVAL {
version > 4 */
} else {
false
}
}
/// Time window in blocks to calculate block time median
pub const MEDIAN_TIME_WINDOW: u64 = 11;
/// Index at half the desired median
pub const MEDIAN_TIME_INDEX: u64 = MEDIAN_TIME_WINDOW / 2;
/// Number of blocks used to calculate difficulty adjustments
pub const DIFFICULTY_ADJUST_WINDOW: u64 = 60;
pub const DIFFICULTY_ADJUST_WINDOW: u64 = HOUR_HEIGHT;
/// Average time span of the difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;
/// Maximum size time window used for difficulty adjustments
pub const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 2;
/// Minimum size time window used for difficulty adjustments
pub const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW / 2;
/// Clamp factor to use for difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for difficulty adjustment
pub const DAMP_FACTOR: u64 = 3;
/// Compute weight of a graph as number of siphash bits defining the graph
/// Must be made dependent on height to phase out smaller size over the years
/// This can wait until end of 2019 at latest
pub fn graph_weight(edge_bits: u8) -> u64 {
(2 << (edge_bits - global::base_edge_bits()) as u64) * (edge_bits as u64)
}
/// minimum possible difficulty equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const MIN_DIFFICULTY: u64 =
((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
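For concreteness, the arithmetic behind these constants, assuming `global::base_edge_bits()` returns the BASE_EDGE_BITS = 24 above (illustrative asserts, not in the source):

```rust
// graph_weight(29) = (2 << (29 - 24)) * 29 = 64 * 29 = 1_856
assert_eq!(graph_weight(SECOND_POW_EDGE_BITS), MIN_DIFFICULTY);
assert_eq!(MIN_DIFFICULTY, 1_856);
// The testnet4 initial difficulty defined in global.rs,
// 1_000 * (2 << (29 - 24)) * 29, therefore works out to 1_856_000.
```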
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up
/// Currently grossly over-estimated at 10% of current
/// ethereum GPUs (assuming 1GPU can solve a block at diff 1
/// in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000;
/// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval)
/// FOR MAINNET, use
/// pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * MIN_DIFFICULTY;
/// Pick MUCH more modest value for TESTNET4:
pub const INITIAL_DIFFICULTY: u64 = 1_000 * MIN_DIFFICULTY;
/// Consensus errors
#[derive(Clone, Debug, Eq, PartialEq, Fail)]
@@ -164,20 +184,72 @@ impl fmt::Display for Error {
}
}
/// Error when computing the next difficulty adjustment.
#[derive(Debug, Clone, Fail)]
pub struct TargetError(pub String);
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
/// Timestamp of the header, 1 when not used (returned info)
pub timestamp: u64,
/// Network difficulty or next difficulty to use
pub difficulty: Difficulty,
/// Network secondary PoW factor or factor to use
pub secondary_scaling: u32,
/// Whether the header is a secondary proof of work
pub is_secondary: bool,
}
impl fmt::Display for TargetError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Error computing new difficulty: {}", self.0)
impl HeaderInfo {
/// Default constructor
pub fn new(
timestamp: u64,
difficulty: Difficulty,
secondary_scaling: u32,
is_secondary: bool,
) -> HeaderInfo {
HeaderInfo {
timestamp,
difficulty,
secondary_scaling,
is_secondary,
}
}
/// Constructor from a timestamp and difficulty, setting a default secondary
/// PoW factor
pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
HeaderInfo {
timestamp,
difficulty,
secondary_scaling: global::initial_graph_weight(),
is_secondary: false,
}
}
/// Constructor from a difficulty and secondary factor, setting a default
/// timestamp
pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
HeaderInfo {
timestamp: 1,
difficulty,
secondary_scaling,
is_secondary: false,
}
}
}
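The three constructors cover the three places header info is produced; a short illustration with made-up numbers (hypothetical values, grounded only in the constructors above):

```rust
use core::consensus::HeaderInfo;
use core::pow::Difficulty;

fn header_info_examples() {
    // Full information, as yielded by the chain's DifficultyIter.
    let live = HeaderInfo::new(1_539_800_000, Difficulty::from_num(1_856_000), 1_856, false);
    // Pre-genesis padding keeps timestamp and difficulty, defaulting the rest.
    let pad = HeaderInfo::from_ts_diff(1_539_800_000, Difficulty::from_num(1_856_000));
    // next_difficulty() returns difficulty plus scaling with a dummy timestamp of 1.
    let next = HeaderInfo::from_diff_scaling(Difficulty::from_num(1_856_000), 1_856);
    assert_eq!(live.timestamp, pad.timestamp);
    assert_eq!(next.timestamp, 1);
}
```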
/// Move value linearly toward a goal
pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
(1 * actual + (damp_factor - 1) * goal) / damp_factor
}
/// Limit value to be within some factor of a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
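Worked numbers for the two helpers, using the window values above (BLOCK_TIME_WINDOW = 3_600, DAMP_FACTOR = 3, CLAMP_FACTOR = 2); illustrative only:

```rust
// Blocks arriving twice as fast as the 3_600s goal are dampened:
// damp(1_800, 3_600, 3) = (1_800 + 2 * 3_600) / 3 = 3_000
assert_eq!(damp(1_800, 3_600, 3), 3_000);
// An extreme observation is clamped to within a factor of 2 of the goal:
// clamp(100, 3_600, 2) = max(1_800, min(100, 7_200)) = 1_800
assert_eq!(clamp(100, 3_600, 2), 1_800);
assert_eq!(clamp(10_000, 3_600, 2), 7_200);
```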
/// Computes the proof-of-work difficulty that the next block should comply
/// with. Takes an iterator over past blocks, from latest (highest height) to
/// oldest (lowest height). The iterator produces pairs of timestamp and
/// difficulty for each block.
/// with. Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
///
/// The difficulty calculation is based on both Digishield and GravityWave
/// family of difficulty computation, coming to something very close to Zcash.
@@ -185,66 +257,69 @@ impl fmt::Display for TargetError {
/// DIFFICULTY_ADJUST_WINDOW blocks. The corresponding timespan is calculated
/// by using the difference between the median timestamps at the beginning
/// and the end of the window.
pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
///
/// The secondary proof-of-work factor is calculated along the same lines, as
/// an adjustment on the deviation against the ideal value.
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
T: IntoIterator<Item = HeaderInfo>,
{
// Create vector of difficulty data running from earliest
// to latest, and pad with simulated pre-genesis data to allow earlier
// adjustment if there isn't enough window data
// length will be DIFFICULTY_ADJUST_WINDOW+MEDIAN_TIME_WINDOW
// adjustment if there isn't enough window data; length will be
// DIFFICULTY_ADJUST_WINDOW + 1 (for initial block time bound)
let diff_data = global::difficulty_data_to_vector(cursor);
// Obtain the median window for the earlier time period
// the first MEDIAN_TIME_WINDOW elements
let mut window_earliest: Vec<u64> = diff_data
.iter()
.take(MEDIAN_TIME_WINDOW as usize)
.map(|n| n.clone().unwrap().0)
.collect();
// pick median
window_earliest.sort();
let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];
// First, get the ratio of secondary PoW vs primary
let sec_pow_scaling = secondary_pow_scaling(height, &diff_data);
// Obtain the median window for the latest time period
// i.e. the last MEDIAN_TIME_WINDOW elements
let mut window_latest: Vec<u64> = diff_data
.iter()
.skip(DIFFICULTY_ADJUST_WINDOW as usize)
.map(|n| n.clone().unwrap().0)
.collect();
// pick median
window_latest.sort();
let latest_ts = window_latest[MEDIAN_TIME_INDEX as usize];
// median time delta
let ts_delta = latest_ts - earliest_ts;
// Get the timestamp delta across the window
let ts_delta: u64 =
diff_data[DIFFICULTY_ADJUST_WINDOW as usize].timestamp - diff_data[0].timestamp;
// Get the difficulty sum of the last DIFFICULTY_ADJUST_WINDOW elements
let diff_sum = diff_data
let diff_sum: u64 = diff_data
.iter()
.skip(MEDIAN_TIME_WINDOW as usize)
.fold(0, |sum, d| sum + d.clone().unwrap().1.to_num());
.skip(1)
.map(|dd| dd.difficulty.to_num())
.sum();
// Apply dampening except when difficulty is near 1
let ts_damp = if diff_sum < DAMP_FACTOR * DIFFICULTY_ADJUST_WINDOW {
ts_delta
} else {
(1 * ts_delta + (DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR
};
// adjust time delta toward goal subject to dampening and clamping
let adj_ts = clamp(
damp(ts_delta, BLOCK_TIME_WINDOW, DAMP_FACTOR),
BLOCK_TIME_WINDOW,
CLAMP_FACTOR,
);
let difficulty = max(1, diff_sum * BLOCK_TIME_SEC / adj_ts);
// Apply time bounds
let adj_ts = if ts_damp < LOWER_TIME_BOUND {
LOWER_TIME_BOUND
} else if ts_damp > UPPER_TIME_BOUND {
UPPER_TIME_BOUND
} else {
ts_damp
};
HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
}
let difficulty = diff_sum * BLOCK_TIME_SEC / adj_ts;
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
// Get the secondary count across the window, in pct (100 * 60 * 2nd_pow_fraction)
let snd_count = 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64;
Ok(Difficulty::from_num(max(difficulty, 1)))
// Get the scaling factor sum of the last DIFFICULTY_ADJUST_WINDOW elements
let scale_sum: u64 = diff_data
.iter()
.skip(1)
.map(|dd| dd.secondary_scaling as u64)
.sum();
// compute ideal 2nd_pow_fraction in pct and across window
let target_pct = secondary_pow_ratio(height);
let target_count = DIFFICULTY_ADJUST_WINDOW * target_pct;
// adjust count toward goal subject to dampening and clamping
let adj_count = clamp(
damp(snd_count, target_count, DAMP_FACTOR),
target_count,
CLAMP_FACTOR,
);
let scale = scale_sum * target_pct / adj_count;
max(1, min(scale, MAX_SECONDARY_SCALING)) as u32
}
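To make the adjustment concrete, a worked example under stated assumptions (height 0, a full 60-block window with half the blocks secondary, every header carrying the same scaling factor s):

```rust
#[test]
fn secondary_scaling_worked_example() {
    // secondary_pow_ratio(0) = 90, so the window should be 90% secondary.
    let target_pct = 90u64;
    let target_count = DIFFICULTY_ADJUST_WINDOW * target_pct; // 60 * 90 = 5_400
    let snd_count = 100 * 30; // only half of the 60-block window is secondary
    let adj_count = clamp(
        damp(snd_count, target_count, DAMP_FACTOR),
        target_count,
        CLAMP_FACTOR,
    ); // damp: (3_000 + 2 * 5_400) / 3 = 4_600; clamp leaves it untouched
    assert_eq!(adj_count, 4_600);
    // With scale_sum = 60 * s, scale = 60 * s * 90 / 4_600 ≈ 1.17 * s:
    // too few secondary blocks push the scaling factor up by ~17%, making
    // secondary PoW relatively easier until the ratio recovers.
}
```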
/// Consensus rule that collections of items are sorted lexicographically.
@@ -252,6 +327,3 @@ pub trait VerifySortOrder<T> {
/// Verify a collection of items is sorted as required.
fn verify_sort_order(&self) -> Result<(), Error>;
}
/// Height for the v2 headers hard fork, with extended proof of work in header
pub const HEADER_V2_HARD_FORK: u64 = 95_000;


@@ -35,7 +35,7 @@ use global;
use keychain::{self, BlindingFactor};
use pow::{Difficulty, Proof, ProofOfWork};
use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use util::{secp, secp_static, static_secp_instance, LOGGER};
use util::{secp, static_secp_instance, LOGGER};
/// Errors thrown by Block validation
#[derive(Debug, Clone, Eq, PartialEq, Fail)]
@@ -118,6 +118,8 @@ pub struct BlockHeader {
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: Hash,
/// Root hash of the header MMR at the previous header.
pub prev_root: Hash,
/// Timestamp at which the block was built.
pub timestamp: DateTime<Utc>,
/// Merklish root of all the commitments in the TxHashSet
@@ -130,9 +132,6 @@ pub struct BlockHeader {
/// We can derive the kernel offset sum for *this* block from
/// the total kernel offset of the previous block header.
pub total_kernel_offset: BlindingFactor,
/// Total accumulated sum of kernel commitments since genesis block.
/// Should always equal the UTXO commitment sum minus supply.
pub total_kernel_sum: Commitment,
/// Total size of the output MMR after applying this block
pub output_mmr_size: u64,
/// Total size of the kernel MMR after applying this block
@@ -142,33 +141,31 @@ pub struct BlockHeader {
}
/// Serialized size of fixed part of a BlockHeader, i.e. without pow
fn fixed_size_of_serialized_header(version: u16) -> usize {
fn fixed_size_of_serialized_header(_version: u16) -> usize {
let mut size: usize = 0;
size += mem::size_of::<u16>(); // version
size += mem::size_of::<u64>(); // height
size += mem::size_of::<i64>(); // timestamp
size += mem::size_of::<Hash>(); // previous
size += mem::size_of::<u64>(); // timestamp
size += mem::size_of::<Hash>(); // prev_root
size += mem::size_of::<Hash>(); // output_root
size += mem::size_of::<Hash>(); // range_proof_root
size += mem::size_of::<Hash>(); // kernel_root
size += mem::size_of::<BlindingFactor>(); // total_kernel_offset
size += mem::size_of::<Commitment>(); // total_kernel_sum
size += mem::size_of::<u64>(); // output_mmr_size
size += mem::size_of::<u64>(); // kernel_mmr_size
size += mem::size_of::<Difficulty>(); // total_difficulty
if version >= 2 {
size += mem::size_of::<u64>(); // scaling_difficulty
}
size += mem::size_of::<u32>(); // scaling_difficulty
size += mem::size_of::<u64>(); // nonce
size
}
/// Serialized size of a BlockHeader
pub fn serialized_size_of_header(version: u16, cuckoo_sizeshift: u8) -> usize {
pub fn serialized_size_of_header(version: u16, edge_bits: u8) -> usize {
let mut size = fixed_size_of_serialized_header(version);
size += mem::size_of::<u8>(); // pow.cuckoo_sizeshift
let nonce_bits = cuckoo_sizeshift as usize - 1;
size += mem::size_of::<u8>(); // pow.edge_bits
let nonce_bits = edge_bits as usize;
let bitvec_len = global::proofsize() * nonce_bits;
size += bitvec_len / 8; // pow.nonces
if bitvec_len % 8 != 0 {
@@ -182,13 +179,13 @@ impl Default for BlockHeader {
BlockHeader {
version: 1,
height: 0,
previous: ZERO_HASH,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc),
previous: ZERO_HASH,
prev_root: ZERO_HASH,
output_root: ZERO_HASH,
range_proof_root: ZERO_HASH,
kernel_root: ZERO_HASH,
total_kernel_offset: BlindingFactor::zero(),
total_kernel_sum: Commitment::from_vec(vec![0; 33]),
output_mmr_size: 0,
kernel_mmr_size: 0,
pow: ProofOfWork::default(),
@@ -218,23 +215,15 @@ impl Writeable for BlockHeader {
/// Deserialization of a block header
impl Readable for BlockHeader {
fn read(reader: &mut Reader) -> Result<BlockHeader, ser::Error> {
let (version, height) = ser_multiread!(reader, read_u16, read_u64);
let (version, height, timestamp) = ser_multiread!(reader, read_u16, read_u64, read_i64);
let previous = Hash::read(reader)?;
let timestamp = reader.read_i64()?;
let mut total_difficulty = None;
if version == 1 {
total_difficulty = Some(Difficulty::read(reader)?);
}
let prev_root = Hash::read(reader)?;
let output_root = Hash::read(reader)?;
let range_proof_root = Hash::read(reader)?;
let kernel_root = Hash::read(reader)?;
let total_kernel_offset = BlindingFactor::read(reader)?;
let total_kernel_sum = Commitment::read(reader)?;
let (output_mmr_size, kernel_mmr_size) = ser_multiread!(reader, read_u64, read_u64);
let mut pow = ProofOfWork::read(version, reader)?;
if version == 1 {
pow.total_difficulty = total_difficulty.unwrap();
}
let pow = ProofOfWork::read(version, reader)?;
if timestamp > MAX_DATE.and_hms(0, 0, 0).timestamp()
|| timestamp < MIN_DATE.and_hms(0, 0, 0).timestamp()
@@ -245,13 +234,13 @@ impl Readable for BlockHeader {
Ok(BlockHeader {
version,
height,
previous,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc),
previous,
prev_root,
output_root,
range_proof_root,
kernel_root,
total_kernel_offset,
total_kernel_sum,
output_mmr_size,
kernel_mmr_size,
pow,
@@ -266,20 +255,13 @@ impl BlockHeader {
writer,
[write_u16, self.version],
[write_u64, self.height],
[write_i64, self.timestamp.timestamp()],
[write_fixed_bytes, &self.previous],
[write_i64, self.timestamp.timestamp()]
);
if self.version == 1 {
// written as part of the ProofOfWork in later versions
writer.write_u64(self.pow.total_difficulty.to_num())?;
}
ser_multiwrite!(
writer,
[write_fixed_bytes, &self.prev_root],
[write_fixed_bytes, &self.output_root],
[write_fixed_bytes, &self.range_proof_root],
[write_fixed_bytes, &self.kernel_root],
[write_fixed_bytes, &self.total_kernel_offset],
[write_fixed_bytes, &self.total_kernel_sum],
[write_u64, self.output_mmr_size],
[write_u64, self.kernel_mmr_size]
);
@@ -327,8 +309,8 @@ impl BlockHeader {
pub fn serialized_size(&self) -> usize {
let mut size = fixed_size_of_serialized_header(self.version);
size += mem::size_of::<u8>(); // pow.cuckoo_sizeshift
let nonce_bits = self.pow.cuckoo_sizeshift() as usize - 1;
size += mem::size_of::<u8>(); // pow.edge_bits
let nonce_bits = self.pow.edge_bits() as usize;
let bitvec_len = global::proofsize() * nonce_bits;
size += bitvec_len / 8; // pow.nonces
if bitvec_len % 8 != 0 {
@@ -514,36 +496,18 @@ impl Block {
let total_kernel_offset =
committed::sum_kernel_offsets(vec![agg_tx.offset, prev.total_kernel_offset], vec![])?;
let total_kernel_sum = {
let zero_commit = secp_static::commit_to_zero_value();
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let mut excesses = map_vec!(agg_tx.kernels(), |x| x.excess());
excesses.push(prev.total_kernel_sum);
excesses.retain(|x| *x != zero_commit);
secp.commit_sum(excesses, vec![])?
};
let now = Utc::now().timestamp();
let timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now, 0), Utc);
let version = if prev.height + 1 < consensus::HEADER_V2_HARD_FORK {
1
} else {
2
};
// Now build the block with all the above information.
// Note: We have not validated the block here.
// Caller must validate the block as necessary.
Block {
header: BlockHeader {
version,
height: prev.height + 1,
timestamp,
previous: prev.hash(),
total_kernel_offset,
total_kernel_sum,
pow: ProofOfWork {
total_difficulty: difficulty + prev.pow.total_difficulty,
..Default::default()
@@ -638,7 +602,6 @@ impl Block {
pub fn validate(
&self,
prev_kernel_offset: &BlindingFactor,
prev_kernel_sum: &Commitment,
verifier: Arc<RwLock<VerifierCache>>,
) -> Result<(Commitment), Error> {
self.body.validate(true, verifier)?;
@@ -662,12 +625,6 @@ impl Block {
let (_utxo_sum, kernel_sum) =
self.verify_kernel_sums(self.header.overage(), block_kernel_offset)?;
// check the block header's total kernel sum
let total_sum = committed::sum_commits(vec![kernel_sum, prev_kernel_sum.clone()], vec![])?;
if total_sum != self.header.total_kernel_sum {
return Err(Error::InvalidTotalKernelSum);
}
Ok(kernel_sum)
}


@@ -170,9 +170,9 @@ mod test {
let foo = Foo(0);
let expected_hash =
Hash::from_hex("81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c")
.unwrap();
let expected_hash = Hash::from_hex(
"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
).unwrap();
assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::default();
@@ -182,9 +182,9 @@ mod test {
);
let foo = Foo(5);
let expected_hash =
Hash::from_hex("3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673")
.unwrap();
let expected_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::default();
@@ -194,14 +194,14 @@ mod test {
);
let foo = Foo(5);
let expected_hash =
Hash::from_hex("3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673")
.unwrap();
let expected_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
assert_eq!(foo.hash(), expected_hash);
let other_hash =
Hash::from_hex("81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c")
.unwrap();
let other_hash = Hash::from_hex(
"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
).unwrap();
assert_eq!(
foo.short_id(&other_hash, foo.0),
ShortId::from_hex("3e9cde72a687").unwrap()


@@ -201,7 +201,16 @@ impl TxKernel {
let sig = &self.excess_sig;
// Verify aggsig directly in libsecp
let pubkey = &self.excess.to_pubkey(&secp)?;
if !secp::aggsig::verify_single(&secp, &sig, &msg, None, &pubkey, false) {
if !secp::aggsig::verify_single(
&secp,
&sig,
&msg,
None,
&pubkey,
Some(&pubkey),
None,
false,
) {
return Err(secp::Error::IncorrectSignature);
}
Ok(())
@@ -1203,7 +1212,7 @@ mod test {
#[test]
fn test_kernel_ser_deser() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let commit = keychain.commit(5, &key_id).unwrap();
// just some bytes for testing ser/deser
@@ -1248,10 +1257,10 @@ mod test {
#[test]
fn commit_consistency() {
let keychain = ExtKeychain::from_seed(&[0; 32]).unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let commit = keychain.commit(1003, &key_id).unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let commit_2 = keychain.commit(1003, &key_id).unwrap();
@@ -1261,7 +1270,7 @@ mod test {
#[test]
fn input_short_id() {
let keychain = ExtKeychain::from_seed(&[0; 32]).unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let commit = keychain.commit(5, &key_id).unwrap();
let input = Input {
@@ -1269,14 +1278,14 @@ mod test {
commit: commit,
};
let block_hash =
Hash::from_hex("3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673")
.unwrap();
let block_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
let nonce = 0;
let short_id = input.short_id(&block_hash, nonce);
assert_eq!(short_id, ShortId::from_hex("28fea5a693af").unwrap());
assert_eq!(short_id, ShortId::from_hex("df31d96e3cdb").unwrap());
// now generate the short_id for a *very* similar output (single feature flag
// different) and check it generates a different short_id
@@ -1286,6 +1295,6 @@ mod test {
};
let short_id = input.short_id(&block_hash, nonce);
assert_eq!(short_id, ShortId::from_hex("2df325971ab0").unwrap());
assert_eq!(short_id, ShortId::from_hex("784fc5afd5d9").unwrap());
}
}


@@ -69,7 +69,8 @@ impl VerifierCache for LruVerifierCache {
.kernel_sig_verification_cache
.get_mut(&x.hash())
.unwrap_or(&mut false)
}).cloned()
})
.cloned()
.collect::<Vec<_>>();
debug!(
LOGGER,
@@ -88,7 +89,8 @@ impl VerifierCache for LruVerifierCache {
.rangeproof_verification_cache
.get_mut(&x.proof.hash())
.unwrap_or(&mut false)
}).cloned()
})
.cloned()
.collect::<Vec<_>>();
debug!(
LOGGER,


@@ -105,6 +105,30 @@ pub fn genesis_testnet3() -> core::Block {
})
}
/// 4th testnet genesis block (cuckatoo29 AR, 30+ AF). Temporary values for now (PoW won't verify)
/// NB: Currently set to internal pre-testnet values
pub fn genesis_testnet4() -> core::Block {
core::Block::with_header(core::BlockHeader {
height: 0,
previous: core::hash::Hash([0xff; 32]),
timestamp: Utc.ymd(2018, 10, 17).and_hms(20, 0, 0),
pow: ProofOfWork {
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
scaling_difficulty: global::initial_graph_weight(),
nonce: 8612241555342799290,
proof: Proof::new(vec![
0x46f3b4, 0x1135f8c, 0x1a1596f, 0x1e10f71, 0x41c03ea, 0x63fe8e7, 0x65af34f,
0x73c16d3, 0x8216dc3, 0x9bc75d0, 0xae7d9ad, 0xc1cb12b, 0xc65e957, 0xf67a152,
0xfac6559, 0x100c3d71, 0x11eea08b, 0x1225dfbb, 0x124d61a1, 0x132a14b4,
0x13f4ec38, 0x1542d236, 0x155f2df0, 0x1577394e, 0x163c3513, 0x19349845,
0x19d46953, 0x19f65ed4, 0x1a0411b9, 0x1a2fa039, 0x1a72a06c, 0x1b02ddd2,
0x1b594d59, 0x1b7bffd3, 0x1befe12e, 0x1c82e4cd, 0x1d492478, 0x1de132a5,
0x1e578b3c, 0x1ed96855, 0x1f222896, 0x1fea0da6,
]),
},
..Default::default()
})
}
/// Placeholder for mainnet genesis block, will definitely change before
/// release so no use trying to pre-mine it.
pub fn genesis_main() -> core::Block {

View file

@ -16,13 +16,13 @@
//! having to pass them all over the place, but aren't consensus values.
//! They should be used sparingly.
use consensus::TargetError;
use consensus::HeaderInfo;
use consensus::{
BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DEFAULT_MIN_SIZESHIFT,
DIFFICULTY_ADJUST_WINDOW, EASINESS, INITIAL_DIFFICULTY, MEDIAN_TIME_WINDOW, PROOFSIZE,
REFERENCE_SIZESHIFT,
graph_weight, BASE_EDGE_BITS, BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON,
DAY_HEIGHT, DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, PROOFSIZE,
SECOND_POW_EDGE_BITS,
};
use pow::{self, CuckooContext, Difficulty, EdgeType, PoWContext};
use pow::{self, CuckatooContext, EdgeType, PoWContext};
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
@ -32,14 +32,14 @@ use std::sync::RwLock;
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// Automated testing sizeshift
pub const AUTOMATED_TESTING_MIN_SIZESHIFT: u8 = 10;
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 9;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 4;
/// User testing sizeshift
pub const USER_TESTING_MIN_SIZESHIFT: u8 = 16;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
@ -50,15 +50,12 @@ pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Old coinbase maturity
/// TODO: obsolete for mainnet together with maturity code below
pub const OLD_COINBASE_MATURITY: u64 = 1_000;
/// soft-fork around Sep 17 2018 on testnet3
pub const COINBASE_MATURITY_FORK_HEIGHT: u64 = 100_000;
/// Testing cut through horizon in blocks
pub const TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing initial graph weight
pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
@ -73,6 +70,15 @@ pub const TESTNET3_INITIAL_DIFFICULTY: u64 = 30000;
/// we're sure this peer is a stuck node, and we will kick out such stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// Testnet 4 initial block difficulty
/// 1_000 times natural scale factor for cuckatoo29
pub const TESTNET4_INITIAL_DIFFICULTY: u64 = 1_000 * (2<<(29-24)) * 29;
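// Worked example (illustrative): with BASE_EDGE_BITS = 24, the cuckatoo29 scale
// factor is (2 << (29 - 24)) * 29 = 64 * 29 = 1_856, so the constant above
// evaluates to 1_000 * 1_856 = 1_856_000.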
/// Trigger compaction check on average every day for FAST_SYNC_NODE,
/// roll the dice on every block to decide,
/// all blocks lower than (BodyHead.height - CUT_THROUGH_HORIZON) will be removed.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Types of chain a server can run with, dictates the genesis block and
/// and mining parameters used.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
@ -87,13 +93,15 @@ pub enum ChainTypes {
Testnet2,
/// Third test network
Testnet3,
/// Fourth test network
Testnet4,
/// Main production network
Mainnet,
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Testnet3
ChainTypes::Testnet4
}
}
@ -132,35 +140,35 @@ pub fn create_pow_context<T>(
where
T: EdgeType,
{
// Perform whatever tests, configuration etc are needed to determine desired context + edge size
// + params
// Hardcode to regular cuckoo for now
CuckooContext::<T>::new(edge_bits, proof_size, EASINESS, max_sols)
// Or switch to cuckatoo as follows:
// CuckatooContext::<T>::new(edge_bits, proof_size, easiness_pct, max_sols)
CuckatooContext::<T>::new(edge_bits, proof_size, max_sols)
}
/// The minimum acceptable sizeshift
pub fn min_sizeshift() -> u8 {
/// Return the type of the PoW
pub fn pow_type() -> PoWContextTypes {
PoWContextTypes::Cuckatoo
}
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
let param_ref = CHAIN_TYPE.read().unwrap();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_SIZESHIFT,
ChainTypes::UserTesting => USER_TESTING_MIN_SIZESHIFT,
ChainTypes::Testnet1 => USER_TESTING_MIN_SIZESHIFT,
_ => DEFAULT_MIN_SIZESHIFT,
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
ChainTypes::Testnet1 => USER_TESTING_MIN_EDGE_BITS,
_ => SECOND_POW_EDGE_BITS,
}
}
/// Reference sizeshift used to compute factor on higher Cuckoo graph sizes,
/// while the min_sizeshift can be changed on a soft fork, changing
/// ref_sizeshift is a hard fork.
pub fn ref_sizeshift() -> u8 {
/// Reference edge_bits used to compute the scaling factor on higher Cuck(at)oo
/// graph sizes; while min_edge_bits can be changed with a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
let param_ref = CHAIN_TYPE.read().unwrap();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_SIZESHIFT,
ChainTypes::UserTesting => USER_TESTING_MIN_SIZESHIFT,
ChainTypes::Testnet1 => USER_TESTING_MIN_SIZESHIFT,
_ => REFERENCE_SIZESHIFT,
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
ChainTypes::Testnet1 => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
@ -174,17 +182,13 @@ pub fn proofsize() -> usize {
}
}
/// Coinbase maturity for coinbases to be spent at given height
pub fn coinbase_maturity(height: u64) -> u64 {
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
let param_ref = CHAIN_TYPE.read().unwrap();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => if height < COINBASE_MATURITY_FORK_HEIGHT {
OLD_COINBASE_MATURITY
} else {
COINBASE_MATURITY
},
_ => COINBASE_MATURITY,
}
}
@ -197,9 +201,23 @@ pub fn initial_block_difficulty() -> u64 {
ChainTypes::Testnet1 => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet2 => TESTNET2_INITIAL_DIFFICULTY,
ChainTypes::Testnet3 => TESTNET3_INITIAL_DIFFICULTY,
ChainTypes::Testnet4 => TESTNET4_INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
let param_ref = CHAIN_TYPE.read().unwrap();
match *param_ref {
ChainTypes::AutomatedTesting => TESTING_INITIAL_GRAPH_WEIGHT,
ChainTypes::UserTesting => TESTING_INITIAL_GRAPH_WEIGHT,
ChainTypes::Testnet1 => TESTING_INITIAL_GRAPH_WEIGHT,
ChainTypes::Testnet2 => TESTING_INITIAL_GRAPH_WEIGHT,
ChainTypes::Testnet3 => TESTING_INITIAL_GRAPH_WEIGHT,
ChainTypes::Testnet4 => graph_weight(SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(SECOND_POW_EDGE_BITS) as u32,
}
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
@ -229,6 +247,7 @@ pub fn is_production_mode() -> bool {
ChainTypes::Testnet1 == *param_ref
|| ChainTypes::Testnet2 == *param_ref
|| ChainTypes::Testnet3 == *param_ref
|| ChainTypes::Testnet4 == *param_ref
|| ChainTypes::Mainnet == *param_ref
}
@ -241,9 +260,9 @@ pub fn get_genesis_nonce() -> u64 {
match *param_ref {
// won't make a difference
ChainTypes::AutomatedTesting => 0,
// Magic nonce for current genesis block at cuckoo16
// Magic nonce for current genesis block at cuckatoo15
ChainTypes::UserTesting => 27944,
// Magic nonce for genesis block for testnet2 (cuckoo30)
// Magic nonce for genesis block for testnet2 (cuckatoo29)
_ => panic!("Pre-set"),
}
}
@ -252,14 +271,13 @@ pub fn get_genesis_nonce() -> u64 {
/// vector and pads if needed, which will only be necessary for the first few
/// blocks after genesis
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty), TargetError>>
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderInfo>
where
T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
T: IntoIterator<Item = HeaderInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = (MEDIAN_TIME_WINDOW + DIFFICULTY_ADJUST_WINDOW) as usize;
let mut last_n: Vec<Result<(u64, Difficulty), TargetError>> =
cursor.into_iter().take(needed_block_count).collect();
let needed_block_count = DIFFICULTY_ADJUST_WINDOW as usize + 1;
let mut last_n: Vec<HeaderInfo> = cursor.into_iter().take(needed_block_count).collect();
// Sort blocks from earliest to latest (to keep conceptually easier)
last_n.reverse();
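// Illustrative numbers: with DIFFICULTY_ADJUST_WINDOW = 60 (an hour of one-minute
// blocks), needed_block_count is 61, so a chain only 3 blocks high gets 58
// simulated entries prepended below, reusing intervals from the real blocks.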
@ -269,16 +287,17 @@ where
let block_count_difference = needed_block_count - last_n.len();
if block_count_difference > 0 {
// Collect any real data we have
let mut live_intervals: Vec<(u64, Difficulty)> = last_n
let mut live_intervals: Vec<HeaderInfo> = last_n
.iter()
.map(|b| (b.clone().unwrap().0, b.clone().unwrap().1))
.map(|b| HeaderInfo::from_ts_diff(b.timestamp, b.difficulty))
.collect();
for i in (1..live_intervals.len()).rev() {
// prevents issues with very fast automated test chains
if live_intervals[i - 1].0 > live_intervals[i].0 {
live_intervals[i].0 = 0;
if live_intervals[i - 1].timestamp > live_intervals[i].timestamp {
live_intervals[i].timestamp = 0;
} else {
live_intervals[i].0 = live_intervals[i].0 - live_intervals[i - 1].0;
live_intervals[i].timestamp =
live_intervals[i].timestamp - live_intervals[i - 1].timestamp;
}
}
// Remove genesis "interval"
@ -286,16 +305,16 @@ where
live_intervals.remove(0);
} else {
//if it's just genesis, adjust the interval
live_intervals[0].0 = BLOCK_TIME_SEC;
live_intervals[0].timestamp = BLOCK_TIME_SEC;
}
let mut interval_index = live_intervals.len() - 1;
let mut last_ts = last_n.first().as_ref().unwrap().as_ref().unwrap().0;
let last_diff = live_intervals[live_intervals.len() - 1].1;
let mut last_ts = last_n.first().unwrap().timestamp;
let last_diff = live_intervals[live_intervals.len() - 1].difficulty;
// fill in simulated blocks with values from the previous real block
for _ in 0..block_count_difference {
last_ts = last_ts.saturating_sub(live_intervals[live_intervals.len() - 1].0);
last_n.insert(0, Ok((last_ts, last_diff.clone())));
last_ts = last_ts.saturating_sub(live_intervals[live_intervals.len() - 1].timestamp);
last_n.insert(0, HeaderInfo::from_ts_diff(last_ts, last_diff.clone()));
interval_index = match interval_index {
0 => live_intervals.len() - 1,
_ => interval_index - 1,

View file

@ -79,13 +79,15 @@ where
}
pub fn set_header_nonce(header: Vec<u8>, nonce: Option<u32>) -> Result<[u64; 4], Error> {
let len = header.len();
let mut header = header.clone();
if let Some(n) = nonce {
let len = header.len();
let mut header = header.clone();
header.truncate(len - mem::size_of::<u32>());
header.write_u32::<LittleEndian>(n)?;
create_siphash_keys(header)
} else {
create_siphash_keys(header)
}
create_siphash_keys(header)
}
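// Usage sketch (assuming the conventional 80-byte pre-PoW header whose final 4
// bytes are the nonce slot): passing Some(n) overwrites those
// mem::size_of::<u32>() bytes with n in little-endian before key derivation,
// so every nonce produces a fresh set of siphash keys:
// let keys = set_header_nonce(vec![0u8; 80], Some(20))?;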
pub fn create_siphash_keys(header: Vec<u8>) -> Result<[u64; 4], Error> {
@ -130,7 +132,7 @@ macro_rules! to_edge {
}
/// Utility struct for commonly used Cuckoo parameters calculated
/// from header, nonce, sizeshift, etc.
/// from header, nonce, edge_bits, etc.
pub struct CuckooParams<T>
where
T: EdgeType,
@ -139,7 +141,6 @@ where
pub proof_size: usize,
pub num_edges: u64,
pub siphash_keys: [u64; 4],
pub easiness: T,
pub edge_mask: T,
}
@ -147,46 +148,28 @@ impl<T> CuckooParams<T>
where
T: EdgeType,
{
/// Instantiates new params and calculate easiness, edge mask, etc
/// Instantiates new params and calculates edge mask, etc
pub fn new(
edge_bits: u8,
proof_size: usize,
easiness_pct: u32,
cuckatoo: bool,
) -> Result<CuckooParams<T>, Error> {
let num_edges = 1 << edge_bits;
let num_nodes = 2 * num_edges as u64;
let easiness = if cuckatoo {
to_u64!(easiness_pct) * num_nodes / 100
} else {
to_u64!(easiness_pct) * num_edges / 100
};
let edge_mask = if cuckatoo {
to_edge!(num_edges - 1)
} else {
to_edge!(num_edges / 2 - 1)
};
let num_edges = (1 as u64) << edge_bits;
let edge_mask = to_edge!(num_edges - 1);
Ok(CuckooParams {
siphash_keys: [0; 4],
easiness: to_edge!(easiness),
proof_size,
edge_mask,
num_edges,
edge_bits,
proof_size,
num_edges,
siphash_keys: [0; 4],
edge_mask,
})
}
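// Example (sketch): CuckooParams::<u32>::new(19, 42) gives num_edges = 1 << 19 =
// 524_288 and edge_mask = 0x7_FFFF, the mask applied to siphash output so that
// generated edge endpoints stay within the graph.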
/// Reset the main keys used for siphash from the header and nonce
pub fn reset_header_nonce(
&mut self,
mut header: Vec<u8>,
header: Vec<u8>,
nonce: Option<u32>,
) -> Result<(), Error> {
if let Some(n) = nonce {
let len = header.len();
header.truncate(len - mem::size_of::<u32>());
header.write_u32::<LittleEndian>(n)?;
}
self.siphash_keys = set_header_nonce(header, nonce)?;
Ok(())
}

View file

@ -168,13 +168,11 @@ where
fn new(
edge_bits: u8,
proof_size: usize,
easiness_pct: u32,
max_sols: u32,
) -> Result<Box<Self>, Error> {
Ok(Box::new(CuckatooContext::<T>::new_impl(
edge_bits,
proof_size,
easiness_pct,
max_sols,
)?))
}
@ -189,8 +187,8 @@ where
}
fn find_cycles(&mut self) -> Result<Vec<Proof>, Error> {
let ease = to_u64!(self.params.easiness);
self.find_cycles_iter(0..ease)
let num_edges = self.params.num_edges;
self.find_cycles_iter(0..num_edges)
}
fn verify(&self, proof: &Proof) -> Result<(), Error> {
@ -206,10 +204,9 @@ where
pub fn new_impl(
edge_bits: u8,
proof_size: usize,
easiness_pct: u32,
max_sols: u32,
) -> Result<CuckatooContext<T>, Error> {
let params = CuckooParams::new(edge_bits, proof_size, easiness_pct, true)?;
let params = CuckooParams::new(edge_bits, proof_size)?;
let num_edges = to_edge!(params.num_edges);
Ok(CuckatooContext {
params,
@ -384,7 +381,7 @@ mod test {
where
T: EdgeType,
{
let mut ctx = CuckatooContext::<T>::new(29, 42, 50, 10)?;
let mut ctx = CuckatooContext::<T>::new(29, 42, 10)?;
ctx.set_header_nonce([0u8; 80].to_vec(), Some(20), false)?;
assert!(ctx.verify(&Proof::new(V1_29.to_vec().clone())).is_ok());
Ok(())
@ -394,7 +391,7 @@ mod test {
where
T: EdgeType,
{
let mut ctx = CuckatooContext::<T>::new(29, 42, 50, 10)?;
let mut ctx = CuckatooContext::<T>::new(29, 42, 10)?;
let mut header = [0u8; 80];
header[0] = 1u8;
ctx.set_header_nonce(header.to_vec(), Some(20), false)?;
@ -412,7 +409,6 @@ mod test {
where
T: EdgeType,
{
let easiness_pct = 50;
let nonce = 1546569;
let _range = 1;
let header = [0u8; 80].to_vec();
@ -421,14 +417,13 @@ mod test {
let max_sols = 4;
println!(
"Looking for {}-cycle on cuckatoo{}(\"{}\",{}) with {}% edges",
"Looking for {}-cycle on cuckatoo{}(\"{}\",{})",
proof_size,
edge_bits,
String::from_utf8(header.clone()).unwrap(),
nonce,
easiness_pct
nonce
);
let mut ctx_u32 = CuckatooContext::<T>::new(edge_bits, proof_size, easiness_pct, max_sols)?;
let mut ctx_u32 = CuckatooContext::<T>::new(edge_bits, proof_size, max_sols)?;
let mut bytes = ctx_u32.byte_count()?;
let mut unit = 0;
while bytes >= 10240 {

View file

@ -43,13 +43,11 @@ where
fn new(
edge_bits: u8,
proof_size: usize,
easiness_pct: u32,
max_sols: u32,
) -> Result<Box<Self>, Error> {
Ok(Box::new(CuckooContext::<T>::new_impl(
edge_bits,
proof_size,
easiness_pct,
max_sols,
)?))
}
@ -80,20 +78,20 @@ where
pub fn new_impl(
edge_bits: u8,
proof_size: usize,
easiness_pct: u32,
max_sols: u32,
) -> Result<CuckooContext<T>, Error> {
let params = CuckooParams::new(edge_bits, proof_size, easiness_pct, false)?;
let num_edges = params.num_edges as usize;
let params = CuckooParams::new(edge_bits, proof_size)?;
let num_nodes = 2 * params.num_edges as usize;
Ok(CuckooContext {
params: params,
graph: vec![T::zero(); num_edges + 1],
graph: vec![T::zero(); num_nodes],
_max_sols: max_sols,
})
}
fn reset(&mut self) -> Result<(), Error> {
self.graph = vec![T::zero(); self.params.num_edges as usize + 1];
let num_nodes = 2 * self.params.num_edges as usize;
self.graph = vec![T::zero(); num_nodes];
Ok(())
}
@ -214,7 +212,7 @@ where
}
let mut n = 0;
let mut sol = vec![T::zero(); self.params.proof_size];
for nonce in 0..to_usize!(self.params.easiness) {
for nonce in 0..self.params.num_edges {
let edge = self.new_edge(to_edge!(nonce))?;
if cycle.contains(&edge) {
sol[n] = to_edge!(nonce);
@ -233,7 +231,7 @@ where
pub fn find_cycles_impl(&mut self) -> Result<Vec<Proof>, Error> {
let mut us = [T::zero(); MAXPATHLEN];
let mut vs = [T::zero(); MAXPATHLEN];
for nonce in 0..to_usize!(self.params.easiness) {
for nonce in 0..self.params.num_edges {
us[0] = self.new_node(to_edge!(nonce), 0)?;
vs[0] = self.new_node(to_edge!(nonce), 1)?;
let u = self.graph[to_usize!(us[0])];
@ -248,7 +246,7 @@ where
match sol {
Ok(s) => {
let mut proof = Proof::new(map_vec!(s.to_vec(), |&n| n.to_u64().unwrap_or(0)));
proof.cuckoo_sizeshift = self.params.edge_bits;
proof.edge_bits = self.params.edge_bits;
return Ok(vec![proof]);
}
Err(e) => match e.kind() {
@ -261,16 +259,16 @@ where
Err(ErrorKind::NoSolution)?
}
/// Assuming increasing nonces all smaller than easiness, verifies the
/// Assuming increasing nonces all smaller than #edges, verifies the
/// nonces form a cycle in a Cuckoo graph. Each nonce generates an edge; we
/// build the nodes on both sides of that edge and count the connections.
pub fn verify_impl(&self, proof: &Proof) -> Result<(), Error> {
let easiness = to_u64!(self.params.easiness);
let num_nonces = self.params.num_edges;
let nonces = &proof.nonces;
let mut us = vec![T::zero(); proof.proof_size()];
let mut vs = vec![T::zero(); proof.proof_size()];
for n in 0..proof.proof_size() {
if nonces[n] >= easiness || (n != 0 && nonces[n] <= nonces[n - 1]) {
if nonces[n] >= num_nonces || (n != 0 && nonces[n] <= nonces[n - 1]) {
return Err(ErrorKind::Verification("edge wrong size".to_owned()))?;
}
us[n] = self.new_node(to_edge!(nonces[n]), 0)?;
@ -322,25 +320,25 @@ mod test {
use super::*;
static V1: [u64; 42] = [
0x3bbd, 0x4e96, 0x1013b, 0x1172b, 0x1371b, 0x13e6a, 0x1aaa6, 0x1b575, 0x1e237, 0x1ee88,
0x22f94, 0x24223, 0x25b4f, 0x2e9f3, 0x33b49, 0x34063, 0x3454a, 0x3c081, 0x3d08e, 0x3d863,
0x4285a, 0x42f22, 0x43122, 0x4b853, 0x4cd0c, 0x4f280, 0x557d5, 0x562cf, 0x58e59, 0x59a62,
0x5b568, 0x644b9, 0x657e9, 0x66337, 0x6821c, 0x7866f, 0x7e14b, 0x7ec7c, 0x7eed7, 0x80643,
0x8628c, 0x8949e,
0x8702, 0x12003, 0x2043f, 0x24cf8, 0x27631, 0x2beda, 0x325e5, 0x345b4, 0x36f5c, 0x3b3bc,
0x4cef6, 0x4dfdf, 0x5036b, 0x5d528, 0x7d76b, 0x80958, 0x81649, 0x8a064, 0x935fe, 0x93c28,
0x93fc9, 0x9aec5, 0x9c5c8, 0xa00a7, 0xa7256, 0xaa35e, 0xb9e04, 0xc8835, 0xcda49, 0xd72ea,
0xd7f80, 0xdaa3a, 0xdafce, 0xe03fe, 0xe55a2, 0xe6e60, 0xebb9d, 0xf5248, 0xf6a4b, 0xf6d32,
0xf7c61, 0xfd9e9
];
static V2: [u64; 42] = [
0x5e3a, 0x8a8b, 0x103d8, 0x1374b, 0x14780, 0x16110, 0x1b571, 0x1c351, 0x1c826, 0x28228,
0x2909f, 0x29516, 0x2c1c4, 0x334eb, 0x34cdd, 0x38a2c, 0x3ad23, 0x45ac5, 0x46afe, 0x50f43,
0x51ed6, 0x52ddd, 0x54a82, 0x5a46b, 0x5dbdb, 0x60f6f, 0x60fcd, 0x61c78, 0x63899, 0x64dab,
0x6affc, 0x6b569, 0x72639, 0x73987, 0x78806, 0x7b98e, 0x7c7d7, 0x7ddd4, 0x7fa88, 0x8277c,
0x832d9, 0x8ba6f,
0xab0, 0x403c, 0x509c, 0x127c0, 0x1a0b3, 0x1ffe4, 0x26180, 0x2a20a, 0x35559, 0x36dd3,
0x3cb20, 0x4992f, 0x55b20, 0x5b507, 0x66e58, 0x6784d, 0x6fda8, 0x7363d, 0x76dd6, 0x7f13b,
0x84672, 0x85724, 0x991cf, 0x9a6fe, 0x9b0c5, 0xa5019, 0xa7207, 0xaf32f, 0xc29f3, 0xc39d3,
0xc78ed, 0xc9e75, 0xcd0db, 0xcd81e, 0xd02e0, 0xd05c4, 0xd8f99, 0xd9359, 0xdff3b, 0xea623,
0xf9100, 0xfc966
];
static V3: [u64; 42] = [
0x308b, 0x9004, 0x91fc, 0x983e, 0x9d67, 0xa293, 0xb4cb, 0xb6c8, 0xccc8, 0xdddc, 0xf04d,
0x1372f, 0x16ec9, 0x17b61, 0x17d03, 0x1e3bc, 0x1fb0f, 0x29e6e, 0x2a2ca, 0x2a719, 0x3a078,
0x3b7cc, 0x3c71d, 0x40daa, 0x43e17, 0x46adc, 0x4b359, 0x4c3aa, 0x4ce92, 0x4d06e, 0x51140,
0x565ac, 0x56b1f, 0x58a8b, 0x5e410, 0x5e607, 0x5ebb5, 0x5f8ae, 0x7aeac, 0x7b902, 0x7d6af,
0x7f400,
0x14ca, 0x1e80, 0x587c, 0xa2d4, 0x14f6b, 0x1b100, 0x1b74c, 0x2477d, 0x29ba4, 0x33f25,
0x4c55f, 0x4d280, 0x50ffa, 0x53900, 0x5cf62, 0x63f66, 0x65623, 0x6fb19, 0x7a19e, 0x82eef,
0x83d2d, 0x88015, 0x8e6c5, 0x91086, 0x97429, 0x9aa27, 0xa01b7, 0xa304b, 0xafa06, 0xb1cb3,
0xbb9fc, 0xbf345, 0xc0761, 0xc0e78, 0xc5b99, 0xc9f09, 0xcc62c, 0xceb6e, 0xd98ad, 0xeecb3,
0xef966, 0xfef9b
];
// cuckoo28 at 50% edges of letter 'u'
static V4: [u64; 42] = [
@ -395,25 +393,26 @@ mod test {
where
T: EdgeType,
{
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 75, 10)?;
cuckoo_ctx.set_header_nonce([49].to_vec(), None, true)?;
let header = [0; 4].to_vec();
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 10)?;
cuckoo_ctx.set_header_nonce(header.clone(), Some(39), true)?;
let res = cuckoo_ctx.find_cycles()?;
let mut proof = Proof::new(V1.to_vec());
proof.cuckoo_sizeshift = 20;
proof.edge_bits = 20;
assert_eq!(proof, res[0]);
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 70, 10)?;
cuckoo_ctx.set_header_nonce([50].to_vec(), None, true)?;
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 10)?;
cuckoo_ctx.set_header_nonce(header.clone(), Some(56), true)?;
let res = cuckoo_ctx.find_cycles()?;
let mut proof = Proof::new(V2.to_vec());
proof.cuckoo_sizeshift = 20;
proof.edge_bits = 20;
assert_eq!(proof, res[0]);
//re-use context
cuckoo_ctx.set_header_nonce([51].to_vec(), None, true)?;
cuckoo_ctx.set_header_nonce(header, Some(66), true)?;
let res = cuckoo_ctx.find_cycles()?;
let mut proof = Proof::new(V3.to_vec());
proof.cuckoo_sizeshift = 20;
proof.edge_bits = 20;
assert_eq!(proof, res[0]);
Ok(())
}
@ -422,13 +421,14 @@ mod test {
where
T: EdgeType,
{
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 75, 10)?;
cuckoo_ctx.set_header_nonce([49].to_vec(), None, false)?;
let header = [0; 4].to_vec();
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 10)?;
cuckoo_ctx.set_header_nonce(header.clone(), Some(39), false)?;
assert!(cuckoo_ctx.verify(&Proof::new(V1.to_vec().clone())).is_ok());
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 70, 10)?;
cuckoo_ctx.set_header_nonce([50].to_vec(), None, false)?;
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 10)?;
cuckoo_ctx.set_header_nonce(header.clone(), Some(56), false)?;
assert!(cuckoo_ctx.verify(&Proof::new(V2.to_vec().clone())).is_ok());
cuckoo_ctx.set_header_nonce([51].to_vec(), None, false)?;
cuckoo_ctx.set_header_nonce(header.clone(), Some(66), false)?;
assert!(cuckoo_ctx.verify(&Proof::new(V3.to_vec().clone())).is_ok());
Ok(())
}
@ -438,7 +438,7 @@ mod test {
T: EdgeType,
{
// edge checks
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 75, 10)?;
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 10)?;
cuckoo_ctx.set_header_nonce([49].to_vec(), None, false)?;
// edge checks
assert!(!cuckoo_ctx.verify(&Proof::new(vec![0; 42])).is_ok());
@ -448,7 +448,7 @@ mod test {
assert!(!cuckoo_ctx.verify(&Proof::new(V1.to_vec().clone())).is_ok());
let mut test_header = [0; 32];
test_header[0] = 24;
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 50, 10)?;
let mut cuckoo_ctx = CuckooContext::<T>::new(20, 42, 10)?;
cuckoo_ctx.set_header_nonce(test_header.to_vec(), None, false)?;
assert!(!cuckoo_ctx.verify(&Proof::new(V4.to_vec().clone())).is_ok());
Ok(())
@ -458,10 +458,10 @@ mod test {
where
T: EdgeType,
{
for n in 1..5 {
let h = [n; 32];
let mut cuckoo_ctx = CuckooContext::<T>::new(16, 42, 75, 10)?;
cuckoo_ctx.set_header_nonce(h.to_vec(), None, false)?;
let h = [0u8; 32];
for n in [45u32, 49, 131, 143, 151].iter() {
let mut cuckoo_ctx = CuckooContext::<T>::new(16, 42, 10)?;
cuckoo_ctx.set_header_nonce(h.to_vec(), Some(*n), false)?;
let res = cuckoo_ctx.find_cycles()?;
assert!(cuckoo_ctx.verify(&res[0]).is_ok())
}

View file

@ -31,13 +31,13 @@ pub struct Lean {
impl Lean {
/// Instantiates a new lean miner based on some Cuckatoo parameters
pub fn new(edge_bits: u8, easiness_pct: u32) -> Lean {
pub fn new(edge_bits: u8) -> Lean {
// note that proof size doesn't matter to a lean miner
let params = CuckooParams::new(edge_bits, 42, easiness_pct, true).unwrap();
let params = CuckooParams::new(edge_bits, 42).unwrap();
// edge bitmap, before trimming all of them are on
let mut edges = Bitmap::create_with_capacity(params.easiness);
edges.flip_inplace(0..params.easiness.into());
let mut edges = Bitmap::create_with_capacity(params.num_edges as u32);
edges.flip_inplace(0..params.num_edges.into());
Lean { params, edges }
}
@ -51,7 +51,7 @@ impl Lean {
/// and works well for Cuckatoo size above 18.
pub fn trim(&mut self) {
// trimming successively
while self.edges.cardinality() > (7 * (self.params.easiness >> 8) / 8) as u64 {
while self.edges.cardinality() > (7 * (self.params.num_edges >> 8) / 8) as u64 {
self.count_and_kill();
}
}
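// Worked example (sketch): for edge_bits = 19 there are 1 << 19 = 524_288 edges,
// so trimming repeats until fewer than 7 * (524_288 >> 8) / 8 = 1_792 edges
// survive, roughly 0.34% of the original graph.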
@ -88,8 +88,6 @@ impl Lean {
#[cfg(test)]
mod test {
use super::*;
use pow::common;
use pow::cuckatoo::*;
use pow::types::PoWContext;
#[test]
@ -98,11 +96,11 @@ mod test {
let header = [0u8; 84].to_vec(); // with nonce
let edge_bits = 19;
let mut lean = Lean::new(edge_bits, 50);
let mut lean = Lean::new(edge_bits);
lean.set_header_nonce(header.clone(), nonce);
lean.trim();
let mut ctx_u32 = CuckatooContext::<u32>::new_impl(edge_bits, 42, 50, 10).unwrap();
let mut ctx_u32 = CuckatooContext::<u32>::new_impl(edge_bits, 42, 10).unwrap();
ctx_u32.set_header_nonce(header, Some(nonce), true).unwrap();
lean.find_cycles(ctx_u32).unwrap();
}

View file

@ -79,7 +79,7 @@ pub fn mine_genesis_block() -> Result<Block, Error> {
// total_difficulty on the genesis header *is* the difficulty of that block
let genesis_difficulty = gen.header.pow.total_difficulty.clone();
let sz = global::min_sizeshift();
let sz = global::min_edge_bits();
let proof_size = global::proofsize();
pow_size(&mut gen.header, genesis_difficulty, proof_size, sz)?;
@ -143,10 +143,10 @@ mod test {
&mut b.header,
Difficulty::one(),
global::proofsize(),
global::min_sizeshift(),
global::min_edge_bits(),
).unwrap();
assert!(b.header.pow.nonce != 310);
assert!(b.header.pow.to_difficulty() >= Difficulty::one());
assert!(verify_size(&b.header, global::min_sizeshift()).is_ok());
assert!(verify_size(&b.header, global::min_edge_bits()).is_ok());
}
}

View file

@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/// Types for a Cuckoo proof of work and its encapsulation as a fully usable
/// Types for a Cuck(at)oo proof of work and its encapsulation as a fully usable
/// proof of work within a block header.
use std::cmp::max;
use std::cmp::{min, max};
use std::ops::{Add, Div, Mul, Sub};
use std::{fmt, iter};
use rand::{thread_rng, Rng};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use consensus::SECOND_POW_SIZESHIFT;
use consensus::{graph_weight, SECOND_POW_EDGE_BITS};
use core::hash::Hashed;
use global;
use ser::{self, Readable, Reader, Writeable, Writer};
@ -39,7 +39,6 @@ where
fn new(
edge_bits: u8,
proof_size: usize,
easiness_pct: u32,
max_sols: u32,
) -> Result<Box<Self>, Error>;
/// Sets the header along with an optional nonce at the end
@ -81,24 +80,25 @@ impl Difficulty {
Difficulty { num: max(num, 1) }
}
/// Computes the difficulty from a hash. Divides the maximum target by the
/// provided hash and applies the Cuckoo sizeshift adjustment factor (see
/// https://lists.launchpad.net/mimblewimble/msg00494.html).
pub fn from_proof_adjusted(proof: &Proof) -> Difficulty {
// Adjust the difficulty based on a 2^(N-M)*(N-1) factor, with M being
// the minimum sizeshift and N the provided sizeshift
let shift = proof.cuckoo_sizeshift;
let adjust_factor = (1 << (shift - global::ref_sizeshift()) as u64) * (shift as u64 - 1);
/// Compute difficulty scaling factor for graph defined by 2 * 2^edge_bits * edge_bits bits
pub fn scale(edge_bits: u8) -> u64 {
(2 << (edge_bits - global::base_edge_bits()) as u64) * (edge_bits as u64)
}
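// Worked example (assuming base_edge_bits() == 24): scale(29) = (2 << 5) * 29 =
// 1_856 and scale(30) = (2 << 6) * 30 = 3_840, so each extra edge bit slightly
// more than doubles a proof's weight.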
Difficulty::from_num(proof.raw_difficulty() * adjust_factor)
/// Computes the difficulty from a hash. Divides the maximum target by the
/// provided hash and applies the Cuck(at)oo size adjustment factor (see
/// https://lists.launchpad.net/mimblewimble/msg00494.html).
fn from_proof_adjusted(proof: &Proof) -> Difficulty {
// scale with natural scaling factor
Difficulty::from_num(proof.scaled_difficulty(graph_weight(proof.edge_bits)))
}
/// Same as `from_proof_adjusted` but instead of an adjustment based on
/// cycle size, scales based on a provided factor. Used by dual PoW system
/// to scale one PoW against the other.
pub fn from_proof_scaled(proof: &Proof, scaling: u64) -> Difficulty {
fn from_proof_scaled(proof: &Proof, scaling: u32) -> Difficulty {
// Scaling between 2 proof of work algos
Difficulty::from_num(proof.raw_difficulty() * scaling)
Difficulty::from_num(proof.scaled_difficulty(scaling as u64))
}
/// Converts the difficulty into a u64
@ -219,7 +219,7 @@ pub struct ProofOfWork {
/// Total accumulated difficulty since genesis block
pub total_difficulty: Difficulty,
/// Difficulty scaling factor between the different proofs of work
pub scaling_difficulty: u64,
pub scaling_difficulty: u32,
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Proof of work data.
@ -240,13 +240,9 @@ impl Default for ProofOfWork {
impl ProofOfWork {
/// Read implementation, can't define as trait impl as we need a version
pub fn read(ver: u16, reader: &mut Reader) -> Result<ProofOfWork, ser::Error> {
let (total_difficulty, scaling_difficulty) = if ver == 1 {
// read earlier in the header on older versions
(Difficulty::one(), 1)
} else {
(Difficulty::read(reader)?, reader.read_u64()?)
};
pub fn read(_ver: u16, reader: &mut Reader) -> Result<ProofOfWork, ser::Error> {
let total_difficulty = Difficulty::read(reader)?;
let scaling_difficulty = reader.read_u32()?;
let nonce = reader.read_u64()?;
let proof = Proof::read(reader)?;
Ok(ProofOfWork {
@ -269,14 +265,12 @@ impl ProofOfWork {
}
/// Write the pre-hash portion of the header
pub fn write_pre_pow<W: Writer>(&self, ver: u16, writer: &mut W) -> Result<(), ser::Error> {
if ver > 1 {
ser_multiwrite!(
writer,
[write_u64, self.total_difficulty.to_num()],
[write_u64, self.scaling_difficulty]
);
}
pub fn write_pre_pow<W: Writer>(&self, _ver: u16, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(
writer,
[write_u64, self.total_difficulty.to_num()],
[write_u32, self.scaling_difficulty]
);
Ok(())
}
@ -284,23 +278,38 @@ impl ProofOfWork {
pub fn to_difficulty(&self) -> Difficulty {
// 2 proofs of work, Cuckoo29 (for now) and Cuckoo30+, which are scaled
// differently (scaling not controlled for now)
if self.proof.cuckoo_sizeshift == SECOND_POW_SIZESHIFT {
if self.proof.edge_bits == SECOND_POW_EDGE_BITS {
Difficulty::from_proof_scaled(&self.proof, self.scaling_difficulty)
} else {
Difficulty::from_proof_adjusted(&self.proof)
}
}
/// The shift used for the cuckoo cycle size on this proof
pub fn cuckoo_sizeshift(&self) -> u8 {
self.proof.cuckoo_sizeshift
/// The edge_bits used for the cuckoo cycle size on this proof
pub fn edge_bits(&self) -> u8 {
self.proof.edge_bits
}
/// Whether this proof of work is for the primary algorithm (as opposed
/// to secondary). Only depends on the edge_bits at this time.
pub fn is_primary(&self) -> bool {
// 2 conditions are redundant right now but not necessarily in
// the future
self.proof.edge_bits != SECOND_POW_EDGE_BITS
&& self.proof.edge_bits >= global::min_edge_bits()
}
/// Whether this proof of work is for the secondary algorithm (as opposed
/// to primary). Only depends on the edge_bits at this time.
pub fn is_secondary(&self) -> bool {
self.proof.edge_bits == SECOND_POW_EDGE_BITS
}
}
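// e.g. with SECOND_POW_EDGE_BITS = 29 and min_edge_bits() also returning 29 under
// the defaults in this commit, a cuckatoo29 solution is secondary while any
// solution with edge_bits >= 30 is primary.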
/// A Cuckoo Cycle proof of work, consisting of the shift to get the graph
/// size (i.e. 31 for Cuckoo31 with a 2^31 or 1<<31 graph size) and the nonces
/// of the graph solution. While being expressed as u64 for simplicity, each
/// nonce is strictly less than half the cycle size (i.e. <2^30 for Cuckoo 31).
/// A Cuck(at)oo Cycle proof of work, consisting of the edge_bits to get the graph
/// size (i.e. the 2-log of the number of edges) and the nonces
/// of the graph solution. While being expressed as u64 for simplicity,
/// nonces a.k.a. edge indices range from 0 to (1 << edge_bits) - 1
///
/// The hash of the `Proof` is the hash of its packed nonces when serializing
/// them at their exact bit size. The resulting bit sequence is padded to be
@ -309,14 +318,14 @@ impl ProofOfWork {
#[derive(Clone, PartialOrd, PartialEq)]
pub struct Proof {
/// Power of 2 used for the size of the cuckoo graph
pub cuckoo_sizeshift: u8,
pub edge_bits: u8,
/// The nonces
pub nonces: Vec<u64>,
}
impl fmt::Debug for Proof {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Cuckoo{}(", self.cuckoo_sizeshift)?;
write!(f, "Cuckoo{}(", self.edge_bits)?;
for (i, val) in self.nonces[..].iter().enumerate() {
write!(f, "{:x}", val)?;
if i < self.nonces.len() - 1 {
@ -330,11 +339,11 @@ impl fmt::Debug for Proof {
impl Eq for Proof {}
impl Proof {
/// Builds a proof with provided nonces at default sizeshift
/// Builds a proof with provided nonces at default edge_bits
pub fn new(mut in_nonces: Vec<u64>) -> Proof {
in_nonces.sort();
Proof {
cuckoo_sizeshift: global::min_sizeshift(),
edge_bits: global::min_edge_bits(),
nonces: in_nonces,
}
}
@ -342,7 +351,7 @@ impl Proof {
/// Builds a proof with all bytes zeroed out
pub fn zero(proof_size: usize) -> Proof {
Proof {
cuckoo_sizeshift: global::min_sizeshift(),
edge_bits: global::min_edge_bits(),
nonces: vec![0; proof_size],
}
}
@ -351,17 +360,17 @@ impl Proof {
/// needed so that tests that ignore POW
/// don't fail due to duplicate hashes
pub fn random(proof_size: usize) -> Proof {
let sizeshift = global::min_sizeshift();
let nonce_mask = (1 << (sizeshift - 1)) - 1;
let edge_bits = global::min_edge_bits();
let nonce_mask = (1 << edge_bits) - 1;
let mut rng = thread_rng();
// force the random num to be within sizeshift bits
// force the random num to be within edge_bits bits
let mut v: Vec<u64> = iter::repeat(())
.map(|()| (rng.gen::<u32>() & nonce_mask) as u64)
.take(proof_size)
.collect();
v.sort();
Proof {
cuckoo_sizeshift: global::min_sizeshift(),
edge_bits: global::min_edge_bits(),
nonces: v,
}
}
@ -371,21 +380,22 @@ impl Proof {
self.nonces.len()
}
/// Difficulty achieved by this proof
fn raw_difficulty(&self) -> u64 {
<u64>::max_value() / self.hash().to_u64()
/// Difficulty achieved by this proof with given scaling factor
fn scaled_difficulty(&self, scale: u64) -> u64 {
let diff = ((scale as u128) << 64) / (self.hash().to_u64() as u128);
min(diff, <u64>::max_value() as u128) as u64
}
}
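// In other words (sketch): difficulty = scale * 2^64 / hash64, clamped to
// u64::max_value(); for a uniformly random 64-bit hash this gives
// P(difficulty >= d) ~= scale / d for d >= scale, which is what lets a single
// scaling factor weigh the two proofs of work against each other.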
impl Readable for Proof {
fn read(reader: &mut Reader) -> Result<Proof, ser::Error> {
let cuckoo_sizeshift = reader.read_u8()?;
if cuckoo_sizeshift == 0 || cuckoo_sizeshift > 64 {
let edge_bits = reader.read_u8()?;
if edge_bits == 0 || edge_bits > 64 {
return Err(ser::Error::CorruptedData);
}
let mut nonces = Vec::with_capacity(global::proofsize());
let nonce_bits = cuckoo_sizeshift as usize - 1;
let nonce_bits = edge_bits as usize;
let bytes_len = BitVec::bytes_len(nonce_bits * global::proofsize());
let bits = reader.read_fixed_bytes(bytes_len)?;
let bitvec = BitVec { bits };
@ -399,7 +409,7 @@ impl Readable for Proof {
nonces.push(nonce);
}
Ok(Proof {
cuckoo_sizeshift,
edge_bits,
nonces,
})
}
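// Size check (sketch, assuming BitVec::bytes_len rounds bits up to whole bytes):
// with edge_bits = 29 and proofsize() = 42, the packed nonces occupy
// (29 * 42 + 7) / 8 = 153 bytes, preceded by a single edge_bits byte outside
// hash mode.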
@ -408,10 +418,9 @@ impl Readable for Proof {
impl Writeable for Proof {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
if writer.serialization_mode() != ser::SerializationMode::Hash {
writer.write_u8(self.cuckoo_sizeshift)?;
writer.write_u8(self.edge_bits)?;
}
let nonce_bits = self.cuckoo_sizeshift as usize - 1;
let nonce_bits = self.edge_bits as usize;
let mut bitvec = BitVec::new(nonce_bits * global::proofsize());
for (n, nonce) in self.nonces.iter().enumerate() {
for bit in 0..nonce_bits {

View file

@ -25,7 +25,7 @@ pub mod common;
use chrono::Duration;
use common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
use grin_core::consensus::{self, BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
use grin_core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
use grin_core::core::block::Error;
use grin_core::core::hash::Hashed;
use grin_core::core::id::ShortIdentifiable;
@ -34,7 +34,7 @@ use grin_core::core::Committed;
use grin_core::core::{Block, BlockHeader, CompactBlock, KernelFeatures, OutputFeatures};
use grin_core::{global, ser};
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use util::{secp, secp_static};
use util::secp;
use wallet::libtx::build::{self, input, output, with_fee};
fn verifier_cache() -> Arc<RwLock<VerifierCache>> {
@ -48,11 +48,9 @@ fn too_large_block() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let max_out = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT;
let zero_commit = secp_static::commit_to_zero_value();
let mut pks = vec![];
for n in 0..(max_out + 1) {
pks.push(keychain.derive_key_id(n as u32).unwrap());
pks.push(ExtKeychain::derive_key_id(1, n as u32, 0, 0, 0));
}
let mut parts = vec![];
@ -66,10 +64,10 @@ fn too_large_block() {
println!("Build tx: {}", now.elapsed().as_secs());
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx], &keychain, &prev, &key_id);
assert!(
b.validate(&BlindingFactor::zero(), &zero_commit, verifier_cache())
b.validate(&BlindingFactor::zero(), verifier_cache())
.is_err()
);
}
@ -90,11 +88,9 @@ fn very_empty_block() {
// builds a block with a tx spending another and check that cut_through occurred
fn block_with_cut_through() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let mut btx1 = tx2i1o();
let mut btx2 = build::transaction(
@ -106,7 +102,7 @@ fn block_with_cut_through() {
let mut btx3 = txspend1i1o(5, &keychain, key_id2.clone(), key_id3);
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(
vec![&mut btx1, &mut btx2, &mut btx3],
&keychain,
@ -117,7 +113,7 @@ fn block_with_cut_through() {
// block should have been automatically compacted (including reward
// output) and should still be valid
println!("3");
b.validate(&BlindingFactor::zero(), &zero_commit, verifier_cache())
b.validate(&BlindingFactor::zero(), verifier_cache())
.unwrap();
assert_eq!(b.inputs().len(), 3);
assert_eq!(b.outputs().len(), 3);
@ -127,9 +123,8 @@ fn block_with_cut_through() {
#[test]
fn empty_block_with_coinbase_is_valid() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &prev, &key_id);
assert_eq!(b.inputs().len(), 0);
@ -155,7 +150,7 @@ fn empty_block_with_coinbase_is_valid() {
// the block should be valid here (single coinbase output with corresponding
// txn kernel)
assert!(
b.validate(&BlindingFactor::zero(), &zero_commit, verifier_cache())
b.validate(&BlindingFactor::zero(), verifier_cache())
.is_ok()
);
}
@ -166,9 +161,8 @@ fn empty_block_with_coinbase_is_valid() {
// additionally verifying the merkle_inputs_outputs also fails
fn remove_coinbase_output_flag() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let mut b = new_block(vec![], &keychain, &prev, &key_id);
assert!(
@ -186,7 +180,7 @@ fn remove_coinbase_output_flag() {
.is_ok()
);
assert_eq!(
b.validate(&BlindingFactor::zero(), &zero_commit, verifier_cache()),
b.validate(&BlindingFactor::zero(), verifier_cache()),
Err(Error::CoinbaseSumMismatch)
);
}
@ -196,9 +190,8 @@ fn remove_coinbase_output_flag() {
// invalidates the block and specifically it causes verify_coinbase to fail
fn remove_coinbase_kernel_flag() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let mut b = new_block(vec![], &keychain, &prev, &key_id);
assert!(
@ -216,7 +209,7 @@ fn remove_coinbase_kernel_flag() {
);
assert_eq!(
b.validate(&BlindingFactor::zero(), &zero_commit, verifier_cache()),
b.validate(&BlindingFactor::zero(), verifier_cache()),
Err(Error::Secp(secp::Error::IncorrectCommitSum))
);
}
@ -225,7 +218,7 @@ fn remove_coinbase_kernel_flag() {
fn serialize_deserialize_block_header() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &prev, &key_id);
let header1 = b.header;
@ -242,7 +235,7 @@ fn serialize_deserialize_block() {
let tx1 = tx1i2o();
let keychain = ExtKeychain::from_random_seed().unwrap();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let mut vec = Vec::new();
@ -260,11 +253,11 @@ fn serialize_deserialize_block() {
fn empty_block_serialized_size() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 1_252;
let target_len = 1_255;
assert_eq!(vec.len(), target_len);
}
@ -273,11 +266,11 @@ fn block_single_tx_serialized_size() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 2_834;
let target_len = 2_837;
assert_eq!(vec.len(), target_len);
}
@ -285,12 +278,12 @@ fn block_single_tx_serialized_size() {
fn empty_compact_block_serialized_size() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &cb).expect("serialization failed");
let target_len = 1_260;
let target_len = 1_263;
assert_eq!(vec.len(), target_len);
}
@ -299,12 +292,12 @@ fn compact_block_single_tx_serialized_size() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &cb).expect("serialization failed");
let target_len = 1_266;
let target_len = 1_269;
assert_eq!(vec.len(), target_len);
}
@ -319,11 +312,11 @@ fn block_10_tx_serialized_size() {
txs.push(tx);
}
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 17_072;
let target_len = 17_075;
assert_eq!(vec.len(), target_len,);
}
@ -337,12 +330,12 @@ fn compact_block_10_tx_serialized_size() {
txs.push(tx);
}
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &cb).expect("serialization failed");
let target_len = 1_320;
let target_len = 1_323;
assert_eq!(vec.len(), target_len,);
}
@ -351,7 +344,7 @@ fn compact_block_hash_with_nonce() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let tx = tx1i2o();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx], &keychain, &prev, &key_id);
let cb1: CompactBlock = b.clone().into();
let cb2: CompactBlock = b.clone().into();
@ -381,7 +374,7 @@ fn convert_block_to_compact_block() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let cb: CompactBlock = b.clone().into();
@ -403,7 +396,7 @@ fn convert_block_to_compact_block() {
fn hydrate_empty_compact_block() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &prev, &key_id);
let cb: CompactBlock = b.clone().into();
let hb = Block::hydrate_from(cb, vec![]).unwrap();
@ -417,7 +410,7 @@ fn serialize_deserialize_compact_block() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let mut cb1: CompactBlock = b.into();
@ -436,26 +429,3 @@ fn serialize_deserialize_compact_block() {
assert_eq!(cb1.header, cb2.header);
assert_eq!(cb1.kern_ids(), cb2.kern_ids());
}
#[test]
fn empty_block_v2_switch() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let mut prev = BlockHeader::default();
prev.height = consensus::HEADER_V2_HARD_FORK - 1;
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 1_260;
assert_eq!(b.header.version, 2);
assert_eq!(vec.len(), target_len);
// another try right before v2
prev.height = consensus::HEADER_V2_HARD_FORK - 2;
let b = new_block(vec![], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 1_252;
assert_eq!(b.header.version, 1);
assert_eq!(vec.len(), target_len);
}

View file

@ -29,9 +29,9 @@ use wallet::libtx::reward;
// utility producing a transaction with 2 inputs and a single output
pub fn tx2i1o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
build::transaction(
vec![
@ -47,8 +47,8 @@ pub fn tx2i1o() -> Transaction {
// utility producing a transaction with a single input and output
pub fn tx1i1o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
build::transaction(
vec![input(5, key_id1), output(3, key_id2), with_fee(2)],
@ -61,9 +61,9 @@ pub fn tx1i1o() -> Transaction {
// Note: this tx has an "offset" kernel
pub fn tx1i2o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
build::transaction(
vec![

View file

@ -17,10 +17,7 @@ extern crate grin_core as core;
extern crate chrono;
use chrono::prelude::Utc;
use core::consensus::{
next_difficulty, valid_header_version, TargetError, BLOCK_TIME_WINDOW, DAMP_FACTOR,
DIFFICULTY_ADJUST_WINDOW, MEDIAN_TIME_INDEX, MEDIAN_TIME_WINDOW, UPPER_TIME_BOUND,
};
use core::consensus::*;
use core::global;
use core::pow::Difficulty;
use std::fmt::{self, Display};
@ -77,84 +74,61 @@ impl Display for DiffBlock {
// Builds an iterator for next difficulty calculation with the provided
// constant time interval, difficulty and total length.
fn repeat(
interval: u64,
diff: u64,
len: u64,
cur_time: Option<u64>,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option<u64>) -> Vec<HeaderInfo> {
let cur_time = match cur_time {
Some(t) => t,
None => Utc::now().timestamp() as u64,
};
// watch overflow here, length shouldn't be ridiculous anyhow
assert!(len < std::usize::MAX as u64);
let diffs = vec![Difficulty::from_num(diff); len as usize];
let diffs = vec![diff.difficulty.clone(); len as usize];
let times = (0..(len as usize)).map(|n| n * interval as usize).rev();
let pairs = times.zip(diffs.iter());
pairs
.map(|(t, d)| Ok((cur_time + t as u64, d.clone())))
.collect::<Vec<_>>()
.map(|(t, d)| {
HeaderInfo::new(
cur_time + t as u64,
d.clone(),
diff.secondary_scaling,
diff.is_secondary,
)
}).collect::<Vec<_>>()
}
// Creates a new chain with a genesis at a simulated difficulty
fn create_chain_sim(diff: u64) -> Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)> {
fn create_chain_sim(diff: u64) -> Vec<(HeaderInfo, DiffStats)> {
println!(
"adding create: {}, {}",
Utc::now().timestamp(),
Difficulty::from_num(diff)
);
let return_vec = vec![Ok((
let return_vec = vec![HeaderInfo::from_ts_diff(
Utc::now().timestamp() as u64,
Difficulty::from_num(diff),
))];
)];
let diff_stats = get_diff_stats(&return_vec);
vec![(
Ok((Utc::now().timestamp() as u64, Difficulty::from_num(diff))),
HeaderInfo::from_ts_diff(Utc::now().timestamp() as u64, Difficulty::from_num(diff)),
diff_stats,
)]
}
fn get_diff_stats(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) -> DiffStats {
fn get_diff_stats(chain_sim: &Vec<HeaderInfo>) -> DiffStats {
// Fill out some difficulty stats for convenience
let diff_iter = chain_sim.clone();
let last_blocks: Vec<Result<(u64, Difficulty), TargetError>> =
global::difficulty_data_to_vector(diff_iter.clone());
let last_blocks: Vec<HeaderInfo> = global::difficulty_data_to_vector(diff_iter.iter().cloned());
let mut last_time = last_blocks[0].clone().unwrap().0;
let mut last_time = last_blocks[0].timestamp;
let tip_height = chain_sim.len();
let earliest_block_height = tip_height as i64 - last_blocks.len() as i64;
// Obtain the median window for the earlier time period
// the first MEDIAN_TIME_WINDOW elements
let mut window_earliest: Vec<u64> = last_blocks
.clone()
.iter()
.take(MEDIAN_TIME_WINDOW as usize)
.map(|n| n.clone().unwrap().0)
.collect();
// pick median
window_earliest.sort();
let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];
// Obtain the median window for the latest time period
// i.e. the last MEDIAN_TIME_WINDOW elements
let mut window_latest: Vec<u64> = last_blocks
.clone()
.iter()
.skip(DIFFICULTY_ADJUST_WINDOW as usize)
.map(|n| n.clone().unwrap().0)
.collect();
// pick median
window_latest.sort();
let latest_ts = window_latest[MEDIAN_TIME_INDEX as usize];
let earliest_ts = last_blocks[0].timestamp;
let latest_ts = last_blocks[last_blocks.len() - 1].timestamp;
let mut i = 1;
let sum_blocks: Vec<Result<(u64, Difficulty), TargetError>> = global::difficulty_data_to_vector(
diff_iter,
).into_iter()
.skip(MEDIAN_TIME_WINDOW as usize)
let sum_blocks: Vec<HeaderInfo> = global::difficulty_data_to_vector(diff_iter.iter().cloned())
.into_iter()
.take(DIFFICULTY_ADJUST_WINDOW as usize)
.collect();
@ -162,15 +136,14 @@ fn get_diff_stats(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) -> Di
.iter()
//.skip(1)
.map(|n| {
let (time, diff) = n.clone().unwrap();
let dur = time - last_time;
let dur = n.timestamp - last_time;
let height = earliest_block_height + i + 1;
i += 1;
last_time = time;
last_time = n.timestamp;
DiffBlock {
block_number: height,
difficulty: diff.to_num(),
time: time,
difficulty: n.difficulty.to_num(),
time: n.timestamp,
duration: dur,
}
})
@ -180,25 +153,23 @@ fn get_diff_stats(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) -> Di
let block_diff_sum = sum_entries.iter().fold(0, |sum, d| sum + d.difficulty);
i = 1;
last_time = last_blocks[0].clone().unwrap().0;
last_time = last_blocks[0].clone().timestamp;
let diff_entries: Vec<DiffBlock> = last_blocks
.iter()
.skip(1)
.map(|n| {
let (time, diff) = n.clone().unwrap();
let dur = time - last_time;
let dur = n.timestamp - last_time;
let height = earliest_block_height + i;
i += 1;
last_time = time;
last_time = n.timestamp;
DiffBlock {
block_number: height,
difficulty: diff.to_num(),
time: time,
difficulty: n.difficulty.to_num(),
time: n.timestamp,
duration: dur,
}
})
.collect();
}).collect();
DiffStats {
height: tip_height as u64,
@ -218,26 +189,28 @@ fn get_diff_stats(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) -> Di
// from the difficulty adjustment at interval seconds from the previous block
fn add_block(
interval: u64,
chain_sim: Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)>,
) -> Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)> {
chain_sim: Vec<(HeaderInfo, DiffStats)>,
) -> Vec<(HeaderInfo, DiffStats)> {
let mut ret_chain_sim = chain_sim.clone();
let mut return_chain: Vec<(Result<(u64, Difficulty), TargetError>)> =
chain_sim.clone().iter().map(|e| e.0.clone()).collect();
let mut return_chain: Vec<HeaderInfo> = chain_sim.clone().iter().map(|e| e.0.clone()).collect();
// get last interval
let diff = next_difficulty(return_chain.clone()).unwrap();
let last_elem = chain_sim.first().as_ref().unwrap().0.as_ref().unwrap();
let time = last_elem.0 + interval;
return_chain.insert(0, Ok((time, diff)));
let diff = next_difficulty(1, return_chain.clone());
let last_elem = chain_sim.first().unwrap().clone().0;
let time = last_elem.timestamp + interval;
return_chain.insert(0, HeaderInfo::from_ts_diff(time, diff.difficulty));
let diff_stats = get_diff_stats(&return_chain);
ret_chain_sim.insert(0, (Ok((time, diff)), diff_stats));
ret_chain_sim.insert(
0,
(HeaderInfo::from_ts_diff(time, diff.difficulty), diff_stats),
);
ret_chain_sim
}
// Adds many defined blocks
fn add_blocks(
intervals: Vec<u64>,
chain_sim: Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)>,
) -> Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)> {
chain_sim: Vec<(HeaderInfo, DiffStats)>,
) -> Vec<(HeaderInfo, DiffStats)> {
let mut return_chain = chain_sim.clone();
for i in intervals {
return_chain = add_block(i, return_chain.clone());
@ -248,9 +221,9 @@ fn add_blocks(
// Adds another n 'blocks' to the iterator, with difficulty calculated
fn add_block_repeated(
interval: u64,
chain_sim: Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)>,
chain_sim: Vec<(HeaderInfo, DiffStats)>,
iterations: usize,
) -> Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)> {
) -> Vec<(HeaderInfo, DiffStats)> {
let mut return_chain = chain_sim.clone();
for _ in 0..iterations {
return_chain = add_block(interval, return_chain.clone());
@ -260,7 +233,7 @@ fn add_block_repeated(
// Prints the contents of the iterator and its difficulties... useful for
// tweaking
fn print_chain_sim(chain_sim: Vec<((Result<(u64, Difficulty), TargetError>), DiffStats)>) {
fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) {
let mut chain_sim = chain_sim.clone();
chain_sim.reverse();
let mut last_time = 0;
@ -268,22 +241,21 @@ fn print_chain_sim(chain_sim: Vec<((Result<(u64, Difficulty), TargetError>), Dif
println!("Constants");
println!("DIFFICULTY_ADJUST_WINDOW: {}", DIFFICULTY_ADJUST_WINDOW);
println!("BLOCK_TIME_WINDOW: {}", BLOCK_TIME_WINDOW);
println!("MEDIAN_TIME_WINDOW: {}", MEDIAN_TIME_WINDOW);
println!("UPPER_TIME_BOUND: {}", UPPER_TIME_BOUND);
println!("CLAMP_FACTOR: {}", CLAMP_FACTOR);
println!("DAMP_FACTOR: {}", DAMP_FACTOR);
chain_sim.iter().enumerate().for_each(|(i, b)| {
let block = b.0.as_ref().unwrap();
let block = b.0.clone();
let stats = b.1.clone();
if first {
last_time = block.0;
last_time = block.timestamp;
first = false;
}
println!(
"Height: {}, Time: {}, Interval: {}, Network difficulty:{}, Average Block Time: {}, Average Difficulty {}, Block Time Sum: {}, Block Diff Sum: {}, Latest Timestamp: {}, Earliest Timestamp: {}, Timestamp Delta: {}",
i,
block.0,
block.0 - last_time,
block.1,
block.timestamp,
block.timestamp - last_time,
block.difficulty,
stats.average_block_time,
stats.average_difficulty,
stats.block_time_sum,
@ -297,22 +269,17 @@ fn print_chain_sim(chain_sim: Vec<((Result<(u64, Difficulty), TargetError>), Dif
for i in sb {
println!(" {}", i);
}
last_time = block.0;
last_time = block.timestamp;
});
}
fn repeat_offs(
from: u64,
interval: u64,
diff: u64,
len: u64,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
map_vec!(repeat(interval, diff, len, Some(from)), |e| {
match e.clone() {
Err(e) => Err(e),
Ok((t, d)) => Ok((t, d)),
}
})
fn repeat_offs(from: u64, interval: u64, diff: u64, len: u64) -> Vec<HeaderInfo> {
repeat(
interval,
HeaderInfo::from_ts_diff(1, Difficulty::from_num(diff)),
len,
Some(from),
)
}
/// Checks different next_target adjustments and difficulty boundaries
@ -348,7 +315,7 @@ fn adjustment_scenarios() {
println!("*********************************************************");
print_chain_sim(chain_sim);
println!("*********************************************************");
let just_enough = (DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW) as usize;
let just_enough = (DIFFICULTY_ADJUST_WINDOW) as usize;
// Steady difficulty for a good while, then a sudden drop
let chain_sim = create_chain_sim(global::initial_block_difficulty());
@ -415,32 +382,51 @@ fn next_target_adjustment() {
global::set_mining_mode(global::ChainTypes::AutomatedTesting);
let cur_time = Utc::now().timestamp() as u64;
let diff_one = Difficulty::one();
assert_eq!(
next_difficulty(vec![Ok((cur_time, Difficulty::one()))]).unwrap(),
Difficulty::one()
next_difficulty(1, vec![HeaderInfo::from_ts_diff(cur_time, diff_one)]),
HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
);
assert_eq!(
next_difficulty(1, vec![HeaderInfo::new(cur_time, diff_one, 10, true)]),
HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
);
let mut hi = HeaderInfo::from_diff_scaling(diff_one, 1);
assert_eq!(
next_difficulty(repeat(60, 1, DIFFICULTY_ADJUST_WINDOW, None)).unwrap(),
Difficulty::one()
next_difficulty(1, repeat(60, hi.clone(), DIFFICULTY_ADJUST_WINDOW, None)),
HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
);
hi.is_secondary = true;
assert_eq!(
next_difficulty(1, repeat(60, hi.clone(), DIFFICULTY_ADJUST_WINDOW, None)),
HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
);
hi.secondary_scaling = 100;
assert_eq!(
next_difficulty(1, repeat(60, hi.clone(), DIFFICULTY_ADJUST_WINDOW, None)),
HeaderInfo::from_diff_scaling(Difficulty::one(), 96),
);
// Check we don't get stuck on difficulty 1
let mut hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 1);
assert_ne!(
next_difficulty(repeat(1, 10, DIFFICULTY_ADJUST_WINDOW, None)).unwrap(),
next_difficulty(1, repeat(1, hi.clone(), DIFFICULTY_ADJUST_WINDOW, None)).difficulty,
Difficulty::one()
);
// just enough data, right interval, should stay constant
let just_enough = DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW;
let just_enough = DIFFICULTY_ADJUST_WINDOW + 1;
hi.difficulty = Difficulty::from_num(1000);
assert_eq!(
next_difficulty(repeat(60, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(60, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(1000)
);
// checking averaging works
let sec = DIFFICULTY_ADJUST_WINDOW / 2 + MEDIAN_TIME_WINDOW;
let mut s1 = repeat(60, 500, sec, Some(cur_time));
hi.difficulty = Difficulty::from_num(500);
let sec = DIFFICULTY_ADJUST_WINDOW / 2;
let mut s1 = repeat(60, hi.clone(), sec, Some(cur_time));
let mut s2 = repeat_offs(
cur_time + (sec * 60) as u64,
60,
@ -448,66 +434,136 @@ fn next_target_adjustment() {
DIFFICULTY_ADJUST_WINDOW / 2,
);
s2.append(&mut s1);
assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(1000));
assert_eq!(
next_difficulty(1, s2).difficulty,
Difficulty::from_num(1000)
);
// too slow, diff goes down
hi.difficulty = Difficulty::from_num(1000);
assert_eq!(
next_difficulty(repeat(90, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(90, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(857)
);
assert_eq!(
next_difficulty(repeat(120, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(120, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(750)
);
// too fast, diff goes up
assert_eq!(
next_difficulty(repeat(55, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(55, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(1028)
);
assert_eq!(
next_difficulty(repeat(45, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(45, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(1090)
);
// hitting lower time bound, should always get the same result below
assert_eq!(
next_difficulty(repeat(0, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(0, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(1500)
);
assert_eq!(
next_difficulty(repeat(0, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(0, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(1500)
);
// hitting higher time bound, should always get the same result above
assert_eq!(
next_difficulty(repeat(300, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(300, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(500)
);
assert_eq!(
next_difficulty(repeat(400, 1000, just_enough, None)).unwrap(),
next_difficulty(1, repeat(400, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(500)
);
// We should never drop below 1
hi.difficulty = Difficulty::zero();
assert_eq!(
next_difficulty(repeat(90, 0, just_enough, None)).unwrap(),
next_difficulty(1, repeat(90, hi.clone(), just_enough, None)).difficulty,
Difficulty::from_num(1)
);
}
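// Note (a sketch, not the consensus code itself): the expected values above
// are consistent with a damped, clamped per-block adjustment with a target
// block time T = 60s, DAMP_FACTOR = 3 and CLAMP_FACTOR = 2:
//   damp(t)   = (t + (DAMP_FACTOR - 1) * T) / DAMP_FACTOR
//   next_diff = avg_diff * T / clamp(damp(avg_interval)), clamp to [T/2, 2T]
// e.g. damp(90) = (90 + 120) / 3 = 70, so 1000 * 60 / 70 = 857 as asserted;
// damp(0) = 40 (above the lower clamp of 30) gives 1500, and damp(300) = 140
// clamps down to 120, giving 500.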
#[test]
fn secondary_pow_scale() {
let window = DIFFICULTY_ADJUST_WINDOW;
let mut hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 100);
// all primary, factor should increase so it becomes easier to find a high
// difficulty block
assert_eq!(
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
147
);
// all secondary on 90%, factor should go down a bit
hi.is_secondary = true;
assert_eq!(
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
94
);
// all secondary on 1%, factor should go down to bound (divide by 2)
assert_eq!(
secondary_pow_scaling(890_000, &(0..window).map(|_| hi.clone()).collect()),
49
);
// same as above, testing lowest bound
let mut low_hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 3);
low_hi.is_secondary = true;
assert_eq!(
secondary_pow_scaling(890_000, &(0..window).map(|_| low_hi.clone()).collect()),
1
);
// just about the right ratio, also no longer playing with median
let primary_hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 50);
assert_eq!(
secondary_pow_scaling(
1,
&(0..(window / 10))
.map(|_| primary_hi.clone())
.chain((0..(window * 9 / 10)).map(|_| hi.clone()))
.collect()
),
94
);
// 95% secondary, should come down based on 100 median
assert_eq!(
secondary_pow_scaling(
1,
&(0..(window / 20))
.map(|_| primary_hi.clone())
.chain((0..(window * 95 / 100)).map(|_| hi.clone()))
.collect()
),
94
);
// 40% secondary, should come up based on 50 median
assert_eq!(
secondary_pow_scaling(
1,
&(0..(window * 6 / 10))
.map(|_| primary_hi.clone())
.chain((0..(window * 4 / 10)).map(|_| hi.clone()))
.collect()
),
84
);
}
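// Note: per the comments above, the second argument is the height, which sets
// the target proportion of secondary (AR) blocks: roughly 90% at height 1,
// declining to about 1% by height 890_000. The scaling factor is nudged so
// the observed secondary share in the window tracks that target, bounded
// below by 1 and limited to at most halving per adjustment.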
#[test]
fn hard_forks() {
assert!(valid_header_version(0, 1));
assert!(valid_header_version(10, 1));
assert!(!valid_header_version(10, 2));
assert!(valid_header_version(100_000, 2));
assert!(valid_header_version(249_999, 2));
assert!(valid_header_version(250_000, 3));
assert!(!valid_header_version(250_000, 1));
assert!(!valid_header_version(500_000, 1));
assert!(!valid_header_version(250_001, 2));
assert!(valid_header_version(YEAR_HEIGHT / 2 - 1, 1));
// v2 not active yet
assert!(!valid_header_version(YEAR_HEIGHT / 2, 2));
assert!(!valid_header_version(YEAR_HEIGHT / 2, 1));
assert!(!valid_header_version(YEAR_HEIGHT, 1));
assert!(!valid_header_version(YEAR_HEIGHT / 2 + 1, 2));
}
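// A hypothetical sketch (not the actual consensus function) consistent with
// the assertions above: v1 is valid strictly below YEAR_HEIGHT / 2, and v2 is
// scheduled at that height but not yet active:
//
// pub fn valid_header_version(height: u64, version: u16) -> bool {
//     if height < YEAR_HEIGHT / 2 {
//         version == 1
//     } else {
//         false // placeholder until the v2 activation height is enabled
//     }
// }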
// #[test]

View file

@ -30,7 +30,7 @@ use grin_core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use grin_core::core::{aggregate, deaggregate, KernelFeatures, Output, Transaction};
use grin_core::ser;
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use util::{secp_static, static_secp_instance};
use util::static_secp_instance;
use wallet::libtx::build::{
self, initial_tx, input, output, with_excess, with_fee, with_lock_height,
};
@ -77,7 +77,7 @@ fn tx_double_ser_deser() {
#[should_panic(expected = "InvalidSecretKey")]
fn test_zero_commit_fails() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
// blinding should fail as signing with a zero r*G shouldn't work
build::transaction(
@ -97,9 +97,9 @@ fn verifier_cache() -> Arc<RwLock<VerifierCache>> {
#[test]
fn build_tx_kernel() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
// first build a valid tx with corresponding blinding factor
let tx = build::transaction(
@ -318,9 +318,9 @@ fn basic_transaction_deaggregation() {
#[test]
fn hash_output() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let tx = build::transaction(
vec![
@ -372,10 +372,10 @@ fn tx_hash_diff() {
#[test]
fn tx_build_exchange() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id4 = keychain.derive_key_id(4).unwrap();
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let key_id4 = ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let (tx_alice, blind_sum) = {
// Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they
@ -409,9 +409,7 @@ fn tx_build_exchange() {
#[test]
fn reward_empty_block() {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let previous_header = BlockHeader::default();
@ -419,19 +417,17 @@ fn reward_empty_block() {
b.cut_through()
.unwrap()
.validate(&BlindingFactor::zero(), &zero_commit, verifier_cache())
.validate(&BlindingFactor::zero(), verifier_cache())
.unwrap();
}
#[test]
fn reward_with_tx_block() {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let vc = verifier_cache();
let zero_commit = secp_static::commit_to_zero_value();
let mut tx1 = tx2i1o();
tx1.validate(vc.clone()).unwrap();
@ -441,19 +437,17 @@ fn reward_with_tx_block() {
block
.cut_through()
.unwrap()
.validate(&BlindingFactor::zero(), &zero_commit, vc.clone())
.validate(&BlindingFactor::zero(), vc.clone())
.unwrap();
}
#[test]
fn simple_block() {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let vc = verifier_cache();
let zero_commit = secp_static::commit_to_zero_value();
let mut tx1 = tx2i1o();
let mut tx2 = tx1i1o();
@ -465,22 +459,19 @@ fn simple_block() {
&key_id,
);
b.validate(&BlindingFactor::zero(), &zero_commit, vc.clone())
.unwrap();
b.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
}
#[test]
fn test_block_with_timelocked_tx() {
let keychain = keychain::ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let vc = verifier_cache();
let zero_commit = secp_static::commit_to_zero_value();
// first check we can add a timelocked tx where lock height matches current
// block height and that the resulting block is valid
let tx1 = build::transaction(
@ -496,8 +487,7 @@ fn test_block_with_timelocked_tx() {
let previous_header = BlockHeader::default();
let b = new_block(vec![&tx1], &keychain, &previous_header, &key_id3.clone());
b.validate(&BlindingFactor::zero(), &zero_commit, vc.clone())
.unwrap();
b.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
// now try adding a timelocked tx where lock height is greater than current
// block height
@ -514,7 +504,7 @@ fn test_block_with_timelocked_tx() {
let previous_header = BlockHeader::default();
let b = new_block(vec![&tx1], &keychain, &previous_header, &key_id3.clone());
match b.validate(&BlindingFactor::zero(), &zero_commit, vc.clone()) {
match b.validate(&BlindingFactor::zero(), vc.clone()) {
Err(KernelLockHeight(height)) => {
assert_eq!(height, 2);
}

View file

@ -28,7 +28,7 @@ use wallet::libtx::proof;
#[test]
fn test_output_ser_deser() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let commit = keychain.commit(5, &key_id).unwrap();
let proof = proof::create(&keychain, 5, &key_id, commit, None).unwrap();

View file

@ -36,7 +36,7 @@ fn test_verifier_cache_rangeproofs() {
let cache = verifier_cache();
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let commit = keychain.commit(5, &key_id).unwrap();
let proof = proof::create(&keychain, 5, &key_id, commit, None).unwrap();

View file

@ -40,9 +40,9 @@ Optionally return results as "compact blocks" by passing `?compact` query.
* **URL**
/v1/blocks/hash
/v1/blocks/height
/v1/blocks/commit
* /v1/blocks/hash
* /v1/blocks/height
* /v1/blocks/commit
* **Method:**
@ -73,14 +73,16 @@ Optionally return results as "compact blocks" by passing `?compact` query.
| - version | number | Version of the block |
| - height | number | Height of this block since the genesis block (height 0) |
| - previous | string | Hash of the block previous to this in the chain |
| - prev_root | string | Root hash of the header MMR at the previous header |
| - timestamp | string | RFC3339 timestamp at which the block was built |
| - output_root | string | Merklish root of all the commitments in the TxHashSet |
| - range_proof_root | string | Merklish root of all range proofs in the TxHashSet |
| - kernel_root | string | Merklish root of all transaction kernels in the TxHashSet |
| - nonce | number | Nonce increment used to mine this block |
| - cuckoo_size | number | Size of the cuckoo graph |
| - edge_bits | number | Size of the cuckoo graph (2_log of number of edges) |
| - cuckoo_solution | []number | The Cuckoo solution for this block |
| - total_difficulty | number | Total accumulated difficulty since genesis block |
| - scaling_difficulty | number | Difficulty scaling factor between the different proofs of work |
| - total_kernel_offset | string | Total kernel offset since genesis block |
| inputs | []string | Input transactions |
| outputs | []object | Outputs transactions |
@ -125,9 +127,9 @@ Returns data about a block headers given either a hash or height or an output co
* **URL**
/v1/headers/hash
/v1/headers/height
/v1/headers/commit
* /v1/headers/hash
* /v1/headers/height
* /v1/headers/commit
* **Method:**
@ -158,12 +160,13 @@ Returns data about a block headers given either a hash or height or an output co
| - version | number | Version of the block |
| - height | number | Height of this block since the genesis block (height 0) |
| - previous | string | Hash of the block previous to this in the chain |
| - prev_root | string | Root hash of the header MMR at the previous header |
| - timestamp | string | RFC3339 timestamp at which the block was built |
| - output_root | string | Merklish root of all the commitments in the TxHashSet |
| - range_proof_root | string | Merklish root of all range proofs in the TxHashSet |
| - kernel_root | string | Merklish root of all transaction kernels in the TxHashSet |
| - nonce | number | Nonce increment used to mine this block |
| - cuckoo_size | number | Size of the cuckoo graph |
| - edge_bits | number | Size of the cuckoo graph (2_log of number of edges) |
| - cuckoo_solution | []number | The Cuckoo solution for this block |
| - total_difficulty | number | Total accumulated difficulty since genesis block |
| - total_kernel_offset | string | Total kernel offset since genesis block |
@ -326,9 +329,9 @@ Retrieves details about specifics outputs. Supports retrieval of multiple output
* **URL**
/v1/chain/outputs/byids?id=x
/v1/chain/outputs/byids?id=x,y,z
/v1/chain/outputs/byids?id=x&id=y&id=z
* /v1/chain/outputs/byids?id=x
* /v1/chain/outputs/byids?id=x,y,z
* /v1/chain/outputs/byids?id=x&id=y&id=z
* **Method:**
@ -549,8 +552,8 @@ Retrieves the last n outputs inserted into the tree.
* **URL**
/v1/txhashset/lastoutputs (gets last 10)
/v1/txhashset/lastoutputs?n=x
* /v1/txhashset/lastoutputs (gets last 10)
* /v1/txhashset/lastoutputs?n=x
* **Method:**
@ -599,8 +602,8 @@ Retrieves the last n rangeproofs inserted in to the tree.
* **URL**
/v1/txhashset/lastrangeproofs (gets last 10)
/v1/txhashset/lastrangeproofs?n=x
* /v1/txhashset/lastrangeproofs (gets last 10)
* /v1/txhashset/lastrangeproofs?n=x
* **Method:**
@ -649,8 +652,8 @@ Retrieves the last n kernels inserted in to the tree.
* **URL**
/v1/txhashset/lastkernels (gets last 10)
/v1/txhashset/lastkernels?n=x
* /v1/txhashset/lastkernels (gets last 10)
* /v1/txhashset/lastkernels?n=x
* **Method:**
@ -1146,4 +1149,4 @@ Retrieves information about a specific peer.
console.log(r);
}
});
```

View file

@ -21,9 +21,8 @@ Attempt to update and retrieve outputs.
* **URL**
/v1/wallet/owner/retrieve_outputs
or
/v1/wallet/owner/retrieve_outputs?refresh&show_spent&tx_id=x&tx_id=y
* /v1/wallet/owner/retrieve_outputs
* /v1/wallet/owner/retrieve_outputs?refresh&show_spent&tx_id=x&tx_id=y
* **Method:**
@ -86,8 +85,8 @@ Attempt to update and retrieve outputs.
* **URL**
/v1/wallet/owner/retrieve_summary_info
/v1/wallet/owner/retrieve_summary_info?refresh
* /v1/wallet/owner/retrieve_summary_info
* /v1/wallet/owner/retrieve_summary_info?refresh
* **Method:**
@ -190,9 +189,8 @@ Return whether the outputs were validated against a node and an array of TxLogEn
* **URL**
/v1/wallet/owner/retrieve_txs
or
/v1/wallet/owner/retrieve_txs?refresh?id=x
* /v1/wallet/owner/retrieve_txs
* /v1/wallet/owner/retrieve_txs?refresh&id=x
* **Method:**

View file

@ -22,7 +22,7 @@ In this section, we detail each message and the potential response.
At any point, if the miner tries one of the following requests (except login) while login is required, it will receive the following error message.
| Field | Content |
| ------------- |:---------------------------------------:|
| :------------ | :-------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | method sent by the miner |
@ -45,7 +45,7 @@ Example:
If the request is not one of the following, the stratum server will give this error response:
| Field | Content |
| ------------- |:--------------------------------------------:|
| :------------ | :------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | method sent by the miner |
@ -65,7 +65,7 @@ Example:
}
```
### ```getjobtemplate```
### `getjobtemplate`
A message initiated by the miner.
The miner can request a job with this message.
@ -73,7 +73,7 @@ Miner can request a job with this message.
#### Request
| Field | Content |
| ------------- |:------------------------------:|
| :------------ | :----------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "getjobtemplate" |
@ -82,14 +82,12 @@ Miner can request a job with this message.
Example:
``` JSON
{
"id":"2",
"jsonrpc":"2.0",
"method":"getjobtemplate",
"params":null
}
```
#### Response
@ -101,7 +99,6 @@ The response can be of two types:
Example:
``` JSON
{
"id":"0",
"jsonrpc":"2.0",
@ -113,15 +110,14 @@ Example:
"pre_pow":"00010000000000003c4d0171369781424b39c81eb39de10cdf4a7cc27bbc6769203c7c9bc02cc6a1dfc6000000005b50f8210000000000395f123c6856055aab2369fe325c3d709b129dee5c96f2db60cdbc0dc123a80cb0b89e883ae2614f8dbd169888a95c0513b1ac7e069de82e5d479cf838281f7838b4bf75ea7c9222a1ad7406a4cab29af4e018c402f70dc8e9ef3d085169391c78741c656ec0f11f62d41b463c82737970afaa431c5cabb9b759cdfa52d761ac451276084366d1ba9efff2db9ed07eec1bcd8da352b32227f452dfa987ad249f689d9780000000000000b9e00000000000009954"
}
}
```
##### Error response
If the node is syncing, it will send the following message:
| Field | Content |
| ------------- |:---------------------------------------------------------:|
| :------------ | :-------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "getjobtemplate" |
@ -141,7 +137,7 @@ Example:
}
```
### ```job```
### `job`
A message initiated by the Stratum server.
The Stratum server sends jobs automatically to connected miners.
@ -150,16 +146,15 @@ The miner SHOULD interrupt current job if job_id = 0, and SHOULD replace the cur
#### Request
| Field | Content |
| ------------- |:-------------------------------------------------------------------------:|
| :------------ | :------------------------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "job" |
| params | Int ```difficulty```, ```height```, ```job_id``` and string ```pre_pow``` |
| params | Int `difficulty`, `height`, `job_id` and string `pre_pow` |
Example:
``` JSON
{
"id":"Stratum",
"jsonrpc":"2.0",
@ -171,21 +166,20 @@ Example:
"pre_pow":"00010000000000003ff723bc8c987b0c594794a0487e52260c5343288749c7e288de95a80afa558c5fb8000000005b51f15f00000000003cadef6a45edf92d2520bf45cbd4f36b5ef283c53d8266bbe9aa1b8daaa1458ce5578fcb0978b3995dd00e3bfc5a9277190bb9407a30d66aec26ff55a2b50214b22cdc1f3894f27374f568b2fe94d857b6b3808124888dd5eff7e8de7e451ac805a4ebd6551fa7a529a1b9f35f761719ed41bfef6ab081defc45a64a374dfd8321feac083741f29207b044071d93904986fa322df610e210c543c2f95522c9bdaef5f598000000000000c184000000000000a0cf"
}
}
```
#### Response
No response is required for this message.
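A minimal miner-side sketch, assuming the `serde_json` crate (the `parse_job` helper is hypothetical, not part of the grin codebase), of extracting a pushed job and honoring the `job_id = 0` interrupt rule:

``` rust
extern crate serde_json;
use serde_json::Value;

/// Parse a pushed "job" message into (difficulty, height, job_id, pre_pow).
/// A returned job_id of 0 means: interrupt the current search immediately.
fn parse_job(msg: &str) -> Option<(u64, u64, u64, String)> {
    let v: Value = serde_json::from_str(msg).ok()?;
    if v["method"] != "job" {
        return None;
    }
    let p = &v["params"];
    Some((
        p["difficulty"].as_u64()?,
        p["height"].as_u64()?,
        p["job_id"].as_u64()?,
        p["pre_pow"].as_str()?.to_owned(),
    ))
}
```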
### ```keepalive```
### `keepalive`
A message initiated by the miner in order to keep the connection alive.
#### Request
| Field | Content |
| ------------- |:----------------------:|
| :------------ | :--------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "keepalive" |
@ -194,20 +188,18 @@ A message initiated by the miner in order to keep the connection alive.
Example:
``` JSON
{
"id":"2",
"jsonrpc":"2.0",
"method":"keepalive",
"params":null
}
```
#### Response
| Field | Content |
| ------------- |:------------------------------:|
| :------------ | :----------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "keepalive" |
@ -217,7 +209,6 @@ Example:
Example:
``` JSON
{
"id":"2",
"jsonrpc":"2.0",
@ -225,10 +216,9 @@ Example:
"result":"ok",
"error":null
}
```
### ```login```
### `login`
***
@ -238,7 +228,7 @@ Miner can log in on a Grin Stratum server with a login, password and agent (usua
#### Request
| Field | Content |
| ------------- |:------------------------------:|
| :------------ | :----------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "login" |
@ -260,7 +250,6 @@ Example:
}
```
#### Response
The response can be of two types:
@ -268,7 +257,7 @@ The response can be of two types:
##### OK response
| Field | Content |
| ------------- |:------------------------------:|
| :------------ | :----------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "login" |
@ -278,7 +267,6 @@ The response can be of two types:
Example:
``` JSON
{
"id":"1",
"jsonrpc":"2.0",
@ -286,14 +274,13 @@ Example:
"result":"ok",
"error":null
}
```
##### Error response
Not yet implemented. Should return error -32500 "Login first".
Not yet implemented. Should return error -32500 "Login first" when login is required.
### ```status```
### `status`
A message initiated by the miner.
This message allows a miner to get the status of its current worker and the network.
@ -301,7 +288,7 @@ This message allows a miner to get the status of its current worker and the netw
#### Request
| Field | Content |
| ------------- |:----------------------:|
| :------------ | :--------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "status" |
@ -310,14 +297,12 @@ This message allows a miner to get the status of its current worker and the netw
Example:
``` JSON
{
"id":"2",
"jsonrpc":"2.0",
"method":"status",
"params":null
}
```
#### Response
@ -325,11 +310,11 @@ Example:
The response is the following:
| Field | Content |
| ------------- |:--------------------------------------------------------------------------------------------------------:|
| :------------ | :------------------------------------------------------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "status" |
| result | String ```id```. Integers ```height```, ```difficulty```, ```accepted```, ```rejected``` and ```stale``` |
| result | String `id`. Integers `height`, `difficulty`, `accepted`, `rejected` and `stale` |
| error | null |
Example:
@ -351,7 +336,7 @@ Example:
}
```
### ```submit```
### `submit`
A message initiated by the miner.
When a miner finds a share, it will submit it to the node.
@ -361,21 +346,21 @@ When a miner finds a share, it will submit it to the node.
The miner submits a job solution to the Stratum server.
| Field | Content |
| ------------- |:---------------------------------------------------------------------------:|
| :------------ | :-------------------------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "submit" |
| params | Int ```nonce```, ```height```, ```job_id``` and array of integers ```pow``` |
| params | Int `edge_bits`,`nonce`, `height`, `job_id` and array of integers `pow` |
Example:
``` JSON
{
"id":"0",
"jsonrpc":"2.0",
"method":"submit",
"params":{
"params":{
"edge_bits":29,
"height":16419,
"job_id":0,
"nonce":8895699060858340771,
@ -384,7 +369,6 @@ Example:
]
}
}
```
#### Response
@ -393,10 +377,10 @@ The response can be of three types.
##### OK response
The share is accepted by the Stratum but is not a valid cuckoo solution at the network target difficulty.
The share is accepted by the Stratum but is not a valid cuck(at)oo solution at the network target difficulty.
| Field | Content |
| ------------- |:------------------------------:|
| :------------ | :----------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "submit" |
@ -406,7 +390,6 @@ The share is accepted by the Stratum but is not a valid cuckoo solution at the n
Example:
``` JSON
{
"id":"2",
"jsonrpc":"2.0",
@ -414,15 +397,14 @@ Example:
"result":"ok",
"error":null
}
```
##### Blockfound response
The share is accepted by the Stratum and is a valid cuckoo solution at the network target difficulty.
The share is accepted by the Stratum and is a valid cuck(at)oo solution at the network target difficulty.
| Field | Content |
| ------------- |:------------------------------:|
| :------------ | :----------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "submit" |
@ -432,7 +414,6 @@ The share is accepted by the Stratum and is a valid cuckoo solution at the netwo
Example:
``` JSON
{
"id":"6",
"jsonrpc":"2.0",
@ -440,7 +421,6 @@ Example:
"result":"blockfound - 23025af9032de812d15228121d5e4b0e977d30ad8036ab07131104787b9dcf10",
"error":null
}
```
##### Error response
@ -452,7 +432,7 @@ The error response can be of two types: stale and rejected.
The share is a valid solution to a previous job, not the current one.
| Field | Content |
| ------------- |:---------------------------------------------------------:|
| :------------ | :-------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "submit" |
@ -481,7 +461,7 @@ Two possibilities: the solution cannot be validated or the solution is of too lo
The submitted solution cannot be validated.
| Field | Content |
| ------------- |:---------------------------------------------------------:|
| :------------ | :-------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "submit" |
@ -506,7 +486,7 @@ Example:
The submitted solution is of too low difficulty.
| Field | Content |
| ------------- |:----------------------------------------------------------------:|
| :------------ | :--------------------------------------------------------------- |
| id | ID of the request |
| jsonrpc | "2.0" |
| method | "submit" |
@ -531,7 +511,7 @@ Example:
The Grin Stratum protocol implementation defines the following error messages:
| Error code | Error Message |
| ----------- |:--------------------------------------:|
| :---------- | :------------------------------------- |
| -32000 | Node is syncing - please wait |
| -32500 | Login first |
| -32501 | Share rejected due to low difficulty |
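A minimal sketch of miner-side dispatch on these codes (the `on_rpc_error` helper and the suggested reactions are illustrative, not mandated by the protocol):

``` rust
// React to a stratum RPC error code from the table above.
fn on_rpc_error(code: i64) {
    match code {
        -32000 => { /* node is syncing: back off, then re-request a job */ }
        -32500 => { /* send a login request, then retry the original call */ }
        -32501 => { /* share below target difficulty: keep mining this job */ }
        _ => { /* unlisted code: log it and continue */ }
    }
}
```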

Binary file not shown.


View file

@ -2,7 +2,7 @@
title
**Current Grin Transaction Workflow**
Accurate as of Aug 1, 2018 - Master branch only
Accurate as of Oct 10, 2018 - Master branch only
end title
actor "Sender" as sender
@ -44,7 +44,7 @@ note right of recipient
4: Calculate message **M** = **fee | lock_height **
5: Choose random nonce **kR** (private scalar)
6: Multiply **xR** and **kR** by generator G to create public curve points **xRG** and **kRG**
7: Compute Schnorr challenge **e** = SHA256(**M** | **kRG** + **kSG**)
7: Compute Schnorr challenge **e** = SHA256(**kRG** + **kSG** | **xRG** + **xSG** | **M**)
8: Compute Recipient Schnorr signature **sR** = **kR** + **e** * **xR**
9: Add **sR, xRG, kRG** to **Slate**
10: Create wallet output function **rF** that stores **receiver_output** in wallet with status "Unconfirmed"
@ -61,10 +61,10 @@ end
== Finalize Transaction ==
note left of sender
1: Calculate message **M** = **fee | lock_height **
2: Compute Schnorr challenge **e** = SHA256(**M** | **kRG** + **kSG**)
2: Compute Schnorr challenge **e** = SHA256(**kRG** + **kSG** | **xRG** + **xSG** | **M**)
3: Verify **sR** by verifying **kRG** + **e** * **xRG** = **sRG**
4: Compute Sender Schnorr signature **sS** = **kS** + **e** * **xS**
5: Calculate final signature **s** = (**sS**+**sR**, **kSG**+**kRG**)
5: Calculate final signature **s** = (**kSG**+**kRG**, **sS**+**sR**)
6: Calculate public key for **s**: **xG** = **xRG** + **xSG**
7: Verify **s** against excess values in final transaction using **xG**
8: Create Transaction Kernel Containing:
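Note on the finalize steps above: with the updated challenge **e** = SHA256(**kRG** + **kSG** | **xRG** + **xSG** | **M**), the aggregate signature verifies against the aggregate public key, since (**sS** + **sR**)G = (**kSG** + **kRG**) + **e**(**xSG** + **xRG**), which is exactly the check against **xG** in step 7.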

View file

@ -33,11 +33,22 @@ Logging configuration for the wallet is read from `grin-wallet.toml`.
#### Switches common to all wallet commands
### Wallet Account
The wallet supports multiple accounts. To set the active account for a wallet command, use the '-a' switch, e.g:
```
[host]$ grin wallet -a account_1 info
```
All output creation, transaction building, and querying are done against a particular account in the wallet.
If the '-a' switch is not provided for a command, the account named 'default' is used.
##### Grin Node Address
The wallet generally needs to talk to a running grin node in order to remain up-to-date and verify its contents. By default, the wallet
tries to contact a node at `127.0.0.1:13413`. To change this, modify the value in the wallet's `grin_wallet.toml` file. Alternatively,
you can provide the `-a` switch to the wallet command, e.g.:
you can provide the `-r` (seRver) switch to the wallet command, e.g.:
```sh
[host]$ grin wallet -a "http://192.168.0.2:1341" info
@ -79,6 +90,27 @@ This will create a `grin-wallet.toml` file in the current directory configured t
as well as all needed data files. When running any `grin wallet` command, grin will check the current directory to see if
a `grin-wallet.toml` file exists. If not, it will use the default in `~/.grin`
### account
To create a new account, use the 'grin wallet account' command with the argument '-c', e.g.:
```
[host]$ grin wallet account -c my_account
```
This will create a new account called 'my_account'. To use this account in subsequent commands, provide the '-a' flag to
all wallet commands:
```
[host]$ grin wallet -a my_account info
```
To display a list of created accounts in the wallet, use the 'account' command with no flags:
```
[host]$ grin wallet account
```
### info
A summary of the wallet's contents can be retrieved from the wallet using the `info` command. Note that the `Total` sum may appear
@ -86,7 +118,7 @@ inflated if you have a lot of unconfirmed outputs in your wallet (especially one
who then never confirms it by posting to the chain). `Currently Spendable` is the most accurate field to look at here.
```sh
____ Wallet Summary Info as of 49 ____
____ Wallet Summary Info - Account 'default' as of 49 ____
Total | 3000.000000000
Awaiting Confirmation | 60.000000000
@ -177,7 +209,7 @@ Simply displays all the outputs in your wallet: e.g:
```sh
[host]$ grin wallet outputs
Wallet Outputs - Block Height: 49
Wallet Outputs - Account 'default' - Block Height: 49
------------------------------------------------------------------------------------------------------------------------------------------------
Key Id Child Key Index Block Height Locked Until Status Is Coinbase? Num. of Confirmations Value Transaction
================================================================================================================================================
@ -209,8 +241,7 @@ transaction log, use the `txs`
```sh
[host]$ grin wallet txs
Transaction Log - Block Height: 49
Transaction Log - Account 'default' - Block Height: 49
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Id Type Shared Transaction Id Creation Time Confirmed? Confirmation Time Num. Inputs Num. Outputs Amount Credited Amount Debited Fee Net Difference
==========================================================================================================================================================================================================================================
@ -226,13 +257,13 @@ Transaction Log - Block Height: 49
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
6 Received Tx 03715cf6-f29b-4a3a-bda5-b02cba6bf0d9 2018-07-20 19:46:46.120244904 UTC false None 0 1 60.000000000 0.000000000 None 60.000000000
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
```
>>>>>>> master
To see the inputs/outputs associated with a particular transaction, use the `-i` switch providing the Id of the given transaction, e.g:
```sh
[host]$ grin wallet txs -i 6
Transaction Log - Block Height: 49
Transaction Log - Account 'default' - Block Height: 49
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Id Type Shared Transaction Id Creation Time Confirmed? Confirmation Time Num. Inputs Num. Outputs Amount Credited Amount Debited Fee Net Difference
===========================================================================================================================================================================================================
@ -263,7 +294,7 @@ Running against the data above:
```sh
[host]$ grin wallet cancel -i 6
[host]$ grin wallet txs -i 6
Transaction Log - Block Height: 49
Transaction Log - Account 'default' - Block Height: 49
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Id Type Shared Transaction Id Creation Time Confirmed? Confirmation Time Num. Inputs Num. Outputs Amount Credited Amount Debited Fee Net Difference
=======================================================================================================================================================================================================================
@ -326,4 +357,4 @@ grin wallet restore
```
Note this operation can potentially take a long time. Once it's done, your wallet outputs should be restored, and you can
transact with your restored wallet as before the backup.

View file

@ -1,6 +1,6 @@
[package]
name = "grin_keychain"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = '..'
publish = false

View file

@ -1,192 +0,0 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use blake2::blake2b::blake2b;
use byteorder::{BigEndian, ByteOrder};
use types::{Error, Identifier};
use util::secp::key::SecretKey;
use util::secp::Secp256k1;
#[derive(Debug, Clone)]
pub struct ChildKey {
/// Child number of the key (n derivations)
pub n_child: u32,
/// Root key id
pub root_key_id: Identifier,
/// Key id
pub key_id: Identifier,
/// The private key
pub key: SecretKey,
}
/// An ExtendedKey is a secret key which can be used to derive new
/// secret keys to blind the commitment of a transaction output.
/// To be usable, a secret key should have an amount assigned to it,
/// but when the key is derived, the amount is not known and must be
/// given.
#[derive(Debug, Clone)]
pub struct ExtendedKey {
/// Child number of the extended key
pub n_child: u32,
/// Root key id
pub root_key_id: Identifier,
/// Key id
pub key_id: Identifier,
/// The secret key
pub key: SecretKey,
/// The chain code for the key derivation chain
pub chain_code: [u8; 32],
}
impl ExtendedKey {
/// Creates a new extended master key from a seed
pub fn from_seed(secp: &Secp256k1, seed: &[u8]) -> Result<ExtendedKey, Error> {
match seed.len() {
16 | 32 | 64 => (),
_ => {
return Err(Error::KeyDerivation(
"seed size must be 128, 256 or 512".to_owned(),
))
}
}
let derived = blake2b(64, b"Grin/MW Seed", seed);
let slice = derived.as_bytes();
let key =
SecretKey::from_slice(&secp, &slice[0..32]).expect("Error deriving key (from_slice)");
let mut chain_code: [u8; 32] = Default::default();
(&mut chain_code).copy_from_slice(&slice[32..64]);
let key_id = Identifier::from_secret_key(secp, &key)?;
let ext_key = ExtendedKey {
n_child: 0,
root_key_id: key_id.clone(),
key_id: key_id.clone(),
// key and extended chain code for the key itself
key,
chain_code,
};
Ok(ext_key)
}
/// Derive a child key from this extended key
pub fn derive(&self, secp: &Secp256k1, n: u32) -> Result<ChildKey, Error> {
let mut n_bytes: [u8; 4] = [0; 4];
BigEndian::write_u32(&mut n_bytes, n);
let mut seed = self.key[..].to_vec();
seed.extend_from_slice(&n_bytes);
// only need a 32 byte digest here as we only need the bytes for the key itself
// we do not need additional bytes for a derived (and unused) chain code
let derived = blake2b(32, &self.chain_code[..], &seed[..]);
let mut key = SecretKey::from_slice(&secp, &derived.as_bytes()[..])
.expect("Error deriving key (from_slice)");
key.add_assign(secp, &self.key)
.expect("Error deriving key (add_assign)");
let key_id = Identifier::from_secret_key(secp, &key)?;
Ok(ChildKey {
n_child: n,
root_key_id: self.root_key_id.clone(),
key_id,
key,
})
}
}
#[cfg(test)]
mod test {
use serde_json;
use super::{ExtendedKey, Identifier};
use util;
use util::secp::key::SecretKey;
use util::secp::Secp256k1;
fn from_hex(hex_str: &str) -> Vec<u8> {
util::from_hex(hex_str.to_string()).unwrap()
}
#[test]
fn test_identifier_json_ser_deser() {
let hex = "942b6c0bd43bdcb24f3edfe7fadbc77054ecc4f2";
let identifier = Identifier::from_hex(hex).unwrap();
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct HasAnIdentifier {
identifier: Identifier,
}
let has_an_identifier = HasAnIdentifier { identifier };
let json = serde_json::to_string(&has_an_identifier).unwrap();
assert_eq!(json, "{\"identifier\":\"942b6c0bd43bdcb24f3e\"}");
let deserialized: HasAnIdentifier = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized, has_an_identifier);
}
#[test]
fn extkey_from_seed() {
// TODO More test vectors
let s = Secp256k1::new();
let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
let sec = from_hex("2878a92133b0a7c2fbfb0bd4520ed2e55ea3fa2913200f05c30077d30b193480");
let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
let chain_code =
from_hex("3ad40dd836c5ce25dfcbdee5044d92cf6b65bd5475717fa7a56dd4a032cca7c0");
let identifier = from_hex("6f7c1a053ca54592e783");
let n_child = 0;
assert_eq!(extk.key, secret_key);
assert_eq!(extk.key_id, Identifier::from_bytes(identifier.as_slice()));
assert_eq!(
extk.root_key_id,
Identifier::from_bytes(identifier.as_slice())
);
assert_eq!(extk.chain_code, chain_code.as_slice());
assert_eq!(extk.n_child, n_child);
}
#[test]
fn extkey_derivation() {
let s = Secp256k1::new();
let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
let derived = extk.derive(&s, 0).unwrap();
let sec = from_hex("55f1a2b67ec58933bf954fdc721327afe486e8989af923c3ae298e45a84ef597");
let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
let root_key_id = from_hex("6f7c1a053ca54592e783");
let identifier = from_hex("8fa188b56cefe66be154");
let n_child = 0;
assert_eq!(derived.key, secret_key);
assert_eq!(
derived.key_id,
Identifier::from_bytes(identifier.as_slice())
);
assert_eq!(
derived.root_key_id,
Identifier::from_bytes(root_key_id.as_slice())
);
assert_eq!(derived.n_child, n_child);
}
}

View file

@ -88,30 +88,30 @@ pub trait BIP32Hasher {
}
/// Implementation of the above that uses the standard BIP32 Hash algorithms
pub struct BIP32ReferenceHasher {
pub struct BIP32GrinHasher {
hmac_sha512: Hmac<Sha512>,
}
impl BIP32ReferenceHasher {
impl BIP32GrinHasher {
/// New empty hasher
pub fn new() -> BIP32ReferenceHasher {
BIP32ReferenceHasher {
pub fn new() -> BIP32GrinHasher {
BIP32GrinHasher {
hmac_sha512: HmacSha512::new(GenericArray::from_slice(&[0u8; 128])),
}
}
}
impl BIP32Hasher for BIP32ReferenceHasher {
impl BIP32Hasher for BIP32GrinHasher {
fn network_priv() -> [u8; 4] {
// bitcoin network (xprv) (for test vectors)
[0x04, 0x88, 0xAD, 0xE4]
// gprv
[0x03, 0x3C, 0x04, 0xA4]
}
fn network_pub() -> [u8; 4] {
// bitcoin network (xpub) (for test vectors)
[0x04, 0x88, 0xB2, 0x1E]
// gpub
[0x03, 0x3C, 0x08, 0xDF]
}
fn master_seed() -> [u8; 12] {
b"Bitcoin seed".to_owned()
b"IamVoldemort".to_owned()
}
fn init_sha512(&mut self, seed: &[u8]) {
self.hmac_sha512 = HmacSha512::new_varkey(seed).expect("HMAC can take key of any size");
@ -175,7 +175,7 @@ pub struct ExtendedPubKey {
}
/// A child number for a derived key
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum ChildNumber {
/// Non-hardened key
Normal {
@ -409,8 +409,7 @@ impl ExtendedPrivKey {
hasher.append_sha512(&be_n);
let result = hasher.result_sha512();
let mut sk = SecretKey::from_slice(secp, &result[..32]).map_err(Error::Ecdsa)?;
sk.add_assign(secp, &self.secret_key)
.map_err(Error::Ecdsa)?;
sk.add_assign(secp, &self.secret_key).map_err(Error::Ecdsa)?;
Ok(ExtendedPrivKey {
network: self.network,
@ -653,11 +652,66 @@ mod tests {
use util::from_hex;
use util::secp::Secp256k1;
use super::ChildNumber::{Hardened, Normal};
use super::Error;
use super::{ChildNumber, ExtendedPrivKey, ExtendedPubKey};
use super::*;
use super::BIP32ReferenceHasher;
use digest::generic_array::GenericArray;
use digest::Digest;
use hmac::{Hmac, Mac};
use ripemd160::Ripemd160;
use sha2::{Sha256, Sha512};
/// Implementation of the above that uses the standard BIP32 Hash algorithms
pub struct BIP32ReferenceHasher {
hmac_sha512: Hmac<Sha512>,
}
impl BIP32ReferenceHasher {
/// New empty hasher
pub fn new() -> BIP32ReferenceHasher {
BIP32ReferenceHasher {
hmac_sha512: HmacSha512::new(GenericArray::from_slice(&[0u8; 128])),
}
}
}
impl BIP32Hasher for BIP32ReferenceHasher {
fn network_priv() -> [u8; 4] {
// bitcoin network (xprv) (for test vectors)
[0x04, 0x88, 0xAD, 0xE4]
}
fn network_pub() -> [u8; 4] {
// bitcoin network (xpub) (for test vectors)
[0x04, 0x88, 0xB2, 0x1E]
}
fn master_seed() -> [u8; 12] {
b"Bitcoin seed".to_owned()
}
fn init_sha512(&mut self, seed: &[u8]) {
self.hmac_sha512 = HmacSha512::new_varkey(seed).expect("HMAC can take key of any size");
}
fn append_sha512(&mut self, value: &[u8]) {
self.hmac_sha512.input(value);
}
fn result_sha512(&mut self) -> [u8; 64] {
let mut result = [0; 64];
result.copy_from_slice(self.hmac_sha512.result().code().as_slice());
result
}
fn sha_256(&self, input: &[u8]) -> [u8; 32] {
let mut sha2_res = [0; 32];
let mut sha2 = Sha256::new();
sha2.input(input);
sha2_res.copy_from_slice(sha2.result().as_slice());
sha2_res
}
fn ripemd_160(&self, input: &[u8]) -> [u8; 20] {
let mut ripemd_res = [0; 20];
let mut ripemd = Ripemd160::new();
ripemd.input(input);
ripemd_res.copy_from_slice(ripemd.result().as_slice());
ripemd_res
}
}
fn test_path(
secp: &Secp256k1,
@ -694,12 +748,12 @@ mod tests {
for &num in path.iter() {
sk = sk.ckd_priv(secp, &mut h, num).unwrap();
match num {
Normal { .. } => {
ChildNumber::Normal { .. } => {
let pk2 = pk.ckd_pub(secp, &mut h, num).unwrap();
pk = ExtendedPubKey::from_private::<BIP32ReferenceHasher>(secp, &sk);
assert_eq!(pk, pk2);
}
Hardened { .. } => {
ChildNumber::Hardened { .. } => {
assert_eq!(
pk.ckd_pub(secp, &mut h, num),
Err(Error::CannotDeriveFromHardenedKey)

View file

@ -16,14 +16,11 @@
/// scheme.
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use blake2;
use extkey;
use types::{BlindSum, BlindingFactor, Error, Identifier, Keychain};
use util::logger::LOGGER;
use extkey_bip32::{BIP32GrinHasher, ExtendedPrivKey};
use types::{BlindSum, BlindingFactor, Error, ExtKeychainPath, Identifier, Keychain};
use util::secp::key::SecretKey;
use util::secp::pedersen::Commitment;
use util::secp::{self, Message, Secp256k1, Signature};
@ -31,20 +28,17 @@ use util::secp::{self, Message, Secp256k1, Signature};
#[derive(Clone, Debug)]
pub struct ExtKeychain {
secp: Secp256k1,
extkey: extkey::ExtendedKey,
key_overrides: HashMap<Identifier, SecretKey>,
key_derivation_cache: Arc<RwLock<HashMap<Identifier, u32>>>,
master: ExtendedPrivKey,
}
impl Keychain for ExtKeychain {
fn from_seed(seed: &[u8]) -> Result<ExtKeychain, Error> {
let mut h = BIP32GrinHasher::new();
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let extkey = extkey::ExtendedKey::from_seed(&secp, seed)?;
let master = ExtendedPrivKey::new_master(&secp, &mut h, seed)?;
let keychain = ExtKeychain {
secp: secp,
extkey: extkey,
key_overrides: HashMap::new(),
key_derivation_cache: Arc::new(RwLock::new(HashMap::new())),
master: master,
};
Ok(keychain)
}
@ -56,39 +50,27 @@ impl Keychain for ExtKeychain {
ExtKeychain::from_seed(seed.as_bytes())
}
fn root_key_id(&self) -> Identifier {
self.extkey.root_key_id.clone()
fn root_key_id() -> Identifier {
ExtKeychainPath::new(0, 0, 0, 0, 0).to_identifier()
}
fn derive_key_id(&self, derivation: u32) -> Result<Identifier, Error> {
let child_key = self.extkey.derive(&self.secp, derivation)?;
Ok(child_key.key_id)
fn derive_key_id(depth: u8, d1: u32, d2: u32, d3: u32, d4: u32) -> Identifier {
ExtKeychainPath::new(depth, d1, d2, d3, d4).to_identifier()
}
fn derived_key(&self, key_id: &Identifier) -> Result<SecretKey, Error> {
// first check our overrides and just return the key if we have one in there
if let Some(key) = self.key_overrides.get(key_id) {
trace!(
LOGGER,
"... Derived Key (using override) key_id: {}",
key_id
);
return Ok(*key);
fn derive_key(&self, id: &Identifier) -> Result<ExtendedPrivKey, Error> {
let mut h = BIP32GrinHasher::new();
let p = id.to_path();
let mut sk = self.master;
for i in 0..p.depth {
sk = sk.ckd_priv(&self.secp, &mut h, p.path[i as usize])?;
}
let child_key = self.derived_child_key(key_id)?;
Ok(child_key.key)
Ok(sk)
}
fn commit(&self, amount: u64, key_id: &Identifier) -> Result<Commitment, Error> {
let skey = self.derived_key(key_id)?;
let commit = self.secp.commit(amount, skey)?;
Ok(commit)
}
fn commit_with_key_index(&self, amount: u64, derivation: u32) -> Result<Commitment, Error> {
let child_key = self.derived_key_from_index(derivation)?;
let commit = self.secp.commit(amount, child_key.key)?;
fn commit(&self, amount: u64, id: &Identifier) -> Result<Commitment, Error> {
let key = self.derive_key(id)?;
let commit = self.secp.commit(amount, key.secret_key)?;
Ok(commit)
}
@ -96,13 +78,27 @@ impl Keychain for ExtKeychain {
let mut pos_keys: Vec<SecretKey> = blind_sum
.positive_key_ids
.iter()
.filter_map(|k| self.derived_key(&k).ok())
.filter_map(|k| {
let res = self.derive_key(&Identifier::from_path(&k));
if let Ok(s) = res {
Some(s.secret_key)
} else {
None
}
})
.collect();
let mut neg_keys: Vec<SecretKey> = blind_sum
.negative_key_ids
.iter()
.filter_map(|k| self.derived_key(&k).ok())
.filter_map(|k| {
let res = self.derive_key(&Identifier::from_path(&k));
if let Ok(s) = res {
Some(s.secret_key)
} else {
None
}
})
.collect();
pos_keys.extend(
@ -125,9 +121,9 @@ impl Keychain for ExtKeychain {
Ok(BlindingFactor::from_secret_key(sum))
}
fn sign(&self, msg: &Message, key_id: &Identifier) -> Result<Signature, Error> {
let skey = self.derived_key(key_id)?;
let sig = self.secp.sign(msg, &skey)?;
fn sign(&self, msg: &Message, id: &Identifier) -> Result<Signature, Error> {
let skey = self.derive_key(id)?;
let sig = self.secp.sign(msg, &skey.secret_key)?;
Ok(sig)
}
@ -146,82 +142,10 @@ impl Keychain for ExtKeychain {
}
}
impl ExtKeychain {
// For tests and burn only, associate a key identifier with a known secret key.
pub fn burn_enabled(keychain: &ExtKeychain, burn_key_id: &Identifier) -> ExtKeychain {
let mut key_overrides = HashMap::new();
key_overrides.insert(
burn_key_id.clone(),
SecretKey::from_slice(&keychain.secp, &[1; 32]).unwrap(),
);
ExtKeychain {
key_overrides: key_overrides,
..keychain.clone()
}
}
fn derived_child_key(&self, key_id: &Identifier) -> Result<extkey::ChildKey, Error> {
trace!(LOGGER, "Derived Key by key_id: {}", key_id);
// then check the derivation cache to see if we have previously derived this key
// if so use the derivation from the cache to derive the key
{
let cache = self.key_derivation_cache.read().unwrap();
if let Some(derivation) = cache.get(key_id) {
trace!(
LOGGER,
"... Derived Key (cache hit) key_id: {}, derivation: {}",
key_id,
derivation
);
return Ok(self.derived_key_from_index(*derivation)?);
}
}
// otherwise iterate over a large number of derivations looking for our key
// cache the resulting derivations by key_id for faster lookup later
// TODO - remove hard limit (within reason)
// TODO - do we benefit here if we track our max known n_child?
{
let mut cache = self.key_derivation_cache.write().unwrap();
for i in 1..100_000 {
let child_key = self.extkey.derive(&self.secp, i)?;
// let child_key_id = extkey.identifier(&self.secp)?;
if !cache.contains_key(&child_key.key_id) {
trace!(
LOGGER,
"... Derived Key (cache miss) key_id: {}, derivation: {}",
child_key.key_id,
child_key.n_child,
);
cache.insert(child_key.key_id.clone(), child_key.n_child);
}
if child_key.key_id == *key_id {
return Ok(child_key);
}
}
}
Err(Error::KeyDerivation(format!(
"failed to derive child_key for {:?}",
key_id
)))
}
// if we know the derivation index we can just straight to deriving the key
fn derived_key_from_index(&self, derivation: u32) -> Result<extkey::ChildKey, Error> {
trace!(LOGGER, "Derived Key (fast) by derivation: {}", derivation);
let child_key = self.extkey.derive(&self.secp, derivation)?;
return Ok(child_key);
}
}
#[cfg(test)]
mod test {
use keychain::ExtKeychain;
use types::{BlindSum, BlindingFactor, Keychain};
use types::{BlindSum, BlindingFactor, ExtKeychainPath, Keychain};
use util::secp;
use util::secp::key::SecretKey;
@ -230,8 +154,8 @@ mod test {
let keychain = ExtKeychain::from_random_seed().unwrap();
let secp = keychain.secp();
// use the keychain to derive a "key_id" based on the underlying seed
let key_id = keychain.derive_key_id(1).unwrap();
let path = ExtKeychainPath::new(1, 1, 0, 0, 0);
let key_id = path.to_identifier();
let msg_bytes = [0; 32];
let msg = secp::Message::from_slice(&msg_bytes[..]).unwrap();
@ -296,7 +220,8 @@ mod test {
&BlindSum::new()
.add_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2))
).unwrap(),
)
.unwrap(),
BlindingFactor::from_secret_key(skey3),
);
}

View file

@ -22,20 +22,21 @@ extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate digest;
extern crate hmac;
extern crate ripemd160;
extern crate serde_json;
extern crate sha2;
extern crate slog;
extern crate uuid;
mod base58;
pub mod extkey;
pub mod extkey_bip32;
mod types;
pub mod keychain;
pub use extkey_bip32::ChildNumber;
pub use keychain::ExtKeychain;
pub use types::{BlindSum, BlindingFactor, Error, Identifier, Keychain, IDENTIFIER_SIZE};
pub use types::{
BlindSum, BlindingFactor, Error, ExtKeychainPath, Identifier, Keychain, IDENTIFIER_SIZE,
};

View file

@ -14,6 +14,7 @@
use rand::thread_rng;
use std::cmp::min;
use std::io::Cursor;
use std::ops::Add;
/// Keychain trait and its main supporting types. The Identifier is a
/// semi-opaque structure (just bytes) to track keys within the Keychain.
@ -22,7 +23,8 @@ use std::ops::Add;
use std::{error, fmt};
use blake2::blake2b::blake2b;
use serde::{de, ser};
use extkey_bip32::{self, ChildNumber, ExtendedPrivKey};
use serde::{de, ser}; //TODO: Convert errors to use ErrorKind
use util;
use util::secp::constants::SECRET_KEY_SIZE;
@ -31,13 +33,15 @@ use util::secp::pedersen::Commitment;
use util::secp::{self, Message, Secp256k1, Signature};
use util::static_secp_instance;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
// Size of an identifier in bytes
pub const IDENTIFIER_SIZE: usize = 10;
pub const IDENTIFIER_SIZE: usize = 17;
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Error {
Secp(secp::Error),
KeyDerivation(String),
KeyDerivation(extkey_bip32::Error),
Transaction(String),
RangeProof(String),
}
@ -48,6 +52,12 @@ impl From<secp::Error> for Error {
}
}
impl From<extkey_bip32::Error> for Error {
fn from(e: extkey_bip32::Error) -> Error {
Error::KeyDerivation(e)
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
@ -108,6 +118,42 @@ impl Identifier {
Identifier::from_bytes(&[0; IDENTIFIER_SIZE])
}
pub fn from_path(path: &ExtKeychainPath) -> Identifier {
path.to_identifier()
}
pub fn to_path(&self) -> ExtKeychainPath {
ExtKeychainPath::from_identifier(&self)
}
/// Output the path itself (without the depth byte) for insertion into a
/// bulletproof message; recovery processes can grind through possibilities
/// to find the correct length if required.
pub fn serialize_path(&self) -> [u8; IDENTIFIER_SIZE - 1] {
let mut retval = [0u8; IDENTIFIER_SIZE - 1];
retval.copy_from_slice(&self.0[1..IDENTIFIER_SIZE]);
retval
}
/// restore from a serialized path
pub fn from_serialized_path(len: u8, p: &[u8]) -> Identifier {
let mut id = [0; IDENTIFIER_SIZE];
id[0] = len;
for i in 1..IDENTIFIER_SIZE {
id[i] = p[i - 1];
}
Identifier(id)
}
/// Return the parent path
pub fn parent_path(&self) -> Identifier {
let mut p = ExtKeychainPath::from_identifier(&self);
if p.depth > 0 {
p.path[p.depth as usize - 1] = ChildNumber::from(0);
p.depth = p.depth - 1;
}
Identifier::from_path(&p)
}
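// e.g. the parent of m/0/0/10 (depth 3) is m/0/0 (depth 2)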
pub fn from_bytes(bytes: &[u8]) -> Identifier {
let mut identifier = [0; IDENTIFIER_SIZE];
for i in 0..min(IDENTIFIER_SIZE, bytes.len()) {
@ -142,6 +188,15 @@ impl Identifier {
pub fn to_hex(&self) -> String {
util::to_hex(self.0.to_vec())
}
pub fn to_bip_32_string(&self) -> String {
let p = ExtKeychainPath::from_identifier(&self);
let mut retval = String::from("m");
for i in 0..p.depth {
retval.push_str(&format!("/{}", <u32>::from(p.path[i as usize])));
}
retval
}
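// e.g. a depth-3 path with components [1, 2, 3] renders as "m/1/2/3"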
}
impl AsRef<[u8]> for Identifier {
@ -272,8 +327,8 @@ pub struct SplitBlindingFactor {
/// factor as well as the "sign" with which they should be combined.
#[derive(Clone, Debug, PartialEq)]
pub struct BlindSum {
pub positive_key_ids: Vec<Identifier>,
pub negative_key_ids: Vec<Identifier>,
pub positive_key_ids: Vec<ExtKeychainPath>,
pub negative_key_ids: Vec<ExtKeychainPath>,
pub positive_blinding_factors: Vec<BlindingFactor>,
pub negative_blinding_factors: Vec<BlindingFactor>,
}
@ -289,13 +344,13 @@ impl BlindSum {
}
}
pub fn add_key_id(mut self, key_id: Identifier) -> BlindSum {
self.positive_key_ids.push(key_id);
pub fn add_key_id(mut self, path: ExtKeychainPath) -> BlindSum {
self.positive_key_ids.push(path);
self
}
pub fn sub_key_id(mut self, key_id: Identifier) -> BlindSum {
self.negative_key_ids.push(key_id);
pub fn sub_key_id(mut self, path: ExtKeychainPath) -> BlindSum {
self.negative_key_ids.push(path);
self
}
@ -312,16 +367,78 @@ impl BlindSum {
}
}
/// Encapsulates a max 4-level deep BIP32 path, which is the
/// most we can currently fit into a rangeproof message
#[derive(Copy, Clone, PartialEq, Eq, Debug, Deserialize)]
pub struct ExtKeychainPath {
pub depth: u8,
pub path: [extkey_bip32::ChildNumber; 4],
}
impl ExtKeychainPath {
/// Return a new chain path with given derivation and depth
pub fn new(depth: u8, d0: u32, d1: u32, d2: u32, d3: u32) -> ExtKeychainPath {
ExtKeychainPath {
depth: depth,
path: [
ChildNumber::from(d0),
ChildNumber::from(d1),
ChildNumber::from(d2),
ChildNumber::from(d3),
],
}
}
/// from an Identifier [manual deserialization]
pub fn from_identifier(id: &Identifier) -> ExtKeychainPath {
let mut rdr = Cursor::new(id.0.to_vec());
ExtKeychainPath {
depth: rdr.read_u8().unwrap(),
path: [
ChildNumber::from(rdr.read_u32::<BigEndian>().unwrap()),
ChildNumber::from(rdr.read_u32::<BigEndian>().unwrap()),
ChildNumber::from(rdr.read_u32::<BigEndian>().unwrap()),
ChildNumber::from(rdr.read_u32::<BigEndian>().unwrap()),
],
}
}
/// to an Identifier [manual serialization]
pub fn to_identifier(&self) -> Identifier {
let mut wtr = vec![];
wtr.write_u8(self.depth).unwrap();
wtr.write_u32::<BigEndian>(<u32>::from(self.path[0]))
.unwrap();
wtr.write_u32::<BigEndian>(<u32>::from(self.path[1]))
.unwrap();
wtr.write_u32::<BigEndian>(<u32>::from(self.path[2]))
.unwrap();
wtr.write_u32::<BigEndian>(<u32>::from(self.path[3]))
.unwrap();
let mut retval = [0u8; IDENTIFIER_SIZE];
retval.copy_from_slice(&wtr[0..IDENTIFIER_SIZE]);
Identifier(retval)
}
/// Last part of the path (for last n_child)
pub fn last_path_index(&self) -> u32 {
if self.depth == 0 {
0
} else {
<u32>::from(self.path[self.depth as usize - 1])
}
}
}
pub trait Keychain: Sync + Send + Clone {
fn from_seed(seed: &[u8]) -> Result<Self, Error>;
fn from_random_seed() -> Result<Self, Error>;
fn root_key_id(&self) -> Identifier;
fn derive_key_id(&self, derivation: u32) -> Result<Identifier, Error>;
fn derived_key(&self, key_id: &Identifier) -> Result<SecretKey, Error>;
fn commit(&self, amount: u64, key_id: &Identifier) -> Result<Commitment, Error>;
fn commit_with_key_index(&self, amount: u64, derivation: u32) -> Result<Commitment, Error>;
fn root_key_id() -> Identifier;
fn derive_key_id(depth: u8, d1: u32, d2: u32, d3: u32, d4: u32) -> Identifier;
fn derive_key(&self, id: &Identifier) -> Result<ExtendedPrivKey, Error>;
fn commit(&self, amount: u64, id: &Identifier) -> Result<Commitment, Error>;
fn blind_sum(&self, blind_sum: &BlindSum) -> Result<BlindingFactor, Error>;
fn sign(&self, msg: &Message, key_id: &Identifier) -> Result<Signature, Error>;
fn sign(&self, msg: &Message, id: &Identifier) -> Result<Signature, Error>;
fn sign_with_blinding(&self, &Message, &BlindingFactor) -> Result<Signature, Error>;
fn secp(&self) -> &Secp256k1;
}
@ -330,7 +447,7 @@ pub trait Keychain: Sync + Send + Clone {
mod test {
use rand::thread_rng;
use types::BlindingFactor;
use types::{BlindingFactor, ExtKeychainPath, Identifier};
use util::secp::key::{SecretKey, ZERO_KEY};
use util::secp::Secp256k1;
@ -361,4 +478,34 @@ mod test {
assert_eq!(skey_in, skey_out);
}
// Check path identifiers
#[test]
fn path_identifier() {
let path = ExtKeychainPath::new(4, 1, 2, 3, 4);
let id = Identifier::from_path(&path);
let ret_path = id.to_path();
assert_eq!(path, ret_path);
let path = ExtKeychainPath::new(
1,
<u32>::max_value(),
<u32>::max_value(),
3,
<u32>::max_value(),
);
let id = Identifier::from_path(&path);
let ret_path = id.to_path();
assert_eq!(path, ret_path);
println!("id: {:?}", id);
println!("ret_path {:?}", ret_path);
let path = ExtKeychainPath::new(3, 0, 0, 10, 0);
let id = Identifier::from_path(&path);
let parent_id = id.parent_path();
let expected_path = ExtKeychainPath::new(2, 0, 0, 0, 0);
let expected_id = Identifier::from_path(&expected_path);
assert_eq!(expected_id, parent_id);
}
}

View file

@ -1,6 +1,6 @@
[package]
name = "grin_p2p"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false

View file

@ -142,6 +142,9 @@ impl<'a> Response<'a> {
}
}
pub const SEND_CHANNEL_CAP: usize = 10;
// TODO count sent and received
pub struct Tracker {
/// Bytes we've sent.
pub sent_bytes: Arc<RwLock<u64>>,
@ -180,7 +183,7 @@ pub fn listen<H>(stream: TcpStream, handler: H) -> Tracker
where
H: MessageHandler,
{
let (send_tx, send_rx) = mpsc::sync_channel(10);
let (send_tx, send_rx) = mpsc::sync_channel(SEND_CHANNEL_CAP);
let (close_tx, close_rx) = mpsc::channel();
let (error_tx, error_rx) = mpsc::channel();

View file

@ -50,6 +50,7 @@ mod serv;
mod store;
pub mod types;
pub use conn::SEND_CHANNEL_CAP;
pub use peer::Peer;
pub use peers::Peers;
pub use serv::{DummyAdapter, Server};

View file

@ -35,7 +35,7 @@ pub const PROTOCOL_VERSION: u32 = 1;
pub const USER_AGENT: &'static str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));
/// Magic number expected in the header of every message
const MAGIC: [u8; 2] = [0x1e, 0xc5];
const MAGIC: [u8; 2] = [0x54, 0x34];
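// 0x54 0x34 is ASCII "T4": a distinct magic keeps Testnet4 nodes from
// accepting messages from peers on older networks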
/// Size in bytes of a message header
pub const HEADER_LEN: u64 = 11;

View file

@ -159,7 +159,6 @@ impl Peer {
return Some(*sent_bytes);
}
}
None
}
@ -170,7 +169,6 @@ impl Peer {
return Some(*received_bytes);
}
}
None
}

View file

@ -335,25 +335,25 @@ fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error>
}
let average_header_size = (msg_len - 2) / total_headers;
// support size of Cuckoo: from Cuckoo 30 to Cuckoo 36, with version 2
// support size of Cuck(at)oo: from Cuck(at)oo 29 to Cuck(at)oo 35, with version 2
// having slightly larger headers
let minimum_size = core::serialized_size_of_header(1, global::min_sizeshift());
let maximum_size = core::serialized_size_of_header(2, global::min_sizeshift() + 6);
if average_header_size < minimum_size as u64 || average_header_size > maximum_size as u64 {
let min_size = core::serialized_size_of_header(1, global::min_edge_bits());
let max_size = min_size + 6;
if average_header_size < min_size as u64 || average_header_size > max_size as u64 {
debug!(
LOGGER,
"headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}",
total_headers,
average_header_size,
minimum_size,
maximum_size,
min_size,
max_size,
);
return Err(Error::Connection(io::Error::new(
io::ErrorKind::InvalidData,
"headers_header_size",
)));
}
return Ok(maximum_size as u64);
return Ok(max_size as u64);
}
/// Read the Headers streaming body from the underlying connection

View file

@ -1,6 +1,6 @@
[package]
name = "grin_pool"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = '..'
publish = false

View file

@ -31,9 +31,6 @@ use util::LOGGER;
const MAX_MINEABLE_WEIGHT: usize =
consensus::MAX_BLOCK_WEIGHT - consensus::BLOCK_OUTPUT_WEIGHT - consensus::BLOCK_KERNEL_WEIGHT;
// longest chain of dependent transactions that can be included in a block
const MAX_TX_CHAIN: usize = 20;
pub struct Pool {
/// Entries in the pool (tx + info + timer) in simple insertion order.
pub entries: Vec<PoolEntry>,
@ -118,10 +115,8 @@ impl Pool {
// flatten buckets using aggregate (with cut-through)
let mut flat_txs: Vec<Transaction> = tx_buckets
.into_iter()
.filter_map(|mut bucket| {
bucket.truncate(MAX_TX_CHAIN);
transaction::aggregate(bucket).ok()
}).filter(|x| x.validate(self.verifier_cache.clone()).is_ok())
.filter_map(|bucket| transaction::aggregate(bucket).ok())
.filter(|x| x.validate(self.verifier_cache.clone()).is_ok())
.collect();
// sort by fees over weight, multiplying by 1000 to keep some precision

View file

@ -51,13 +51,12 @@ fn test_transaction_pool_block_building() {
// so we have a non-empty UTXO set.
let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = keychain.derive_key_id(height as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fee, height).unwrap();
let block = Block::new(&prev_header, txs, Difficulty::one(), reward).unwrap();
chain.update_db_for_block(&block);
block.header
};
@ -113,7 +112,7 @@ fn test_transaction_pool_block_building() {
assert_eq!(txs.len(), 3);
let block = {
let key_id = keychain.derive_key_id(2).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
Block::new(&header, txs, Difficulty::one(), reward)

View file

@ -50,7 +50,7 @@ fn test_transaction_pool_block_reconciliation() {
let header = {
let height = 1;
let key_id = keychain.derive_key_id(height as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(&keychain, &key_id, 0, height).unwrap();
let block = Block::new(&BlockHeader::default(), vec![], Difficulty::one(), reward).unwrap();
@ -64,7 +64,7 @@ fn test_transaction_pool_block_reconciliation() {
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
let block = {
let key_id = keychain.derive_key_id(2).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = initial_tx.fee();
let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
let block = Block::new(&header, vec![initial_tx], Difficulty::one(), reward).unwrap();
@ -154,7 +154,7 @@ fn test_transaction_pool_block_reconciliation() {
// Now apply this block.
let block = {
let key_id = keychain.derive_key_id(3).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let fees = block_txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
let block = Block::new(&header, block_txs, Difficulty::one(), reward).unwrap();

View file

@ -38,7 +38,7 @@ use chain::store::ChainStore;
use chain::types::Tip;
use pool::*;
use keychain::Keychain;
use keychain::{ExtKeychain, Keychain};
use wallet::libtx;
use pool::types::*;
@ -192,12 +192,12 @@ where
// single input spending a single coinbase (deterministic key_id aka height)
{
let key_id = keychain.derive_key_id(header.height as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, header.height as u32, 0, 0, 0);
tx_elements.push(libtx::build::coinbase_input(coinbase_reward, key_id));
}
for output_value in output_values {
let key_id = keychain.derive_key_id(output_value as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::output(output_value, key_id));
}
@ -223,12 +223,12 @@ where
let mut tx_elements = Vec::new();
for input_value in input_values {
let key_id = keychain.derive_key_id(input_value as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, input_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::input(input_value, key_id));
}
for output_value in output_values {
let key_id = keychain.derive_key_id(output_value as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::output(output_value, key_id));
}
tx_elements.push(libtx::build::with_fee(fees as u64));

View file

@ -50,7 +50,7 @@ fn test_the_transaction_pool() {
let header = {
let height = 1;
let key_id = keychain.derive_key_id(height as u32).unwrap();
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(&keychain, &key_id, 0, height).unwrap();
let mut block =
Block::new(&BlockHeader::default(), vec![], Difficulty::one(), reward).unwrap();

View file

@ -1,6 +1,6 @@
[package]
name = "grin_servers"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false

View file

@ -21,7 +21,7 @@ use std::sync::{Arc, RwLock, Weak};
use std::thread;
use std::time::Instant;
use chain::{self, ChainAdapter, Options, Tip};
use chain::{self, ChainAdapter, Options};
use chrono::prelude::{DateTime, Utc};
use common::types::{self, ChainValidationMode, ServerConfig, SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed};
@ -32,6 +32,7 @@ use core::pow::Difficulty;
use core::{core, global};
use p2p;
use pool;
use rand::prelude::*;
use store;
use util::{OneTime, LOGGER};
@ -164,11 +165,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
if let Ok(prev) = self.chain().get_block_header(&cb.header.previous) {
if block
.validate(
&prev.total_kernel_offset,
&prev.total_kernel_sum,
self.verifier_cache.clone(),
).is_ok()
.validate(&prev.total_kernel_offset, self.verifier_cache.clone())
.is_ok()
{
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr)
@ -473,9 +471,9 @@ impl NetToChainAdapter {
let prev_hash = b.header.previous;
let bhash = b.hash();
match self.chain().process_block(b, self.chain_opts()) {
Ok(tip) => {
Ok(_) => {
self.validate_chain(bhash);
self.check_compact(tip);
self.check_compact();
true
}
Err(ref e) if e.is_bad_data() => {
@ -544,25 +542,24 @@ impl NetToChainAdapter {
}
}
fn check_compact(&self, tip: Option<Tip>) {
fn check_compact(&self) {
// no compaction during sync or if we're in historical mode
if self.archive_mode || self.sync_state.is_syncing() {
return;
}
if let Some(tip) = tip {
// trigger compaction every 2000 blocks, uses a different thread to avoid
// blocking the caller thread (likely a peer)
if tip.height % 2000 == 0 {
let chain = self.chain().clone();
let _ = thread::Builder::new()
.name("compactor".to_string())
.spawn(move || {
if let Err(e) = chain.compact() {
error!(LOGGER, "Could not compact chain: {:?}", e);
}
});
}
// Roll the dice to trigger compaction at 1/COMPACTION_CHECK chance per block,
// uses a different thread to avoid blocking the caller thread (likely a peer)
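// (a 1/COMPACTION_CHECK chance per block means compaction runs on
// average once every COMPACTION_CHECK blocks)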
let mut rng = thread_rng();
if 0 == rng.gen_range(0, global::COMPACTION_CHECK) {
let chain = self.chain().clone();
let _ = thread::Builder::new()
.name("compactor".to_string())
.spawn(move || {
if let Err(e) = chain.compact() {
error!(LOGGER, "Could not compact chain: {:?}", e);
}
});
}
}

View file

@ -19,6 +19,8 @@ use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::time::SystemTime;
use core::pow::Difficulty;
use chrono::prelude::*;
use chain;
@ -98,7 +100,7 @@ pub struct StratumStats {
/// current network difficulty we're working on
pub network_difficulty: u64,
/// cuckoo size used for mining
pub cuckoo_size: u16,
pub edge_bits: u16,
/// Individual worker status
pub worker_stats: Vec<WorkerStats>,
}
@ -129,6 +131,10 @@ pub struct DiffBlock {
pub time: u64,
/// Duration since previous block (epoch seconds)
pub duration: u64,
/// secondary scaling
pub secondary_scaling: u32,
/// is secondary
pub is_secondary: bool,
}
/// Struct to return relevant information about peers
@ -157,7 +163,8 @@ pub struct PeerStats {
impl StratumStats {
/// Calculate network hashrate
pub fn network_hashrate(&self) -> f64 {
42.0 * (self.network_difficulty as f64 / (self.cuckoo_size - 1) as f64) / 60.0
42.0 * (self.network_difficulty as f64 / Difficulty::scale(self.edge_bits as u8) as f64)
/ 60.0
}
}
@ -213,7 +220,7 @@ impl Default for StratumStats {
num_workers: 0,
block_height: 0,
network_difficulty: 1000,
cuckoo_size: 30,
edge_bits: 29,
worker_stats: Vec::new(),
}
}

View file

@ -31,9 +31,7 @@ use util::LOGGER;
// DNS Seeds with contact email associated
const DNS_SEEDS: &'static [&'static str] = &[
"t3.seed.grin-tech.org", // igno.peverell@protonmail.com
"seed.grin.lesceller.com", // q.lesceller@gmail.com
"t3.grin-seed.prokapi.com", // info@prokapi.com
"t4.seed.grin-tech.org", // igno.peverell@protonmail.com
];
pub fn connect_and_monitor(

View file

@ -30,7 +30,6 @@ use common::stats::{DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStat
use common::types::{Error, ServerConfig, StratumServerConfig, SyncState};
use core::core::hash::Hashed;
use core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use core::pow::Difficulty;
use core::{consensus, genesis, global, pow};
use grin::{dandelion_monitor, seed, sync};
use mining::stratumserver;
@ -150,6 +149,7 @@ impl Server {
global::ChainTypes::Testnet1 => genesis::genesis_testnet1(),
global::ChainTypes::Testnet2 => genesis::genesis_testnet2(),
global::ChainTypes::Testnet3 => genesis::genesis_testnet3(),
global::ChainTypes::Testnet4 => genesis::genesis_testnet4(),
global::ChainTypes::AutomatedTesting => genesis::genesis_dev(),
global::ChainTypes::UserTesting => genesis::genesis_dev(),
global::ChainTypes::Mainnet => genesis::genesis_testnet2(), //TODO: Fix, obviously
@ -313,7 +313,7 @@ impl Server {
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let cuckoo_size = global::min_sizeshift();
let edge_bits = global::min_edge_bits();
let proof_size = global::proofsize();
let sync_state = self.sync_state.clone();
@ -327,7 +327,7 @@ impl Server {
let _ = thread::Builder::new()
.name("stratum_server".to_string())
.spawn(move || {
stratum_server.run_loop(stratum_stats, cuckoo_size as u32, proof_size, sync_state);
stratum_server.run_loop(stratum_stats, edge_bits as u32, proof_size, sync_state);
});
}
@ -397,14 +397,13 @@ impl Server {
// code clean. This may be handy for testing but not really needed
// for release
let diff_stats = {
let last_blocks: Vec<Result<(u64, Difficulty), consensus::TargetError>> =
let last_blocks: Vec<consensus::HeaderInfo> =
global::difficulty_data_to_vector(self.chain.difficulty_iter())
.into_iter()
.skip(consensus::MEDIAN_TIME_WINDOW as usize)
.take(consensus::DIFFICULTY_ADJUST_WINDOW as usize)
.collect();
let mut last_time = last_blocks[0].clone().unwrap().0;
let mut last_time = last_blocks[0].timestamp;
let tip_height = self.chain.head().unwrap().height as i64;
let earliest_block_height = tip_height as i64 - last_blocks.len() as i64;
@ -414,16 +413,17 @@ impl Server {
.iter()
.skip(1)
.map(|n| {
let (time, diff) = n.clone().unwrap();
let dur = time - last_time;
let dur = n.timestamp - last_time;
let height = earliest_block_height + i + 1;
i += 1;
last_time = time;
last_time = n.timestamp;
DiffBlock {
block_number: height,
difficulty: diff.to_num(),
time: time,
difficulty: n.difficulty.to_num(),
time: n.timestamp,
duration: dur,
secondary_scaling: n.secondary_scaling,
is_secondary: n.is_secondary,
}
}).collect();

View file

@ -126,9 +126,14 @@ impl BodySync {
// if we have 5 peers to sync from then ask for 50 blocks total (peer_count *
// 10); the max will be 80 if all 8 peers are advertising more work.
// Also throttle if the chain is already saturated with orphans.
let peer_count = self.peers.more_work_peers().len();
let peers = if oldest_height < header_head.height.saturating_sub(horizon) {
self.peers.more_work_archival_peers()
} else {
self.peers.more_work_peers()
};
let block_count = cmp::min(
cmp::min(100, peer_count * 10),
cmp::min(100, peers.len() * p2p::SEND_CHANNEL_CAP),
chain::MAX_ORPHAN_SIZE.saturating_sub(self.chain.orphans_len()) + 1,
);
@ -148,17 +153,13 @@ impl BodySync {
body_head.height,
header_head.height,
hashes_to_get,
peer_count,
peers.len(),
);
let mut peers_iter = peers.iter().cycle();
for hash in hashes_to_get.clone() {
// only archival peers can be expected to have blocks older than horizon
let peer = if oldest_height < header_head.height.saturating_sub(horizon) {
self.peers.more_work_archival_peer()
} else {
self.peers.more_work_peer()
};
if let Some(peer) = peer {
if let Some(peer) = peers_iter.next() {
if let Err(e) = peer.send_block_request(*hash) {
debug!(LOGGER, "Skipped request to {}: {:?}", peer.info.addr, e);
} else {

View file

@ -185,7 +185,7 @@ fn needs_syncing(
// sum the last 5 difficulties to give us the threshold
let threshold = chain
.difficulty_iter()
.filter_map(|x| x.map(|(_, x)| x).ok())
.map(|x| x.difficulty)
.take(5)
.fold(Difficulty::zero(), |sum, val| sum + val);

View file

@ -95,9 +95,9 @@ fn build_block(
key_id: Option<Identifier>,
wallet_listener_url: Option<String>,
) -> Result<(core::Block, BlockFees), Error> {
// prepare the block header timestamp
let head = chain.head_header()?;
// prepare the block header timestamp
let mut now_sec = Utc::now().timestamp();
let head_sec = head.timestamp.timestamp();
if now_sec <= head_sec {
@ -106,7 +106,7 @@ fn build_block(
// Determine the difficulty our block should be at.
// Note: do not keep the difficulty_iter in scope (it has an active batch).
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
let difficulty = consensus::next_difficulty(1, chain.difficulty_iter());
// extract current transaction from the pool
// TODO - we have a lot of unwrap() going on in this fn...
@ -126,17 +126,14 @@ fn build_block(
};
let (output, kernel, block_fees) = get_coinbase(wallet_listener_url, block_fees)?;
let mut b = core::Block::with_reward(&head, txs, output, kernel, difficulty.clone())?;
let mut b = core::Block::with_reward(&head, txs, output, kernel, difficulty.difficulty)?;
// making sure we're not spending time mining a useless block
b.validate(
&head.total_kernel_offset,
&head.total_kernel_sum,
verifier_cache,
)?;
b.validate(&head.total_kernel_offset, verifier_cache)?;
b.header.pow.nonce = thread_rng().gen();
b.header.timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now_sec, 0), Utc);;
b.header.pow.scaling_difficulty = difficulty.secondary_scaling;
b.header.timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now_sec, 0), Utc);
let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num();
debug!(
@ -184,7 +181,7 @@ fn build_block(
fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
warn!(LOGGER, "Burning block fees: {:?}", block_fees);
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let (out, kernel) =
wallet::libtx::reward::output(&keychain, &key_id, block_fees.fees, block_fees.height)
.unwrap();

View file

@ -30,7 +30,7 @@ use common::stats::{StratumStats, WorkerStats};
use common::types::{StratumServerConfig, SyncState};
use core::core::verifier_cache::VerifierCache;
use core::core::Block;
use core::{global, pow, ser};
use core::{pow, ser};
use keychain;
use mining::mine_block;
use pool;
@ -75,6 +75,7 @@ struct SubmitParams {
height: u64,
job_id: u64,
nonce: u64,
edge_bits: u32,
pow: Vec<u64>,
}
@ -480,6 +481,7 @@ impl StratumServer {
}
let mut b: Block = b.unwrap().clone();
// Reconstruct the block header with this nonce and pow added
b.header.pow.proof.edge_bits = params.edge_bits as u8;
b.header.pow.nonce = params.nonce;
b.header.pow.proof.nonces = params.pow;
// Get share difficulty
@ -509,10 +511,11 @@ impl StratumServer {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: {:?}",
"(Server ID: {}) Failed to validate solution at height {}: {}: {}",
self.id,
params.height,
e
e,
e.backtrace().unwrap(),
);
worker_stats.num_rejected += 1;
let e = RpcError {
@ -529,7 +532,7 @@ impl StratumServer {
);
} else {
// Do some validation but dont submit
if !pow::verify_size(&b.header, global::min_sizeshift()).is_ok() {
if !pow::verify_size(&b.header, b.header.pow.proof.edge_bits).is_ok() {
// Return error status
error!(
LOGGER,
@ -650,15 +653,15 @@ impl StratumServer {
pub fn run_loop(
&mut self,
stratum_stats: Arc<RwLock<StratumStats>>,
cuckoo_size: u32,
edge_bits: u32,
proof_size: usize,
sync_state: Arc<SyncState>,
) {
info!(
LOGGER,
"(Server ID: {}) Starting stratum server with cuckoo_size = {}, proof_size = {}",
"(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}",
self.id,
cuckoo_size,
edge_bits,
proof_size
);
@ -690,7 +693,7 @@ impl StratumServer {
{
let mut stratum_stats = stratum_stats.write().unwrap();
stratum_stats.is_running = true;
stratum_stats.cuckoo_size = cuckoo_size as u16;
stratum_stats.edge_bits = edge_bits as u16;
}
warn!(

View file

@ -87,7 +87,7 @@ impl Miner {
LOGGER,
"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
self.debug_output_id,
global::min_sizeshift(),
global::min_edge_bits(),
attempt_time_per_block,
b.header.total_difficulty(),
b.header.height,
@ -97,7 +97,7 @@ impl Miner {
while head.hash() == *latest_hash && Utc::now().timestamp() < deadline {
let mut ctx =
global::create_pow_context::<u32>(global::min_sizeshift(), global::proofsize(), 10)
global::create_pow_context::<u32>(global::min_edge_bits(), global::proofsize(), 10)
.unwrap();
ctx.set_header_nonce(b.header.pre_pow(), None, true)
.unwrap();

View file

@ -28,7 +28,8 @@ use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::{fs, thread, time};
use wallet::{FileWallet, HTTPWalletClient, WalletConfig};
use framework::keychain::Keychain;
use wallet::{HTTPWalletClient, LMDBBackend, WalletConfig};
/// Just removes all results from previous runs
pub fn clean_all_output(test_name_dir: &str) {
@ -269,8 +270,8 @@ impl LocalServerContainer {
//panic!("Error initializing wallet seed: {}", e);
}
let wallet: FileWallet<HTTPWalletClient, keychain::ExtKeychain> =
FileWallet::new(self.wallet_config.clone(), "", client).unwrap_or_else(|e| {
let wallet: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> =
LMDBBackend::new(self.wallet_config.clone(), "", client).unwrap_or_else(|e| {
panic!(
"Error creating wallet: {:?} Config: {:?}",
e, self.wallet_config
@ -307,11 +308,12 @@ impl LocalServerContainer {
.derive_keychain("")
.expect("Failed to derive keychain from seed file and passphrase.");
let client = HTTPWalletClient::new(&config.check_node_api_http_addr, None);
let mut wallet = FileWallet::new(config.clone(), "", client)
let mut wallet = LMDBBackend::new(config.clone(), "", client)
.unwrap_or_else(|e| panic!("Error creating wallet: {:?} Config: {:?}", e, config));
wallet.keychain = Some(keychain);
let _ = wallet::libwallet::internal::updater::refresh_outputs(&mut wallet);
wallet::libwallet::internal::updater::retrieve_info(&mut wallet).unwrap()
let parent_id = keychain::ExtKeychain::derive_key_id(2, 0, 0, 0, 0);
let _ = wallet::libwallet::internal::updater::refresh_outputs(&mut wallet, &parent_id);
wallet::libwallet::internal::updater::retrieve_info(&mut wallet, &parent_id).unwrap()
}
pub fn send_amount_to(
@ -337,7 +339,7 @@ impl LocalServerContainer {
let max_outputs = 500;
let change_outputs = 1;
let mut wallet = FileWallet::new(config.clone(), "", client)
let mut wallet = LMDBBackend::new(config.clone(), "", client)
.unwrap_or_else(|e| panic!("Error creating wallet: {:?} Config: {:?}", e, config));
wallet.keychain = Some(keychain);
let _ =

View file

@ -19,8 +19,8 @@ use std::path::PathBuf;
/// Wallet commands processing
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use std::{process, thread};
use clap::ArgMatches;
@ -28,7 +28,9 @@ use api::TLSConfig;
use config::GlobalWalletConfig;
use core::{core, global};
use grin_wallet::{self, controller, display, libwallet};
use grin_wallet::{HTTPWalletClient, LMDBBackend, WalletConfig, WalletInst, WalletSeed};
use grin_wallet::{
HTTPWalletClient, LMDBBackend, WalletBackend, WalletConfig, WalletInst, WalletSeed,
};
use keychain;
use servers::start_webwallet_server;
use util::file::get_first_line;
@ -53,29 +55,23 @@ pub fn seed_exists(wallet_config: WalletConfig) -> bool {
pub fn instantiate_wallet(
wallet_config: WalletConfig,
passphrase: &str,
account: &str,
node_api_secret: Option<String>,
) -> Box<WalletInst<HTTPWalletClient, keychain::ExtKeychain>> {
if grin_wallet::needs_migrate(&wallet_config.data_file_dir) {
// Migrate wallet automatically
warn!(LOGGER, "Migrating legacy File-Based wallet to LMDB Format");
if let Err(e) = grin_wallet::migrate(&wallet_config.data_file_dir, passphrase) {
error!(LOGGER, "Error while trying to migrate wallet: {:?}", e);
error!(LOGGER, "Please ensure your file wallet files exist and are not corrupted, and that your password is correct");
panic!();
} else {
warn!(LOGGER, "Migration successful. Using LMDB Wallet backend");
}
warn!(LOGGER, "Please check the results of the migration process using `grin wallet info` and `grin wallet outputs`");
warn!(LOGGER, "If anything went wrong, you can try again by deleting the `db` directory and running a wallet command");
warn!(LOGGER, "If all is okay, you can move/backup/delete all files in the wallet directory EXCEPT FOR wallet.seed");
}
let client = HTTPWalletClient::new(&wallet_config.check_node_api_http_addr, node_api_secret);
let db_wallet = LMDBBackend::new(wallet_config.clone(), "", client).unwrap_or_else(|e| {
panic!(
"Error creating DB wallet: {} Config: {:?}",
e, wallet_config
);
});
let mut db_wallet =
LMDBBackend::new(wallet_config.clone(), passphrase, client).unwrap_or_else(|e| {
panic!(
"Error creating DB wallet: {} Config: {:?}",
e, wallet_config
);
});
db_wallet
.set_parent_key_id_by_name(account)
.unwrap_or_else(|e| {
println!("Error starting wallet: {}", e);
process::exit(0);
});
info!(LOGGER, "Using LMDB Backend for wallet");
Box::new(db_wallet)
}
@ -130,9 +126,19 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let passphrase = wallet_args
.value_of("pass")
.expect("Failed to read passphrase.");
let account = wallet_args
.value_of("account")
.expect("Failed to read account.");
// Handle listener startup commands
{
let wallet = instantiate_wallet(wallet_config.clone(), passphrase, node_api_secret.clone());
let wallet = instantiate_wallet(
wallet_config.clone(),
passphrase,
account,
node_api_secret.clone(),
);
let api_secret = get_first_line(wallet_config.api_secret_path.clone());
let tls_conf = match wallet_config.tls_certificate_file.clone() {
@ -187,10 +193,40 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let wallet = Arc::new(Mutex::new(instantiate_wallet(
wallet_config.clone(),
passphrase,
account,
node_api_secret,
)));
let res = controller::owner_single_use(wallet.clone(), |api| {
match wallet_args.subcommand() {
("account", Some(acct_args)) => {
let create = acct_args.value_of("create");
if create.is_none() {
let res = controller::owner_single_use(wallet, |api| {
let acct_mappings = api.accounts()?;
// give logging thread a moment to catch up
thread::sleep(Duration::from_millis(200));
display::accounts(acct_mappings);
Ok(())
});
if res.is_err() {
panic!("Error listing accounts: {}", res.unwrap_err());
}
} else {
let label = create.unwrap();
let res = controller::owner_single_use(wallet, |api| {
api.new_account_path(label)?;
thread::sleep(Duration::from_millis(200));
println!("Account: '{}' Created!", label);
Ok(())
});
if res.is_err() {
thread::sleep(Duration::from_millis(200));
println!("Error creating account '{}': {}", label, res.unwrap_err());
exit(1);
}
}
Ok(())
}
("send", Some(send_args)) => {
let amount = send_args
.value_of("amount")
@ -352,18 +388,19 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
e, wallet_config
)
});
display::info(&wallet_info, validated);
display::info(account, &wallet_info, validated);
Ok(())
}
("outputs", Some(_)) => {
let (height, _) = api.node_height()?;
let (validated, outputs) = api.retrieve_outputs(show_spent, true, None)?;
let _res = display::outputs(height, validated, outputs).unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {:?} Config: {:?}",
e, wallet_config
)
});
let _res =
display::outputs(account, height, validated, outputs).unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {:?} Config: {:?}",
e, wallet_config
)
});
Ok(())
}
("txs", Some(txs_args)) => {
@ -377,8 +414,8 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let (height, _) = api.node_height()?;
let (validated, txs) = api.retrieve_txs(true, tx_id)?;
let include_status = !tx_id.is_some();
let _res =
display::txs(height, validated, txs, include_status).unwrap_or_else(|e| {
let _res = display::txs(account, height, validated, txs, include_status)
.unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {} Config: {:?}",
e, wallet_config
@ -388,12 +425,13 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
// inputs/outputs
if tx_id.is_some() {
let (_, outputs) = api.retrieve_outputs(true, false, tx_id)?;
let _res = display::outputs(height, validated, outputs).unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {} Config: {:?}",
e, wallet_config
)
});
let _res = display::outputs(account, height, validated, outputs)
.unwrap_or_else(|e| {
panic!(
"Error getting wallet outputs: {} Config: {:?}",
e, wallet_config
)
});
};
Ok(())
}

View file

@ -97,7 +97,7 @@ fn main() {
.help("Port to start the P2P server on")
.takes_value(true))
.arg(Arg::with_name("api_port")
.short("a")
.short("api")
.long("api_port")
.help("Port on which to start the api server (e.g. transaction pool api)")
.takes_value(true))
@ -154,6 +154,12 @@ fn main() {
.help("Wallet passphrase used to generate the private key seed")
.takes_value(true)
.default_value(""))
.arg(Arg::with_name("account")
.short("a")
.long("account")
.help("Wallet account to use for this operation")
.takes_value(true)
.default_value("default"))
.arg(Arg::with_name("data_dir")
.short("dd")
.long("data_dir")
@ -171,11 +177,19 @@ fn main() {
.help("Show spent outputs on wallet output command")
.takes_value(false))
.arg(Arg::with_name("api_server_address")
.short("a")
.short("r")
.long("api_server_address")
.help("Api address of running node on which to check inputs and post transactions")
.takes_value(true))
.subcommand(SubCommand::with_name("account")
.about("List wallet accounts or create a new account")
.arg(Arg::with_name("create")
.short("c")
.long("create")
.help("Name of new wallet account")
.takes_value(true)))
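// e.g. `grin wallet account --create savings`, then `grin wallet --account savings info`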
.subcommand(SubCommand::with_name("listen")
.about("Runs the wallet in listening mode waiting for transactions.")
.arg(Arg::with_name("port")

View file

@ -63,11 +63,13 @@ pub fn create() -> Box<View> {
let mut s: ViewRef<SelectView<&str>> = c.find_id(MAIN_MENU).unwrap();
s.select_down(1)(c);
Some(EventResult::Consumed(None));
}).on_pre_event('k', move |c| {
})
.on_pre_event('k', move |c| {
let mut s: ViewRef<SelectView<&str>> = c.find_id(MAIN_MENU).unwrap();
s.select_up(1)(c);
Some(EventResult::Consumed(None));
}).on_pre_event(Key::Tab, move |c| {
})
.on_pre_event(Key::Tab, move |c| {
let mut s: ViewRef<SelectView<&str>> = c.find_id(MAIN_MENU).unwrap();
if s.selected_id().unwrap() == s.len() - 1 {
s.set_selection(0)(c);

View file

@ -100,7 +100,9 @@ impl TableViewItem<StratumWorkerColumn> for WorkerStats {
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
enum DiffColumn {
BlockNumber,
PoWType,
Difficulty,
SecondaryScaling,
Time,
Duration,
}
@ -109,7 +111,9 @@ impl DiffColumn {
fn _as_str(&self) -> &str {
match *self {
DiffColumn::BlockNumber => "Block Number",
DiffColumn::PoWType => "Type",
DiffColumn::Difficulty => "Network Difficulty",
DiffColumn::SecondaryScaling => "Sec. Scaling",
DiffColumn::Time => "Block Time",
DiffColumn::Duration => "Duration",
}
@ -120,10 +124,16 @@ impl TableViewItem<DiffColumn> for DiffBlock {
fn to_column(&self, column: DiffColumn) -> String {
let naive_datetime = NaiveDateTime::from_timestamp(self.time as i64, 0);
let datetime: DateTime<Utc> = DateTime::from_utc(naive_datetime, Utc);
let pow_type = match self.is_secondary {
true => String::from("Secondary"),
false => String::from("Primary"),
};
match column {
DiffColumn::BlockNumber => self.block_number.to_string(),
DiffColumn::PoWType => pow_type,
DiffColumn::Difficulty => self.difficulty.to_string(),
DiffColumn::SecondaryScaling => self.secondary_scaling.to_string(),
DiffColumn::Time => format!("{}", datetime).to_string(),
DiffColumn::Duration => format!("{}s", self.duration).to_string(),
}
@ -135,7 +145,9 @@ impl TableViewItem<DiffColumn> for DiffBlock {
{
match column {
DiffColumn::BlockNumber => Ordering::Equal,
DiffColumn::PoWType => Ordering::Equal,
DiffColumn::Difficulty => Ordering::Equal,
DiffColumn::SecondaryScaling => Ordering::Equal,
DiffColumn::Time => Ordering::Equal,
DiffColumn::Duration => Ordering::Equal,
}
@ -205,7 +217,7 @@ impl TUIStatusListener for TUIMiningView {
.child(TextView::new(" ").with_id("stratum_network_hashrate")),
).child(
LinearLayout::new(Orientation::Horizontal)
.child(TextView::new(" ").with_id("stratum_cuckoo_size_status")),
.child(TextView::new(" ").with_id("stratum_edge_bits_status")),
);
let mining_device_view = LinearLayout::new(Orientation::Vertical)
@ -236,9 +248,12 @@ impl TUIStatusListener for TUIMiningView {
let diff_table_view = TableView::<DiffBlock, DiffColumn>::new()
.column(DiffColumn::BlockNumber, "Block Number", |c| {
c.width_percent(25)
}).column(DiffColumn::Difficulty, "Network Difficulty", |c| {
c.width_percent(25)
c.width_percent(15)
}).column(DiffColumn::PoWType, "Type", |c| c.width_percent(10))
.column(DiffColumn::Difficulty, "Network Difficulty", |c| {
c.width_percent(15)
}).column(DiffColumn::SecondaryScaling, "Sec. Scaling", |c| {
c.width_percent(10)
}).column(DiffColumn::Time, "Block Time", |c| c.width_percent(25))
.column(DiffColumn::Duration, "Duration", |c| c.width_percent(25));
@ -301,7 +316,7 @@ impl TUIStatusListener for TUIMiningView {
let stratum_block_height = format!("Solving Block Height: {}", stratum_stats.block_height);
let stratum_network_difficulty =
format!("Network Difficulty: {}", stratum_stats.network_difficulty);
let stratum_cuckoo_size = format!("Cuckoo Size: {}", stratum_stats.cuckoo_size);
let stratum_edge_bits = format!("Cuckoo Size: {}", stratum_stats.edge_bits);
c.call_on_id("stratum_config_status", |t: &mut TextView| {
t.set_content(stratum_enabled);
@ -321,8 +336,8 @@ impl TUIStatusListener for TUIMiningView {
c.call_on_id("stratum_network_hashrate", |t: &mut TextView| {
t.set_content(stratum_network_hashrate);
});
c.call_on_id("stratum_cuckoo_size_status", |t: &mut TextView| {
t.set_content(stratum_cuckoo_size);
c.call_on_id("stratum_edge_bits_status", |t: &mut TextView| {
t.set_content(stratum_edge_bits);
});
let _ = c.call_on_id(
TABLE_MINING_STATUS,

View file

@ -201,7 +201,7 @@ impl TUIStatusListener for TUIStatusView {
),
format!(
"Cuckoo {} - Network Difficulty {}",
stats.mining_stats.cuckoo_size,
stats.mining_stats.edge_bits,
stats.mining_stats.network_difficulty.to_string()
),
)

View file

@ -1,6 +1,6 @@
[package]
name = "grin_store"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false

View file

@ -59,6 +59,15 @@ pub fn to_key(prefix: u8, k: &mut Vec<u8>) -> Vec<u8> {
res
}
/// Build a db key from a prefix and a byte vector identifier and numeric identifier
pub fn to_key_u64(prefix: u8, k: &mut Vec<u8>, val: u64) -> Vec<u8> {
let mut res = vec![];
res.push(prefix);
res.push(SEP);
res.append(k);
res.write_u64::<BigEndian>(val).unwrap();
res
}
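// resulting key layout: [prefix, SEP, k bytes..., val as 8 big-endian bytes]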
/// Build a db key from a prefix and a numeric identifier.
pub fn u64_to_key<'a>(prefix: u8, val: u64) -> Vec<u8> {
let mut u64_vec = vec![];

View file

@ -1,6 +1,6 @@
[package]
name = "grin_util"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = ".."
publish = false
@ -21,6 +21,6 @@ zip = "0.4"
[dependencies.secp256k1zkp]
git = "https://github.com/mimblewimble/rust-secp256k1-zkp"
tag = "grin_integration_23a"
tag = "grin_integration_28"
#path = "../../rust-secp256k1-zkp"
features = ["bullet-proof-sizing"]

View file

@ -77,7 +77,7 @@ pub fn get_first_line(file_path: Option<String>) -> Option<String> {
Some(path) => match fs::File::open(path) {
Ok(file) => {
let buf_reader = io::BufReader::new(file);
let mut lines_iter = buf_reader.lines().map(|l| l.unwrap());;
let mut lines_iter = buf_reader.lines().map(|l| l.unwrap());
lines_iter.next()
}
Err(_) => None,

View file

@ -35,7 +35,5 @@ pub fn static_secp_instance() -> Arc<Mutex<secp::Secp256k1>> {
/// Convenient way to generate a commitment to zero.
pub fn commit_to_zero_value() -> secp::pedersen::Commitment {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_value(0).unwrap()
secp::pedersen::Commitment::from_vec(vec![0])
}

View file

@ -1,6 +1,6 @@
[package]
name = "grin_wallet"
version = "0.3.0"
version = "0.4.0"
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
workspace = '..'
publish = false

View file

@ -1,150 +0,0 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Temporary utility to migrate wallet data from file to a database
use keychain::{ExtKeychain, Identifier, Keychain};
use std::fs::File;
use std::io::Read;
use std::path::{Path, MAIN_SEPARATOR};
/// Migrate wallet data. Assumes current directory contains a set of wallet
/// files
use std::sync::Arc;
use error::{Error, ErrorKind};
use failure::ResultExt;
use serde_json;
use libwallet::types::WalletDetails;
use types::WalletSeed;
use libwallet::types::OutputData;
use store::{self, to_key};
const DETAIL_FILE: &'static str = "wallet.det";
const DAT_FILE: &'static str = "wallet.dat";
const SEED_FILE: &'static str = "wallet.seed";
const DB_DIR: &'static str = "db";
const OUTPUT_PREFIX: u8 = 'o' as u8;
const DERIV_PREFIX: u8 = 'd' as u8;
const CONFIRMED_HEIGHT_PREFIX: u8 = 'c' as u8;
// determine whether we have wallet files but no file wallet
pub fn needs_migrate(data_dir: &str) -> bool {
let db_path = Path::new(data_dir).join(DB_DIR);
let data_path = Path::new(data_dir).join(DAT_FILE);
if !db_path.exists() && data_path.exists() {
return true;
}
false
}
pub fn migrate(data_dir: &str, pwd: &str) -> Result<(), Error> {
let data_file_path = format!("{}{}{}", data_dir, MAIN_SEPARATOR, DAT_FILE);
let details_file_path = format!("{}{}{}", data_dir, MAIN_SEPARATOR, DETAIL_FILE);
let seed_file_path = format!("{}{}{}", data_dir, MAIN_SEPARATOR, SEED_FILE);
let outputs = read_outputs(&data_file_path)?;
let details = read_details(&details_file_path)?;
let mut file = File::open(seed_file_path).context(ErrorKind::IO)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer).context(ErrorKind::IO)?;
let wallet_seed = WalletSeed::from_hex(&buffer)?;
let keychain: ExtKeychain = wallet_seed.derive_keychain(pwd)?;
let root_key_id = keychain.root_key_id();
//open db
let db_path = Path::new(data_dir).join(DB_DIR);
let lmdb_env = Arc::new(store::new_env(db_path.to_str().unwrap().to_string()));
// open store
let store = store::Store::open(lmdb_env, DB_DIR);
let batch = store.batch().unwrap();
// write
for out in outputs {
save_output(&batch, out.clone())?;
}
save_details(&batch, root_key_id, details)?;
let res = batch.commit();
if let Err(e) = res {
panic!("Unable to commit db: {:?}", e);
}
Ok(())
}
/// save output in db
fn save_output(batch: &store::Batch, out: OutputData) -> Result<(), Error> {
let key = to_key(OUTPUT_PREFIX, &mut out.key_id.to_bytes().to_vec());
if let Err(e) = batch.put_ser(&key, &out) {
Err(ErrorKind::GenericError(format!(
"Error inserting output: {:?}",
e
)))?;
}
Ok(())
}
/// save details in db
fn save_details(
batch: &store::Batch,
root_key_id: Identifier,
d: WalletDetails,
) -> Result<(), Error> {
let deriv_key = to_key(DERIV_PREFIX, &mut root_key_id.to_bytes().to_vec());
let height_key = to_key(
CONFIRMED_HEIGHT_PREFIX,
&mut root_key_id.to_bytes().to_vec(),
);
if let Err(e) = batch.put_ser(&deriv_key, &d.last_child_index) {
Err(ErrorKind::GenericError(format!(
"Error saving last_child_index: {:?}",
e
)))?;
}
if let Err(e) = batch.put_ser(&height_key, &d.last_confirmed_height) {
Err(ErrorKind::GenericError(format!(
"Error saving last_confirmed_height: {:?}",
e
)))?;
}
Ok(())
}
/// Read output_data vec from disk.
fn read_outputs(data_file_path: &str) -> Result<Vec<OutputData>, Error> {
let data_file = File::open(data_file_path.clone())
.context(ErrorKind::FileWallet(&"Could not open wallet file"))?;
serde_json::from_reader(data_file)
.context(ErrorKind::Format)
.map_err(|e| e.into())
}
/// Read details file from disk
fn read_details(details_file_path: &str) -> Result<WalletDetails, Error> {
let details_file = File::open(details_file_path.clone())
.context(ErrorKind::FileWallet(&"Could not open wallet details file"))?;
serde_json::from_reader(details_file)
.context(ErrorKind::Format)
.map_err(|e| e.into())
}
#[ignore]
#[test]
fn migrate_db() {
let _ = migrate("test_wallet", "");
}

View file

@ -13,7 +13,7 @@
// limitations under the License.
use core::core::{self, amount_to_hr_string};
use libwallet::types::{OutputData, TxLogEntry, WalletInfo};
use libwallet::types::{AcctPathMapping, OutputData, TxLogEntry, WalletInfo};
use libwallet::Error;
use prettytable;
use std::io::prelude::Write;
@ -23,11 +23,15 @@ use util::secp::pedersen;
/// Display outputs in a pretty way
pub fn outputs(
account: &str,
cur_height: u64,
validated: bool,
outputs: Vec<(OutputData, pedersen::Commitment)>,
) -> Result<(), Error> {
let title = format!("Wallet Outputs - Block Height: {}", cur_height);
let title = format!(
"Wallet Outputs - Account '{}' - Block Height: {}",
account, cur_height
);
println!();
let mut t = term::stdout().unwrap();
t.fg(term::color::MAGENTA).unwrap();
@ -87,12 +91,16 @@ pub fn outputs(
/// Display transaction log in a pretty way
pub fn txs(
account: &str,
cur_height: u64,
validated: bool,
txs: Vec<TxLogEntry>,
include_status: bool,
) -> Result<(), Error> {
let title = format!("Transaction Log - Block Height: {}", cur_height);
let title = format!(
"Transaction Log - Account '{}' - Block Height: {}",
account, cur_height
);
println!();
let mut t = term::stdout().unwrap();
t.fg(term::color::MAGENTA).unwrap();
@ -181,10 +189,10 @@ pub fn txs(
Ok(())
}
/// Display summary info in a pretty way
pub fn info(wallet_info: &WalletInfo, validated: bool) {
pub fn info(account: &str, wallet_info: &WalletInfo, validated: bool) {
println!(
"\n____ Wallet Summary Info as of {} ____\n",
wallet_info.last_confirmed_height
"\n____ Wallet Summary Info - Account '{}' as of height {} ____\n",
account, wallet_info.last_confirmed_height
);
let mut table = table!(
[bFG->"Total", FG->amount_to_hr_string(wallet_info.total, false)],
@ -205,3 +213,22 @@ pub fn info(wallet_info: &WalletInfo, validated: bool) {
);
}
}
/// Display list of wallet accounts in a pretty way
pub fn accounts(acct_mappings: Vec<AcctPathMapping>) {
println!("\n____ Wallet Accounts ____\n",);
let mut table = table!();
table.set_titles(row![
mMG->"Name",
bMG->"Parent BIP-32 Derivation Path",
]);
for m in acct_mappings {
table.add_row(row![
bFC->m.label,
bGC->m.path.to_bip_32_string(),
]);
}
table.set_format(*prettytable::format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
table.printstd();
println!();
}

View file

@ -444,9 +444,9 @@ where
// write details file
let mut details_file =
File::create(details_file_path).context(ErrorKind::FileWallet(&"Could not create "))?;
let res_json = serde_json::to_string_pretty(&self.details).context(
ErrorKind::FileWallet("Error serializing wallet details file"),
)?;
let res_json = serde_json::to_string_pretty(&self.details).context(ErrorKind::FileWallet(
"Error serializing wallet details file",
))?;
details_file
.write_all(res_json.into_bytes().as_slice())
.context(ErrorKind::FileWallet(&"Error writing wallet details file"))

View file

@ -46,10 +46,8 @@ extern crate grin_store as store;
extern crate grin_util as util;
mod client;
mod db_migrate;
pub mod display;
mod error;
pub mod file_wallet;
pub mod libtx;
pub mod libwallet;
pub mod lmdb_wallet;
@ -57,13 +55,9 @@ mod types;
pub use client::{create_coinbase, HTTPWalletClient};
pub use error::{Error, ErrorKind};
pub use file_wallet::FileWallet;
pub use libwallet::controller;
pub use libwallet::types::{
BlockFees, CbData, WalletBackend, WalletClient, WalletInfo, WalletInst,
};
pub use lmdb_wallet::{wallet_db_exists, LMDBBackend};
pub use types::{WalletConfig, WalletSeed, SEED_FILE};
// temporary
pub use db_migrate::{migrate, needs_migrate};

View file

@ -33,6 +33,7 @@ pub fn calculate_partial_sig(
sec_key: &SecretKey,
sec_nonce: &SecretKey,
nonce_sum: &PublicKey,
pubkey_sum: Option<&PublicKey>,
fee: u64,
lock_height: u64,
) -> Result<Signature, Error> {
@ -45,7 +46,9 @@ pub fn calculate_partial_sig(
&msg,
sec_key,
Some(sec_nonce),
None,
Some(nonce_sum),
pubkey_sum,
Some(nonce_sum),
)?;
Ok(sig)
@ -57,11 +60,20 @@ pub fn verify_partial_sig(
sig: &Signature,
pub_nonce_sum: &PublicKey,
pubkey: &PublicKey,
pubkey_sum: Option<&PublicKey>,
fee: u64,
lock_height: u64,
) -> Result<(), Error> {
let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?;
if !verify_single(secp, sig, &msg, Some(&pub_nonce_sum), pubkey, true) {
if !verify_single(
secp,
sig,
&msg,
Some(&pub_nonce_sum),
pubkey,
pubkey_sum,
true,
) {
Err(ErrorKind::Signature(
"Signature validation error".to_string(),
))?
@ -75,12 +87,22 @@ pub fn sign_from_key_id<K>(
k: &K,
msg: &Message,
key_id: &Identifier,
blind_sum: Option<&PublicKey>,
) -> Result<Signature, Error>
where
K: Keychain,
{
let skey = k.derived_key(key_id)?;
let sig = aggsig::sign_single(secp, &msg, &skey, None, None, None)?;
let skey = k.derive_key(key_id)?;
let sig = aggsig::sign_single(
secp,
&msg,
&skey.secret_key,
None,
None,
None,
blind_sum,
None,
)?;
Ok(sig)
}
@ -91,10 +113,8 @@ pub fn verify_single_from_commit(
msg: &Message,
commit: &Commitment,
) -> Result<(), Error> {
// Extract the pubkey, unfortunately we need this hack for now, (we just hope
// one is valid)
let pubkey = commit.to_pubkey(secp)?;
if !verify_single(secp, sig, &msg, None, &pubkey, false) {
if !verify_single(secp, sig, &msg, None, &pubkey, Some(&pubkey), false) {
Err(ErrorKind::Signature(
"Signature validation error".to_string(),
))?
@ -107,11 +127,12 @@ pub fn verify_sig_build_msg(
secp: &Secp256k1,
sig: &Signature,
pubkey: &PublicKey,
pubkey_sum: Option<&PublicKey>,
fee: u64,
lock_height: u64,
) -> Result<(), Error> {
let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?;
if !verify_single(secp, sig, &msg, None, pubkey, true) {
if !verify_single(secp, sig, &msg, None, pubkey, pubkey_sum, true) {
Err(ErrorKind::Signature(
"Signature validation error".to_string(),
))?
@ -126,9 +147,12 @@ pub fn verify_single(
msg: &Message,
pubnonce: Option<&PublicKey>,
pubkey: &PublicKey,
pubkey_sum: Option<&PublicKey>,
is_partial: bool,
) -> bool {
aggsig::verify_single(secp, sig, msg, pubnonce, pubkey, is_partial)
aggsig::verify_single(
secp, sig, msg, pubnonce, pubkey, pubkey_sum, None, is_partial,
)
}
/// Adds signatures
@ -147,8 +171,10 @@ pub fn sign_with_blinding(
secp: &Secp256k1,
msg: &Message,
blinding: &BlindingFactor,
pubkey_sum: Option<&PublicKey>,
) -> Result<Signature, Error> {
let skey = &blinding.secret_key(&secp)?;
let sig = aggsig::sign_single(secp, &msg, skey, None, None, None)?;
//let pubkey_sum = PublicKey::from_secret_key(&secp, &skey)?;
let sig = aggsig::sign_single(secp, &msg, skey, None, None, None, pubkey_sum, None)?;
Ok(sig)
}

View file

@ -55,7 +55,7 @@ where
move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
let commit = build.keychain.commit(value, &key_id).unwrap();
let input = Input::new(features, commit);
(tx.with_input(input), kern, sum.sub_key_id(key_id.clone()))
(tx.with_input(input), kern, sum.sub_key_id(key_id.to_path()))
},
)
}
@ -106,7 +106,7 @@ where
proof: rproof,
}),
kern,
sum.add_key_id(key_id.clone()),
sum.add_key_id(key_id.to_path()),
)
},
)
@ -236,7 +236,9 @@ where
// Generate kernel excess and excess_sig using the split key k1.
let skey = k1.secret_key(&keychain.secp())?;
kern.excess = ctx.keychain.secp().commit(0, skey)?;
kern.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &k1).unwrap();
let pubkey = &kern.excess.to_pubkey(&keychain.secp())?;
kern.excess_sig =
aggsig::sign_with_blinding(&keychain.secp(), &msg, &k1, Some(&pubkey)).unwrap();
// Store the kernel offset (k2) on the tx.
// Commitments will sum correctly when accounting for the offset.
@ -257,7 +259,7 @@ mod test {
use super::*;
use core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use keychain::ExtKeychain;
use keychain::{ExtKeychain, ExtKeychainPath};
fn verifier_cache() -> Arc<RwLock<VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
@ -266,9 +268,9 @@ mod test {
#[test]
fn blind_simple_tx() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let vc = verifier_cache();
@ -288,9 +290,9 @@ mod test {
#[test]
fn blind_simple_tx_with_offset() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let vc = verifier_cache();
@ -310,8 +312,8 @@ mod test {
#[test]
fn blind_simpler_tx() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let vc = verifier_cache();

View file

@ -25,9 +25,9 @@ fn create_nonce<K>(k: &K, commit: &Commitment) -> Result<SecretKey, Error>
where
K: Keychain,
{
// hash(commit|masterkey) as nonce
let root_key = k.root_key_id();
let res = blake2::blake2b::blake2b(32, &commit.0, &root_key.to_bytes()[..]);
// hash(commit|wallet root secret key (m)) as nonce
let root_key = k.derive_key(&K::root_key_id())?.secret_key;
let res = blake2::blake2b::blake2b(32, &commit.0, &root_key.0[..]);
let res = res.as_bytes();
let mut ret_val = [0; 32];
for i in 0..res.len() {
@ -53,9 +53,11 @@ where
K: Keychain,
{
let commit = k.commit(amount, key_id)?;
let skey = k.derived_key(key_id)?;
let skey = k.derive_key(key_id)?;
let nonce = create_nonce(k, &commit)?;
Ok(k.secp().bullet_proof(amount, skey, nonce, extra_data))
let message = ProofMessage::from_bytes(&key_id.serialize_path());
Ok(k.secp()
.bullet_proof(amount, skey.secret_key, nonce, extra_data, Some(message)))
}
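
The proof now carries the output's serialized derivation path as its embedded bulletproof message. This is what lets the restore code later in this diff recover key identifiers directly from a rewound proof instead of grinding through candidate child indices. A sketch of the intended round trip, assuming the Identifier/ProofMessage calls used in this commit and the depth-3 output paths the restore code expects:

    // Proof side: embed the path in the message.
    let key_id = ExtKeychainPath::new(3, 2, 0, 5, 0).to_identifier();
    let message = ProofMessage::from_bytes(&key_id.serialize_path());
    // Rewind side: decode the bytes back into an identifier;
    // for depth-3 paths this should reproduce key_id.
    let recovered = Identifier::from_serialized_path(3u8, &message.as_bytes());
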
/// Verify a proof

View file

@ -51,6 +51,7 @@ where
let over_commit = secp.commit_value(reward(fees))?;
let out_commit = output.commitment();
let excess = secp.commit_sum(vec![out_commit], vec![over_commit])?;
let pubkey = excess.to_pubkey(&secp)?;
// NOTE: Remember we sign the fee *and* the lock_height.
// For a coinbase output the fee is 0 and the lock_height is
@ -59,7 +60,7 @@ where
// This output will not be spendable earlier than lock_height (and we sign this
// here).
let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?;
let sig = aggsig::sign_from_key_id(&secp, keychain, &msg, &key_id)?;
let sig = aggsig::sign_from_key_id(&secp, keychain, &msg, &key_id, Some(&pubkey))?;
let proof = TxKernel {
features: KernelFeatures::COINBASE_KERNEL,

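Pulling this hunk's pieces together: a coinbase kernel signs the message kernel_sig_msg(0, height) (the fee is zero and the lock height is the block height), and the signature now also commits to the kernel excess converted to a public key. A condensed sketch using only the calls shown above, with secp, keychain, output, fees, height and key_id assumed in scope:

    let over_commit = secp.commit_value(reward(fees))?;
    let excess = secp.commit_sum(vec![output.commitment()], vec![over_commit])?;
    let pubkey = excess.to_pubkey(&secp)?;
    let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?;
    let sig = aggsig::sign_from_key_id(&secp, keychain, &msg, &key_id, Some(&pubkey))?;
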
View file

@ -162,6 +162,7 @@ impl Slate {
sec_key,
sec_nonce,
&self.pub_nonce_sum(keychain.secp())?,
Some(&self.pub_blind_sum(keychain.secp())?),
self.fee,
self.lock_height,
)?;
@ -304,6 +305,7 @@ impl Slate {
p.part_sig.as_ref().unwrap(),
&self.pub_nonce_sum(secp)?,
&p.public_blind_excess,
Some(&self.pub_blind_sum(secp)?),
self.fee,
self.lock_height,
)?;
@ -348,6 +350,7 @@ impl Slate {
&keychain.secp(),
&final_sig,
&final_pubkey,
Some(&final_pubkey),
self.fee,
self.lock_height,
)?;
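
All three stages of the slate signing flow (producing a partial signature, verifying each participant's partial signature, verifying the final signature) now pass the sum of public blinding factors alongside the nonce sum, so every participant derives the same signature challenge. The enclosing function names fall outside these hunks; as an assumed reconstruction only (verify_partial_sig is a guessed name, not shown in the diff), the partial-signature check reads roughly:

    // Hypothetical: the called function is not shown in this hunk, only
    // its argument list is; the name below is an assumption.
    aggsig::verify_partial_sig(
        secp,
        p.part_sig.as_ref().unwrap(),
        &self.pub_nonce_sum(secp)?,
        &p.public_blind_excess,
        Some(&self.pub_blind_sum(secp)?),
        self.fee,
        self.lock_height,
    )?;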

View file

@ -27,11 +27,12 @@ use serde_json as json;
use core::core::hash::Hashed;
use core::core::Transaction;
use core::ser;
use keychain::Keychain;
use keychain::{Identifier, Keychain};
use libtx::slate::Slate;
use libwallet::internal::{selection, tx, updater};
use libwallet::internal::{keys, selection, tx, updater};
use libwallet::types::{
BlockFees, CbData, OutputData, TxLogEntry, TxWrapper, WalletBackend, WalletClient, WalletInfo,
AcctPathMapping, BlockFees, CbData, OutputData, TxLogEntry, TxWrapper, WalletBackend,
WalletClient, WalletInfo,
};
use libwallet::{Error, ErrorKind};
use util::secp::pedersen;
@ -78,6 +79,7 @@ where
) -> Result<(bool, Vec<(OutputData, pedersen::Commitment)>), Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
let mut validated = false;
if refresh_from_node {
@ -86,7 +88,7 @@ where
let res = Ok((
validated,
updater::retrieve_outputs(&mut **w, include_spent, tx_id)?,
updater::retrieve_outputs(&mut **w, include_spent, tx_id, &parent_key_id)?,
));
w.close()?;
@ -102,13 +104,17 @@ where
) -> Result<(bool, Vec<TxLogEntry>), Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
let mut validated = false;
if refresh_from_node {
validated = self.update_outputs(&mut w);
}
let res = Ok((validated, updater::retrieve_txs(&mut **w, tx_id)?));
let res = Ok((
validated,
updater::retrieve_txs(&mut **w, tx_id, &parent_key_id)?,
));
w.close()?;
res
@ -121,19 +127,32 @@ where
) -> Result<(bool, WalletInfo), Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
let mut validated = false;
if refresh_from_node {
validated = self.update_outputs(&mut w);
}
let wallet_info = updater::retrieve_info(&mut **w)?;
let wallet_info = updater::retrieve_info(&mut **w, &parent_key_id)?;
let res = Ok((validated, wallet_info));
w.close()?;
res
}
/// Return list of existing account -> Path mappings
pub fn accounts(&mut self) -> Result<Vec<AcctPathMapping>, Error> {
let mut w = self.wallet.lock().unwrap();
keys::accounts(&mut **w)
}
/// Create a new account path
pub fn new_account_path(&mut self, label: &str) -> Result<Identifier, Error> {
let mut w = self.wallet.lock().unwrap();
keys::new_acct_path(&mut **w, label)
}
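
Together these two methods expose multi-account support through the owner API: each account label maps to its own BIP32 parent path, and subsequent output derivations happen under the active parent. A usage sketch (the api handle name is illustrative):

    // `api` stands in for the owner API wrapper these methods live on.
    let path = api.new_account_path("savings")?; // next free m/k/0 parent path
    for acct in api.accounts()? {
        println!("{} -> {:?}", acct.label, acct.path);
    }
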
/// Issues a send transaction and sends to recipient
pub fn issue_send_tx(
&mut self,
@ -146,6 +165,7 @@ where
) -> Result<Slate, Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
let client;
let mut slate_out: Slate;
@ -159,6 +179,7 @@ where
max_outputs,
num_change_outputs,
selection_strategy_is_use_all,
&parent_key_id,
)?;
lock_fn_out = lock_fn;
@ -197,6 +218,7 @@ where
) -> Result<Slate, Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
let (slate, context, lock_fn) = tx::create_send_tx(
&mut **w,
@ -205,6 +227,7 @@ where
max_outputs,
num_change_outputs,
selection_strategy_is_use_all,
&parent_key_id,
)?;
if write_to_disk {
let mut pub_tx = File::create(dest)?;
@ -254,12 +277,13 @@ where
pub fn cancel_tx(&mut self, tx_id: u32) -> Result<(), Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
if !self.update_outputs(&mut w) {
return Err(ErrorKind::TransactionCancellationError(
"Can't contact running Grin node. Not Cancelling.",
))?;
}
tx::cancel_tx(&mut **w, tx_id)?;
tx::cancel_tx(&mut **w, &parent_key_id, tx_id)?;
w.close()?;
Ok(())
}
@ -273,7 +297,14 @@ where
) -> Result<(), Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let tx_burn = tx::issue_burn_tx(&mut **w, amount, minimum_confirmations, max_outputs)?;
let parent_key_id = w.parent_key_id();
let tx_burn = tx::issue_burn_tx(
&mut **w,
amount,
minimum_confirmations,
max_outputs,
&parent_key_id,
)?;
let tx_hex = util::to_hex(ser::ser_vec(&tx_burn).unwrap());
w.client().post_tx(&TxWrapper { tx_hex: tx_hex }, false)?;
w.close()?;
@ -312,7 +343,8 @@ where
let (confirmed, tx_hex) = {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let res = tx::retrieve_tx_hex(&mut **w, tx_id)?;
let parent_key_id = w.parent_key_id();
let res = tx::retrieve_tx_hex(&mut **w, &parent_key_id, tx_id)?;
w.close()?;
res
};
@ -345,8 +377,9 @@ where
let (confirmed, tx_hex) = {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let parent_key_id = w.parent_key_id();
client = w.client().clone();
let res = tx::retrieve_tx_hex(&mut **w, tx_id)?;
let res = tx::retrieve_tx_hex(&mut **w, &parent_key_id, tx_id)?;
w.close()?;
res
};
@ -400,19 +433,13 @@ where
w.client().get_chain_height()
};
match res {
Ok(height) => {
let mut w = self.wallet.lock().unwrap();
w.close()?;
Ok((height, true))
}
Ok(height) => Ok((height, true)),
Err(_) => {
let outputs = self.retrieve_outputs(true, false, None)?;
let height = match outputs.1.iter().map(|(out, _)| out.height).max() {
Some(height) => height,
None => 0,
};
let mut w = self.wallet.lock().unwrap();
w.close()?;
Ok((height, false))
}
}
@ -420,7 +447,8 @@ where
/// Attempt to update outputs in wallet, return whether it was successful
fn update_outputs(&self, w: &mut W) -> bool {
match updater::refresh_outputs(&mut *w) {
let parent_key_id = w.parent_key_id();
match updater::refresh_outputs(&mut *w, &parent_key_id) {
Ok(_) => true,
Err(_) => false,
}
@ -477,10 +505,11 @@ where
let mut wallet = self.wallet.lock().unwrap();
wallet.open_with_credentials()?;
let parent_key_id = wallet.parent_key_id();
// create an output using the amount in the slate
let (_, mut context, receiver_create_fn) =
selection::build_recipient_output_with_slate(&mut **wallet, &mut slate)?;
selection::build_recipient_output_with_slate(&mut **wallet, &mut slate, parent_key_id)?;
// fill public keys
let _ = slate.fill_round_1(
@ -506,7 +535,8 @@ where
pub fn receive_tx(&mut self, slate: &mut Slate) -> Result<(), Error> {
let mut w = self.wallet.lock().unwrap();
w.open_with_credentials()?;
let res = tx::receive_tx(&mut **w, slate);
let parent_key_id = w.parent_key_id();
let res = tx::receive_tx(&mut **w, slate, &parent_key_id);
w.close()?;
if let Err(e) = res {

View file

@ -96,11 +96,11 @@ where
let mut apis = ApiServer::new();
info!(LOGGER, "Starting HTTP Owner API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let api_thread =
apis.start(socket_addr, router, tls_config)
.context(ErrorKind::GenericError(
"API thread failed to start".to_string(),
))?;
let api_thread = apis
.start(socket_addr, router, tls_config)
.context(ErrorKind::GenericError(
"API thread failed to start".to_string(),
))?;
api_thread
.join()
.map_err(|e| ErrorKind::GenericError(format!("API thread panicked :{:?}", e)).into())
@ -128,11 +128,11 @@ where
let mut apis = ApiServer::new();
info!(LOGGER, "Starting HTTP Foreign API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let api_thread =
apis.start(socket_addr, router, tls_config)
.context(ErrorKind::GenericError(
"API thread failed to start".to_string(),
))?;
let api_thread = apis
.start(socket_addr, router, tls_config)
.context(ErrorKind::GenericError(
"API thread failed to start".to_string(),
))?;
api_thread
.join()
@ -339,20 +339,20 @@ where
Ok(id) => match api.cancel_tx(id) {
Ok(_) => ok(()),
Err(e) => {
error!(LOGGER, "finalize_tx: failed with error: {}", e);
error!(LOGGER, "cancel_tx: failed with error: {}", e);
err(e)
}
},
Err(e) => {
error!(LOGGER, "finalize_tx: could not parse id: {}", e);
error!(LOGGER, "cancel_tx: could not parse id: {}", e);
err(ErrorKind::TransactionCancellationError(
"finalize_tx: cannot cancel transaction. Could not parse id in request.",
"cancel_tx: cannot cancel transaction. Could not parse id in request.",
).into())
}
})
} else {
Box::new(err(ErrorKind::TransactionCancellationError(
"finalize_tx: Cannot cancel transaction. Missing id param in request.",
"cancel_tx: Cannot cancel transaction. Missing id param in request.",
).into()))
}
}

View file

@ -164,6 +164,18 @@ pub enum ErrorKind {
#[fail(display = "Transaction building not completed: {}", _0)]
TransactionBuildingNotCompleted(u32),
/// Invalid BIP-32 Depth
#[fail(display = "Invalid BIP32 Depth (must be 1 or greater)")]
InvalidBIP32Depth,
/// Attempt to add an account that exists
#[fail(display = "Account Label '{}' already exists", _0)]
AccountLabelAlreadyExists(String),
/// Reference unknown account label
#[fail(display = "Unknown Account Label '{}'", _0)]
UnknownAccountLabel(String),
/// Other
#[fail(display = "Generic error: {}", _0)]
GenericError(String),

View file

@ -13,21 +13,19 @@
// limitations under the License.
//! Wallet key management functions
use keychain::{Identifier, Keychain};
use libwallet::error::Error;
use libwallet::types::{WalletBackend, WalletClient};
use keychain::{ChildNumber, ExtKeychain, Identifier, Keychain};
use libwallet::error::{Error, ErrorKind};
use libwallet::types::{AcctPathMapping, WalletBackend, WalletClient};
/// Get next available key in the wallet
pub fn next_available_key<T: ?Sized, C, K>(wallet: &mut T) -> Result<(Identifier, u32), Error>
/// Get next available key in the wallet for a given parent
pub fn next_available_key<T: ?Sized, C, K>(wallet: &mut T) -> Result<Identifier, Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
K: Keychain,
{
let root_key_id = wallet.keychain().root_key_id();
let derivation = wallet.next_child(root_key_id.clone())?;
let key_id = wallet.keychain().derive_key_id(derivation)?;
Ok((key_id, derivation))
let child = wallet.next_child()?;
Ok(child)
}
/// Retrieve an existing key from a wallet
@ -45,3 +43,77 @@ where
let derivation = existing.n_child;
Ok((key_id, derivation))
}
/// Returns a list of account to BIP32 path mappings
pub fn accounts<T: ?Sized, C, K>(wallet: &mut T) -> Result<Vec<AcctPathMapping>, Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
K: Keychain,
{
Ok(wallet.acct_path_iter().collect())
}
/// Adds a new parent account path with a given label
pub fn new_acct_path<T: ?Sized, C, K>(wallet: &mut T, label: &str) -> Result<Identifier, Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
K: Keychain,
{
let label = label.to_owned();
if let Some(_) = wallet.acct_path_iter().find(|l| l.label == label) {
return Err(ErrorKind::AccountLabelAlreadyExists(label.clone()).into());
}
// We're always using paths at m/k/0 for parent keys for output derivations
// so find the highest of those, then increment (to conform with external/internal
// derivation chains in BIP32 spec)
let highest_entry = wallet.acct_path_iter().max_by(|a, b| {
<u32>::from(a.path.to_path().path[0]).cmp(&<u32>::from(b.path.to_path().path[0]))
});
let return_id = {
if let Some(e) = highest_entry {
let mut p = e.path.to_path();
p.path[0] = ChildNumber::from(<u32>::from(p.path[0]) + 1);
p.to_identifier()
} else {
ExtKeychain::derive_key_id(2, 0, 0, 0, 0)
}
};
let save_path = AcctPathMapping {
label: label.to_owned(),
path: return_id.clone(),
};
let mut batch = wallet.batch()?;
batch.save_acct_path(save_path)?;
batch.commit()?;
Ok(return_id)
}
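
A concrete trace of the path arithmetic above, using only the calls in this function: account parents live at depth 2, and a new account takes the highest existing first path component and increments it.

    // Default account parent (depth 2): m/0/0.
    let first = ExtKeychain::derive_key_id(2, 0, 0, 0, 0);
    // Next account parent: bump the first component, giving m/1/0.
    let mut p = first.to_path();
    p.path[0] = ChildNumber::from(<u32>::from(p.path[0]) + 1);
    let second = p.to_identifier();
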
/// Adds/sets a particular account path with a given label
pub fn set_acct_path<T: ?Sized, C, K>(
wallet: &mut T,
label: &str,
path: &Identifier,
) -> Result<(), Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
K: Keychain,
{
let label = label.to_owned();
let save_path = AcctPathMapping {
label: label.to_owned(),
path: path.clone(),
};
let mut batch = wallet.batch()?;
batch.save_acct_path(save_path)?;
batch.commit()?;
Ok(())
}

View file

@ -14,10 +14,12 @@
//! Functions to restore a wallet's outputs from just the master seed
use core::global;
use keychain::{Identifier, Keychain};
use keychain::{ExtKeychain, Identifier, Keychain};
use libtx::proof;
use libwallet::internal::keys;
use libwallet::types::*;
use libwallet::Error;
use std::collections::HashMap;
use util::secp::{key::SecretKey, pedersen};
use util::LOGGER;
@ -26,9 +28,9 @@ struct OutputResult {
///
pub commit: pedersen::Commitment,
///
pub key_id: Option<Identifier>,
pub key_id: Identifier,
///
pub n_child: Option<u32>,
pub n_child: u32,
///
pub value: u64,
///
@ -74,15 +76,19 @@ where
);
let lock_height = if *is_coinbase {
*height + global::coinbase_maturity(*height) // ignores on/off spendability around soft fork height
*height + global::coinbase_maturity()
} else {
*height
};
// TODO: Output paths are always going to be length 3 for now, but easy enough to grind
// through to find the right path if required later
let key_id = Identifier::from_serialized_path(3u8, &info.message.as_bytes());
wallet_outputs.push(OutputResult {
commit: *commit,
key_id: None,
n_child: None,
key_id: key_id.clone(),
n_child: key_id.to_path().last_path_index(),
value: info.value,
height: *height,
lock_height: lock_height,
@ -93,58 +99,6 @@ where
Ok(wallet_outputs)
}
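
This is the consumer of the proof-message change made in libtx above: each rewound proof now yields the output's serialized path, so the wallet decodes the identifier, child index and parent (account) path directly rather than searching for a matching blinding factor. A sketch using only calls visible in this file:

    // `info` is a rewound proof; output paths are fixed at depth 3 for now.
    let key_id = Identifier::from_serialized_path(3u8, &info.message.as_bytes());
    let n_child = key_id.to_path().last_path_index();
    let parent_key_id = key_id.parent_path(); // groups outputs by account
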
/// Attempts to populate a list of outputs with their
/// correct child indices based on the root key
fn populate_child_indices<T, C, K>(
wallet: &mut T,
outputs: &mut Vec<OutputResult>,
max_derivations: u32,
) -> Result<(), Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
K: Keychain,
{
info!(
LOGGER,
"Attempting to populate child indices and key identifiers for {} identified outputs",
outputs.len()
);
// keep track of child keys we've already found, and avoid some EC ops
let mut found_child_indices: Vec<u32> = vec![];
for output in outputs.iter_mut() {
let mut found = false;
for i in 1..max_derivations {
// seems to be a bug allowing multiple child keys at the moment
/*if found_child_indices.contains(&i){
continue;
}*/
let key_id = wallet.keychain().derive_key_id(i as u32)?;
let b = wallet.keychain().derived_key(&key_id)?;
if output.blinding != b {
continue;
}
found = true;
found_child_indices.push(i);
info!(
LOGGER,
"Key index {} found for output {:?}", i, output.commit
);
output.key_id = Some(key_id);
output.n_child = Some(i);
break;
}
if !found {
warn!(
LOGGER,
"Unable to find child key index for: {:?}", output.commit,
);
}
}
Ok(())
}
/// Restore a wallet
pub fn restore<T, C, K>(wallet: &mut T) -> Result<(), Error>
where
@ -152,8 +106,6 @@ where
C: WalletClient,
K: Keychain,
{
let max_derivations = 1_000_000;
// Don't proceed if wallet_data has anything in it
let is_empty = wallet.iter().next().is_none();
if !is_empty {
@ -195,29 +147,34 @@ where
result_vec.len(),
);
populate_child_indices(wallet, &mut result_vec, max_derivations)?;
let mut found_parents: HashMap<Identifier, u32> = HashMap::new();
// Now save what we have
let root_key_id = wallet.keychain().root_key_id();
let current_chain_height = wallet.client().get_chain_height()?;
let mut batch = wallet.batch()?;
let mut max_child_index = 0;
for output in result_vec {
if output.key_id.is_some() && output.n_child.is_some() {
{
let mut batch = wallet.batch()?;
for output in result_vec {
let parent_key_id = output.key_id.parent_path();
if !found_parents.contains_key(&parent_key_id) {
found_parents.insert(parent_key_id.clone(), 0);
}
let log_id = batch.next_tx_log_id(&parent_key_id)?;
let mut tx_log_entry = None;
// wallet update will create tx log entries when it finds confirmed coinbase
// transactions
if !output.is_coinbase {
let log_id = batch.next_tx_log_id(root_key_id.clone())?;
// also keep tx log updated so everything still tallies
let mut t = TxLogEntry::new(TxLogEntryType::TxReceived, log_id);
let mut t =
TxLogEntry::new(parent_key_id.clone(), TxLogEntryType::TxReceived, log_id);
t.amount_credited = output.value;
t.num_outputs = 1;
tx_log_entry = Some(log_id);
let _ = batch.save_tx_log_entry(t);
batch.save_tx_log_entry(t, &parent_key_id)?;
}
let _ = batch.save(OutputData {
root_key_id: root_key_id.clone(),
key_id: output.key_id.unwrap(),
n_child: output.n_child.unwrap(),
root_key_id: parent_key_id.clone(),
key_id: output.key_id,
n_child: output.n_child,
value: output.value,
status: OutputStatus::Unconfirmed,
height: output.height,
@ -226,28 +183,28 @@ where
tx_log_entry: tx_log_entry,
});
max_child_index = if max_child_index >= output.n_child.unwrap() {
max_child_index
} else {
output.n_child.unwrap()
let max_child_index = found_parents.get(&parent_key_id).unwrap().clone();
if output.n_child >= max_child_index {
found_parents.insert(parent_key_id.clone(), output.n_child);
};
} else {
warn!(
LOGGER,
"Commit {:?} identified but unable to recover key. Output has not been restored.",
output.commit
);
}
batch.commit()?;
}
// restore labels, account paths and child derivation indices
let label_base = "account";
let mut index = 1;
for (path, max_child_index) in found_parents.iter() {
if *path == ExtKeychain::derive_key_id(2, 0, 0, 0, 0) {
//default path already exists
continue;
}
let label = format!("{}_{}", label_base, index);
keys::set_acct_path(wallet, &label, path)?;
index = index + 1;
{
let mut batch = wallet.batch()?;
batch.save_child_index(path, max_child_index + 1)?;
}
}
if max_child_index > 0 {
let details = WalletDetails {
last_child_index: max_child_index + 1,
last_confirmed_height: current_chain_height,
};
batch.save_details(root_key_id.clone(), details)?;
}
batch.commit()?;
Ok(())
}
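
The closing loop above rebuilds account metadata from the parent paths seen during scanning: every non-default parent gets a generated label via keys::set_acct_path, and the stored child index is set one past the highest recovered index so future derivations cannot collide with restored outputs. A sketch mirroring the new control flow as shown in this hunk:

    // `found_parents` maps each parent path to the highest child index
    // recovered under it.
    for (path, max_child_index) in found_parents.iter() {
        if *path == ExtKeychain::derive_key_id(2, 0, 0, 0, 0) {
            continue; // default account path already exists
        }
        keys::set_acct_path(wallet, &format!("account_{}", index), path)?;
        index = index + 1;
        {
            // persist the next derivation index for this account
            let mut batch = wallet.batch()?;
            batch.save_child_index(path, max_child_index + 1)?;
        }
    }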

View file

@ -37,6 +37,7 @@ pub fn build_send_tx_slate<T: ?Sized, C, K>(
max_outputs: usize,
change_outputs: usize,
selection_strategy_is_use_all: bool,
parent_key_id: Identifier,
) -> Result<
(
Slate,
@ -59,6 +60,7 @@ where
max_outputs,
change_outputs,
selection_strategy_is_use_all,
&parent_key_id,
)?;
// Create public slate
@ -85,22 +87,19 @@ where
}
// Store change output(s)
for (_, derivation) in &change_amounts_derivations {
let change_id = keychain.derive_key_id(derivation.clone()).unwrap();
context.add_output(&change_id);
for (_, id) in &change_amounts_derivations {
context.add_output(&id);
}
let lock_inputs = context.get_inputs().clone();
let _lock_outputs = context.get_outputs().clone();
let root_key_id = keychain.root_key_id();
// Return a closure to acquire wallet lock and lock the coins being spent
// so we avoid accidental double spend attempt.
let update_sender_wallet_fn = move |wallet: &mut T, tx_hex: &str| {
let mut batch = wallet.batch()?;
let log_id = batch.next_tx_log_id(root_key_id.clone())?;
let mut t = TxLogEntry::new(TxLogEntryType::TxSent, log_id);
let log_id = batch.next_tx_log_id(&parent_key_id)?;
let mut t = TxLogEntry::new(parent_key_id.clone(), TxLogEntryType::TxSent, log_id);
t.tx_slate_id = Some(slate_id);
t.fee = Some(fee);
t.tx_hex = Some(tx_hex.to_owned());
@ -116,14 +115,13 @@ where
t.amount_debited = amount_debited;
// write the output representing our change
for (change_amount, change_derivation) in &change_amounts_derivations {
let change_id = keychain.derive_key_id(change_derivation.clone()).unwrap();
for (change_amount, id) in &change_amounts_derivations {
t.num_outputs += 1;
t.amount_credited += change_amount;
batch.save(OutputData {
root_key_id: root_key_id.clone(),
key_id: change_id.clone(),
n_child: change_derivation.clone(),
root_key_id: parent_key_id.clone(),
key_id: id.clone(),
n_child: id.to_path().last_path_index(),
value: change_amount.clone(),
status: OutputStatus::Unconfirmed,
height: current_height,
@ -132,7 +130,7 @@ where
tx_log_entry: Some(log_id),
})?;
}
batch.save_tx_log_entry(t)?;
batch.save_tx_log_entry(t, &parent_key_id)?;
batch.commit()?;
Ok(())
};
@ -147,6 +145,7 @@ where
pub fn build_recipient_output_with_slate<T: ?Sized, C, K>(
wallet: &mut T,
slate: &mut Slate,
parent_key_id: Identifier,
) -> Result<
(
Identifier,
@ -161,10 +160,9 @@ where
K: Keychain,
{
// Create a potential output for this transaction
let (key_id, derivation) = keys::next_available_key(wallet).unwrap();
let key_id = keys::next_available_key(wallet).unwrap();
let keychain = wallet.keychain().clone();
let root_key_id = keychain.root_key_id();
let key_id_inner = key_id.clone();
let amount = slate.amount;
let height = slate.height;
@ -187,15 +185,15 @@ where
// (up to the caller to decide when to do)
let wallet_add_fn = move |wallet: &mut T| {
let mut batch = wallet.batch()?;
let log_id = batch.next_tx_log_id(root_key_id.clone())?;
let mut t = TxLogEntry::new(TxLogEntryType::TxReceived, log_id);
let log_id = batch.next_tx_log_id(&parent_key_id)?;
let mut t = TxLogEntry::new(parent_key_id.clone(), TxLogEntryType::TxReceived, log_id);
t.tx_slate_id = Some(slate_id);
t.amount_credited = amount;
t.num_outputs = 1;
batch.save(OutputData {
root_key_id: root_key_id,
key_id: key_id_inner,
n_child: derivation,
root_key_id: parent_key_id.clone(),
key_id: key_id_inner.clone(),
n_child: key_id_inner.to_path().last_path_index(),
value: amount,
status: OutputStatus::Unconfirmed,
height: height,
@ -203,7 +201,7 @@ where
is_coinbase: false,
tx_log_entry: Some(log_id),
})?;
batch.save_tx_log_entry(t)?;
batch.save_tx_log_entry(t, &parent_key_id)?;
batch.commit()?;
Ok(())
};
@ -222,13 +220,14 @@ pub fn select_send_tx<T: ?Sized, C, K>(
max_outputs: usize,
change_outputs: usize,
selection_strategy_is_use_all: bool,
parent_key_id: &Identifier,
) -> Result<
(
Vec<Box<build::Append<K>>>,
Vec<OutputData>,
Vec<(u64, u32)>, // change amounts and derivations
u64, // amount
u64, // fee
Vec<(u64, Identifier)>, // change amounts and derivations
u64, // amount
u64, // fee
),
Error,
>
@ -245,6 +244,7 @@ where
minimum_confirmations,
max_outputs,
selection_strategy_is_use_all,
parent_key_id,
);
// sender is responsible for setting the fee on the partial tx
@ -300,6 +300,7 @@ where
minimum_confirmations,
max_outputs,
selection_strategy_is_use_all,
parent_key_id,
);
fee = tx_fee(coins.len(), num_outputs, 1, None);
total = coins.iter().map(|c| c.value).sum();
@ -325,7 +326,7 @@ pub fn inputs_and_change<T: ?Sized, C, K>(
amount: u64,
fee: u64,
num_change_outputs: usize,
) -> Result<(Vec<Box<build::Append<K>>>, Vec<(u64, u32)>), Error>
) -> Result<(Vec<Box<build::Append<K>>>, Vec<(u64, Identifier)>), Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
@ -345,11 +346,10 @@ where
// build inputs using the appropriate derived key_ids
for coin in coins {
let key_id = wallet.keychain().derive_key_id(coin.n_child)?;
if coin.is_coinbase {
parts.push(build::coinbase_input(coin.value, key_id));
parts.push(build::coinbase_input(coin.value, coin.key_id.clone()));
} else {
parts.push(build::input(coin.value, key_id));
parts.push(build::input(coin.value, coin.key_id.clone()));
}
}
@ -377,12 +377,9 @@ where
part_change
};
let keychain = wallet.keychain().clone();
let root_key_id = keychain.root_key_id();
let change_derivation = wallet.next_child(root_key_id.clone()).unwrap();
let change_key = keychain.derive_key_id(change_derivation).unwrap();
let change_key = wallet.next_child().unwrap();
change_amounts_derivations.push((change_amount, change_derivation));
change_amounts_derivations.push((change_amount, change_key.clone()));
parts.push(build::output(change_amount, change_key));
}
}
@ -404,6 +401,7 @@ pub fn select_coins<T: ?Sized, C, K>(
minimum_confirmations: u64,
max_outputs: usize,
select_all: bool,
parent_key_id: &Identifier,
) -> (usize, Vec<OutputData>)
// max_outputs_available, Outputs
where
@ -412,14 +410,12 @@ where
K: Keychain,
{
// first find all eligible outputs based on number of confirmations
let root_key_id = wallet.keychain().root_key_id();
let mut eligible = wallet
.iter()
.filter(|out| {
out.root_key_id == root_key_id
out.root_key_id == *parent_key_id
&& out.eligible_to_spend(current_height, minimum_confirmations)
})
.collect::<Vec<OutputData>>();
}).collect::<Vec<OutputData>>();
let max_available = eligible.len();
@ -482,8 +478,7 @@ fn select_from(amount: u64, select_all: bool, outputs: Vec<OutputData>) -> Optio
let res = selected_amount < amount;
selected_amount += out.value;
res
})
.cloned()
}).cloned()
.collect(),
);
}

View file

@ -28,7 +28,11 @@ use util::LOGGER;
/// Receive a transaction, modifying the slate accordingly (which can then be
/// sent back to sender for posting)
pub fn receive_tx<T: ?Sized, C, K>(wallet: &mut T, slate: &mut Slate) -> Result<(), Error>
pub fn receive_tx<T: ?Sized, C, K>(
wallet: &mut T,
slate: &mut Slate,
parent_key_id: &Identifier,
) -> Result<(), Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
@ -36,7 +40,7 @@ where
{
// create an output using the amount in the slate
let (_, mut context, receiver_create_fn) =
selection::build_recipient_output_with_slate(wallet, slate)?;
selection::build_recipient_output_with_slate(wallet, slate, parent_key_id.clone())?;
// fill public keys
let _ = slate.fill_round_1(
@ -64,6 +68,7 @@ pub fn create_send_tx<T: ?Sized, C, K>(
max_outputs: usize,
num_change_outputs: usize,
selection_strategy_is_use_all: bool,
parent_key_id: &Identifier,
) -> Result<
(
Slate,
@ -80,7 +85,7 @@ where
// Get lock height
let current_height = wallet.client().get_chain_height()?;
// ensure outputs we're selecting are up to date
updater::refresh_outputs(wallet)?;
updater::refresh_outputs(wallet, parent_key_id)?;
let lock_height = current_height;
@ -101,6 +106,7 @@ where
max_outputs,
num_change_outputs,
selection_strategy_is_use_all,
parent_key_id.clone(),
)?;
// Generate a kernel offset and subtract from our context's secret key. Store
@ -137,13 +143,17 @@ where
}
/// Rollback outputs associated with a transaction in the wallet
pub fn cancel_tx<T: ?Sized, C, K>(wallet: &mut T, tx_id: u32) -> Result<(), Error>
pub fn cancel_tx<T: ?Sized, C, K>(
wallet: &mut T,
parent_key_id: &Identifier,
tx_id: u32,
) -> Result<(), Error>
where
T: WalletBackend<C, K>,
C: WalletClient,
K: Keychain,
{
let tx_vec = updater::retrieve_txs(wallet, Some(tx_id))?;
let tx_vec = updater::retrieve_txs(wallet, Some(tx_id), &parent_key_id)?;
if tx_vec.len() != 1 {
return Err(ErrorKind::TransactionDoesntExist(tx_id))?;
}
@ -155,9 +165,9 @@ where
return Err(ErrorKind::TransactionNotCancellable(tx_id))?;
}
// get outputs associated with tx
let res = updater::retrieve_outputs(wallet, false, Some(tx_id))?;
let res = updater::retrieve_outputs(wallet, false, Some(tx_id), &parent_key_id)?;
let outputs = res.iter().map(|(out, _)| out).cloned().collect();
updater::cancel_tx_and_outputs(wallet, tx, outputs)?;
updater::cancel_tx_and_outputs(wallet, tx, outputs, parent_key_id)?;
Ok(())
}
@ -165,6 +175,7 @@ where
/// as well as whether it's been confirmed
pub fn retrieve_tx_hex<T: ?Sized, C, K>(
wallet: &mut T,
parent_key_id: &Identifier,
tx_id: u32,
) -> Result<(bool, Option<String>), Error>
where
@ -172,7 +183,7 @@ where
C: WalletClient,
K: Keychain,
{
let tx_vec = updater::retrieve_txs(wallet, Some(tx_id))?;
let tx_vec = updater::retrieve_txs(wallet, Some(tx_id), parent_key_id)?;
if tx_vec.len() != 1 {
return Err(ErrorKind::TransactionDoesntExist(tx_id))?;
}
@ -186,6 +197,7 @@ pub fn issue_burn_tx<T: ?Sized, C, K>(
amount: u64,
minimum_confirmations: u64,
max_outputs: usize,
parent_key_id: &Identifier,
) -> Result<Transaction, Error>
where
T: WalletBackend<C, K>,
@ -199,7 +211,7 @@ where
let current_height = wallet.client().get_chain_height()?;
let _ = updater::refresh_outputs(wallet);
let _ = updater::refresh_outputs(wallet, parent_key_id);
// select some spendable coins from the wallet
let (_, coins) = selection::select_coins(
@ -209,6 +221,7 @@ where
minimum_confirmations,
max_outputs,
false,
parent_key_id,
);
debug!(LOGGER, "selected some coins - {}", coins.len());
@ -232,7 +245,7 @@ where
#[cfg(test)]
mod test {
use keychain::{ExtKeychain, Keychain};
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use libtx::build;
#[test]
@ -240,7 +253,7 @@ mod test {
// based on the public key and amount begin spent
fn output_commitment_equals_input_commitment_on_spend() {
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let tx1 = build::transaction(vec![build::output(105, key_id1.clone())], &keychain).unwrap();
let tx2 = build::transaction(vec![build::input(105, key_id1.clone())], &keychain).unwrap();

Some files were not shown because too many files have changed in this diff.