From 5c6abd5e22e7ca0fdbe629cb09a646833b4ebeb2 Mon Sep 17 00:00:00 2001 From: Antioch Peverell Date: Mon, 14 Jan 2019 13:29:12 +0000 Subject: [PATCH 1/7] 1440 confs (#2376) --- doc/coinbase_maturity.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/coinbase_maturity.md b/doc/coinbase_maturity.md index b894a59d3..b53c8cb70 100644 --- a/doc/coinbase_maturity.md +++ b/doc/coinbase_maturity.md @@ -1,8 +1,8 @@ # The Coinbase Maturity Rule (aka Output Lock Heights) -Coinbase outputs (block rewards & fees) are "locked" and require 1,000 confirmations (blocks added to the current chain) before they mature sufficiently to be spendable. This is to reduce the risk of later txs being reversed if a chain reorganization occurs. +Coinbase outputs (block rewards & fees) are "locked" and require 1,440 confirmations (i.e. 24 hours worth of blocks added to the chain) before they mature sufficiently to be spendable. This is to reduce the risk of later txs being reversed if a chain reorganization occurs. -Bitcoin does something very similar, requiring 100 confirmations (Bitcoin blocks are every 10 minutes, Grin blocks every 60 seconds) before mining rewards can be spent. +Bitcoin does something very similar, requiring 100 confirmations (Bitcoin blocks are every 10 minutes, Grin blocks are every 60 seconds) before mining rewards can be spent. Grin enforces coinbase maturity in both the transaction pool and the block validation pipeline. A transaction containing an input spending a coinbase output cannot be added to the transaction pool until it has sufficiently matured (based on current chain height and the height of the block producing the coinbase output). Similarly a block is invalid if it contains an input spending a coinbase output before it has sufficiently matured, based on the height of the block containing the input and the height of the block that originally produced the coinbase output. 
From 9a497f143939ac475e6c45d478bb1d49a9c53268 Mon Sep 17 00:00:00 2001 From: Yeastplume Date: Mon, 14 Jan 2019 15:43:10 +0000 Subject: [PATCH 2/7] Wallet Performance enhancements - cache commit + transaction fetching logic (#2375) * add optional cached commit to output data, to speed up queries * rustfmt * clean up transaction retrieval logic * rustfmt * small logic error * rename cache method * rustfmt --- config/src/comments.rs | 9 +++ wallet/src/libwallet/api.rs | 4 +- wallet/src/libwallet/internal/restore.rs | 3 + wallet/src/libwallet/internal/selection.rs | 16 ++++- wallet/src/libwallet/internal/tx.rs | 6 +- wallet/src/libwallet/internal/updater.rs | 74 +++++++++++++--------- wallet/src/libwallet/types.rs | 9 +++ wallet/src/lmdb_wallet.rs | 15 +++++ wallet/src/types.rs | 4 ++ 9 files changed, 104 insertions(+), 36 deletions(-) diff --git a/config/src/comments.rs b/config/src/comments.rs index 5b339d591..67507d812 100644 --- a/config/src/comments.rs +++ b/config/src/comments.rs @@ -411,6 +411,15 @@ fn comments() -> HashMap { "data_file_dir".to_string(), " #where to find wallet files (seed, data, etc) +" + .to_string(), + ); + retval.insert( + "no_commit_cache".to_string(), + " +#If true, don't store calculated commits in the database +#better privacy, but at a performance cost of having to +#re-calculate commits every time they're used " .to_string(), ); diff --git a/wallet/src/libwallet/api.rs b/wallet/src/libwallet/api.rs index 91a7b79d5..9d247a148 100644 --- a/wallet/src/libwallet/api.rs +++ b/wallet/src/libwallet/api.rs @@ -431,7 +431,7 @@ where let res = Ok(( validated, - updater::retrieve_txs(&mut *w, tx_id, tx_slate_id, Some(&parent_key_id))?, + updater::retrieve_txs(&mut *w, tx_id, tx_slate_id, Some(&parent_key_id), false)?, )); w.close()?; @@ -867,7 +867,7 @@ where None => w.parent_key_id(), }; // Don't do this multiple times - let tx = updater::retrieve_txs(&mut *w, None, Some(slate.id), Some(&parent_key_id))?; + let tx = updater::retrieve_txs(&mut 
*w, None, Some(slate.id), Some(&parent_key_id), false)?; for t in &tx { if t.tx_type == TxLogEntryType::TxReceived { return Err(ErrorKind::TransactionAlreadyReceived(slate.id.to_string()).into()); diff --git a/wallet/src/libwallet/internal/restore.rs b/wallet/src/libwallet/internal/restore.rs index fe25a88ec..af3fe2caa 100644 --- a/wallet/src/libwallet/internal/restore.rs +++ b/wallet/src/libwallet/internal/restore.rs @@ -142,6 +142,7 @@ where C: NodeClient, K: Keychain, { + let commit = wallet.calc_commit_for_cache(output.value, &output.key_id)?; let mut batch = wallet.batch()?; let parent_key_id = output.key_id.parent_path(); @@ -167,6 +168,7 @@ where key_id: output.key_id, n_child: output.n_child, mmr_index: Some(output.mmr_index), + commit: commit, value: output.value, status: OutputStatus::Unspent, height: output.height, @@ -198,6 +200,7 @@ where output.tx_log_entry.clone(), None, Some(&parent_key_id), + false, )?; if entries.len() > 0 { let mut entry = entries[0].clone(); diff --git a/wallet/src/libwallet/internal/selection.rs b/wallet/src/libwallet/internal/selection.rs index a7740b26f..fcc6ed4ca 100644 --- a/wallet/src/libwallet/internal/selection.rs +++ b/wallet/src/libwallet/internal/selection.rs @@ -21,6 +21,8 @@ use crate::libwallet::error::{Error, ErrorKind}; use crate::libwallet::internal::keys; use crate::libwallet::types::*; +use std::collections::HashMap; + /// Initialize a transaction on the sender side, returns a corresponding /// libwallet transaction slate with the appropriate inputs selected, /// and saves the private wallet identifiers of our selected outputs @@ -85,9 +87,15 @@ where context.add_input(&input.key_id, &input.mmr_index); } - // Store change output(s) - for (_, id, mmr_index) in &change_amounts_derivations { + let mut commits: HashMap> = HashMap::new(); + + // Store change output(s) and cached commits + for (change_amount, id, mmr_index) in &change_amounts_derivations { context.add_output(&id, &mmr_index); + commits.insert( + 
id.clone(), + wallet.calc_commit_for_cache(*change_amount, &id)?, + ); } let lock_inputs = context.get_inputs().clone(); @@ -119,10 +127,12 @@ where for (change_amount, id, _) in &change_amounts_derivations { t.num_outputs += 1; t.amount_credited += change_amount; + let commit = commits.get(&id).unwrap().clone(); batch.save(OutputData { root_key_id: parent_key_id.clone(), key_id: id.clone(), n_child: id.to_path().last_path_index(), + commit: commit, mmr_index: None, value: change_amount.clone(), status: OutputStatus::Unconfirmed, @@ -189,6 +199,7 @@ where // Create closure that adds the output to recipient's wallet // (up to the caller to decide when to do) let wallet_add_fn = move |wallet: &mut T| { + let commit = wallet.calc_commit_for_cache(amount, &key_id_inner)?; let mut batch = wallet.batch()?; let log_id = batch.next_tx_log_id(&parent_key_id)?; let mut t = TxLogEntry::new(parent_key_id.clone(), TxLogEntryType::TxReceived, log_id); @@ -200,6 +211,7 @@ where key_id: key_id_inner.clone(), mmr_index: None, n_child: key_id_inner.to_path().last_path_index(), + commit: commit, value: amount, status: OutputStatus::Unconfirmed, height: height, diff --git a/wallet/src/libwallet/internal/tx.rs b/wallet/src/libwallet/internal/tx.rs index a18d6b0a1..c68dd3084 100644 --- a/wallet/src/libwallet/internal/tx.rs +++ b/wallet/src/libwallet/internal/tx.rs @@ -161,7 +161,7 @@ where } else if let Some(tx_slate_id) = tx_slate_id { tx_id_string = tx_slate_id.to_string(); } - let tx_vec = updater::retrieve_txs(wallet, tx_id, tx_slate_id, Some(&parent_key_id))?; + let tx_vec = updater::retrieve_txs(wallet, tx_id, tx_slate_id, Some(&parent_key_id), false)?; if tx_vec.len() != 1 { return Err(ErrorKind::TransactionDoesntExist(tx_id_string))?; } @@ -191,7 +191,7 @@ where C: NodeClient, K: Keychain, { - let tx_vec = updater::retrieve_txs(wallet, Some(tx_id), None, Some(parent_key_id))?; + let tx_vec = updater::retrieve_txs(wallet, Some(tx_id), None, Some(parent_key_id), false)?; if 
tx_vec.len() != 1 { return Err(ErrorKind::TransactionDoesntExist(tx_id.to_string()))?; } @@ -207,7 +207,7 @@ where K: Keychain, { // finalize command - let tx_vec = updater::retrieve_txs(wallet, None, Some(slate.id), None)?; + let tx_vec = updater::retrieve_txs(wallet, None, Some(slate.id), None, false)?; let mut tx = None; // don't want to assume this is the right tx, in case of self-sending for t in tx_vec { diff --git a/wallet/src/libwallet/internal/updater.rs b/wallet/src/libwallet/internal/updater.rs index 3b4fd185f..678fa9632 100644 --- a/wallet/src/libwallet/internal/updater.rs +++ b/wallet/src/libwallet/internal/updater.rs @@ -74,7 +74,10 @@ where let res = outputs .into_iter() .map(|out| { - let commit = keychain.commit(out.value, &out.key_id).unwrap(); + let commit = match out.commit.clone() { + Some(c) => pedersen::Commitment::from_vec(util::from_hex(c).unwrap()), + None => keychain.commit(out.value, &out.key_id).unwrap(), + }; (out, commit) }) .collect(); @@ -88,30 +91,39 @@ pub fn retrieve_txs( tx_id: Option, tx_slate_id: Option, parent_key_id: Option<&Identifier>, + outstanding_only: bool, ) -> Result, Error> where T: WalletBackend, C: NodeClient, K: Keychain, { - // just read the wallet here, no need for a write lock - let mut txs = if let Some(id) = tx_id { - wallet.tx_log_iter().filter(|t| t.id == id).collect() - } else if tx_slate_id.is_some() { - wallet - .tx_log_iter() - .filter(|t| t.tx_slate_id == tx_slate_id) - .collect() - } else { - wallet.tx_log_iter().collect::>() - }; - if let Some(k) = parent_key_id { - txs = txs - .iter() - .filter(|t| t.parent_key_id == *k) - .map(|t| t.clone()) - .collect(); - } + let mut txs: Vec = wallet + .tx_log_iter() + .filter(|tx_entry| { + let f_pk = match parent_key_id { + Some(k) => tx_entry.parent_key_id == *k, + None => true, + }; + let f_tx_id = match tx_id { + Some(i) => tx_entry.id == i, + None => true, + }; + let f_txs = match tx_slate_id { + Some(t) => tx_entry.tx_slate_id == Some(t), + None => true, 
+ }; + let f_outstanding = match outstanding_only { + true => { + !tx_entry.confirmed + && (tx_entry.tx_type == TxLogEntryType::TxReceived + || tx_entry.tx_type == TxLogEntryType::TxSent) + } + false => true, + }; + f_pk && f_tx_id && f_txs && f_outstanding + }) + .collect(); txs.sort_by_key(|tx| tx.creation_ts); Ok(txs) } @@ -153,20 +165,18 @@ where .filter(|x| x.root_key_id == *parent_key_id && x.status != OutputStatus::Spent) .collect(); + let tx_entries = retrieve_txs(wallet, None, None, Some(&parent_key_id), true)?; + // Only select outputs that are actually involved in an outstanding transaction let unspents: Vec = match update_all { false => unspents .into_iter() .filter(|x| match x.tx_log_entry.as_ref() { Some(t) => { - let entries = retrieve_txs(wallet, Some(*t), None, Some(&parent_key_id)); - match entries { - Err(_) => true, - Ok(e) => { - e.len() > 0 - && !e[0].confirmed && (e[0].tx_type == TxLogEntryType::TxReceived - || e[0].tx_type == TxLogEntryType::TxSent) - } + if let Some(_) = tx_entries.iter().find(|&te| te.id == *t) { + true + } else { + false } } None => true, @@ -176,7 +186,10 @@ where }; for out in unspents { - let commit = keychain.commit(out.value, &out.key_id)?; + let commit = match out.commit.clone() { + Some(c) => pedersen::Commitment::from_vec(util::from_hex(c).unwrap()), + None => keychain.commit(out.value, &out.key_id).unwrap(), + }; wallet_outputs.insert(commit, (out.key_id.clone(), out.mmr_index)); } Ok(wallet_outputs) @@ -466,13 +479,16 @@ where { // Now acquire the wallet lock and write the new output. 
+ let amount = reward(block_fees.fees); + let commit = wallet.calc_commit_for_cache(amount, &key_id)?; let mut batch = wallet.batch()?; batch.save(OutputData { root_key_id: parent_key_id, key_id: key_id.clone(), n_child: key_id.to_path().last_path_index(), mmr_index: None, - value: reward(block_fees.fees), + commit: commit, + value: amount, status: OutputStatus::Unconfirmed, height: height, lock_height: lock_height, diff --git a/wallet/src/libwallet/types.rs b/wallet/src/libwallet/types.rs index a1337e2c2..cf2a35278 100644 --- a/wallet/src/libwallet/types.rs +++ b/wallet/src/libwallet/types.rs @@ -67,6 +67,13 @@ where /// Return the client being used to communicate with the node fn w2n_client(&mut self) -> &mut C; + /// return the commit for caching if allowed, none otherwise + fn calc_commit_for_cache( + &mut self, + amount: u64, + id: &Identifier, + ) -> Result, Error>; + /// Set parent key id by stored account name fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<(), Error>; @@ -241,6 +248,8 @@ pub struct OutputData { pub key_id: Identifier, /// How many derivations down from the root key pub n_child: u32, + /// The actual commit, optionally stored + pub commit: Option, /// PMMR Index, used on restore in case of duplicate wallets using the same /// key_id (2 wallets using same seed, for instance pub mmr_index: Option, diff --git a/wallet/src/lmdb_wallet.rs b/wallet/src/lmdb_wallet.rs index 5f60cc909..b58368377 100644 --- a/wallet/src/lmdb_wallet.rs +++ b/wallet/src/lmdb_wallet.rs @@ -199,6 +199,21 @@ where &mut self.w2n_client } + /// return the version of the commit for caching + fn calc_commit_for_cache( + &mut self, + amount: u64, + id: &Identifier, + ) -> Result, Error> { + if self.config.no_commit_cache == Some(true) { + Ok(None) + } else { + Ok(Some(util::to_hex( + self.keychain().commit(amount, &id)?.0.to_vec(), + ))) + } + } + /// Set parent path by account name fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<(), Error> { let 
label = label.to_owned(); diff --git a/wallet/src/types.rs b/wallet/src/types.rs index 65ca4c7a2..f47add9d4 100644 --- a/wallet/src/types.rs +++ b/wallet/src/types.rs @@ -52,6 +52,9 @@ pub struct WalletConfig { pub owner_api_include_foreign: Option, // The directory in which wallet files are stored pub data_file_dir: String, + /// If Some(true), don't cache commits alongside output data + /// speed improvement, but your commits are in the database + pub no_commit_cache: Option, /// TLS certificate file pub tls_certificate_file: Option, /// TLS certificate private key file @@ -74,6 +77,7 @@ impl Default for WalletConfig { check_node_api_http_addr: "http://127.0.0.1:3413".to_string(), owner_api_include_foreign: Some(false), data_file_dir: ".".to_string(), + no_commit_cache: Some(false), tls_certificate_file: None, tls_certificate_key: None, dark_background_color_scheme: Some(true), From 5d257283bdc7858bdc39c167e739386de37916ae Mon Sep 17 00:00:00 2001 From: Yeastplume Date: Mon, 14 Jan 2019 16:49:34 +0000 Subject: [PATCH 3/7] Only create 1 received tx log entry on restore (#2378) * restore only creates 1 received tx * restore child index sanity output --- wallet/src/libwallet/internal/restore.rs | 125 ++++++++++++++++------- 1 file changed, 88 insertions(+), 37 deletions(-) diff --git a/wallet/src/libwallet/internal/restore.rs b/wallet/src/libwallet/internal/restore.rs index af3fe2caa..0dde49790 100644 --- a/wallet/src/libwallet/internal/restore.rs +++ b/wallet/src/libwallet/internal/restore.rs @@ -45,6 +45,18 @@ struct OutputResult { pub blinding: SecretKey, } +#[derive(Debug, Clone)] +/// Collect stats in case we want to just output a single tx log entry +/// for restored non-coinbase outputs +struct RestoredTxStats { + /// + pub log_id: u32, + /// + pub amount_credited: u64, + /// + pub num_outputs: usize, +} + fn identify_utxo_outputs( wallet: &mut T, outputs: Vec<(pedersen::Commitment, pedersen::RangeProof, bool, u64, u64)>, @@ -136,6 +148,7 @@ fn 
restore_missing_output( wallet: &mut T, output: OutputResult, found_parents: &mut HashMap, + tx_stats: &mut Option<&mut HashMap>, ) -> Result<(), Error> where T: WalletBackend, @@ -148,21 +161,48 @@ where let parent_key_id = output.key_id.parent_path(); if !found_parents.contains_key(&parent_key_id) { found_parents.insert(parent_key_id.clone(), 0); + if let Some(ref mut s) = tx_stats { + s.insert( + parent_key_id.clone(), + RestoredTxStats { + log_id: batch.next_tx_log_id(&parent_key_id)?, + amount_credited: 0, + num_outputs: 0, + }, + ); + } } - let log_id = batch.next_tx_log_id(&parent_key_id)?; - let entry_type = match output.is_coinbase { - true => TxLogEntryType::ConfirmedCoinbase, - false => TxLogEntryType::TxReceived, + let log_id = if tx_stats.is_none() || output.is_coinbase { + let log_id = batch.next_tx_log_id(&parent_key_id)?; + let entry_type = match output.is_coinbase { + true => TxLogEntryType::ConfirmedCoinbase, + false => TxLogEntryType::TxReceived, + }; + let mut t = TxLogEntry::new(parent_key_id.clone(), entry_type, log_id); + t.confirmed = true; + t.amount_credited = output.value; + t.num_outputs = 1; + t.update_confirmation_ts(); + batch.save_tx_log_entry(t, &parent_key_id)?; + log_id + } else { + if let Some(ref mut s) = tx_stats { + let ts = s.get(&parent_key_id).unwrap().clone(); + s.insert( + parent_key_id.clone(), + RestoredTxStats { + log_id: ts.log_id, + amount_credited: ts.amount_credited + output.value, + num_outputs: ts.num_outputs + 1, + }, + ); + ts.log_id + } else { + 0 + } }; - let mut t = TxLogEntry::new(parent_key_id.clone(), entry_type, log_id); - t.confirmed = true; - t.amount_credited = output.value; - t.num_outputs = 1; - t.update_confirmation_ts(); - batch.save_tx_log_entry(t, &parent_key_id)?; - let _ = batch.save(OutputData { root_key_id: parent_key_id.clone(), key_id: output.key_id, @@ -292,7 +332,7 @@ where Restoring.", m.value, m.key_id, m.commit, ); - restore_missing_output(wallet, m, &mut found_parents)?; + 
restore_missing_output(wallet, m, &mut found_parents, &mut None)?; } // Unlock locked outputs @@ -330,24 +370,19 @@ where // restore labels, account paths and child derivation indices let label_base = "account"; - let mut index = 1; + let mut acct_index = 1; for (path, max_child_index) in found_parents.iter() { - if *path == ExtKeychain::derive_key_id(2, 0, 0, 0, 0) { - //default path already exists - continue; - } - let res = wallet.acct_path_iter().find(|e| e.path == *path); - if let None = res { - let label = format!("{}_{}", label_base, index); + // default path already exists + if *path != ExtKeychain::derive_key_id(2, 0, 0, 0, 0) { + let label = format!("{}_{}", label_base, acct_index); keys::set_acct_path(wallet, &label, path)?; - index = index + 1; - } - { - let mut batch = wallet.batch()?; - batch.save_child_index(path, max_child_index + 1)?; + acct_index += 1; } + let mut batch = wallet.batch()?; + debug!("Next child for account {} is {}", path, max_child_index + 1); + batch.save_child_index(path, max_child_index + 1)?; + batch.commit()?; } - Ok(()) } @@ -375,27 +410,43 @@ where ); let mut found_parents: HashMap = HashMap::new(); - // Now save what we have + let mut restore_stats = HashMap::new(); + // Now save what we have for output in result_vec { - restore_missing_output(wallet, output, &mut found_parents)?; + restore_missing_output( + wallet, + output, + &mut found_parents, + &mut Some(&mut restore_stats), + )?; } // restore labels, account paths and child derivation indices let label_base = "account"; - let mut index = 1; + let mut acct_index = 1; for (path, max_child_index) in found_parents.iter() { - if *path == ExtKeychain::derive_key_id(2, 0, 0, 0, 0) { - //default path already exists - continue; + // default path already exists + if *path != ExtKeychain::derive_key_id(2, 0, 0, 0, 0) { + let label = format!("{}_{}", label_base, acct_index); + keys::set_acct_path(wallet, &label, path)?; + acct_index += 1; } - let label = format!("{}_{}", 
label_base, index); - keys::set_acct_path(wallet, &label, path)?; - index = index + 1; - { + // restore tx log entry for non-coinbase outputs + if let Some(s) = restore_stats.get(path) { let mut batch = wallet.batch()?; - batch.save_child_index(path, max_child_index + 1)?; + let mut t = TxLogEntry::new(path.clone(), TxLogEntryType::TxReceived, s.log_id); + t.confirmed = true; + t.amount_credited = s.amount_credited; + t.num_outputs = s.num_outputs; + t.update_confirmation_ts(); + batch.save_tx_log_entry(t, &path)?; + batch.commit()?; } + let mut batch = wallet.batch()?; + batch.save_child_index(path, max_child_index + 1)?; + debug!("Next child for account {} is {}", path, max_child_index + 1); + batch.commit()?; } Ok(()) } From ba994248ac7f4e4e9e24abb5c654051f81862779 Mon Sep 17 00:00:00 2001 From: Ignotus Peverell Date: Mon, 14 Jan 2019 09:37:34 -0800 Subject: [PATCH 4/7] Prevent reward overflow (#2372) * Prevent reward overflow Without this, a miner could cause a crash by including a kernel with an insane fee directly in the block. * Plus and minus, not so similar * Can't be trusted with more code today --- core/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 70d5d8c3b..a6d0ef841 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -43,7 +43,7 @@ pub const REWARD: u64 = BLOCK_TIME_SEC * GRIN_BASE; /// Actual block reward for a given total fee amount pub fn reward(fee: u64) -> u64 { - REWARD + fee + REWARD.saturating_add(fee) } /// Nominal height for standard time intervals, hour is 60 blocks From d7be94fafb99391fe5e86182624081664597f5c2 Mon Sep 17 00:00:00 2001 From: Ignotus Peverell Date: Mon, 14 Jan 2019 10:15:42 -0800 Subject: [PATCH 5/7] More range proof tests (#2373) Trying to cheat with range proofs to make sure it still fails at the block level. Defense in depth, belt and suspenders and all that good stuff. 
* rustfmt * Revert bad carry-over --- core/tests/block.rs | 108 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 105 insertions(+), 3 deletions(-) diff --git a/core/tests/block.rs b/core/tests/block.rs index e7c11173d..a74246d6a 100644 --- a/core/tests/block.rs +++ b/core/tests/block.rs @@ -18,7 +18,7 @@ use crate::core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT}; use crate::core::core::block::Error; use crate::core::core::hash::Hashed; use crate::core::core::id::ShortIdentifiable; -use crate::core::core::transaction; +use crate::core::core::transaction::{self, Transaction}; use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use crate::core::core::Committed; use crate::core::core::{Block, BlockHeader, CompactBlock, KernelFeatures, OutputFeatures}; @@ -109,12 +109,10 @@ fn block_with_cut_through() { // block should have been automatically compacted (including reward // output) and should still be valid - println!("3"); b.validate(&BlindingFactor::zero(), verifier_cache()) .unwrap(); assert_eq!(b.inputs().len(), 3); assert_eq!(b.outputs().len(), 3); - println!("4"); } #[test] @@ -415,3 +413,107 @@ fn serialize_deserialize_compact_block() { assert_eq!(cb1.header, cb2.header); assert_eq!(cb1.kern_ids(), cb2.kern_ids()); } + +// Duplicate a range proof from a valid output into another of the same amount +#[test] +fn same_amount_outputs_copy_range_proof() { + let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); + let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0); + let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0); + let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0); + + let tx = build::transaction( + vec![ + input(7, key_id1), + output(3, key_id2), + output(3, key_id3), + with_fee(1), + ], + &keychain, + ) + .unwrap(); + + // now we reconstruct the transaction, swapping the rangeproofs so they + // have the wrong privkey + let ins = tx.inputs(); + let mut outs = 
tx.outputs().clone(); + let kernels = tx.kernels(); + outs[0].proof = outs[1].proof; + + let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0); + let prev = BlockHeader::default(); + let b = new_block( + vec![&mut Transaction::new( + ins.clone(), + outs.clone(), + kernels.clone(), + )], + &keychain, + &prev, + &key_id, + ); + + // block should have been automatically compacted (including reward + // output) and should still be valid + match b.validate(&BlindingFactor::zero(), verifier_cache()) { + Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {} + _ => panic!("Bad range proof should be invalid"), + } +} + +// Swap a range proof with the right private key but wrong amount +#[test] +fn wrong_amount_range_proof() { + let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); + let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0); + let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0); + let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0); + + let tx1 = build::transaction( + vec![ + input(7, key_id1.clone()), + output(3, key_id2.clone()), + output(3, key_id3.clone()), + with_fee(1), + ], + &keychain, + ) + .unwrap(); + let tx2 = build::transaction( + vec![ + input(7, key_id1), + output(2, key_id2), + output(4, key_id3), + with_fee(1), + ], + &keychain, + ) + .unwrap(); + + // we take the range proofs from tx2 into tx1 and rebuild the transaction + let ins = tx1.inputs(); + let mut outs = tx1.outputs().clone(); + let kernels = tx1.kernels(); + outs[0].proof = tx2.outputs()[0].proof; + outs[1].proof = tx2.outputs()[1].proof; + + let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0); + let prev = BlockHeader::default(); + let b = new_block( + vec![&mut Transaction::new( + ins.clone(), + outs.clone(), + kernels.clone(), + )], + &keychain, + &prev, + &key_id, + ); + + // block should have been automatically compacted (including reward + // output) and should 
still be valid + match b.validate(&BlindingFactor::zero(), verifier_cache()) { + Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {} + _ => panic!("Bad range proof should be invalid"), + } +} From c7bb5eab27a1bd77d6ac619464e4a02efb257cf3 Mon Sep 17 00:00:00 2001 From: Gary Yu Date: Tue, 15 Jan 2019 03:44:35 +0800 Subject: [PATCH 6/7] fix: avoid duplicate connection (#2361) * fix: avoid duplicate connection * ignore the duplicate connecting to same peer within 10 seconds * refactor: use hashmap instead of vector for connecting history * remove the double checking for already connected peer on connect * add previous connecting time to the log * fix a mistake on shrink * move the now() into the inter loop for accurate time * change the minimum allowed inverval time from 10s to 30s --- servers/src/grin/seed.rs | 45 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/servers/src/grin/seed.rs b/servers/src/grin/seed.rs index 1da8184f0..a85662c95 100644 --- a/servers/src/grin/seed.rs +++ b/servers/src/grin/seed.rs @@ -17,9 +17,10 @@ //! configurable with either no peers, a user-defined list or a preset //! list of DNS records (the default). 
-use chrono::prelude::Utc; +use chrono::prelude::{DateTime, Utc}; use chrono::{Duration, MIN_DATE}; use rand::{thread_rng, Rng}; +use std::collections::HashMap; use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::{mpsc, Arc}; use std::{cmp, str, thread, time}; @@ -76,6 +77,8 @@ pub fn connect_and_monitor( let mut prev_ping = Utc::now(); let mut start_attempt = 0; + let mut connecting_history: HashMap> = HashMap::new(); + loop { if stop_state.lock().is_stopped() { break; @@ -98,7 +101,13 @@ pub fn connect_and_monitor( // with exponential backoff if Utc::now() - prev > Duration::seconds(cmp::min(20, 1 << start_attempt)) { // try to connect to any address sent to the channel - listen_for_addrs(peers.clone(), p2p_server.clone(), capabilities, &rx); + listen_for_addrs( + peers.clone(), + p2p_server.clone(), + capabilities, + &rx, + &mut connecting_history, + ); // monitor additional peers if we need to add more monitor_peers( @@ -297,6 +306,7 @@ fn listen_for_addrs( p2p: Arc, capab: p2p::Capabilities, rx: &mpsc::Receiver, + connecting_history: &mut HashMap>, ) { if peers.peer_count() >= p2p.config.peer_max_count() { // clean the rx messages to avoid accumulating @@ -304,7 +314,24 @@ fn listen_for_addrs( return; } + let connect_min_interval = 30; for addr in rx.try_iter() { + // ignore the duplicate connecting to same peer within 30 seconds + let now = Utc::now(); + if let Some(last_connect_time) = connecting_history.get(&addr) { + if *last_connect_time + Duration::seconds(connect_min_interval) > now { + debug!( + "peer_connect: ignore a duplicate request to {}. previous connecting time: {}", + addr, + last_connect_time.format("%H:%M:%S%.3f").to_string(), + ); + continue; + } else { + *connecting_history.get_mut(&addr).unwrap() = now; + } + } + connecting_history.insert(addr, now); + let peers_c = peers.clone(); let p2p_c = p2p.clone(); let _ = thread::Builder::new() @@ -319,6 +346,20 @@ fn listen_for_addrs( } }); } + + // shrink the connecting history. 
+ // put a threshold here to avoid frequent shrinking in every call + if connecting_history.len() > 100 { + let now = Utc::now(); + let old: Vec<_> = connecting_history + .iter() + .filter(|&(_, t)| *t + Duration::seconds(connect_min_interval) < now) + .map(|(s, _)| s.clone()) + .collect(); + for addr in old { + connecting_history.remove(&addr); + } + } } pub fn dns_seeds() -> Box Vec + Send> { From 34bd35e8fca1ded88dad30169b816fcb4322cfa2 Mon Sep 17 00:00:00 2001 From: Gavin McDermott Date: Mon, 14 Jan 2019 11:53:09 -0800 Subject: [PATCH 7/7] fix: foreign_api handles pre-flight OPTIONS call on POST requests (#2365) --- wallet/src/controller.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wallet/src/controller.rs b/wallet/src/controller.rs index 1277e7450..99e358370 100644 --- a/wallet/src/controller.rs +++ b/wallet/src/controller.rs @@ -627,6 +627,10 @@ where ok(create_error_response(e)) })) } + + fn options(&self, _req: Request) -> ResponseFuture { + Box::new(ok(create_ok_response("{}"))) + } } // Utility to serialize a struct into JSON and produce a sensible Response