Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-21 11:31:08 +03:00)
Merge remote-tracking branch 'upstream/master'
commit 49612543d4
14 changed files with 347 additions and 81 deletions

@@ -411,6 +411,15 @@ fn comments() -> HashMap<String, String> {
 		"data_file_dir".to_string(),
 		"
 #where to find wallet files (seed, data, etc)
+"
+		.to_string(),
+	);
+	retval.insert(
+		"no_commit_cache".to_string(),
+		"
+#If true, don't store calculated commits in the database
+#better privacy, but at a performance cost of having to
+#re-calculate commits every time they're used
 "
 		.to_string(),
 	);

@@ -43,7 +43,7 @@ pub const REWARD: u64 = BLOCK_TIME_SEC * GRIN_BASE;
 
 /// Actual block reward for a given total fee amount
 pub fn reward(fee: u64) -> u64 {
-	REWARD + fee
+	REWARD.saturating_add(fee)
 }
 
 /// Nominal height for standard time intervals, hour is 60 blocks
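
The switch to `saturating_add` hardens the consensus reward calculation against integer overflow from an untrusted fee value. A minimal standalone sketch of the behaviour (the constant's value is assumed here from `BLOCK_TIME_SEC * GRIN_BASE`; this is not the crate's actual module):

	// assumption for illustration: 60-second blocks, 1 grin == 10^9 nanogrin
	const REWARD: u64 = 60 * 1_000_000_000;

	fn reward(fee: u64) -> u64 {
		REWARD.saturating_add(fee)
	}

	fn main() {
		assert_eq!(reward(10), 60_000_000_010);
		// a hostile fee near u64::MAX now clamps instead of panicking
		// (debug builds) or silently wrapping (release builds)
		assert_eq!(reward(u64::MAX), u64::MAX);
	}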

@@ -18,7 +18,7 @@ use crate::core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
 use crate::core::core::block::Error;
 use crate::core::core::hash::Hashed;
 use crate::core::core::id::ShortIdentifiable;
-use crate::core::core::transaction;
+use crate::core::core::transaction::{self, Transaction};
 use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
 use crate::core::core::Committed;
 use crate::core::core::{Block, BlockHeader, CompactBlock, KernelFeatures, OutputFeatures};

@@ -109,12 +109,10 @@ fn block_with_cut_through() {
 
 	// block should have been automatically compacted (including reward
 	// output) and should still be valid
-	println!("3");
 	b.validate(&BlindingFactor::zero(), verifier_cache())
 		.unwrap();
 	assert_eq!(b.inputs().len(), 3);
 	assert_eq!(b.outputs().len(), 3);
-	println!("4");
 }
 
 #[test]

@@ -415,3 +413,107 @@ fn serialize_deserialize_compact_block() {
 	assert_eq!(cb1.header, cb2.header);
 	assert_eq!(cb1.kern_ids(), cb2.kern_ids());
 }
+
+// Duplicate a range proof from a valid output into another of the same amount
+#[test]
+fn same_amount_outputs_copy_range_proof() {
+	let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
+	let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
+	let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
+	let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
+
+	let tx = build::transaction(
+		vec![
+			input(7, key_id1),
+			output(3, key_id2),
+			output(3, key_id3),
+			with_fee(1),
+		],
+		&keychain,
+	)
+	.unwrap();
+
+	// now we reconstruct the transaction, swapping the rangeproofs so they
+	// have the wrong privkey
+	let ins = tx.inputs();
+	let mut outs = tx.outputs().clone();
+	let kernels = tx.kernels();
+	outs[0].proof = outs[1].proof;
+
+	let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
+	let prev = BlockHeader::default();
+	let b = new_block(
+		vec![&mut Transaction::new(
+			ins.clone(),
+			outs.clone(),
+			kernels.clone(),
+		)],
+		&keychain,
+		&prev,
+		&key_id,
+	);
+
+	// the swapped proof no longer matches its output commitment,
+	// so block validation must fail with InvalidRangeProof
+	match b.validate(&BlindingFactor::zero(), verifier_cache()) {
+		Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {}
+		_ => panic!("Bad range proof should be invalid"),
+	}
+}
+
+// Swap a range proof with the right private key but wrong amount
+#[test]
+fn wrong_amount_range_proof() {
+	let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
+	let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
+	let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
+	let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
+
+	let tx1 = build::transaction(
+		vec![
+			input(7, key_id1.clone()),
+			output(3, key_id2.clone()),
+			output(3, key_id3.clone()),
+			with_fee(1),
+		],
+		&keychain,
+	)
+	.unwrap();
+	let tx2 = build::transaction(
+		vec![
+			input(7, key_id1),
+			output(2, key_id2),
+			output(4, key_id3),
+			with_fee(1),
+		],
+		&keychain,
+	)
+	.unwrap();
+
+	// we take the range proofs from tx2 into tx1 and rebuild the transaction
+	let ins = tx1.inputs();
+	let mut outs = tx1.outputs().clone();
+	let kernels = tx1.kernels();
+	outs[0].proof = tx2.outputs()[0].proof;
+	outs[1].proof = tx2.outputs()[1].proof;
+
+	let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
+	let prev = BlockHeader::default();
+	let b = new_block(
+		vec![&mut Transaction::new(
+			ins.clone(),
+			outs.clone(),
+			kernels.clone(),
+		)],
+		&keychain,
+		&prev,
+		&key_id,
+	);
+
+	// the proofs commit to different amounts than the outputs they sit on,
+	// so block validation must fail with InvalidRangeProof
+	match b.validate(&BlindingFactor::zero(), verifier_cache()) {
+		Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {}
+		_ => panic!("Bad range proof should be invalid"),
+	}
+}

@@ -1,8 +1,8 @@
 # The Coinbase Maturity Rule (aka Output Lock Heights)
 
-Coinbase outputs (block rewards & fees) are "locked" and require 1,000 confirmations (blocks added to the current chain) before they mature sufficiently to be spendable. This is to reduce the risk of later txs being reversed if a chain reorganization occurs.
+Coinbase outputs (block rewards & fees) are "locked" and require 1,440 confirmations (i.e. 24 hours' worth of blocks added to the chain) before they mature sufficiently to be spendable. This is to reduce the risk of later txs being reversed if a chain reorganization occurs.
 
-Bitcoin does something very similar, requiring 100 confirmations (Bitcoin blocks are every 10 minutes, Grin blocks every 60 seconds) before mining rewards can be spent.
+Bitcoin does something very similar, requiring 100 confirmations (Bitcoin blocks are every 10 minutes, Grin blocks are every 60 seconds) before mining rewards can be spent.
 
 Grin enforces coinbase maturity in both the transaction pool and the block validation pipeline. A transaction containing an input spending a coinbase output cannot be added to the transaction pool until it has sufficiently matured (based on current chain height and the height of the block producing the coinbase output).
 Similarly a block is invalid if it contains an input spending a coinbase output before it has sufficiently matured, based on the height of the block containing the input and the height of the block that originally produced the coinbase output.
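
As a rough illustration of the rule the updated document describes, a height-based maturity check could look like the following sketch (illustrative names only, not Grin's actual API):

	// 24 hours of 60-second blocks, per the updated document
	const COINBASE_MATURITY: u64 = 1_440;

	/// A coinbase output created at `output_height` is spendable in a block
	/// at `spend_height` only once it has matured.
	fn is_mature(output_height: u64, spend_height: u64) -> bool {
		spend_height >= output_height + COINBASE_MATURITY
	}

	fn main() {
		assert!(!is_mature(100, 1_000)); // only 900 blocks later: still locked
		assert!(is_mature(100, 1_540)); // 1,440 blocks later: spendable
	}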

@@ -17,9 +17,10 @@
 //! configurable with either no peers, a user-defined list or a preset
 //! list of DNS records (the default).
 
-use chrono::prelude::Utc;
+use chrono::prelude::{DateTime, Utc};
 use chrono::{Duration, MIN_DATE};
 use rand::{thread_rng, Rng};
+use std::collections::HashMap;
 use std::net::{SocketAddr, ToSocketAddrs};
 use std::sync::{mpsc, Arc};
 use std::{cmp, str, thread, time};

@@ -76,6 +77,8 @@ pub fn connect_and_monitor(
 	let mut prev_ping = Utc::now();
 	let mut start_attempt = 0;
 
+	let mut connecting_history: HashMap<SocketAddr, DateTime<Utc>> = HashMap::new();
+
 	loop {
 		if stop_state.lock().is_stopped() {
 			break;

@@ -98,7 +101,13 @@ pub fn connect_and_monitor(
 			// with exponential backoff
 			if Utc::now() - prev > Duration::seconds(cmp::min(20, 1 << start_attempt)) {
 				// try to connect to any address sent to the channel
-				listen_for_addrs(peers.clone(), p2p_server.clone(), capabilities, &rx);
+				listen_for_addrs(
+					peers.clone(),
+					p2p_server.clone(),
+					capabilities,
+					&rx,
+					&mut connecting_history,
+				);
 
 				// monitor additional peers if we need to add more
 				monitor_peers(

@@ -297,6 +306,7 @@ fn listen_for_addrs(
 	p2p: Arc<p2p::Server>,
 	capab: p2p::Capabilities,
 	rx: &mpsc::Receiver<SocketAddr>,
+	connecting_history: &mut HashMap<SocketAddr, DateTime<Utc>>,
 ) {
 	if peers.peer_count() >= p2p.config.peer_max_count() {
 		// clean the rx messages to avoid accumulating

@@ -304,7 +314,24 @@ fn listen_for_addrs(
 		return;
 	}
 
+	let connect_min_interval = 30;
 	for addr in rx.try_iter() {
+		// ignore the duplicate connecting to same peer within 30 seconds
+		let now = Utc::now();
+		if let Some(last_connect_time) = connecting_history.get(&addr) {
+			if *last_connect_time + Duration::seconds(connect_min_interval) > now {
+				debug!(
+					"peer_connect: ignore a duplicate request to {}. previous connecting time: {}",
+					addr,
+					last_connect_time.format("%H:%M:%S%.3f").to_string(),
+				);
+				continue;
+			} else {
+				*connecting_history.get_mut(&addr).unwrap() = now;
+			}
+		}
+		connecting_history.insert(addr, now);
+
 		let peers_c = peers.clone();
 		let p2p_c = p2p.clone();
 		let _ = thread::Builder::new()
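
The `connecting_history` map throttles duplicate connection attempts to the same peer address. A standalone sketch of the same idea, using `std::time` instead of chrono (simplified; not the patch's exact code):

	use std::collections::HashMap;
	use std::net::SocketAddr;
	use std::time::{Duration, Instant};

	/// Returns true if a connection attempt to `addr` should proceed,
	/// recording it in the history; repeat attempts within `min_interval`
	/// are suppressed.
	fn should_connect(
		history: &mut HashMap<SocketAddr, Instant>,
		addr: SocketAddr,
		min_interval: Duration,
	) -> bool {
		let now = Instant::now();
		match history.get(&addr) {
			Some(last) if now.duration_since(*last) < min_interval => false,
			_ => {
				history.insert(addr, now);
				true
			}
		}
	}

	fn main() {
		let mut history = HashMap::new();
		let addr: SocketAddr = "127.0.0.1:3414".parse().unwrap();
		assert!(should_connect(&mut history, addr, Duration::from_secs(30)));
		// an immediate retry to the same peer is ignored
		assert!(!should_connect(&mut history, addr, Duration::from_secs(30)));
	}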

@@ -319,6 +346,20 @@
 				}
 			});
 	}
+
+	// shrink the connecting history.
+	// put a threshold here to avoid frequent shrinking in every call
+	if connecting_history.len() > 100 {
+		let now = Utc::now();
+		let old: Vec<_> = connecting_history
+			.iter()
+			.filter(|&(_, t)| *t + Duration::seconds(connect_min_interval) < now)
+			.map(|(s, _)| s.clone())
+			.collect();
+		for addr in old {
+			connecting_history.remove(&addr);
+		}
+	}
 }
 
 pub fn dns_seeds() -> Box<dyn Fn() -> Vec<SocketAddr> + Send> {

@@ -627,6 +627,10 @@ where
 			ok(create_error_response(e))
 		}))
 	}
+
+	fn options(&self, _req: Request<Body>) -> ResponseFuture {
+		Box::new(ok(create_ok_response("{}")))
+	}
 }
 
 // Utility to serialize a struct into JSON and produce a sensible Response

@@ -431,7 +431,7 @@ where
 
 	let res = Ok((
 		validated,
-		updater::retrieve_txs(&mut *w, tx_id, tx_slate_id, Some(&parent_key_id))?,
+		updater::retrieve_txs(&mut *w, tx_id, tx_slate_id, Some(&parent_key_id), false)?,
 	));
 
 	w.close()?;

@@ -867,7 +867,7 @@ where
 		None => w.parent_key_id(),
 	};
 	// Don't do this multiple times
-	let tx = updater::retrieve_txs(&mut *w, None, Some(slate.id), Some(&parent_key_id))?;
+	let tx = updater::retrieve_txs(&mut *w, None, Some(slate.id), Some(&parent_key_id), false)?;
 	for t in &tx {
 		if t.tx_type == TxLogEntryType::TxReceived {
 			return Err(ErrorKind::TransactionAlreadyReceived(slate.id.to_string()).into());

@@ -45,6 +45,18 @@ struct OutputResult {
 	pub blinding: SecretKey,
 }
 
+#[derive(Debug, Clone)]
+/// Collect stats in case we want to just output a single tx log entry
+/// for restored non-coinbase outputs
+struct RestoredTxStats {
+	///
+	pub log_id: u32,
+	///
+	pub amount_credited: u64,
+	///
+	pub num_outputs: usize,
+}
+
 fn identify_utxo_outputs<T, C, K>(
 	wallet: &mut T,
 	outputs: Vec<(pedersen::Commitment, pedersen::RangeProof, bool, u64, u64)>,

@@ -136,37 +148,67 @@ fn restore_missing_output<T, C, K>(
 	wallet: &mut T,
 	output: OutputResult,
 	found_parents: &mut HashMap<Identifier, u32>,
+	tx_stats: &mut Option<&mut HashMap<Identifier, RestoredTxStats>>,
 ) -> Result<(), Error>
 where
 	T: WalletBackend<C, K>,
 	C: NodeClient,
 	K: Keychain,
 {
+	let commit = wallet.calc_commit_for_cache(output.value, &output.key_id)?;
 	let mut batch = wallet.batch()?;
 
 	let parent_key_id = output.key_id.parent_path();
 	if !found_parents.contains_key(&parent_key_id) {
 		found_parents.insert(parent_key_id.clone(), 0);
+		if let Some(ref mut s) = tx_stats {
+			s.insert(
+				parent_key_id.clone(),
+				RestoredTxStats {
+					log_id: batch.next_tx_log_id(&parent_key_id)?,
+					amount_credited: 0,
+					num_outputs: 0,
+				},
+			);
+		}
 	}
 
+	let log_id = if tx_stats.is_none() || output.is_coinbase {
 	let log_id = batch.next_tx_log_id(&parent_key_id)?;
 	let entry_type = match output.is_coinbase {
 		true => TxLogEntryType::ConfirmedCoinbase,
 		false => TxLogEntryType::TxReceived,
 	};
 
 	let mut t = TxLogEntry::new(parent_key_id.clone(), entry_type, log_id);
 	t.confirmed = true;
 	t.amount_credited = output.value;
 	t.num_outputs = 1;
 	t.update_confirmation_ts();
 	batch.save_tx_log_entry(t, &parent_key_id)?;
+		log_id
+	} else {
+		if let Some(ref mut s) = tx_stats {
+			let ts = s.get(&parent_key_id).unwrap().clone();
+			s.insert(
+				parent_key_id.clone(),
+				RestoredTxStats {
+					log_id: ts.log_id,
+					amount_credited: ts.amount_credited + output.value,
+					num_outputs: ts.num_outputs + 1,
+				},
+			);
+			ts.log_id
+		} else {
+			0
+		}
+	};
 
 	let _ = batch.save(OutputData {
 		root_key_id: parent_key_id.clone(),
 		key_id: output.key_id,
 		n_child: output.n_child,
 		mmr_index: Some(output.mmr_index),
+		commit: commit,
 		value: output.value,
 		status: OutputStatus::Unspent,
 		height: output.height,
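
`RestoredTxStats` lets the restore path fold all non-coinbase outputs under one account into a single transaction log entry instead of one entry per output. A simplified standalone sketch of that aggregation (the types here are stand-ins for the wallet's real ones):

	use std::collections::HashMap;

	#[derive(Debug)]
	struct RestoredTxStats {
		log_id: u32,
		amount_credited: u64,
		num_outputs: usize,
	}

	fn fold_output(stats: &mut HashMap<String, RestoredTxStats>, account: &str, value: u64) {
		let entry = stats.entry(account.to_string()).or_insert(RestoredTxStats {
			log_id: 0, // in the patch this comes from batch.next_tx_log_id()
			amount_credited: 0,
			num_outputs: 0,
		});
		entry.amount_credited += value;
		entry.num_outputs += 1;
	}

	fn main() {
		let mut stats = HashMap::new();
		fold_output(&mut stats, "default", 10);
		fold_output(&mut stats, "default", 5);
		let s = &stats["default"];
		// two restored outputs collapse into one stats record
		assert_eq!((s.amount_credited, s.num_outputs), (15, 2));
	}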

@@ -198,6 +240,7 @@ where
 		output.tx_log_entry.clone(),
 		None,
 		Some(&parent_key_id),
+		false,
 	)?;
 	if entries.len() > 0 {
 		let mut entry = entries[0].clone();

@@ -289,7 +332,7 @@ where
 			Restoring.",
 			m.value, m.key_id, m.commit,
 		);
-		restore_missing_output(wallet, m, &mut found_parents)?;
+		restore_missing_output(wallet, m, &mut found_parents, &mut None)?;
 	}
 
 	// Unlock locked outputs

@@ -327,24 +370,19 @@ where
 
 	// restore labels, account paths and child derivation indices
 	let label_base = "account";
-	let mut index = 1;
+	let mut acct_index = 1;
 	for (path, max_child_index) in found_parents.iter() {
-		if *path == ExtKeychain::derive_key_id(2, 0, 0, 0, 0) {
-			// default path already exists
-			continue;
-		}
-		let res = wallet.acct_path_iter().find(|e| e.path == *path);
-		if let None = res {
-			let label = format!("{}_{}", label_base, index);
-			keys::set_acct_path(wallet, &label, path)?;
-			index = index + 1;
-		}
-		{
-			let mut batch = wallet.batch()?;
-			batch.save_child_index(path, max_child_index + 1)?;
-		}
+		// default path already exists
+		if *path != ExtKeychain::derive_key_id(2, 0, 0, 0, 0) {
+			let label = format!("{}_{}", label_base, acct_index);
+			keys::set_acct_path(wallet, &label, path)?;
+			acct_index += 1;
+		}
+		let mut batch = wallet.batch()?;
+		debug!("Next child for account {} is {}", path, max_child_index + 1);
+		batch.save_child_index(path, max_child_index + 1)?;
+		batch.commit()?;
 	}
 
 	Ok(())
 }

@@ -372,27 +410,43 @@ where
 	);
 
 	let mut found_parents: HashMap<Identifier, u32> = HashMap::new();
-	// Now save what we have
+	let mut restore_stats = HashMap::new();
+
+	// Now save what we have
 	for output in result_vec {
-		restore_missing_output(wallet, output, &mut found_parents)?;
+		restore_missing_output(
+			wallet,
+			output,
+			&mut found_parents,
+			&mut Some(&mut restore_stats),
+		)?;
 	}
 
 	// restore labels, account paths and child derivation indices
 	let label_base = "account";
-	let mut index = 1;
+	let mut acct_index = 1;
 	for (path, max_child_index) in found_parents.iter() {
-		if *path == ExtKeychain::derive_key_id(2, 0, 0, 0, 0) {
-			// default path already exists
-			continue;
-		}
-		let label = format!("{}_{}", label_base, index);
-		keys::set_acct_path(wallet, &label, path)?;
-		index = index + 1;
-		{
+		// default path already exists
+		if *path != ExtKeychain::derive_key_id(2, 0, 0, 0, 0) {
+			let label = format!("{}_{}", label_base, acct_index);
+			keys::set_acct_path(wallet, &label, path)?;
+			acct_index += 1;
+		}
+		// restore tx log entry for non-coinbase outputs
+		if let Some(s) = restore_stats.get(path) {
 			let mut batch = wallet.batch()?;
-			batch.save_child_index(path, max_child_index + 1)?;
+			let mut t = TxLogEntry::new(path.clone(), TxLogEntryType::TxReceived, s.log_id);
+			t.confirmed = true;
+			t.amount_credited = s.amount_credited;
+			t.num_outputs = s.num_outputs;
+			t.update_confirmation_ts();
+			batch.save_tx_log_entry(t, &path)?;
+			batch.commit()?;
 		}
+		let mut batch = wallet.batch()?;
+		batch.save_child_index(path, max_child_index + 1)?;
+		debug!("Next child for account {} is {}", path, max_child_index + 1);
+		batch.commit()?;
 	}
 	Ok(())
 }

@@ -21,6 +21,8 @@ use crate::libwallet::error::{Error, ErrorKind};
 use crate::libwallet::internal::keys;
 use crate::libwallet::types::*;
 
+use std::collections::HashMap;
+
 /// Initialize a transaction on the sender side, returns a corresponding
 /// libwallet transaction slate with the appropriate inputs selected,
 /// and saves the private wallet identifiers of our selected outputs

@@ -85,9 +87,15 @@ where
 		context.add_input(&input.key_id, &input.mmr_index);
 	}
 
-	// Store change output(s)
-	for (_, id, mmr_index) in &change_amounts_derivations {
+	let mut commits: HashMap<Identifier, Option<String>> = HashMap::new();
+
+	// Store change output(s) and cached commits
+	for (change_amount, id, mmr_index) in &change_amounts_derivations {
 		context.add_output(&id, &mmr_index);
+		commits.insert(
+			id.clone(),
+			wallet.calc_commit_for_cache(*change_amount, &id)?,
+		);
 	}
 
 	let lock_inputs = context.get_inputs().clone();

@@ -119,10 +127,12 @@ where
 	for (change_amount, id, _) in &change_amounts_derivations {
 		t.num_outputs += 1;
 		t.amount_credited += change_amount;
+		let commit = commits.get(&id).unwrap().clone();
 		batch.save(OutputData {
 			root_key_id: parent_key_id.clone(),
 			key_id: id.clone(),
 			n_child: id.to_path().last_path_index(),
+			commit: commit,
 			mmr_index: None,
 			value: change_amount.clone(),
 			status: OutputStatus::Unconfirmed,
|
||||||
// Create closure that adds the output to recipient's wallet
|
// Create closure that adds the output to recipient's wallet
|
||||||
// (up to the caller to decide when to do)
|
// (up to the caller to decide when to do)
|
||||||
let wallet_add_fn = move |wallet: &mut T| {
|
let wallet_add_fn = move |wallet: &mut T| {
|
||||||
|
let commit = wallet.calc_commit_for_cache(amount, &key_id_inner)?;
|
||||||
let mut batch = wallet.batch()?;
|
let mut batch = wallet.batch()?;
|
||||||
let log_id = batch.next_tx_log_id(&parent_key_id)?;
|
let log_id = batch.next_tx_log_id(&parent_key_id)?;
|
||||||
let mut t = TxLogEntry::new(parent_key_id.clone(), TxLogEntryType::TxReceived, log_id);
|
let mut t = TxLogEntry::new(parent_key_id.clone(), TxLogEntryType::TxReceived, log_id);
|
||||||
|
@ -200,6 +211,7 @@ where
|
||||||
key_id: key_id_inner.clone(),
|
key_id: key_id_inner.clone(),
|
||||||
mmr_index: None,
|
mmr_index: None,
|
||||||
n_child: key_id_inner.to_path().last_path_index(),
|
n_child: key_id_inner.to_path().last_path_index(),
|
||||||
|
commit: commit,
|
||||||
value: amount,
|
value: amount,
|
||||||
status: OutputStatus::Unconfirmed,
|
status: OutputStatus::Unconfirmed,
|
||||||
height: height,
|
height: height,
|
||||||
|
|
|

@@ -161,7 +161,7 @@ where
 	} else if let Some(tx_slate_id) = tx_slate_id {
 		tx_id_string = tx_slate_id.to_string();
 	}
-	let tx_vec = updater::retrieve_txs(wallet, tx_id, tx_slate_id, Some(&parent_key_id))?;
+	let tx_vec = updater::retrieve_txs(wallet, tx_id, tx_slate_id, Some(&parent_key_id), false)?;
 	if tx_vec.len() != 1 {
 		return Err(ErrorKind::TransactionDoesntExist(tx_id_string))?;
 	}

@@ -191,7 +191,7 @@ where
 	C: NodeClient,
 	K: Keychain,
 {
-	let tx_vec = updater::retrieve_txs(wallet, Some(tx_id), None, Some(parent_key_id))?;
+	let tx_vec = updater::retrieve_txs(wallet, Some(tx_id), None, Some(parent_key_id), false)?;
 	if tx_vec.len() != 1 {
 		return Err(ErrorKind::TransactionDoesntExist(tx_id.to_string()))?;
 	}

@@ -207,7 +207,7 @@ where
 	K: Keychain,
 {
 	// finalize command
-	let tx_vec = updater::retrieve_txs(wallet, None, Some(slate.id), None)?;
+	let tx_vec = updater::retrieve_txs(wallet, None, Some(slate.id), None, false)?;
 	let mut tx = None;
 	// don't want to assume this is the right tx, in case of self-sending
 	for t in tx_vec {

@@ -74,7 +74,10 @@ where
 	let res = outputs
 		.into_iter()
 		.map(|out| {
-			let commit = keychain.commit(out.value, &out.key_id).unwrap();
+			let commit = match out.commit.clone() {
+				Some(c) => pedersen::Commitment::from_vec(util::from_hex(c).unwrap()),
+				None => keychain.commit(out.value, &out.key_id).unwrap(),
+			};
 			(out, commit)
 		})
 		.collect();

@@ -88,30 +91,39 @@ pub fn retrieve_txs<T: ?Sized, C, K>(
 	tx_id: Option<u32>,
 	tx_slate_id: Option<Uuid>,
 	parent_key_id: Option<&Identifier>,
+	outstanding_only: bool,
 ) -> Result<Vec<TxLogEntry>, Error>
 where
 	T: WalletBackend<C, K>,
 	C: NodeClient,
 	K: Keychain,
 {
-	// just read the wallet here, no need for a write lock
-	let mut txs = if let Some(id) = tx_id {
-		wallet.tx_log_iter().filter(|t| t.id == id).collect()
-	} else if tx_slate_id.is_some() {
-		wallet
-			.tx_log_iter()
-			.filter(|t| t.tx_slate_id == tx_slate_id)
-			.collect()
-	} else {
-		wallet.tx_log_iter().collect::<Vec<_>>()
-	};
-	if let Some(k) = parent_key_id {
-		txs = txs
-			.iter()
-			.filter(|t| t.parent_key_id == *k)
-			.map(|t| t.clone())
-			.collect();
-	}
+	let mut txs: Vec<TxLogEntry> = wallet
+		.tx_log_iter()
+		.filter(|tx_entry| {
+			let f_pk = match parent_key_id {
+				Some(k) => tx_entry.parent_key_id == *k,
+				None => true,
+			};
+			let f_tx_id = match tx_id {
+				Some(i) => tx_entry.id == i,
+				None => true,
+			};
+			let f_txs = match tx_slate_id {
+				Some(t) => tx_entry.tx_slate_id == Some(t),
+				None => true,
+			};
+			let f_outstanding = match outstanding_only {
+				true => {
+					!tx_entry.confirmed
+						&& (tx_entry.tx_type == TxLogEntryType::TxReceived
+							|| tx_entry.tx_type == TxLogEntryType::TxSent)
+				}
+				false => true,
+			};
+			f_pk && f_tx_id && f_txs && f_outstanding
+		})
+		.collect();
 	txs.sort_by_key(|tx| tx.creation_ts);
 	Ok(txs)
 }
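
The rewritten `retrieve_txs` folds four optional filters into a single pass over the transaction log, where each absent filter defaults to `true`. A standalone sketch of the same combined-predicate pattern (simplified types, not the wallet's real ones):

	#[derive(Clone, PartialEq)]
	enum TxType {
		Sent,
		Received,
	}

	#[derive(Clone)]
	struct TxLogEntry {
		id: u32,
		confirmed: bool,
		tx_type: TxType,
	}

	fn retrieve(entries: &[TxLogEntry], tx_id: Option<u32>, outstanding_only: bool) -> Vec<TxLogEntry> {
		entries
			.iter()
			// each Option filter passes everything when it is None
			.filter(|t| tx_id.map_or(true, |i| t.id == i))
			// outstanding == unconfirmed and of a sent/received type
			.filter(|t| {
				!outstanding_only
					|| (!t.confirmed
						&& (t.tx_type == TxType::Sent || t.tx_type == TxType::Received))
			})
			.cloned()
			.collect()
	}

	fn main() {
		let log = vec![
			TxLogEntry { id: 1, confirmed: true, tx_type: TxType::Sent },
			TxLogEntry { id: 2, confirmed: false, tx_type: TxType::Received },
		];
		// only the unconfirmed receive counts as outstanding
		assert_eq!(retrieve(&log, None, true).len(), 1);
	}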

@@ -153,20 +165,18 @@ where
 		.filter(|x| x.root_key_id == *parent_key_id && x.status != OutputStatus::Spent)
 		.collect();
 
+	let tx_entries = retrieve_txs(wallet, None, None, Some(&parent_key_id), true)?;
+
 	// Only select outputs that are actually involved in an outstanding transaction
 	let unspents: Vec<OutputData> = match update_all {
 		false => unspents
 			.into_iter()
 			.filter(|x| match x.tx_log_entry.as_ref() {
 				Some(t) => {
-					let entries = retrieve_txs(wallet, Some(*t), None, Some(&parent_key_id));
-					match entries {
-						Err(_) => true,
-						Ok(e) => {
-							e.len() > 0
-								&& !e[0].confirmed && (e[0].tx_type == TxLogEntryType::TxReceived
-								|| e[0].tx_type == TxLogEntryType::TxSent)
-						}
-					}
+					if let Some(_) = tx_entries.iter().find(|&te| te.id == *t) {
+						true
+					} else {
+						false
+					}
 				}
 				None => true,

@@ -176,7 +186,10 @@ where
 	};
 
 	for out in unspents {
-		let commit = keychain.commit(out.value, &out.key_id)?;
+		let commit = match out.commit.clone() {
+			Some(c) => pedersen::Commitment::from_vec(util::from_hex(c).unwrap()),
+			None => keychain.commit(out.value, &out.key_id).unwrap(),
+		};
 		wallet_outputs.insert(commit, (out.key_id.clone(), out.mmr_index));
 	}
 	Ok(wallet_outputs)

@@ -466,13 +479,16 @@ where
 
 	{
 		// Now acquire the wallet lock and write the new output.
+		let amount = reward(block_fees.fees);
+		let commit = wallet.calc_commit_for_cache(amount, &key_id)?;
 		let mut batch = wallet.batch()?;
 		batch.save(OutputData {
 			root_key_id: parent_key_id,
 			key_id: key_id.clone(),
 			n_child: key_id.to_path().last_path_index(),
 			mmr_index: None,
-			value: reward(block_fees.fees),
+			commit: commit,
+			value: amount,
 			status: OutputStatus::Unconfirmed,
 			height: height,
 			lock_height: lock_height,

@@ -67,6 +67,13 @@ where
 	/// Return the client being used to communicate with the node
 	fn w2n_client(&mut self) -> &mut C;
 
+	/// return the commit for caching if allowed, none otherwise
+	fn calc_commit_for_cache(
+		&mut self,
+		amount: u64,
+		id: &Identifier,
+	) -> Result<Option<String>, Error>;
+
 	/// Set parent key id by stored account name
 	fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<(), Error>;
 

@@ -241,6 +248,8 @@ pub struct OutputData {
 	pub key_id: Identifier,
 	/// How many derivations down from the root key
 	pub n_child: u32,
+	/// The actual commit, optionally stored
+	pub commit: Option<String>,
 	/// PMMR Index, used on restore in case of duplicate wallets using the same
 	/// key_id (2 wallets using same seed, for instance)
 	pub mmr_index: Option<u64>,

@@ -199,6 +199,21 @@ where
 		&mut self.w2n_client
 	}
 
+	/// return the version of the commit for caching
+	fn calc_commit_for_cache(
+		&mut self,
+		amount: u64,
+		id: &Identifier,
+	) -> Result<Option<String>, Error> {
+		if self.config.no_commit_cache == Some(true) {
+			Ok(None)
+		} else {
+			Ok(Some(util::to_hex(
+				self.keychain().commit(amount, &id)?.0.to_vec(),
+			)))
+		}
+	}
+
 	/// Set parent path by account name
 	fn set_parent_key_id_by_name(&mut self, label: &str) -> Result<(), Error> {
 		let label = label.to_owned();
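
`calc_commit_for_cache` stores the Pedersen commitment as a hex string so later reads can skip re-deriving it from the keychain, unless `no_commit_cache` disables caching. A standalone sketch of the cache-or-recompute read path (the hex helper below stands in for `util::from_hex` and is not the crate's code):

	/// Use the cached hex commit when present, otherwise fall back to
	/// recomputing the raw bytes (in the wallet, via the keychain).
	fn commit_from_cache(cached: Option<&str>, recompute: impl Fn() -> Vec<u8>) -> Vec<u8> {
		match cached {
			Some(hex) => hex_decode(hex),
			None => recompute(),
		}
	}

	// minimal hex decoder standing in for util::from_hex
	fn hex_decode(s: &str) -> Vec<u8> {
		(0..s.len())
			.step_by(2)
			.map(|i| u8::from_str_radix(&s[i..i + 2], 16).unwrap())
			.collect()
	}

	fn main() {
		assert_eq!(commit_from_cache(Some("08ff"), || vec![0]), vec![0x08, 0xff]);
		assert_eq!(commit_from_cache(None, || vec![1, 2]), vec![1, 2]);
	}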

@@ -52,6 +52,9 @@ pub struct WalletConfig {
 	pub owner_api_include_foreign: Option<bool>,
 	// The directory in which wallet files are stored
 	pub data_file_dir: String,
+	/// If Some(true), don't cache commits alongside output data
+	/// (caching is a speed improvement, but your commits are then in the database)
+	pub no_commit_cache: Option<bool>,
 	/// TLS certificate file
 	pub tls_certificate_file: Option<String>,
 	/// TLS certificate private key file

@@ -74,6 +77,7 @@ impl Default for WalletConfig {
 			check_node_api_http_addr: "http://127.0.0.1:3413".to_string(),
 			owner_api_include_foreign: Some(false),
 			data_file_dir: ".".to_string(),
+			no_commit_cache: Some(false),
 			tls_certificate_file: None,
 			tls_certificate_key: None,
 			dark_background_color_scheme: Some(true),