Feature/slate serialization (#2534)

* - Add backwards compatibility
- Add hex serialization

* rustfmt

* rustfmt

* Windows Compatibility Fixes #1 (#2535)

* initial changes for windows build and unit/integration tests

* rustfmt

* wallet+store tests

* rustfmt

* fix linux daemonize

* better encapsulate file rename

* rustfmt

* remove daemonize commands

* rustfmt

* remove server start/stop commands

* add ability to drop pmmr backend files explicitly for txhashset unzip

* rustfmt

* fix pmmr tests

* rustfmt

* Windows TUI Fix (#2555)

* switch pancurses backend to win32

* revert changes to restore test

* compatibility fix + debug messages

* rustfmt

* Add content disposition for OK responses  (#2545)

* Testing http send and fixing accordingly

* add repost method into wallet owner api (#2553)

* add repost method into wallet owner api

* rustfmt

* Add ability to compare selection strategies (#2516)

Before tx creation user can estimate fee and locked amount
with different selection strategies by providing `-e` flag for
`wallet send` command.
This commit is contained in:
Yoni Svechinsky 2019-02-13 15:29:44 +02:00 committed by Yeastplume
parent 0d36acf01b
commit ee4eed71ea
39 changed files with 2408 additions and 1221 deletions

1144
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -25,9 +25,7 @@ chrono = "0.4.4"
clap = { version = "2.31", features = ["yaml"] }
rpassword = "2.0.0"
ctrlc = { version = "3.1", features = ["termination"] }
cursive = "0.9.0"
humansize = "1.1.0"
daemonize = "0.3"
serde = "1"
serde_json = "1"
log = "0.4"
@ -45,6 +43,14 @@ grin_servers = { path = "./servers", version = "1.0.1" }
grin_util = { path = "./util", version = "1.0.1" }
grin_wallet = { path = "./wallet", version = "1.0.1" }
[target.'cfg(windows)'.dependencies]
cursive = { version = "0.10.0", default-features = false, features = ["pancurses-backend"] }
[target.'cfg(windows)'.dependencies.pancurses]
version = "0.16.0"
features = ["win32"]
[target.'cfg(unix)'.dependencies]
cursive = "0.9.0"
[build-dependencies]
built = "0.3"
reqwest = "0.9"

View file

@ -56,7 +56,7 @@ pub struct ChainValidationHandler {
impl Handler for ChainValidationHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
match w(&self.chain).validate(true) {
Ok(_) => response(StatusCode::OK, ""),
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("validate failed: {}", e),
@ -75,7 +75,7 @@ pub struct ChainCompactHandler {
impl Handler for ChainCompactHandler {
fn post(&self, _req: Request<Body>) -> ResponseFuture {
match w(&self.chain).compact() {
Ok(_) => response(StatusCode::OK, ""),
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("compact failed: {}", e),

View file

@ -97,6 +97,6 @@ impl Handler for PeerHandler {
_ => return response(StatusCode::BAD_REQUEST, "invalid command"),
};
response(StatusCode::OK, "")
response(StatusCode::OK, "{}")
}
}

View file

@ -857,6 +857,14 @@ impl Chain {
}
let header = self.get_block_header(&h)?;
{
let mut txhashset_ref = self.txhashset.write();
// Drop file handles in underlying txhashset
txhashset_ref.release_backend_files();
}
// Rewrite hashset
txhashset::zip_write(self.db_root.clone(), txhashset_data, &header)?;
let mut txhashset =

View file

@ -18,7 +18,7 @@
use crate::core::core::committed::Committed;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::{self, ReadonlyPMMR, RewindablePMMR, PMMR};
use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR};
use crate::core::core::{
Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel, TxKernelEntry,
};
@ -153,6 +153,15 @@ impl TxHashSet {
})
}
/// Close all backend file handles
///
/// Releases the file handles (and memory maps) held by each of the five
/// PMMR backends so the underlying txhashset files can be replaced on
/// disk — presumably needed for the txhashset zip overwrite, where open
/// handles would block the rewrite (TODO confirm against `Chain` caller).
pub fn release_backend_files(&mut self) {
self.header_pmmr_h.backend.release_files();
self.sync_pmmr_h.backend.release_files();
self.output_pmmr_h.backend.release_files();
self.rproof_pmmr_h.backend.release_files();
self.kernel_pmmr_h.backend.release_files();
}
/// Check if an output is unspent.
/// We look in the index to find the output MMR pos.
/// Then we check the entry in the output MMR and confirm the hash matches.

View file

@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
blake2-rfc = "0.2"
byteorder = "1"
croaring = "=0.3"
croaring = "=0.3.8"
enum_primitive = "0.1"
failure = "0.1"
failure_derive = "0.1"

View file

@ -62,6 +62,9 @@ pub trait Backend<T: PMMRable> {
/// fastest way to be able to allow direct access to the file
fn get_data_file_path(&self) -> &Path;
/// Release underlying datafiles and locks
fn release_files(&mut self);
/// Also a bit of a hack...
/// Saves a snapshot of the rewound utxo file with the block hash as
/// filename suffix. We need this when sending a txhashset zip file to a

View file

@ -18,6 +18,7 @@ use crate::core::hash::Hashed;
use crate::core::verifier_cache::VerifierCache;
use crate::core::{committed, Committed};
use crate::keychain::{self, BlindingFactor};
use crate::libtx::secp_ser;
use crate::ser::{
self, read_multi, FixedLength, PMMRable, Readable, Reader, VerifySortedAndUnique, Writeable,
Writer,
@ -164,9 +165,14 @@ pub struct TxKernel {
/// Remainder of the sum of all transaction commitments. If the transaction
/// is well formed, amounts components should sum to zero and the excess
/// is hence a valid public key.
#[serde(
serialize_with = "secp_ser::as_hex",
deserialize_with = "secp_ser::commitment_from_hex"
)]
pub excess: Commitment,
/// The signature proving the excess is a valid public key, which signs
/// the transaction fee.
#[serde(with = "secp_ser::sig_serde")]
pub excess_sig: secp::Signature,
}
@ -756,6 +762,10 @@ impl TransactionBody {
pub struct Transaction {
/// The kernel "offset" k2
/// excess is k1G after splitting the key k = k1 + k2
#[serde(
serialize_with = "secp_ser::as_hex",
deserialize_with = "secp_ser::blind_from_hex"
)]
pub offset: BlindingFactor,
/// The transaction body - inputs/outputs/kernels
body: TransactionBody,
@ -1111,6 +1121,10 @@ pub struct Input {
/// We will check maturity for coinbase output.
pub features: OutputFeatures,
/// The commit referencing the output being spent.
#[serde(
serialize_with = "secp_ser::as_hex",
deserialize_with = "secp_ser::commitment_from_hex"
)]
pub commit: Commitment,
}
@ -1214,8 +1228,16 @@ pub struct Output {
/// Options for an output's structure or use
pub features: OutputFeatures,
/// The homomorphic commitment representing the output amount
#[serde(
serialize_with = "secp_ser::as_hex",
deserialize_with = "secp_ser::commitment_from_hex"
)]
pub commit: Commitment,
/// A proof that the commitment is in the right range
#[serde(
serialize_with = "secp_ser::as_hex",
deserialize_with = "secp_ser::rangeproof_from_hex"
)]
pub proof: RangeProof,
}

View file

@ -122,6 +122,8 @@ impl<T: PMMRable> Backend<T> for VecBackend<T> {
Path::new("")
}
fn release_files(&mut self) {}
fn dump_stats(&self) {}
}

View file

@ -12,6 +12,7 @@
1. [POST Finalize Tx](#post-finalize-tx)
1. [POST Cancel Tx](#post-cancel-tx)
1. [POST Post Tx](#post-post-tx)
1. [POST Repost Tx](#post-repost-tx)
1. [POST Issue Burn Tx](#post-issue-burn-tx)
1. [Adding Foreign API Endpoints](#add-foreign-api-endpoints)
@ -641,6 +642,50 @@ Push new transaction to the connected node transaction pool. Add `?fluff` at the
},
});
```
### POST Repost Tx
Repost a `sending` transaction to the connected node transaction pool with a given transaction id. Add `?fluff` at the end of the URL to bypass Dandelion relay. This can be used to retry posting when a `sending` transaction was created but posting somehow failed.
* **URL**
* /v1/wallet/owner/repost?id=x
* /v1/wallet/owner/repost?tx_id=x
* /v1/wallet/owner/repost?fluff&tx_id=x
* **Method:**
`POST`
* **URL Params**
**Required:**
* `id=[number]` the transaction id
* `tx_id=[string]` the transaction slate id
* **Data Params**
None
* **Success Response:**
* **Code:** 200
* **Error Response:**
* **Code:** 400
* **Sample Call:**
```javascript
$.ajax({
url: "/v1/wallet/owner/repost?id=3",
dataType: "json",
type : "POST",
success : function(r) {
console.log(r);
}
});
```
### POST Issue Burn Tx

View file

@ -13,6 +13,7 @@
// limitations under the License.
/// Grin server commands processing
#[cfg(not(target_os = "windows"))]
use std::env::current_dir;
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
@ -22,7 +23,6 @@ use std::time::Duration;
use clap::ArgMatches;
use ctrlc;
use daemonize::Daemonize;
use crate::config::GlobalConfig;
use crate::core::global;
@ -31,7 +31,7 @@ use crate::servers;
use crate::tui::ui;
/// wrap below to allow UI to clean up on stop
fn start_server(config: servers::ServerConfig) {
pub fn start_server(config: servers::ServerConfig) {
start_server_tui(config);
// Just kill process for now, otherwise the process
// hangs around until sigint because the API server
@ -157,29 +157,11 @@ pub fn server_command(
});
}*/
// start the server in the different run modes (interactive or daemon)
if let Some(a) = server_args {
match a.subcommand() {
("run", _) => {
start_server(server_config);
}
("start", _) => {
let daemonize = Daemonize::new()
.pid_file("/tmp/grin.pid")
.chown_pid_file(true)
.working_directory(current_dir().unwrap())
.privileged_action(move || {
start_server(server_config.clone());
loop {
thread::sleep(Duration::from_secs(60));
}
});
match daemonize.start() {
Ok(_) => info!("Grin server successfully started."),
Err(e) => error!("Error starting: {}", e),
}
}
("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"),
("", _) => {
println!("Subcommand required, use 'grin help server' for details");
}

View file

@ -349,6 +349,9 @@ pub fn parse_send_args(args: &ArgMatches) -> Result<command::SendArgs, ParseErro
// selection_strategy
let selection_strategy = parse_required(args, "selection_strategy")?;
// estimate_selection_strategies
let estimate_selection_strategies = args.is_present("estimate_selection_strategies");
// method
let method = parse_required(args, "method")?;
@ -360,10 +363,18 @@ pub fn parse_send_args(args: &ArgMatches) -> Result<command::SendArgs, ParseErro
None => "default",
}
} else {
if !estimate_selection_strategies {
parse_required(args, "dest")?
} else {
""
}
}
};
if method == "http" && !dest.starts_with("http://") && !dest.starts_with("https://") {
if !estimate_selection_strategies
&& method == "http"
&& !dest.starts_with("http://")
&& !dest.starts_with("https://")
{
let msg = format!(
"HTTP Destination should start with http://: or https://: {}",
dest,
@ -386,6 +397,7 @@ pub fn parse_send_args(args: &ArgMatches) -> Result<command::SendArgs, ParseErro
message: message,
minimum_confirmations: min_c,
selection_strategy: selection_strategy.to_owned(),
estimate_selection_strategies,
method: method.to_owned(),
dest: dest.to_owned(),
change_outputs: change_outputs,
@ -562,7 +574,11 @@ pub fn wallet_command(
}
("send", Some(args)) => {
let a = arg_parse!(parse_send_args(&args));
command::send(inst_wallet(), a)
command::send(
inst_wallet(),
a,
wallet_config.dark_background_color_scheme.unwrap_or(true),
)
}
("receive", Some(args)) => {
let a = arg_parse!(parse_receive_args(&args));

View file

@ -44,10 +44,6 @@ subcommands:
subcommands:
- config:
about: Generate a configuration grin-server.toml file in the current directory
- start:
about: Start the Grin server as a daemon
- stop:
about: Stop the Grin server daemon
- run:
about: Run the Grin server in this console
- client:
@ -161,6 +157,10 @@ subcommands:
- smallest
default_value: all
takes_value: true
- estimate_selection_strategies:
help: Estimates all possible Coin/Output selection strategies.
short: e
long: estimate-selection
- change_outputs:
help: Number of change outputs to generate (mainly for testing)
short: o

View file

@ -11,7 +11,7 @@ edition = "2018"
[dependencies]
byteorder = "1"
croaring = "=0.3"
croaring = "=0.3.8"
env_logger = "0.5"
libc = "0.2"
failure = "0.1"

View file

@ -73,11 +73,19 @@ pub fn new_named_env(path: String, name: String, max_readers: Option<u32>) -> lm
env_builder.set_maxdbs(8).unwrap();
// 5 GB should give us plenty of room for now (Windows gets a smaller
// map size below, since it allocates the entire mapped file up front)
env_builder
.set_mapsize(549_755_813_888)
.unwrap_or_else(|e| {
#[cfg(not(target_os = "windows"))]
env_builder.set_mapsize(5_368_709_120).unwrap_or_else(|e| {
panic!("Unable to allocate LMDB space: {:?}", e);
});
//TODO: This is a temporary workaround for (beta) Windows support
//Windows allocates the entire file at once, so this needs to
//be changed to allocate as little as possible and increase as needed
#[cfg(target_os = "windows")]
env_builder.set_mapsize(524_288_000).unwrap_or_else(|e| {
panic!("Unable to allocate LMDB space: {:?}", e);
});
if let Some(max_readers) = max_readers {
env_builder
.set_maxreaders(max_readers)

View file

@ -152,6 +152,12 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
self.data_file.path()
}
/// Release underlying data files
///
/// Drops the open handles on both the data and hash files so they can be
/// renamed or replaced on disk; on Windows an open handle would otherwise
/// cause an access error (see the related workaround in `flush`).
fn release_files(&mut self) {
self.data_file.release();
self.hash_file.release();
}
fn snapshot(&self, header: &BlockHeader) -> Result<(), String> {
self.leaf_set
.snapshot(header)
@ -208,8 +214,8 @@ impl<T: PMMRable> PMMRBackend<T> {
Ok(PMMRBackend {
data_dir: data_dir.to_path_buf(),
prunable,
hash_file,
data_file,
hash_file: hash_file,
data_file: data_file,
leaf_set,
prune_list,
})
@ -334,20 +340,11 @@ impl<T: PMMRable> PMMRBackend<T> {
}
self.prune_list.flush()?;
}
// 4. Rename the compact copy of hash file and reopen it.
fs::rename(
tmp_prune_file_hash.clone(),
self.data_dir.join(PMMR_HASH_FILE),
)?;
self.hash_file = DataFile::open(self.data_dir.join(PMMR_HASH_FILE))?;
self.hash_file.replace(Path::new(&tmp_prune_file_hash))?;
// 5. Rename the compact copy of the data file and reopen it.
fs::rename(
tmp_prune_file_data.clone(),
self.data_dir.join(PMMR_DATA_FILE),
)?;
self.data_file = DataFile::open(self.data_dir.join(PMMR_DATA_FILE))?;
self.data_file.replace(Path::new(&tmp_prune_file_data))?;
// 6. Write the leaf_set to disk.
// Optimize the bitmap storage in the process.

View file

@ -101,6 +101,17 @@ where
self.file.path()
}
/// Replace underlying file with another, deleting original
///
/// Thin delegation to `AppendOnlyFile::replace`, which removes the current
/// file, renames `with` into its place and re-initializes the handle/mmap.
pub fn replace(&mut self, with: &Path) -> io::Result<()> {
self.file.replace(with)?;
Ok(())
}
/// Drop underlying file handles
///
/// Delegates to `AppendOnlyFile::release`; after this call the backing
/// file on disk may be safely renamed or replaced by other code.
pub fn release(&mut self) {
self.file.release();
}
/// Write the file out to disk, pruning removed elements.
pub fn save_prune<F>(&self, target: &str, prune_offs: &[u64], prune_cb: F) -> io::Result<()>
where
@ -125,7 +136,7 @@ where
/// latter by truncating the underlying file and re-creating the mmap.
pub struct AppendOnlyFile {
path: PathBuf,
file: File,
file: Option<File>,
mmap: Option<memmap::Mmap>,
buffer_start: usize,
buffer: Vec<u8>,
@ -135,28 +146,36 @@ pub struct AppendOnlyFile {
impl AppendOnlyFile {
/// Open a file (existing or not) as append-only, backed by a mmap.
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<AppendOnlyFile> {
let file = OpenOptions::new()
.read(true)
.append(true)
.create(true)
.open(&path)?;
let mut aof = AppendOnlyFile {
file,
file: None,
path: path.as_ref().to_path_buf(),
mmap: None,
buffer_start: 0,
buffer: vec![],
buffer_start_bak: 0,
};
// If we have a non-empty file then mmap it.
let sz = aof.size();
if sz > 0 {
aof.buffer_start = sz as usize;
aof.mmap = Some(unsafe { memmap::Mmap::map(&aof.file)? });
}
aof.init()?;
Ok(aof)
}
/// (Re)init an underlying file and its associated memmap
///
/// Opens (creating if necessary) the file at `self.path` in append mode
/// and, when the file is non-empty, maps it into memory and positions
/// `buffer_start` at the current end of file so subsequent appends land
/// after the existing data. Called from `open` and after `replace`.
pub fn init(&mut self) -> io::Result<()> {
self.file = Some(
OpenOptions::new()
.read(true)
.append(true)
.create(true)
.open(self.path.clone())?,
);
// If we have a non-empty file then mmap it.
// (an empty file cannot be memory-mapped, so `mmap` stays as-is)
let sz = self.size();
if sz > 0 {
self.buffer_start = sz as usize;
self.mmap = Some(unsafe { memmap::Mmap::map(&self.file.as_ref().unwrap())? });
}
Ok(())
}
/// Append data to the file. Until the append-only file is synced, data is
/// only written to memory.
pub fn append(&mut self, bytes: &mut [u8]) {
@ -193,21 +212,37 @@ impl AppendOnlyFile {
pub fn flush(&mut self) -> io::Result<()> {
if self.buffer_start_bak > 0 {
// Flushing a rewound state, we need to truncate via set_len() before applying.
self.file.set_len(self.buffer_start as u64)?;
// Drop and recreate, or windows throws an access error
self.mmap = None;
self.file = None;
{
let file = OpenOptions::new()
.read(true)
.create(true)
.write(true)
.open(&self.path)?;
file.set_len(self.buffer_start as u64)?;
}
let file = OpenOptions::new()
.read(true)
.create(true)
.append(true)
.open(&self.path)?;
self.file = Some(file);
self.buffer_start_bak = 0;
}
self.buffer_start += self.buffer.len();
self.file.write_all(&self.buffer[..])?;
self.file.sync_all()?;
self.file.as_mut().unwrap().write_all(&self.buffer[..])?;
self.file.as_mut().unwrap().sync_all()?;
self.buffer = vec![];
// Note: file must be non-empty to memory map it
if self.file.metadata()?.len() == 0 {
if self.file.as_ref().unwrap().metadata()?.len() == 0 {
self.mmap = None;
} else {
self.mmap = Some(unsafe { memmap::Mmap::map(&self.file)? });
self.mmap = Some(unsafe { memmap::Mmap::map(&self.file.as_ref().unwrap())? });
}
Ok(())
@ -313,6 +348,23 @@ impl AppendOnlyFile {
}
}
/// Replace the underlying file with another file
/// deleting the original
///
/// The mmap and file handle are dropped first so the remove/rename can
/// succeed — on Windows, open handles on the target would otherwise
/// cause an access error (see the similar workaround in `flush`).
/// `init` then reopens the new file and rebuilds the mmap.
pub fn replace(&mut self, with: &Path) -> io::Result<()> {
self.mmap = None;
self.file = None;
fs::remove_file(&self.path)?;
fs::rename(with, &self.path)?;
self.init()?;
Ok(())
}
/// Release underlying file handles
///
/// Drops the mmap before the file handle (the map borrows the file);
/// after this the file can be renamed/replaced on disk. Call `init`
/// to reopen before using the file again.
pub fn release(&mut self) {
self.mmap = None;
self.file = None;
}
/// Current size of the file in bytes.
pub fn size(&self) -> u64 {
fs::metadata(&self.path).map(|md| md.len()).unwrap_or(0)

View file

@ -30,6 +30,7 @@ use crate::store::types::prune_noop;
#[test]
fn pmmr_append() {
let (data_dir, elems) = setup("append");
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
// adding first set of 4 elements and sync
@ -71,6 +72,7 @@ fn pmmr_append() {
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(pmmr.root(), (pos_14, pos_15).hash_with_index(16));
}
}
teardown(data_dir);
}
@ -80,6 +82,7 @@ fn pmmr_compact_leaf_sibling() {
let (data_dir, elems) = setup("compact_leaf_sibling");
// setup the mmr store with all elements
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
let mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
@ -143,6 +146,7 @@ fn pmmr_compact_leaf_sibling() {
// Check we can still retrieve the "removed" hash at pos 1 from the hash file.
// It should still be available even after pruning and compacting.
assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash);
}
teardown(data_dir);
}
@ -152,6 +156,7 @@ fn pmmr_prune_compact() {
let (data_dir, elems) = setup("prune_compact");
// setup the mmr store with all elements
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
let mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
@ -193,6 +198,7 @@ fn pmmr_prune_compact() {
assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2));
assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7));
}
}
teardown(data_dir);
}
@ -202,6 +208,7 @@ fn pmmr_reload() {
let (data_dir, elems) = setup("reload");
// set everything up with an initial backend
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
let mmr_size = load(0, &elems[..], &mut backend);
@ -260,7 +267,8 @@ fn pmmr_reload() {
// create a new backend referencing the data files
// and check everything still works as expected
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
let mut backend =
store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
assert_eq!(backend.unpruned_size(), mmr_size);
{
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
@ -291,6 +299,7 @@ fn pmmr_reload() {
assert_eq!(backend.get_from_file(4), Some(pos_4_hash));
assert_eq!(backend.get_from_file(5), Some(pos_5_hash));
}
}
teardown(data_dir);
}
@ -298,6 +307,7 @@ fn pmmr_reload() {
#[test]
fn pmmr_rewind() {
let (data_dir, elems) = setup("rewind");
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
// adding elements and keeping the corresponding root
@ -366,7 +376,9 @@ fn pmmr_rewind() {
}
println!("doing a sync after rewinding");
backend.sync().unwrap();
if let Err(e) = backend.sync() {
panic!("Err: {:?}", e);
}
{
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, 10);
@ -418,6 +430,7 @@ fn pmmr_rewind() {
// check we have no data in the backend after
// pruning, compacting and rewinding
assert_eq!(backend.data_size(), 0);
}
teardown(data_dir);
}
@ -425,6 +438,7 @@ fn pmmr_rewind() {
#[test]
fn pmmr_compact_single_leaves() {
let (data_dir, elems) = setup("compact_single_leaves");
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
let mmr_size = load(0, &elems[0..5], &mut backend);
backend.sync().unwrap();
@ -454,6 +468,7 @@ fn pmmr_compact_single_leaves() {
backend
.check_compact(2, &Bitmap::create(), &prune_noop)
.unwrap();
}
teardown(data_dir);
}
@ -461,6 +476,7 @@ fn pmmr_compact_single_leaves() {
#[test]
fn pmmr_compact_entire_peak() {
let (data_dir, elems) = setup("compact_entire_peak");
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
let mmr_size = load(0, &elems[0..5], &mut backend);
backend.sync().unwrap();
@ -495,6 +511,7 @@ fn pmmr_compact_entire_peak() {
assert_eq!(backend.get_data(8), Some(pos_8));
assert_eq!(backend.get_hash(8), Some(pos_8_hash));
assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
}
teardown(data_dir);
}
@ -502,8 +519,23 @@ fn pmmr_compact_entire_peak() {
#[test]
fn pmmr_compact_horizon() {
let (data_dir, elems) = setup("compact_horizon");
{
let pos_1_hash;
let pos_2_hash;
let pos_3_hash;
let pos_6_hash;
let pos_7_hash;
let pos_8;
let pos_8_hash;
let pos_11;
let pos_11_hash;
let mmr_size;
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), true, None).unwrap();
let mmr_size = load(0, &elems[..], &mut backend);
mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
// 0010012001001230
@ -511,19 +543,18 @@ fn pmmr_compact_horizon() {
assert_eq!(backend.data_size(), 19);
assert_eq!(backend.hash_size(), 35);
let pos_1_hash = backend.get_hash(1).unwrap();
let pos_2_hash = backend.get_hash(2).unwrap();
let pos_3_hash = backend.get_hash(3).unwrap();
let pos_6_hash = backend.get_hash(6).unwrap();
let pos_7_hash = backend.get_hash(7).unwrap();
pos_1_hash = backend.get_hash(1).unwrap();
pos_2_hash = backend.get_hash(2).unwrap();
pos_3_hash = backend.get_hash(3).unwrap();
pos_6_hash = backend.get_hash(6).unwrap();
pos_7_hash = backend.get_hash(7).unwrap();
let pos_8 = backend.get_data(8).unwrap();
let pos_8_hash = backend.get_hash(8).unwrap();
pos_8 = backend.get_data(8).unwrap();
pos_8_hash = backend.get_hash(8).unwrap();
let pos_11 = backend.get_data(11).unwrap();
let pos_11_hash = backend.get_hash(11).unwrap();
pos_11 = backend.get_data(11).unwrap();
pos_11_hash = backend.get_hash(11).unwrap();
{
// pruning some choice nodes
{
let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
@ -585,7 +616,8 @@ fn pmmr_compact_horizon() {
{
// recreate backend
let backend =
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None).unwrap();
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None)
.unwrap();
assert_eq!(backend.data_size(), 19);
assert_eq!(backend.hash_size(), 35);
@ -600,7 +632,8 @@ fn pmmr_compact_horizon() {
{
let mut backend =
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None).unwrap();
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None)
.unwrap();
{
let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
@ -619,7 +652,8 @@ fn pmmr_compact_horizon() {
{
// recreate backend
let backend =
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None).unwrap();
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), true, None)
.unwrap();
// 0010012001001230
@ -636,6 +670,7 @@ fn pmmr_compact_horizon() {
assert_eq!(backend.get_data(11), Some(pos_11));
assert_eq!(backend.get_from_file(11), Some(pos_11_hash));
}
}
teardown(data_dir);
}
@ -645,6 +680,8 @@ fn compact_twice() {
let (data_dir, elems) = setup("compact_twice");
// setup the mmr store with all elements
// Scoped to allow Windows to teardown
{
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), true, None).unwrap();
let mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
@ -712,6 +749,7 @@ fn compact_twice() {
assert_eq!(root, pmmr.root());
assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7));
}
}
teardown(data_dir);
}

View file

@ -80,7 +80,15 @@ where
fs::create_dir_all(&p)?;
}
}
let mut outfile = fs::File::create(&file_path)?;
//let mut outfile = fs::File::create(&file_path)?;
let res = fs::File::create(&file_path);
let mut outfile = match res {
Err(e) => {
error!("{:?}", e);
return Err(zip::result::ZipError::Io(e));
}
Ok(r) => r,
};
io::copy(&mut file, &mut outfile)?;
}

View file

@ -16,10 +16,10 @@
use std::fs::File;
use std::io::{Read, Write};
use crate::adapters::util::{deserialize_slate, serialize_slate};
use crate::libwallet::slate::Slate;
use crate::libwallet::{Error, ErrorKind};
use crate::libwallet::Error;
use crate::{WalletCommAdapter, WalletConfig};
use serde_json as json;
use std::collections::HashMap;
#[derive(Clone)]
@ -43,7 +43,8 @@ impl WalletCommAdapter for FileWalletCommAdapter {
fn send_tx_async(&self, dest: &str, slate: &Slate) -> Result<(), Error> {
let mut pub_tx = File::create(dest)?;
pub_tx.write_all(json::to_string(&slate).unwrap().as_bytes())?;
let slate_string = serialize_slate(slate);
pub_tx.write_all(slate_string.as_bytes())?;
pub_tx.sync_all()?;
Ok(())
}
@ -52,7 +53,7 @@ impl WalletCommAdapter for FileWalletCommAdapter {
let mut pub_tx_f = File::open(params)?;
let mut content = String::new();
pub_tx_f.read_to_string(&mut content)?;
Ok(json::from_str(&content).map_err(|err| ErrorKind::Format(err.to_string()))?)
Ok(deserialize_slate(&content))
}
fn listen(

View file

@ -12,9 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::adapters::util::get_versioned_slate;
use crate::api;
use crate::controller;
use crate::libwallet::slate::Slate;
use crate::libwallet::slate::{Slate, VersionedSlate};
use crate::libwallet::{Error, ErrorKind};
use crate::{instantiate_wallet, HTTPNodeClient, WalletCommAdapter, WalletConfig};
/// HTTP Wallet 'plugin' implementation
@ -47,15 +48,15 @@ impl WalletCommAdapter for HTTPWalletCommAdapter {
}
let url = format!("{}/v1/wallet/foreign/receive_tx", dest);
debug!("Posting transaction slate to {}", url);
let res = api::client::post(url.as_str(), None, slate);
let slate = get_versioned_slate(slate);
let res: Result<VersionedSlate, _> = api::client::post(url.as_str(), None, &slate);
match res {
Err(e) => {
let report = format!("Posting transaction slate (is recipient listening?): {}", e);
error!("{}", report);
Err(ErrorKind::ClientCallback(report).into())
}
Ok(r) => Ok(r),
Ok(r) => Ok(r.into()),
}
}

View file

@ -15,7 +15,8 @@
// Keybase Wallet Plugin
use crate::controller;
use crate::libwallet::slate::Slate;
use crate::libwallet::slate::{Slate, VersionedSlate};
use crate::libwallet::slate_versions::v0::SlateV0;
use crate::libwallet::{Error, ErrorKind};
use crate::{instantiate_wallet, HTTPNodeClient, WalletCommAdapter, WalletConfig};
use failure::ResultExt;
@ -192,6 +193,7 @@ fn get_unread(topic: &str) -> Result<HashMap<String, String>, Error> {
/// Send a message to a keybase channel that self-destructs after ttl seconds.
fn send<T: Serialize>(message: T, channel: &str, topic: &str, ttl: u16) -> bool {
let seconds = format!("{}s", ttl);
let serialized = to_string(&message).unwrap();
let payload = to_string(&json!({
"method": "send",
"params": {
@ -200,7 +202,7 @@ fn send<T: Serialize>(message: T, channel: &str, topic: &str, ttl: u16) -> bool
"name": channel, "topic_name": topic, "topic_type": "dev"
},
"message": {
"body": to_string(&message).unwrap()
"body": serialized
},
"exploding_lifetime": seconds
}
@ -210,7 +212,10 @@ fn send<T: Serialize>(message: T, channel: &str, topic: &str, ttl: u16) -> bool
let response = api_send(&payload);
if let Ok(res) = response {
match res["result"]["message"].as_str() {
Some("message sent") => true,
Some("message sent") => {
debug!("Message sent to {}: {}", channel, serialized);
true
}
_ => false,
}
} else {
@ -254,9 +259,10 @@ fn poll(nseconds: u64, channel: &str) -> Option<Slate> {
while start.elapsed().as_secs() < nseconds {
let unread = read_from_channel(channel, SLATE_SIGNED);
for msg in unread.unwrap().iter() {
let blob = from_str::<Slate>(msg);
let blob = from_str::<VersionedSlate>(msg);
match blob {
Ok(slate) => {
let slate: Slate = slate.into();
info!(
"keybase response message received from @{}, tx uuid: {}",
channel, slate.id,
@ -288,8 +294,10 @@ impl WalletCommAdapter for KeybaseWalletCommAdapter {
return Err(ErrorKind::GenericError("Tx rejected".to_owned()))?;
}
let id = slate.id;
// Send original slate to recipient with the SLATE_NEW topic
match send(slate, addr, SLATE_NEW, TTL) {
match send(&slate, addr, SLATE_NEW, TTL) {
true => (),
false => {
return Err(ErrorKind::ClientCallback(
@ -297,10 +305,7 @@ impl WalletCommAdapter for KeybaseWalletCommAdapter {
))?
}
}
info!(
"tx request has been sent to @{}, tx uuid: {}",
addr, slate.id
);
info!("tx request has been sent to @{}, tx uuid: {}", addr, id);
// Wait for response from recipient with SLATE_SIGNED topic
match poll(TTL as u64, addr) {
Some(slate) => return Ok(slate),
@ -345,9 +350,10 @@ impl WalletCommAdapter for KeybaseWalletCommAdapter {
break;
}
for (msg, channel) in &unread.unwrap() {
let blob = from_str::<Slate>(msg);
let blob = from_str::<VersionedSlate>(msg);
match blob {
Ok(mut slate) => {
Ok(message) => {
let mut slate: Slate = message.clone().into();
let tx_uuid = slate.id;
// Reject multiple recipients channel for safety
@ -378,19 +384,29 @@ impl WalletCommAdapter for KeybaseWalletCommAdapter {
Ok(())
}) {
// Reply to the same channel with topic SLATE_SIGNED
Ok(_) => match send(slate, channel, SLATE_SIGNED, TTL) {
true => {
Ok(_) => {
let success = match message {
// Send the same version of slate that was sent to us
VersionedSlate::V0(_) => {
send(SlateV0::from(slate), channel, SLATE_SIGNED, TTL)
}
VersionedSlate::V1(_) => {
send(slate, channel, SLATE_SIGNED, TTL)
}
};
if success {
notify_on_receive(
config.keybase_notify_ttl.unwrap_or(1440),
channel.to_string(),
tx_uuid.to_string(),
);
debug!("Returned slate to @{} via keybase", channel);
}
false => {
} else {
error!("Failed to return slate to @{} via keybase. Incoming tx failed", channel);
}
},
}
Err(e) => {
error!(
"Error on receiving tx via keybase: {}. Incoming tx failed",
@ -399,7 +415,7 @@ impl WalletCommAdapter for KeybaseWalletCommAdapter {
}
}
}
Err(_) => (),
Err(_) => debug!("Failed to deserialize keybase message: {}", msg),
}
}
sleep(LISTEN_SLEEP_DURATION);

View file

@ -16,6 +16,7 @@ mod file;
mod http;
mod keybase;
mod null;
pub mod util;
pub use self::file::FileWalletCommAdapter;
pub use self::http::HTTPWalletCommAdapter;

View file

@ -0,0 +1,23 @@
use crate::libwallet::slate::{Slate, VersionedSlate};
use crate::libwallet::slate_versions::v0::SlateV0;
use crate::libwallet::ErrorKind;
use serde_json as json;
/// Wrap a slate in the versioned enum matching its `version` field.
///
/// A version-0 slate is converted to the legacy `SlateV0` wire format;
/// any other version is carried as the current (V1) representation.
pub fn get_versioned_slate(slate: &Slate) -> VersionedSlate {
    let owned = slate.clone();
    if owned.version == 0 {
        VersionedSlate::V0(SlateV0::from(owned))
    } else {
        VersionedSlate::V1(owned)
    }
}
/// Serialize a slate to a JSON string in its version-appropriate wire format.
pub fn serialize_slate(slate: &Slate) -> String {
    let versioned = get_versioned_slate(slate);
    json::to_string(&versioned).unwrap()
}
/// Deserialize a slate from a JSON string in any supported wire version,
/// converting it into the current `Slate` representation.
///
/// # Panics
/// Panics with a `Format` error if `raw_slate` is not valid versioned
/// slate JSON.
pub fn deserialize_slate(raw_slate: &str) -> Slate {
    // `raw_slate` is already a `&str`; no extra borrow needed for from_str.
    let versioned_slate: VersionedSlate = json::from_str(raw_slate)
        .map_err(|err| ErrorKind::Format(err.to_string()))
        .unwrap();
    versioned_slate.into()
}

View file

@ -200,6 +200,7 @@ pub struct SendArgs {
pub message: Option<String>,
pub minimum_confirmations: u64,
pub selection_strategy: String,
pub estimate_selection_strategies: bool,
pub method: String,
pub dest: String,
pub change_outputs: usize,
@ -210,8 +211,28 @@ pub struct SendArgs {
pub fn send(
wallet: Arc<Mutex<WalletInst<impl NodeClient + 'static, keychain::ExtKeychain>>>,
args: SendArgs,
dark_scheme: bool,
) -> Result<(), Error> {
controller::owner_single_use(wallet.clone(), |api| {
if args.estimate_selection_strategies {
let strategies = vec!["smallest", "all"]
.into_iter()
.map(|strategy| {
let (total, fee) = api
.estimate_initiate_tx(
None,
args.amount,
args.minimum_confirmations,
args.max_outputs,
args.change_outputs,
strategy == "all",
)
.unwrap();
(strategy, total, fee)
})
.collect();
display::estimate(args.amount, strategies, dark_scheme);
} else {
let result = api.initiate_tx(
None,
args.amount,
@ -274,6 +295,7 @@ pub fn send(
}
}
}
}
Ok(())
})?;
Ok(())

View file

@ -15,13 +15,14 @@
//! Controller for wallet.. instantiates and handles listeners (or single-run
//! invocations) as needed.
//! Still experimental
use crate::adapters::util::get_versioned_slate;
use crate::adapters::{FileWalletCommAdapter, HTTPWalletCommAdapter, KeybaseWalletCommAdapter};
use crate::api::{ApiServer, BasicAuthMiddleware, Handler, ResponseFuture, Router, TLSConfig};
use crate::core::core;
use crate::core::core::Transaction;
use crate::keychain::Keychain;
use crate::libwallet::api::{APIForeign, APIOwner};
use crate::libwallet::slate::Slate;
use crate::libwallet::slate::{Slate, VersionedSlate};
use crate::libwallet::types::{
CbData, NodeClient, OutputData, SendTXArgs, TxLogEntry, WalletBackend, WalletInfo,
};
@ -40,6 +41,7 @@ use std::marker::PhantomData;
use std::net::SocketAddr;
use std::sync::Arc;
use url::form_urlencoded;
use uuid::Uuid;
/// Instantiate wallet Owner API for a single-use (command line) call
/// Return a function containing a loaded API context to call
@ -467,6 +469,87 @@ where
))
}
/// Repost the stored transaction identified by the `id` (numeric tx-log id)
/// or `tx_id` (UUID) query parameter. The optional `fluff` parameter posts
/// the transaction immediately rather than via the stem phase.
pub fn repost(
	&self,
	req: Request<Body>,
	api: APIOwner<T, C, K>,
) -> Box<dyn Future<Item = (), Error = Error> + Send> {
	let params = parse_params(&req);
	let mut id_int: Option<u32> = None;
	let mut tx_uuid: Option<Uuid> = None;
	if let Some(id_string) = params.get("id") {
		match id_string[0].parse() {
			Ok(id) => id_int = Some(id),
			Err(e) => {
				error!("repost: could not parse id: {}", e);
				return Box::new(err(ErrorKind::GenericError(
					"repost: cannot repost transaction. Could not parse id in request."
						.to_owned(),
				)
				.into()));
			}
		}
	} else if let Some(tx_id_string) = params.get("tx_id") {
		match tx_id_string[0].parse() {
			Ok(tx_id) => tx_uuid = Some(tx_id),
			Err(e) => {
				error!("repost: could not parse tx_id: {}", e);
				return Box::new(err(ErrorKind::GenericError(
					"repost: cannot repost transaction. Could not parse tx_id in request."
						.to_owned(),
				)
				.into()));
			}
		}
	} else {
		return Box::new(err(ErrorKind::GenericError(
			"repost: Cannot repost transaction. Missing id or tx_id param in request."
				.to_owned(),
		)
		.into()));
	}
	let res = api.retrieve_txs(true, id_int, tx_uuid);
	if let Err(e) = res {
		return Box::new(err(ErrorKind::GenericError(format!(
			"repost: cannot repost transaction. retrieve_txs failed, err: {:?}",
			e
		))
		.into()));
	}
	let (_, txs) = res.unwrap();
	// Guard against an unknown id/tx_id: indexing an empty result set below
	// would panic the handler instead of returning an error to the caller.
	if txs.is_empty() {
		return Box::new(err(ErrorKind::GenericError(format!(
			"repost: cannot repost transaction. No transaction found for id {:?}/{:?}.",
			id_int, tx_uuid
		))
		.into()));
	}
	let res = api.get_stored_tx(&txs[0]);
	if let Err(e) = res {
		return Box::new(err(ErrorKind::GenericError(format!(
			"repost: cannot repost transaction. get_stored_tx failed, err: {:?}",
			e
		))
		.into()));
	}
	let stored_tx = res.unwrap();
	if stored_tx.is_none() {
		error!(
			"Transaction with id {:?}/{:?} does not have transaction data. Not reposting.",
			id_int, tx_uuid,
		);
		// Report the actual problem (no stored transaction data) rather than
		// the previous, misleading "missing id or tx_id param" message.
		return Box::new(err(ErrorKind::GenericError(
			"repost: Cannot repost transaction. Transaction data not found.".to_owned(),
		)
		.into()));
	}
	let fluff = params.get("fluff").is_some();
	Box::new(match api.post_tx(&stored_tx.unwrap(), fluff) {
		Ok(_) => ok(()),
		Err(e) => {
			error!("repost: failed with error: {}", e);
			err(e)
		}
	})
}
fn handle_post_request(&self, req: Request<Body>) -> WalletResponseFuture {
let api = APIOwner::new(self.wallet.clone());
match req
@ -487,10 +570,14 @@ where
),
"cancel_tx" => Box::new(
self.cancel_tx(req, api)
.and_then(|_| ok(response(StatusCode::OK, ""))),
.and_then(|_| ok(response(StatusCode::OK, "{}"))),
),
"post_tx" => Box::new(
self.post_tx(req, api)
.and_then(|_| ok(response(StatusCode::OK, "{}"))),
),
"repost" => Box::new(
self.repost(req, api)
.and_then(|_| ok(response(StatusCode::OK, ""))),
),
_ => Box::new(err(ErrorKind::GenericError(
@ -574,16 +661,17 @@ where
&self,
req: Request<Body>,
mut api: APIForeign<T, C, K>,
) -> Box<dyn Future<Item = Slate, Error = Error> + Send> {
) -> Box<dyn Future<Item = VersionedSlate, Error = Error> + Send> {
Box::new(parse_body(req).and_then(
//TODO: No way to insert a message from the params
move |mut slate| {
move |slate: VersionedSlate| {
let mut slate: Slate = slate.into();
if let Err(e) = api.verify_slate_messages(&slate) {
error!("Error validating participant messages: {}", e);
err(e)
} else {
match api.receive_tx(&mut slate, None, None) {
Ok(_) => ok(slate.clone()),
Ok(_) => ok(get_versioned_slate(&slate.clone())),
Err(e) => {
error!("receive_tx: failed with error: {}", e);
err(e)
@ -677,20 +765,31 @@ fn create_ok_response(json: &str) -> Response<Body> {
"access-control-allow-headers",
"Content-Type, Authorization",
)
.header(hyper::header::CONTENT_TYPE, "application/json")
.body(json.to_string().into())
.unwrap()
}
/// Build a new hyper Response with the status code and body provided.
///
/// Whenever the status code is `StatusCode::OK` the text parameter should be
/// valid JSON as the content type header will be set to `application/json'
fn response<T: Into<Body>>(status: StatusCode, text: T) -> Response<Body> {
Response::builder()
let mut builder = &mut Response::builder();
builder = builder
.status(status)
.header("access-control-allow-origin", "*")
.header(
"access-control-allow-headers",
"Content-Type, Authorization",
)
.body(text.into())
.unwrap()
);
if status == StatusCode::OK {
builder = builder.header(hyper::header::CONTENT_TYPE, "application/json");
}
builder.body(text.into()).unwrap()
}
fn parse_params(req: &Request<Body>) -> HashMap<String, Vec<String>> {

View file

@ -338,6 +338,49 @@ pub fn info(
);
}
}
/// Display fee and locked-amount estimates for a prospective send, one table
/// row per selection strategy.
pub fn estimate(
	amount: u64,
	strategies: Vec<(
		&str, // selection strategy name ("smallest" / "all")
		u64, // total amount that would be locked
		u64, // transaction fee
	)>,
	dark_background_color_scheme: bool,
) {
	println!(
		"\nEstimation for sending {}:\n",
		amount_to_hr_string(amount, false)
	);
	let mut table = table!();
	table.set_titles(row![
		bMG->"Selection strategy",
		bMG->"Fee",
		bMG->"Will be locked",
	]);
	for (strategy, total, fee) in strategies {
		// Only the strategy-name cell style differs between the two color
		// schemes (cyan on dark backgrounds, default/dark on light ones).
		if dark_background_color_scheme {
			table.add_row(row![
				bFC->strategy,
				FR->amount_to_hr_string(fee, false),
				FY->amount_to_hr_string(total, false),
			]);
		} else {
			table.add_row(row![
				bFD->strategy,
				FR->amount_to_hr_string(fee, false),
				FY->amount_to_hr_string(total, false),
			]);
		}
	}
	table.printstd();
	println!();
}
/// Display list of wallet accounts in a pretty way
pub fn accounts(acct_mappings: Vec<AcctPathMapping>) {
println!("\n____ Wallet Accounts ____\n",);

View file

@ -25,7 +25,8 @@ extern crate serde_derive;
extern crate log;
use failure;
use grin_api as api;
use grin_core as core;
#[macro_use]
extern crate grin_core as core;
use grin_keychain as keychain;
use grin_store as store;
use grin_util as util;

View file

@ -671,6 +671,74 @@ where
Ok((slate, lock_fn))
}
/// Estimates the amount to be locked and fee for the transaction without creating one
///
/// # Arguments
/// * `src_acct_name` - The human readable account name from which to draw outputs
/// for the transaction, overriding whatever the active account is as set via the
/// [`set_active_account`](struct.APIOwner.html#method.set_active_account) method.
/// If None, the transaction will use the active account.
/// * `amount` - The amount to send, in nanogrins. (`1 G = 1_000_000_000nG`)
/// * `minimum_confirmations` - The minimum number of confirmations an output
/// should have in order to be included in the transaction.
/// * `max_outputs` - By default, the wallet selects as many inputs as possible in a
/// transaction, to reduce the Output set and the fees. The wallet will attempt to spend
/// include up to `max_outputs` in a transaction, however if this is not enough to cover
/// the whole amount, the wallet will include more outputs. This parameter should be considered
/// a soft limit.
/// * `num_change_outputs` - The target number of change outputs to create in the transaction.
/// The actual number created will be `num_change_outputs` + whatever remainder is needed.
/// * `selection_strategy_is_use_all` - If `true`, attempt to use up as many outputs as
/// possible to create the transaction, up the 'soft limit' of `max_outputs`. This helps
/// to reduce the size of the UTXO set and the amount of data stored in the wallet, and
/// minimizes fees. This will generally result in many inputs and a large change output(s),
/// usually much larger than the amount being sent. If `false`, the transaction will include
/// as many outputs as are needed to meet the amount, (and no more) starting with the smallest
/// value outputs.
///
/// # Returns
/// * a result containing:
/// * (total, fee) - A tuple:
/// * Total amount to be locked.
/// * Transaction fee
pub fn estimate_initiate_tx(
	&mut self,
	src_acct_name: Option<&str>,
	amount: u64,
	minimum_confirmations: u64,
	max_outputs: usize,
	num_change_outputs: usize,
	selection_strategy_is_use_all: bool,
) -> Result<
	(
		u64, // total
		u64, // fee
	),
	Error,
> {
	let mut w = self.wallet.lock();
	w.open_with_credentials()?;
	// Resolve the parent key from the requested account, falling back to the
	// active account when the name is absent or unknown.
	let parent_key_id = match src_acct_name {
		Some(d) => {
			let pm = w.get_acct_path(d.to_owned())?;
			match pm {
				Some(p) => p.path,
				None => w.parent_key_id(),
			}
		}
		None => w.parent_key_id(),
	};
	let res = tx::estimate_send_tx(
		&mut *w,
		amount,
		minimum_confirmations,
		max_outputs,
		num_change_outputs,
		selection_strategy_is_use_all,
		&parent_key_id,
	);
	// Close the wallet again so credentials aren't left open after this
	// read-only estimation (mirrors the other owner-API methods).
	w.close()?;
	res
}
/// Lock outputs associated with a given slate/transaction
pub fn tx_lock_outputs(
&mut self,

View file

@ -242,6 +242,52 @@ pub fn select_send_tx<T: ?Sized, C, K>(
),
Error,
>
where
T: WalletBackend<C, K>,
C: NodeClient,
K: Keychain,
{
let (coins, _total, amount, fee) = select_coins_and_fee(
wallet,
amount,
current_height,
minimum_confirmations,
max_outputs,
change_outputs,
selection_strategy_is_use_all,
&parent_key_id,
)?;
// build transaction skeleton with inputs and change
let (mut parts, change_amounts_derivations) =
inputs_and_change(&coins, wallet, amount, fee, change_outputs)?;
// This is more proof of concept than anything but here we set lock_height
// on tx being sent (based on current chain height via api).
parts.push(build::with_lock_height(lock_height));
Ok((parts, coins, change_amounts_derivations, fee))
}
/// Select outputs and calculating fee.
pub fn select_coins_and_fee<T: ?Sized, C, K>(
wallet: &mut T,
amount: u64,
current_height: u64,
minimum_confirmations: u64,
max_outputs: usize,
change_outputs: usize,
selection_strategy_is_use_all: bool,
parent_key_id: &Identifier,
) -> Result<
(
Vec<OutputData>,
u64, // total
u64, // amount
u64, // fee
),
Error,
>
where
T: WalletBackend<C, K>,
C: NodeClient,
@ -325,16 +371,7 @@ where
amount_with_fee = amount + fee;
}
}
// build transaction skeleton with inputs and change
let (mut parts, change_amounts_derivations) =
inputs_and_change(&coins, wallet, amount, fee, change_outputs)?;
// This is more proof of concept than anything but here we set lock_height
// on tx being sent (based on current chain height via api).
parts.push(build::with_lock_height(lock_height));
Ok((parts, coins, change_amounts_derivations, fee))
Ok((coins, total, amount, fee))
}
/// Selects inputs and change for a transaction

View file

@ -42,6 +42,52 @@ where
Ok(slate)
}
/// Estimates locked amount and fee for the transaction without creating one
///
/// Runs the same coin selection as a real send (including refreshing outputs
/// against the node), but stops before any slate or transaction context is
/// built — nothing is locked or stored.
pub fn estimate_send_tx<T: ?Sized, C, K>(
	wallet: &mut T,
	amount: u64,
	minimum_confirmations: u64,
	max_outputs: usize,
	num_change_outputs: usize,
	selection_strategy_is_use_all: bool,
	parent_key_id: &Identifier,
) -> Result<
	(
		u64, // total
		u64, // fee
	),
	Error,
>
where
	T: WalletBackend<C, K>,
	C: NodeClient,
	K: Keychain,
{
	// Current chain height feeds output maturity checks during selection.
	let current_height = wallet.w2n_client().get_chain_height()?;

	// ensure outputs we're selecting are up to date
	updater::refresh_outputs(wallet, parent_key_id, false)?;

	// Run coin selection only. Unlike the real send path, no slate is
	// produced and no lock closure is returned; we just report what *would*
	// be locked and what the fee would be.
	let (_coins, total, _amount, fee) = selection::select_coins_and_fee(
		wallet,
		amount,
		current_height,
		minimum_confirmations,
		max_outputs,
		num_change_outputs,
		selection_strategy_is_use_all,
		parent_key_id,
	)?;
	Ok((total, fee))
}
/// Add inputs to the slate (effectively becoming the sender)
pub fn add_inputs_to_slate<T: ?Sized, C, K>(
wallet: &mut T,

View file

@ -26,6 +26,7 @@ pub mod api;
mod error;
pub mod internal;
pub mod slate;
pub mod slate_versions;
pub mod types;
pub use crate::libwallet::error::{Error, ErrorKind};

View file

@ -18,6 +18,7 @@
use crate::blake2::blake2b::blake2b;
use crate::keychain::{BlindSum, BlindingFactor, Keychain};
use crate::libwallet::error::{Error, ErrorKind};
use crate::libwallet::slate_versions::v0::SlateV0;
use crate::util::secp;
use crate::util::secp::key::{PublicKey, SecretKey};
use crate::util::secp::Signature;
@ -33,6 +34,25 @@ use uuid::Uuid;
/// The current slate format version (see `VersionedSlate::V1`).
const CURRENT_SLATE_VERSION: u64 = 1;
/// A wrapper around slates that enables support for versioning.
/// Serialized untagged, so the concrete variant is inferred from the
/// JSON shape when deserializing.
#[derive(Serialize, Deserialize, Clone)]
#[serde(untagged)]
pub enum VersionedSlate {
	/// Pre versioning version
	V0(SlateV0),
	/// Version 1 with versioning and hex serialization - current
	V1(Slate),
}
impl From<VersionedSlate> for Slate {
	/// Unwrap any versioned slate into the current `Slate`, upgrading
	/// v0 payloads on the way through.
	fn from(ver: VersionedSlate) -> Self {
		match ver {
			VersionedSlate::V0(inner) => inner.into(),
			VersionedSlate::V1(inner) => inner,
		}
	}
}
/// Public data for each participant in the slate
#[derive(Serialize, Deserialize, Debug, Clone)]
@ -40,14 +60,18 @@ pub struct ParticipantData {
/// Id of participant in the transaction. (For now, 0=sender, 1=rec)
pub id: u64,
/// Public key corresponding to private blinding factor
#[serde(with = "secp_ser::pubkey_serde")]
pub public_blind_excess: PublicKey,
/// Public key corresponding to private nonce
#[serde(with = "secp_ser::pubkey_serde")]
pub public_nonce: PublicKey,
/// Public partial signature
#[serde(with = "secp_ser::option_sig_serde")]
pub part_sig: Option<Signature>,
/// A message for other participants
pub message: Option<String>,
/// Signature, created with private key corresponding to 'public_blind_excess'
#[serde(with = "secp_ser::option_sig_serde")]
pub message_sig: Option<Signature>,
}

View file

@ -0,0 +1,18 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module contains old slate versions and conversions to the newest slate version
//! Used for serialization and deserialization of slates in a backwards compatible way.
#[allow(missing_docs)]
pub mod v0;

View file

@ -0,0 +1,370 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Contains V0 of the slate
use crate::core::core::transaction::{
Input, KernelFeatures, Output, OutputFeatures, Transaction, TransactionBody, TxKernel,
};
use crate::keychain::BlindingFactor;
use crate::libwallet::slate::{ParticipantData, Slate};
use crate::util::secp;
use crate::util::secp::key::PublicKey;
use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::secp::Signature;
use uuid::Uuid;
/// Version 0 of the transaction slate (pre-versioning wire format), kept so
/// slates can be exchanged with older wallets in a backwards-compatible way.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SlateV0 {
	/// The number of participants intended to take part in this transaction
	pub num_participants: usize,
	/// Unique transaction ID, selected by sender
	pub id: Uuid,
	/// The core transaction data:
	/// inputs, outputs, kernels, kernel offset
	pub tx: TransactionV0,
	/// base amount (excluding fee)
	pub amount: u64,
	/// fee amount
	pub fee: u64,
	/// Block height for the transaction
	pub height: u64,
	/// Lock height
	pub lock_height: u64,
	/// Participant data, each participant in the transaction will
	/// insert their public data here. For now, 0 is sender and 1
	/// is receiver, though this will change for multi-party
	pub participant_data: Vec<ParticipantDataV0>,
}
/// Public data for a single participant in a v0 slate.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ParticipantDataV0 {
	/// Id of participant in the transaction. (For now, 0=sender, 1=rec)
	pub id: u64,
	/// Public key corresponding to private blinding factor
	pub public_blind_excess: PublicKey,
	/// Public key corresponding to private nonce
	pub public_nonce: PublicKey,
	/// Public partial signature
	pub part_sig: Option<Signature>,
	/// A message for other participants
	pub message: Option<String>,
	/// Signature, created with private key corresponding to 'public_blind_excess'
	pub message_sig: Option<Signature>,
}
/// A transaction, v0 wire format
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TransactionV0 {
	/// The kernel "offset" k2
	/// excess is k1G after splitting the key k = k1 + k2
	pub offset: BlindingFactor,
	/// The transaction body - inputs/outputs/kernels
	pub body: TransactionBodyV0,
}
/// TransactionBody is a common abstraction for transaction and block,
/// here in its v0 wire format.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TransactionBodyV0 {
	/// List of inputs spent by the transaction.
	pub inputs: Vec<InputV0>,
	/// List of outputs the transaction produces.
	pub outputs: Vec<OutputV0>,
	/// List of kernels that make up this transaction (usually a single kernel).
	pub kernels: Vec<TxKernelV0>,
}
/// A transaction input, v0 wire format.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct InputV0 {
	/// The features of the output being spent.
	/// We will check maturity for coinbase output.
	pub features: OutputFeatures,
	/// The commit referencing the output being spent.
	pub commit: Commitment,
}
/// A transaction output, v0 wire format.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct OutputV0 {
	/// Options for an output's structure or use
	pub features: OutputFeatures,
	/// The homomorphic commitment representing the output amount
	pub commit: Commitment,
	/// A proof that the commitment is in the right range
	pub proof: RangeProof,
}
/// A transaction kernel, v0 wire format.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TxKernelV0 {
	/// Options for a kernel's structure or use
	pub features: KernelFeatures,
	/// Fee originally included in the transaction this proof is for.
	pub fee: u64,
	/// This kernel is not valid earlier than lock_height blocks
	/// The max lock_height of all *inputs* to this transaction
	pub lock_height: u64,
	/// Remainder of the sum of all transaction commitments. If the transaction
	/// is well formed, amounts components should sum to zero and the excess
	/// is hence a valid public key.
	pub excess: Commitment,
	/// The signature proving the excess is a valid public key, which signs
	/// the transaction fee.
	pub excess_sig: secp::Signature,
}
impl From<SlateV0> for Slate {
fn from(slate: SlateV0) -> Slate {
let SlateV0 {
num_participants,
id,
tx,
amount,
fee,
height,
lock_height,
participant_data,
} = slate;
let tx = Transaction::from(tx);
let participant_data = map_vec!(participant_data, |data| ParticipantData::from(data));
let version = 0;
Slate {
num_participants,
id,
tx,
amount,
fee,
height,
lock_height,
participant_data,
version,
}
}
}
impl From<&ParticipantDataV0> for ParticipantData {
	/// Convert a borrowed v0 participant entry into the current format.
	/// All fields are identical between the two versions, so this is a
	/// straight field-wise copy.
	fn from(data: &ParticipantDataV0) -> ParticipantData {
		ParticipantData {
			id: data.id,
			public_blind_excess: data.public_blind_excess,
			public_nonce: data.public_nonce,
			part_sig: data.part_sig,
			// Plain `Option<String>` clone replaces the previous roundabout
			// `as_ref().map(|t| String::from(&**t))`.
			message: data.message.clone(),
			message_sig: data.message_sig,
		}
	}
}
impl From<TransactionV0> for Transaction {
fn from(tx: TransactionV0) -> Transaction {
let TransactionV0 { offset, body } = tx;
let body = TransactionBody::from(&body);
let transaction = Transaction::new(body.inputs, body.outputs, body.kernels);
transaction.with_offset(offset)
}
}
impl From<&TransactionBodyV0> for TransactionBody {
fn from(body: &TransactionBodyV0) -> Self {
let TransactionBodyV0 {
inputs,
outputs,
kernels,
} = body;
let inputs = map_vec!(inputs, |inp| Input::from(inp));
let outputs = map_vec!(outputs, |out| Output::from(out));
let kernels = map_vec!(kernels, |kern| TxKernel::from(kern));
TransactionBody {
inputs,
outputs,
kernels,
}
}
}
impl From<&InputV0> for Input {
	/// Field-for-field copy; the input format is unchanged between versions.
	fn from(input: &InputV0) -> Input {
		Input {
			features: input.features,
			commit: input.commit,
		}
	}
}
impl From<&OutputV0> for Output {
	/// Field-for-field copy; the output format is unchanged between versions.
	fn from(output: &OutputV0) -> Output {
		Output {
			features: output.features,
			commit: output.commit,
			proof: output.proof,
		}
	}
}
impl From<&TxKernelV0> for TxKernel {
	/// Field-for-field copy; the kernel format is unchanged between versions.
	fn from(kernel: &TxKernelV0) -> TxKernel {
		TxKernel {
			features: kernel.features,
			fee: kernel.fee,
			lock_height: kernel.lock_height,
			excess: kernel.excess,
			excess_sig: kernel.excess_sig,
		}
	}
}
impl From<Slate> for SlateV0 {
fn from(slate: Slate) -> SlateV0 {
let Slate {
num_participants,
id,
tx,
amount,
fee,
height,
lock_height,
participant_data,
version: _,
} = slate;
let tx = TransactionV0::from(tx);
let participant_data = map_vec!(participant_data, |data| ParticipantDataV0::from(data));
SlateV0 {
num_participants,
id,
tx,
amount,
fee,
height,
lock_height,
participant_data,
}
}
}
impl From<&ParticipantData> for ParticipantDataV0 {
	/// Convert a borrowed current-format participant entry into the v0
	/// format. All fields are identical between the two versions, so this
	/// is a straight field-wise copy.
	fn from(data: &ParticipantData) -> ParticipantDataV0 {
		ParticipantDataV0 {
			id: data.id,
			public_blind_excess: data.public_blind_excess,
			public_nonce: data.public_nonce,
			part_sig: data.part_sig,
			// Plain `Option<String>` clone replaces the previous roundabout
			// `as_ref().map(|t| String::from(&**t))`.
			message: data.message.clone(),
			message_sig: data.message_sig,
		}
	}
}
impl From<Transaction> for TransactionV0 {
	/// Downgrade a transaction to the v0 format: the offset is carried over
	/// unchanged and the body is converted field-wise.
	fn from(tx: Transaction) -> TransactionV0 {
		let offset = tx.offset;
		let current_body: TransactionBody = tx.into();
		TransactionV0 {
			offset,
			body: TransactionBodyV0::from(&current_body),
		}
	}
}
impl From<&TransactionBody> for TransactionBodyV0 {
fn from(body: &TransactionBody) -> Self {
let TransactionBody {
inputs,
outputs,
kernels,
} = body;
let inputs = map_vec!(inputs, |inp| InputV0::from(inp));
let outputs = map_vec!(outputs, |out| OutputV0::from(out));
let kernels = map_vec!(kernels, |kern| TxKernelV0::from(kern));
TransactionBodyV0 {
inputs,
outputs,
kernels,
}
}
}
impl From<&Input> for InputV0 {
	/// Field-for-field copy; the input format is unchanged between versions.
	fn from(input: &Input) -> Self {
		InputV0 {
			features: input.features,
			commit: input.commit,
		}
	}
}
impl From<&Output> for OutputV0 {
	/// Field-for-field copy; the output format is unchanged between versions.
	fn from(output: &Output) -> Self {
		OutputV0 {
			features: output.features,
			commit: output.commit,
			proof: output.proof,
		}
	}
}
impl From<&TxKernel> for TxKernelV0 {
	/// Field-for-field copy; the kernel format is unchanged between versions.
	fn from(kernel: &TxKernel) -> Self {
		TxKernelV0 {
			features: kernel.features,
			fee: kernel.fee,
			lock_height: kernel.lock_height,
			excess: kernel.excess,
			excess_sig: kernel.excess_sig,
		}
	}
}

View file

@ -137,6 +137,10 @@ where
self.wallets.insert(addr.to_owned(), (tx, wallet));
}
/// Clear the shared `running` flag to request shutdown of the proxy
/// (presumably polled by the `run` loop — confirm against `run()`).
pub fn stop(&mut self) {
	self.running.store(false, Ordering::Relaxed);
}
/// Run the incoming message queue and respond more or less
/// synchronously
pub fn run(&mut self) -> Result<(), libwallet::Error> {

View file

@ -26,6 +26,7 @@ use grin_keychain as keychain;
use grin_util as util;
use grin_wallet as wallet;
use std::fs;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
@ -44,6 +45,7 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
let dest_dir = format!("{}/{}_restore", base_dir, wallet_dir);
fs::create_dir_all(dest_dir.clone())?;
let dest_seed = format!("{}/wallet.seed", dest_dir);
println!("Source: {}, Dest: {}", source_seed, dest_seed);
fs::copy(source_seed, dest_seed)?;
let mut wallet_proxy: WalletProxy<LocalWalletClient, ExtKeychain> = WalletProxy::new(base_dir);
@ -54,6 +56,7 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
wallet_proxy.add_wallet(wallet_dir, client.get_send_instance(), wallet.clone());
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
@ -67,6 +70,9 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
Ok(())
})?;
wp_running.store(false, Ordering::Relaxed);
//thread::sleep(Duration::from_millis(1000));
Ok(())
}
@ -108,6 +114,7 @@ fn compare_wallet_restore(
}
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
@ -164,6 +171,9 @@ fn compare_wallet_restore(
dest_accts.as_ref().unwrap().len()
);
wp_running.store(false, Ordering::Relaxed);
//thread::sleep(Duration::from_millis(1000));
Ok(())
}
@ -208,6 +218,7 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
wallet_proxy.add_wallet("wallet3", client3.get_send_instance(), wallet3.clone());
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
@ -327,6 +338,8 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
Ok(())
})?;
wp_running.store(false, Ordering::Relaxed);
Ok(())
}

View file

@ -226,6 +226,33 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
Ok(())
})?;
// Estimate fee and locked amount for a transaction
wallet::controller::owner_single_use(wallet1.clone(), |sender_api| {
let (total, fee) = sender_api.estimate_initiate_tx(
None,
amount * 2, // amount
2, // minimum confirmations
500, // max outputs
1, // num change outputs
true, // select all outputs
)?;
assert_eq!(total, 600_000_000_000);
assert_eq!(fee, 4_000_000);
let (total, fee) = sender_api.estimate_initiate_tx(
None,
amount * 2, // amount
2, // minimum confirmations
500, // max outputs
1, // num change outputs
false, // select the smallest amount of outputs
)?;
assert_eq!(total, 180_000_000_000);
assert_eq!(fee, 6_000_000);
Ok(())
})?;
// Send another transaction, but don't post to chain immediately and use
// the stored transaction instead
wallet::controller::owner_single_use(wallet1.clone(), |sender_api| {