Merge branch 'master' into unitdiff

Ignotus Peverell 2018-10-25 14:20:41 -07:00 committed by GitHub
commit a42d66efff
131 changed files with 1942 additions and 2372 deletions

Cargo.lock (generated)
View file

@ -19,6 +19,11 @@ dependencies = [
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "antidote"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "arc-swap"
version = "0.3.2"
@ -315,6 +320,11 @@ dependencies = [
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "crossbeam-channel"
version = "0.2.6"
@ -666,10 +676,10 @@ dependencies = [
"grin_util 0.4.0",
"grin_wallet 0.4.0",
"humansize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tar 0.4.17 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -691,13 +701,13 @@ dependencies = [
"hyper 0.12.12 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper-rustls 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"ring 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustls 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-rustls 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -722,11 +732,11 @@ dependencies = [
"grin_util 0.4.0",
"grin_wallet 0.4.0",
"lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -760,6 +770,7 @@ dependencies = [
"grin_util 0.4.0",
"grin_wallet 0.4.0",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num-bigint 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -767,7 +778,6 @@ dependencies = [
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -779,13 +789,13 @@ dependencies = [
"digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
"grin_util 0.4.0",
"hmac 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"ripemd160 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -802,12 +812,12 @@ dependencies = [
"grin_store 0.4.0",
"grin_util 0.4.0",
"lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -822,10 +832,10 @@ dependencies = [
"grin_store 0.4.0",
"grin_util 0.4.0",
"grin_wallet 0.4.0",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -852,11 +862,11 @@ dependencies = [
"itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -873,11 +883,11 @@ dependencies = [
"grin_util 0.4.0",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -888,13 +898,13 @@ dependencies = [
"base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log4rs 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"secp256k1zkp 0.7.1 (git+https://github.com/mimblewimble/rust-secp256k1-zkp?tag=grin_integration_28)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slog-term 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"walkdir 2.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"zip 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -916,12 +926,12 @@ dependencies = [
"grin_store 0.4.0",
"grin_util 0.4.0",
"hyper 0.12.12 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"prettytable-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1076,17 +1086,6 @@ dependencies = [
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "isatty"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "itertools"
version = "0.7.8"
@ -1194,6 +1193,11 @@ name = "linked-hash-map"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "linked-hash-map"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lmdb-zero"
version = "0.4.4"
@ -1228,6 +1232,36 @@ version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "log-mdc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "log4rs"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"log-mdc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde-value 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_yaml 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
"thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1592,6 +1626,15 @@ dependencies = [
"vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ordered-float"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "owning_ref"
version = "0.3.3"
@ -2026,6 +2069,15 @@ name = "serde"
version = "1.0.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde-value"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ordered-float 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "serde_derive"
version = "1.0.80"
@ -2057,6 +2109,17 @@ dependencies = [
"url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "serde_yaml"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"yaml-rust 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "sha2"
version = "0.7.1"
@ -2087,33 +2150,6 @@ name = "slab"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "slog"
version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "slog-async"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "slog-term"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"isatty 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "smallvec"
version = "0.6.5"
@ -2173,11 +2209,6 @@ dependencies = [
"unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "take_mut"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "tar"
version = "0.4.17"
@ -2247,6 +2278,16 @@ dependencies = [
"unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread-id"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread_local"
version = "0.3.6"
@ -2473,11 +2514,24 @@ dependencies = [
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "traitobject"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "try-lock"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "typemap"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unsafe-any 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "typenum"
version = "1.10.0"
@ -2540,6 +2594,14 @@ dependencies = [
"void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "unsafe-any"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "untrusted"
version = "0.6.2"
@ -2712,6 +2774,14 @@ name = "xi-unicode"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "yaml-rust"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "zip"
version = "0.4.2"
@ -2728,6 +2798,7 @@ dependencies = [
"checksum adler32 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7e522997b529f05601e05166c07ed17789691f562762c7f3b987263d2dedee5c"
"checksum aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "68f56c7353e5a9547cbd76ed90f7bb5ffc3ba09d4ea9bd1d8c06c8b1142eeb5a"
"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
"checksum antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5"
"checksum arc-swap 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f344c31716d7f1afc56f8cc08163f7d1826b223924c04b89b0a533459d5f99f"
"checksum argon2rs 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3f67b0b6a86dae6e67ff4ca2b6201396074996379fba2b92ff649126f37cb392"
"checksum array-macro 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8b1b1a00de235e9f2cc0e650423dc249d875c116a5934188c08fdd0c02d840ef"
@ -2764,6 +2835,7 @@ dependencies = [
"checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb"
"checksum croaring 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "38961600edf0408acc371eb2359901f5ed2e634dde8537fc9a05e88fb26cde0e"
"checksum croaring-sys 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "48a344ef01931b3106d6083c08fefc16e2b0b9d2de695a49a7437e56607cfda1"
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
"checksum crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7b85741761b7f160bc5e7e0c14986ef685b7f8bf9b7ad081c60c604bb4649827"
"checksum crossbeam-deque 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3486aefc4c0487b9cb52372c97df0a48b8c249514af1ee99703bf70d2f2ceda1"
"checksum crossbeam-epoch 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "30fecfcac6abfef8771151f8be4abc9e4edc112c2bcb233314cafde2680536e9"
@ -2816,7 +2888,6 @@ dependencies = [
"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e"
"checksum indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08173ba1e906efb6538785a8844dd496f5d34f0a2d88038e95195172fc667220"
"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
"checksum isatty 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e31a8281fc93ec9693494da65fbf28c0c2aa60a2eaec25dc58e2f31952e95edc"
"checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450"
"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
"checksum jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf83704f4e79979a424d1082dd2c1e52683058056c9280efa19ac5f6bc9033c"
@ -2830,10 +2901,13 @@ dependencies = [
"checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
"checksum libz-sys 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "4401fe74560a0d46fce3464625ac8aa7a79d291dd28cee021d18852d5191c280"
"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939"
"checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e"
"checksum lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "13416eee745b087c22934f35f1f24da22da41ba2a5ce197143d168ce055cc58d"
"checksum lock_api 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "775751a3e69bde4df9b38dd00a1b5d6ac13791e4223d4a0506577f0dd27cfb7a"
"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
"checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f"
"checksum log-mdc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7"
"checksum log4rs 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "25e0fc8737a634116a2deb38d821e4400ed16ce9dcb0d628a978d399260f5902"
"checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21"
"checksum maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08cbb6b4fef96b6d77bfc40ec491b1690c779e77b05cd9f07f787ed376fd4c43"
"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
@ -2873,6 +2947,7 @@ dependencies = [
"checksum openssl 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = "5af9e83eb3c51ee806387d26a43056f3246d865844caa6dd704d2ba7e831c264"
"checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de"
"checksum openssl-sys 0.9.38 (registry+https://github.com/rust-lang/crates.io-index)" = "ff3d1b390ab1b9700f682ad95a30dc9c0f40dd212ca57266012cfc678b0e365a"
"checksum ordered-float 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7eb5259643245d3f292c7a146b2df53bba24d7eab159410e648eb73dc164669d"
"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"
"checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5"
@ -2925,16 +3000,15 @@ dependencies = [
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "15c141fc7027dd265a47c090bf864cf62b42c4d228bbcf4e51a0c9e2b0d3f7ef"
"checksum serde-value 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "52903ade2290cbd61a0937a66a268f26cebf246e3ddd7964a8babb297111fb0d"
"checksum serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "225de307c6302bec3898c51ca302fc94a7a1697ef0845fcee6448f33c032249c"
"checksum serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)" = "43344e7ce05d0d8280c5940cabb4964bea626aa58b1ec0e8c73fa2a8512a38ce"
"checksum serde_urlencoded 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aaed41d9fb1e2f587201b863356590c90c1157495d811430a0c0325fe8169650"
"checksum serde_yaml 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "980f5cc4e92ba24ba471b6a7b3df17d5b7b2c16fb1900a1aa0a79062320b16c4"
"checksum sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0"
"checksum signal-hook 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7ca1f1c0ed6c8beaab713ad902c041e4f09d06e1b4bb74c5fc553c078ed0110"
"checksum siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac"
"checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d"
"checksum slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09e4f1d0276ac7d448d98db16f0dab0220c24d4842d88ce4dad4b306fa234f1d"
"checksum slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e544d16c6b230d84c866662fe55e31aacfca6ae71e6fc49ae9a311cb379bfc2f"
"checksum slog-term 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5951a808c40f419922ee014c15b6ae1cd34d963538b57d8a4778b9ca3fff1e0b"
"checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d"
"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
"checksum string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00caf261d6f90f588f8450b8e1230fa0d5be49ee6140fdfbcb55335aff350970"
@ -2943,7 +3017,6 @@ dependencies = [
"checksum syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)" = "261ae9ecaa397c42b960649561949d69311f08eeaea86a65696e6e46517cf741"
"checksum syn 0.15.12 (registry+https://github.com/rust-lang/crates.io-index)" = "34ab9797e47d24cb76b8dc4d24ff36807018c7cc549c4cba050b068be0c586b0"
"checksum synstructure 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85bb9b7550d063ea184027c9b8c20ac167cd36d3e06b3a40bceb9d746dc1a7b7"
"checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
"checksum tar 0.4.17 (registry+https://github.com/rust-lang/crates.io-index)" = "83b0d14b53dbfd62681933fadd651e815f99e6084b649e049ab99296e05ab3de"
"checksum tempfile 3.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "55c1195ef8513f3273d55ff59fe5da6940287a0d7a98331254397f464833675b"
"checksum term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6b677dd1e8214ea1ef4297f85dbcbed8e8cdddb561040cc998ca2551c37561"
@ -2951,6 +3024,7 @@ dependencies = [
"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6"
"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
"checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b"
"checksum tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6e93c78d23cc61aa245a8acd2c4a79c4d7fa7fb5c3ca90d5737029f043a84895"
@ -2970,7 +3044,9 @@ dependencies = [
"checksum tokio-udp 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "da941144b816d0dcda4db3a1ba87596e4df5e860a72b70783fe435891f80601c"
"checksum tokio-uds 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22e3aa6d1fcc19e635418dc0a30ab5bd65d347973d6f43f1a37bf8d9d1335fc9"
"checksum toml 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "4a2ecc31b0351ea18b3fe11274b8db6e4d82bce861bbb22e6dbed40417902c65"
"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079"
"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382"
"checksum typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "653be63c80a3296da5551e1bfd2cca35227e13cdd08c6668903ae2f4f77aa1f6"
"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
"checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33"
@ -2981,6 +3057,7 @@ dependencies = [
"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526"
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
"checksum unsafe-any 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f30360d7979f5e9c6e6cea48af192ea8fab4afb3cf72597154b8f08935bc9c7f"
"checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f"
"checksum url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2a321979c09843d272956e73700d12c4e7d3d92b2ee112b31548aef0d4efc5a6"
"checksum utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd70f467df6810094968e2fce0ee1bd0e87157aceb026a8c083bcf5e25b9efe4"
@ -3005,4 +3082,5 @@ dependencies = [
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
"checksum xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c"
"checksum xi-unicode 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "12ea8eda4b1eb72f02d148402e23832d56a33f55d8c1b2d5bcdde91d79d47cb1"
"checksum yaml-rust 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "95acf0db5515d07da9965ec0e0ba6cc2d825e2caeb7303b66ca441729801254e"
"checksum zip 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "36b9e08fb518a65cf7e08a1e482573eb87a2f4f8c6619316612a3c1f162fe822"

View file

@ -24,7 +24,7 @@ humansize = "1.1.0"
daemonize = "0.3"
serde = "1"
serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
term = "0.5"
grin_api = { path = "./api" }
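
The dependency swap above (slog replaced by the log crate) is mirrored at every call site in the file diffs below: the explicit LOGGER handle disappears and the plain log macros are used directly. A minimal sketch of the before/after call-site shape, assuming a backend such as log4rs (added in Cargo.lock above) is initialized elsewhere:

#[macro_use]
extern crate log;

fn report_head(height: u64, hash: &str) {
    // before (slog): info!(LOGGER, "chain head at {} [{}]", height, hash);
    // after (log): the macros take no logger argument
    info!("chain head at {} [{}]", height, hash);
    debug!("head details: height={} hash={}", height, hash);
}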

View file

@ -15,7 +15,7 @@ ring = "0.13"
serde = "1"
serde_derive = "1"
serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
tokio = "0.1.7"
tokio-core = "0.1.17"
tokio-tcp = "0.1"

View file

@ -14,7 +14,8 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{Arc, RwLock, Weak};
use std::sync::{Arc, Weak};
use util::RwLock;
use failure::ResultExt;
use futures::future::ok;
@ -36,7 +37,6 @@ use types::*;
use url::form_urlencoded;
use util;
use util::secp::pedersen::Commitment;
use util::LOGGER;
use web::*;
// All handlers use `Weak` references instead of `Arc` to avoid cycles that
@ -205,12 +205,8 @@ impl OutputHandler {
}
debug!(
LOGGER,
"outputs_block_batch: {}-{}, {:?}, {:?}",
start_height,
end_height,
commitments,
include_rp,
start_height, end_height, commitments, include_rp,
);
let mut return_vec = vec![];
@ -695,7 +691,7 @@ struct PoolInfoHandler {
impl Handler for PoolInfoHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let pool_arc = w(&self.tx_pool);
let pool = pool_arc.read().unwrap();
let pool = pool_arc.read();
json_response(&PoolInfo {
pool_size: pool.total_size(),
@ -744,7 +740,6 @@ impl PoolPushHandler {
identifier: "?.?.?.?".to_string(),
};
info!(
LOGGER,
"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
tx.hash(),
tx.inputs().len(),
@ -753,12 +748,12 @@ impl PoolPushHandler {
);
// Push to tx pool.
let mut tx_pool = pool_arc.write().unwrap();
let mut tx_pool = pool_arc.write();
let header = tx_pool.blockchain.chain_head().unwrap();
tx_pool
.add_to_pool(source, tx, !fluff, &header)
.map_err(|e| {
error!(LOGGER, "update_pool: failed with error: {:?}", e);
error!("update_pool: failed with error: {:?}", e);
ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
})
}),
@ -807,7 +802,7 @@ pub fn start_rest_apis(
router.add_middleware(basic_auth_middleware);
}
info!(LOGGER, "Starting HTTP API server at {}.", addr);
info!("Starting HTTP API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
apis.start(socket_addr, router, tls_config).is_ok()
}
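
The other recurring change in this file (and in the chain diffs below) is the lock type: std::sync::RwLock with .read().unwrap() / .write().unwrap() gives way to util::RwLock, whose read() and write() return the guard directly. A rough sketch of the difference, assuming util::RwLock is a non-poisoning, parking_lot-style lock re-exported by grin_util (the re-export itself is not shown in this diff):

use std::collections::HashMap;
use util::RwLock; // assumed re-export, matching `use util::RwLock;` above

// Hypothetical cache type, for illustration only.
struct PoolCache {
    inner: RwLock<HashMap<u64, String>>,
}

impl PoolCache {
    fn insert(&self, height: u64, hash: String) {
        // std::sync::RwLock: self.inner.write().unwrap().insert(height, hash);
        let mut map = self.inner.write(); // no Result, so no unwrap()
        map.insert(height, hash);
    }

    fn contains(&self, height: u64) -> bool {
        self.inner.read().contains_key(&height)
    }
}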

View file

@ -33,7 +33,7 @@ extern crate serde;
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate futures;
extern crate http;
extern crate hyper_rustls;

View file

@ -33,7 +33,6 @@ use std::sync::Arc;
use std::{io, thread};
use tokio_rustls::ServerConfigExt;
use tokio_tcp;
use util::LOGGER;
/// Errors that can be returned by an ApiEndpoint implementation.
#[derive(Debug)]
@ -243,13 +242,10 @@ impl ApiServer {
// TODO re-enable stop after investigation
//let tx = mem::replace(&mut self.shutdown_sender, None).unwrap();
//tx.send(()).expect("Failed to stop API server");
info!(LOGGER, "API server has been stoped");
info!("API server has been stoped");
true
} else {
error!(
LOGGER,
"Can't stop API server, it's not running or doesn't spport stop operation"
);
error!("Can't stop API server, it's not running or doesn't spport stop operation");
false
}
}
@ -263,7 +259,7 @@ impl Handler for LoggingMiddleware {
req: Request<Body>,
mut handlers: Box<Iterator<Item = HandlerObj>>,
) -> ResponseFuture {
debug!(LOGGER, "REST call: {} {}", req.method(), req.uri().path());
debug!("REST call: {} {}", req.method(), req.uri().path());
handlers.next().unwrap().call(req, handlers)
}
}

View file

@ -12,7 +12,7 @@ lmdb-zero = "0.4.4"
failure = "0.1"
failure_derive = "0.1"
croaring = "0.3"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
serde = "1"
serde_derive = "1"
chrono = "0.4.4"

View file

@ -18,8 +18,9 @@
use std::collections::HashMap;
use std::fs::File;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::time::{Duration, Instant};
use util::RwLock;
use lmdb;
use lru_cache::LruCache;
@ -37,7 +38,6 @@ use store;
use txhashset;
use types::{ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::secp::pedersen::{Commitment, RangeProof};
use util::LOGGER;
/// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200;
@ -75,7 +75,7 @@ impl OrphanBlockPool {
}
fn len(&self) -> usize {
let orphans = self.orphans.read().unwrap();
let orphans = self.orphans.read();
orphans.len()
}
@ -84,8 +84,8 @@ impl OrphanBlockPool {
}
fn add(&self, orphan: Orphan) {
let mut orphans = self.orphans.write().unwrap();
let mut height_idx = self.height_idx.write().unwrap();
let mut orphans = self.orphans.write();
let mut height_idx = self.height_idx.write();
{
let height_hashes = height_idx
.entry(orphan.block.header.height)
@ -125,15 +125,15 @@ impl OrphanBlockPool {
/// Get an orphan from the pool indexed by the hash of its parent, removing
/// it at the same time, preventing clone
fn remove_by_height(&self, height: &u64) -> Option<Vec<Orphan>> {
let mut orphans = self.orphans.write().unwrap();
let mut height_idx = self.height_idx.write().unwrap();
let mut orphans = self.orphans.write();
let mut height_idx = self.height_idx.write();
height_idx
.remove(height)
.map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
}
pub fn contains(&self, hash: &Hash) -> bool {
let orphans = self.orphans.read().unwrap();
let orphans = self.orphans.read();
orphans.contains_key(hash)
}
}
@ -183,7 +183,6 @@ impl Chain {
let head = store.head()?;
debug!(
LOGGER,
"Chain init: {} @ {} [{}]",
head.total_difficulty.to_num(),
head.height,
@ -221,7 +220,7 @@ impl Chain {
fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
let maybe_new_head: Result<Option<Tip>, Error>;
{
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
@ -235,7 +234,7 @@ impl Chain {
let add_to_hash_cache = |hash: Hash| {
// only add to hash cache below if block is definitively accepted
// or rejected
let mut cache = self.block_hashes_cache.write().unwrap();
let mut cache = self.block_hashes_cache.write();
cache.insert(hash, true);
};
@ -260,7 +259,6 @@ impl Chain {
&self.orphans.add(orphan);
debug!(
LOGGER,
"process_block: orphan: {:?}, # orphans {}{}",
block_hash,
self.orphans.len(),
@ -274,7 +272,6 @@ impl Chain {
}
ErrorKind::Unfit(ref msg) => {
debug!(
LOGGER,
"Block {} at {} is unfit at this time: {}",
b.hash(),
b.header.height,
@ -284,7 +281,6 @@ impl Chain {
}
_ => {
info!(
LOGGER,
"Rejected block {} at {}: {:?}",
b.hash(),
b.header.height,
@ -299,7 +295,7 @@ impl Chain {
/// Process a block header received during "header first" propagation.
pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
pipe::process_block_header(bh, &mut ctx)?;
@ -315,7 +311,7 @@ impl Chain {
headers: &Vec<BlockHeader>,
opts: Options,
) -> Result<(), Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let batch = self.store.batch()?;
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
@ -359,7 +355,6 @@ impl Chain {
// Is there an orphan in our orphans that we can now process?
loop {
trace!(
LOGGER,
"check_orphans: at {}, # orphans {}",
height,
self.orphans.len(),
@ -372,7 +367,6 @@ impl Chain {
let orphans_len = orphans.len();
for (i, orphan) in orphans.into_iter().enumerate() {
debug!(
LOGGER,
"check_orphans: get block {} at {}{}",
orphan.block.hash(),
height,
@ -401,7 +395,6 @@ impl Chain {
if initial_height != height {
debug!(
LOGGER,
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
height - initial_height,
initial_height,
@ -417,7 +410,7 @@ impl Chain {
/// current chain state, specifically the current winning (valid, most
/// work) fork.
pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let res = txhashset.is_unspent(output_ref);
match res {
Err(e) => Err(e),
@ -427,7 +420,7 @@ impl Chain {
/// Validate the tx against the current UTXO set.
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
let txhashset = self.txhashset.read().unwrap();
let txhashset = self.txhashset.read();
txhashset::utxo_view(&txhashset, |utxo| {
utxo.validate_tx(tx)?;
Ok(())
@ -443,7 +436,7 @@ impl Chain {
/// that has not yet sufficiently matured.
pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
let height = self.next_block_height()?;
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.verify_coinbase_maturity(&tx.inputs(), height)?;
Ok(())
@ -470,7 +463,7 @@ impl Chain {
return Ok(());
}
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
// Now create an extension from the txhashset and validate against the
// latest block header. Rewind the extension to the specified header to
@ -485,7 +478,7 @@ impl Chain {
/// Sets the txhashset roots on a brand new block by applying the block on
/// the current txhashset state.
pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut txhashset, |extension| {
if is_fork {
@ -526,7 +519,7 @@ impl Chain {
output: &OutputIdentifier,
block_header: &BlockHeader,
) -> Result<MerkleProof, Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let merkle_proof = txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&block_header)?;
@ -539,13 +532,13 @@ impl Chain {
/// Return a merkle proof valid for the current output pmmr state at the
/// given pos
pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, String> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset.merkle_proof(commit)
}
/// Returns current txhashset roots
pub fn get_txhashset_roots(&self) -> TxHashSetRoots {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset.roots()
}
@ -560,7 +553,7 @@ impl Chain {
// to rewind after receiving the txhashset zip.
let header = self.get_block_header(&h)?;
{
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&header)?;
extension.snapshot()?;
@ -588,7 +581,6 @@ impl Chain {
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!(
LOGGER,
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
);
@ -605,8 +597,8 @@ impl Chain {
})?;
debug!(
LOGGER,
"chain: validate_kernel_history: validated kernel root on {} headers", count,
"chain: validate_kernel_history: validated kernel root on {} headers",
count,
);
Ok(())
@ -617,7 +609,7 @@ impl Chain {
/// have an MMR we can safely rewind based on the headers received from a peer.
/// TODO - think about how to optimize this.
pub fn rebuild_sync_mmr(&self, head: &Tip) -> Result<(), Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let mut batch = self.store.batch()?;
txhashset::sync_extending(&mut txhashset, &mut batch, |extension| {
extension.rebuild(head, &self.genesis)?;
@ -681,10 +673,7 @@ impl Chain {
self.validate_kernel_history(&header, &txhashset)?;
// all good, prepare a new batch and update all the required records
debug!(
LOGGER,
"chain: txhashset_write: rewinding a 2nd time (writeable)"
);
debug!("chain: txhashset_write: rewinding a 2nd time (writeable)");
let mut batch = self.store.batch()?;
@ -708,10 +697,7 @@ impl Chain {
Ok(())
})?;
debug!(
LOGGER,
"chain: txhashset_write: finished validating and rebuilding"
);
debug!("chain: txhashset_write: finished validating and rebuilding");
status.on_save();
@ -726,21 +712,15 @@ impl Chain {
// Commit all the changes to the db.
batch.commit()?;
debug!(
LOGGER,
"chain: txhashset_write: finished committing the batch (head etc.)"
);
debug!("chain: txhashset_write: finished committing the batch (head etc.)");
// Replace the chain txhashset with the newly built one.
{
let mut txhashset_ref = self.txhashset.write().unwrap();
let mut txhashset_ref = self.txhashset.write();
*txhashset_ref = txhashset;
}
debug!(
LOGGER,
"chain: txhashset_write: replaced our txhashset with the new one"
);
debug!("chain: txhashset_write: replaced our txhashset with the new one");
// Check for any orphan blocks and process them based on the new chain state.
self.check_orphans(header.height + 1);
@ -749,33 +729,11 @@ impl Chain {
Ok(())
}
/// Triggers chain compaction, cleaning up some unnecessary historical
/// information. We introduce a chain depth called horizon, which is
/// typically in the range of a couple days. Before that horizon, this
/// method will:
///
/// * compact the MMRs data files and flushing the corresponding remove logs
/// * delete old records from the k/v store (older blocks, indexes, etc.)
///
/// This operation can be resource intensive and takes some time to execute.
/// Meanwhile, the chain will not be able to accept new blocks. It should
/// therefore be called judiciously.
pub fn compact(&self) -> Result<(), Error> {
if self.archive_mode {
debug!(
LOGGER,
"Blockchain compaction disabled, node running in archive mode."
);
return Ok(());
}
debug!(LOGGER, "Starting blockchain compaction.");
// Compact the txhashset via the extension.
fn compact_txhashset(&self) -> Result<(), Error> {
debug!("Starting blockchain compaction.");
{
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset.compact()?;
// print out useful debug info after compaction
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.dump_output_pmmr();
Ok(())
@ -784,23 +742,33 @@ impl Chain {
// Now check we can still successfully validate the chain state after
// compacting, shouldn't be necessary once all of this is well-oiled
debug!(LOGGER, "Validating state after compaction.");
debug!("Validating state after compaction.");
self.validate(true)?;
Ok(())
}
// we need to be careful here in testing as 20 blocks is not that long
// in wall clock time
let horizon = global::cut_through_horizon() as u64;
let head = self.head()?;
if head.height <= horizon {
/// Cleanup old blocks from the db.
/// Determine the cutoff height from the horizon and the current block height.
/// *Only* runs if we are not in archive mode.
fn compact_blocks_db(&self) -> Result<(), Error> {
if self.archive_mode {
return Ok(());
}
let horizon = global::cut_through_horizon() as u64;
let head = self.head()?;
let cutoff = head.height.saturating_sub(horizon);
debug!(
LOGGER,
"Compaction remove blocks older than {}.",
head.height - horizon
"chain: compact_blocks_db: head height: {}, horizon: {}, cutoff: {}",
head.height, horizon, cutoff,
);
if cutoff == 0 {
return Ok(());
}
let mut count = 0;
let batch = self.store.batch()?;
let mut current = batch.get_header_by_height(head.height - horizon - 1)?;
@ -830,25 +798,40 @@ impl Chain {
}
}
batch.commit()?;
debug!(LOGGER, "Compaction removed {} blocks, done.", count);
debug!("chain: compact_blocks_db: removed {} blocks.", count);
Ok(())
}
/// Triggers chain compaction.
///
/// * compacts the txhashset based on current prune_list
/// * removes historical blocks and associated data from the db (unless archive mode)
///
pub fn compact(&self) -> Result<(), Error> {
self.compact_txhashset()?;
if !self.archive_mode {
self.compact_blocks_db()?;
}
Ok(())
}
/// returns the last n nodes inserted into the output sum tree
pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset.last_n_output(distance)
}
/// as above, for rangeproofs
pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset.last_n_rangeproof(distance)
}
/// as above, for kernels
pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
txhashset.last_n_kernel(distance)
}
@ -858,7 +841,7 @@ impl Chain {
start_index: u64,
max: u64,
) -> Result<(u64, u64, Vec<Output>), Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let max_index = txhashset.highest_output_insertion_index();
let outputs = txhashset.outputs_by_insertion_index(start_index, max);
let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
@ -945,7 +928,7 @@ impl Chain {
&self,
output_ref: &OutputIdentifier,
) -> Result<BlockHeader, Error> {
let mut txhashset = self.txhashset.write().unwrap();
let mut txhashset = self.txhashset.write();
let (_, pos) = txhashset.is_unspent(output_ref)?;
let mut min = 1;
let mut max = {
@ -1051,7 +1034,6 @@ fn setup_head(
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
{
debug!(
LOGGER,
"chain: init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
@ -1072,7 +1054,6 @@ fn setup_head(
}
debug!(
LOGGER,
"chain: init: rewinding and validating before we start... {} at {}",
header.hash(),
header.height,
@ -1109,7 +1090,7 @@ fn setup_head(
// Save the block_sums to the db for use later.
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash());
info!("chain: init: saved genesis: {:?}", genesis.hash());
}
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
};
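
One detail worth noting from the compact_blocks_db change above: the block-removal cutoff now uses saturating subtraction, so a chain that is not yet deeper than the horizon yields a cutoff of 0 and removal is simply skipped. A tiny standalone illustration of the arithmetic (hypothetical helper, not part of the diff):

fn blocks_db_cutoff(head_height: u64, horizon: u64) -> u64 {
    // e.g. head 10_000, horizon 1_000 -> cutoff 9_000 (blocks below it are removed)
    // e.g. head 500,    horizon 1_000 -> cutoff 0     (nothing to remove yet)
    head_height.saturating_sub(horizon)
}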

View file

@ -30,7 +30,7 @@ extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate failure;
#[macro_use]

View file

@ -14,7 +14,8 @@
//! Implementation of the chain block acceptance (or refusal) pipeline.
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chrono::prelude::Utc;
use chrono::Duration;
@ -34,7 +35,6 @@ use grin_store;
use store;
use txhashset;
use types::{Options, Tip};
use util::LOGGER;
/// Contextual information required to process a new block and either reject or
/// accept it.
@ -70,7 +70,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
// spend resources reading the full block when its header is invalid
debug!(
LOGGER,
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
b.hash(),
b.header.height,
@ -167,7 +166,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
})?;
trace!(
LOGGER,
"pipe: process_block: {} at {} is valid, save and append.",
b.hash(),
b.header.height,
@ -189,7 +187,6 @@ pub fn sync_block_headers(
) -> Result<Option<Tip>, Error> {
if let Some(header) = headers.first() {
debug!(
LOGGER,
"pipe: sync_block_headers: {} headers from {} at {}",
headers.len(),
header.hash(),
@ -250,7 +247,6 @@ pub fn sync_block_headers(
/// it.
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
debug!(
LOGGER,
"pipe: process_block_header: {} at {}",
header.hash(),
header.height,
@ -288,7 +284,7 @@ fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(),
/// Keeps duplicates from the network in check.
/// Checks against the cache of recently processed block hashes.
fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
let mut cache = ctx.block_hashes_cache.write().unwrap();
let mut cache = ctx.block_hashes_cache.write();
if cache.contains_key(&header.hash()) {
return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
}
@ -355,8 +351,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
// check version, enforces scheduled hard fork
if !consensus::valid_header_version(header.height, header.version) {
error!(
LOGGER,
"Invalid block header version received ({}), maybe update Grin?", header.version
"Invalid block header version received ({}), maybe update Grin?",
header.version
);
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
}
@ -377,8 +373,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
let edge_bits = header.pow.edge_bits();
if !(ctx.pow_verifier)(header, edge_bits).is_ok() {
error!(
LOGGER,
"pipe: error validating header with cuckoo edge_bits {}", edge_bits
"pipe: error validating header with cuckoo edge_bits {}",
edge_bits
);
return Err(ErrorKind::InvalidPow.into());
}
@ -433,7 +429,6 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
if target_difficulty != next_header_info.difficulty {
info!(
LOGGER,
"validate_header: header target difficulty {} != {}",
target_difficulty.to_num(),
next_header_info.difficulty.to_num()
@ -553,8 +548,8 @@ fn update_head(b: &Block, ctx: &BlockContext) -> Result<Option<Tip>, Error> {
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
debug!(
LOGGER,
"pipe: head updated to {} at {}", tip.last_block_h, tip.height
"pipe: head updated to {} at {}",
tip.last_block_h, tip.height
);
Ok(Some(tip))
@ -574,7 +569,7 @@ fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Er
batch
.save_sync_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
debug!(LOGGER, "sync head {} @ {}", bh.hash(), bh.height);
debug!("sync head {} @ {}", bh.hash(), bh.height);
Ok(())
}
@ -588,8 +583,8 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
debug!(
LOGGER,
"pipe: header_head updated to {} at {}", tip.last_block_h, tip.height
"pipe: header_head updated to {} at {}",
tip.last_block_h, tip.height
);
Ok(Some(tip))
@ -621,7 +616,6 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
let forked_header = ext.batch.get_block_header(&current)?;
trace!(
LOGGER,
"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
forked_header.height,
forked_header.hash(),
@ -632,11 +626,7 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
// Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?;
trace!(
LOGGER,
"rewind_and_apply_fork: blocks on fork: {:?}",
fork_hashes,
);
trace!("rewind_and_apply_fork: blocks on fork: {:?}", fork_hashes,);
// Now re-apply all blocks on this fork.
for (_, h) in fork_hashes {

View file

@ -14,7 +14,8 @@
//! Implements storage primitives required by the chain
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use croaring::Bitmap;
use lmdb;
@ -96,7 +97,7 @@ impl ChainStore {
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
{
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
let mut block_sums_cache = self.block_sums_cache.write();
// cache hit - return the value from the cache
if let Some(block_sums) = block_sums_cache.get_mut(h) {
@ -112,7 +113,7 @@ impl ChainStore {
// cache miss - so adding to the cache for next time
if let Ok(block_sums) = block_sums {
{
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
let mut block_sums_cache = self.block_sums_cache.write();
block_sums_cache.insert(*h, block_sums.clone());
}
Ok(block_sums)
@ -123,7 +124,7 @@ impl ChainStore {
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
{
let mut header_cache = self.header_cache.write().unwrap();
let mut header_cache = self.header_cache.write();
// cache hit - return the value from the cache
if let Some(header) = header_cache.get_mut(h) {
@ -140,7 +141,7 @@ impl ChainStore {
// cache miss - so adding to the cache for next time
if let Ok(header) = header {
{
let mut header_cache = self.header_cache.write().unwrap();
let mut header_cache = self.header_cache.write();
header_cache.insert(*h, header.clone());
}
Ok(header)
@ -310,7 +311,7 @@ impl<'a> Batch<'a> {
let hash = header.hash();
{
let mut header_cache = self.header_cache.write().unwrap();
let mut header_cache = self.header_cache.write();
header_cache.insert(hash, header.clone());
}
@ -350,7 +351,7 @@ impl<'a> Batch<'a> {
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
{
let mut header_cache = self.header_cache.write().unwrap();
let mut header_cache = self.header_cache.write();
// cache hit - return the value from the cache
if let Some(header) = header_cache.get_mut(h) {
@ -367,7 +368,7 @@ impl<'a> Batch<'a> {
// cache miss - so adding to the cache for next time
if let Ok(header) = header {
{
let mut header_cache = self.header_cache.write().unwrap();
let mut header_cache = self.header_cache.write();
header_cache.insert(*h, header.clone());
}
Ok(header)
@ -390,7 +391,7 @@ impl<'a> Batch<'a> {
pub fn save_block_sums(&self, h: &Hash, sums: &BlockSums) -> Result<(), Error> {
{
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
let mut block_sums_cache = self.block_sums_cache.write();
block_sums_cache.insert(*h, sums.clone());
}
@ -400,7 +401,7 @@ impl<'a> Batch<'a> {
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
{
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
let mut block_sums_cache = self.block_sums_cache.write();
// cache hit - return the value from the cache
if let Some(block_sums) = block_sums_cache.get_mut(h) {
@ -416,7 +417,7 @@ impl<'a> Batch<'a> {
// cache miss - so adding to the cache for next time
if let Ok(block_sums) = block_sums {
{
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
let mut block_sums_cache = self.block_sums_cache.write();
block_sums_cache.insert(*h, block_sums.clone());
}
Ok(block_sums)
@ -511,7 +512,7 @@ impl<'a> Batch<'a> {
self.save_block_input_bitmap(&block.hash(), &bitmap)?;
// Finally cache it locally for use later.
let mut cache = self.block_input_bitmap_cache.write().unwrap();
let mut cache = self.block_input_bitmap_cache.write();
cache.insert(block.hash(), bitmap.serialize());
Ok(bitmap)
@ -519,7 +520,7 @@ impl<'a> Batch<'a> {
pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
{
let mut cache = self.block_input_bitmap_cache.write().unwrap();
let mut cache = self.block_input_bitmap_cache.write();
// cache hit - return the value from the cache
if let Some(bytes) = cache.get_mut(bh) {

View file

@ -40,7 +40,7 @@ use grin_store::types::prune_noop;
use store::{Batch, ChainStore};
use txhashset::{RewindableKernelView, UTXOView};
use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::{file, secp_static, zip, LOGGER};
use util::{file, secp_static, zip};
const HEADERHASHSET_SUBDIR: &'static str = "header";
const TXHASHSET_SUBDIR: &'static str = "txhashset";
@ -328,7 +328,7 @@ where
// we explicitly rewind the extension.
let header = batch.head_header()?;
trace!(LOGGER, "Starting new txhashset (readonly) extension.");
trace!("Starting new txhashset (readonly) extension.");
let res = {
let mut extension = Extension::new(trees, &batch, header);
@ -340,14 +340,14 @@ where
inner(&mut extension)
};
trace!(LOGGER, "Rollbacking txhashset (readonly) extension.");
trace!("Rollbacking txhashset (readonly) extension.");
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard();
trace!(LOGGER, "TxHashSet (readonly) extension done.");
trace!("TxHashSet (readonly) extension done.");
res
}
@ -423,7 +423,7 @@ where
// index saving can be undone
let child_batch = batch.child()?;
{
trace!(LOGGER, "Starting new txhashset extension.");
trace!("Starting new txhashset extension.");
// TODO - header_mmr may be out ahead via the header_head
// TODO - do we need to handle this via an explicit rewind on the header_mmr?
@ -436,10 +436,7 @@ where
match res {
Err(e) => {
debug!(
LOGGER,
"Error returned, discarding txhashset extension: {}", e
);
debug!("Error returned, discarding txhashset extension: {}", e);
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
@ -448,13 +445,13 @@ where
}
Ok(r) => {
if rollback {
trace!(LOGGER, "Rollbacking txhashset extension. sizes {:?}", sizes);
trace!("Rollbacking txhashset extension. sizes {:?}", sizes);
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard();
} else {
trace!(LOGGER, "Committing txhashset extension. sizes {:?}", sizes);
trace!("Committing txhashset extension. sizes {:?}", sizes);
child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?;
trees.output_pmmr_h.backend.sync()?;
@ -466,7 +463,7 @@ where
trees.kernel_pmmr_h.last_pos = sizes.3;
}
trace!(LOGGER, "TxHashSet extension done.");
trace!("TxHashSet extension done.");
Ok(r)
}
}
@ -497,7 +494,7 @@ where
// index saving can be undone
let child_batch = batch.child()?;
{
trace!(LOGGER, "Starting new txhashset sync_head extension.");
trace!("Starting new txhashset sync_head extension.");
let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
@ -510,31 +507,23 @@ where
match res {
Err(e) => {
debug!(
LOGGER,
"Error returned, discarding txhashset sync_head extension: {}", e
"Error returned, discarding txhashset sync_head extension: {}",
e
);
trees.sync_pmmr_h.backend.discard();
Err(e)
}
Ok(r) => {
if rollback {
trace!(
LOGGER,
"Rollbacking txhashset sync_head extension. size {:?}",
size
);
trace!("Rollbacking txhashset sync_head extension. size {:?}", size);
trees.sync_pmmr_h.backend.discard();
} else {
trace!(
LOGGER,
"Committing txhashset sync_head extension. size {:?}",
size
);
trace!("Committing txhashset sync_head extension. size {:?}", size);
child_batch.commit()?;
trees.sync_pmmr_h.backend.sync()?;
trees.sync_pmmr_h.last_pos = size;
}
trace!(LOGGER, "TxHashSet sync_head extension done.");
trace!("TxHashSet sync_head extension done.");
Ok(r)
}
}
@ -564,7 +553,7 @@ where
// index saving can be undone
let child_batch = batch.child()?;
{
trace!(LOGGER, "Starting new txhashset header extension.");
trace!("Starting new txhashset header extension.");
let pmmr = DBPMMR::at(
&mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos,
@ -579,31 +568,23 @@ where
match res {
Err(e) => {
debug!(
LOGGER,
"Error returned, discarding txhashset header extension: {}", e
"Error returned, discarding txhashset header extension: {}",
e
);
trees.header_pmmr_h.backend.discard();
Err(e)
}
Ok(r) => {
if rollback {
trace!(
LOGGER,
"Rollbacking txhashset header extension. size {:?}",
size
);
trace!("Rollbacking txhashset header extension. size {:?}", size);
trees.header_pmmr_h.backend.discard();
} else {
trace!(
LOGGER,
"Committing txhashset header extension. size {:?}",
size
);
trace!("Committing txhashset header extension. size {:?}", size);
child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?;
trees.header_pmmr_h.last_pos = size;
}
trace!(LOGGER, "TxHashSet header extension done.");
trace!("TxHashSet header extension done.");
Ok(r)
}
}
@ -643,9 +624,7 @@ impl<'a> HeaderExtension<'a> {
/// This may be either the header MMR or the sync MMR depending on the
/// extension.
pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
self.pmmr
.push(header.clone())
.map_err(&ErrorKind::TxHashSetErr)?;
self.pmmr.push(&header).map_err(&ErrorKind::TxHashSetErr)?;
self.header = header.clone();
Ok(())
}
@ -654,7 +633,6 @@ impl<'a> HeaderExtension<'a> {
/// Note the close relationship between header height and insertion index.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!(
LOGGER,
"Rewind header extension to {} at {}",
header.hash(),
header.height
@ -675,7 +653,7 @@ impl<'a> HeaderExtension<'a> {
/// Used when rebuilding the header MMR by reapplying all headers
/// including the genesis block header.
pub fn truncate(&mut self) -> Result<(), Error> {
debug!(LOGGER, "Truncating header extension.");
debug!("Truncating header extension.");
self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?;
Ok(())
}
@ -689,7 +667,6 @@ impl<'a> HeaderExtension<'a> {
/// Requires *all* header hashes to be iterated over in ascending order.
pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> {
debug!(
LOGGER,
"About to rebuild header extension from {:?} to {:?}.",
genesis.hash(),
head.last_block_h,
@ -712,7 +689,6 @@ impl<'a> HeaderExtension<'a> {
if header_hashes.len() > 0 {
debug!(
LOGGER,
"Re-applying {} headers to extension, from {:?} to {:?}.",
header_hashes.len(),
header_hashes.first().unwrap(),
@ -983,7 +959,7 @@ impl<'a> Extension<'a> {
fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
self.header_pmmr
.push(header.clone())
.push(&header)
.map_err(&ErrorKind::TxHashSetErr)?;
Ok(())
}
@ -995,10 +971,7 @@ impl<'a> Extension<'a> {
/// We need the hash of each sibling pos from the pos up to the peak
/// including the sibling leaf node which may have been removed.
pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
debug!(
LOGGER,
"txhashset: merkle_proof: output: {:?}", output.commit,
);
debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
// then calculate the Merkle Proof based on the known pos
let pos = self.batch.get_output_pos(&output.commit)?;
let merkle_proof = self
@ -1027,12 +1000,7 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided block, rewinding to the last output pos
/// and last kernel pos of that block.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!(
LOGGER,
"Rewind to header {} at {}",
header.hash(),
header.height,
);
debug!("Rewind to header {} at {}", header.hash(), header.height,);
// We need to build bitmaps of added and removed output positions
// so we can correctly rewind all operations applied to the output MMR
@ -1067,11 +1035,8 @@ impl<'a> Extension<'a> {
rewind_rm_pos: &Bitmap,
) -> Result<(), Error> {
debug!(
LOGGER,
"txhashset: rewind_to_pos: header {}, output {}, kernel {}",
header_pos,
output_pos,
kernel_pos,
header_pos, output_pos, kernel_pos,
);
self.header_pmmr
@ -1191,7 +1156,6 @@ impl<'a> Extension<'a> {
}
debug!(
LOGGER,
"txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s",
self.header_pmmr.unpruned_size(),
self.output_pmmr.unpruned_size(),
@ -1270,22 +1234,22 @@ impl<'a> Extension<'a> {
/// Dumps the output MMR.
/// We use this after compacting for visual confirmation that it worked.
pub fn dump_output_pmmr(&self) {
debug!(LOGGER, "-- outputs --");
debug!("-- outputs --");
self.output_pmmr.dump_from_file(false);
debug!(LOGGER, "--");
debug!("--");
self.output_pmmr.dump_stats();
debug!(LOGGER, "-- end of outputs --");
debug!("-- end of outputs --");
}
/// Dumps the state of the 3 sum trees to stdout for debugging. Short
/// version only prints the Output tree.
pub fn dump(&self, short: bool) {
debug!(LOGGER, "-- outputs --");
debug!("-- outputs --");
self.output_pmmr.dump(short);
if !short {
debug!(LOGGER, "-- range proofs --");
debug!("-- range proofs --");
self.rproof_pmmr.dump(short);
debug!(LOGGER, "-- kernels --");
debug!("-- kernels --");
self.kernel_pmmr.dump(short);
}
}
@ -1318,7 +1282,6 @@ impl<'a> Extension<'a> {
}
debug!(
LOGGER,
"txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
kern_count,
self.kernel_pmmr.unpruned_size(),
@ -1353,8 +1316,8 @@ impl<'a> Extension<'a> {
commits.clear();
proofs.clear();
debug!(
LOGGER,
"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count,
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}
}
@ -1370,13 +1333,12 @@ impl<'a> Extension<'a> {
commits.clear();
proofs.clear();
debug!(
LOGGER,
"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count,
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}
debug!(
LOGGER,
"txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
proof_count,
self.rproof_pmmr.unpruned_size(),
@ -1452,10 +1414,7 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
// Removing unexpected directories if needed
if !dir_difference.is_empty() {
debug!(
LOGGER,
"Unexpected folder(s) found in txhashset folder, removing."
);
debug!("Unexpected folder(s) found in txhashset folder, removing.");
for diff in dir_difference {
let diff_path = txhashset_path.join(diff);
file::delete(diff_path)?;
@ -1492,7 +1451,6 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
.collect();
if !difference.is_empty() {
debug!(
LOGGER,
"Unexpected file(s) found in txhashset subfolder {:?}, removing.",
&subdirectory_path
);
@ -1520,10 +1478,8 @@ pub fn input_pos_to_rewind(
if head_header.height < block_header.height {
debug!(
LOGGER,
"input_pos_to_rewind: {} < {}, nothing to rewind",
head_header.height,
block_header.height
head_header.height, block_header.height
);
return Ok(Bitmap::create());
}

View file

@ -24,7 +24,8 @@ extern crate rand;
use chrono::Duration;
use std::fs;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chain::types::NoopAdapter;
use chain::Chain;

View file

@ -23,7 +23,8 @@ extern crate rand;
use chrono::Duration;
use std::fs;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chain::types::NoopAdapter;
use chain::Chain;

View file

@ -18,12 +18,14 @@ extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_keychain as keychain;
extern crate grin_store as store;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate rand;
use chrono::Duration;
use std::fs;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chain::types::NoopAdapter;
use chain::ErrorKind;

View file

@ -213,7 +213,6 @@ fn comments() -> HashMap<String, String> {
#peer_min_preferred_count = 8
# 7 = Bit flags for FULL_NODE
# 6 = Bit flags for FAST_SYNC_NODE
#This structure needs to be changed internally, to make it more configurable
".to_string(),
);
@ -367,7 +366,7 @@ fn comments() -> HashMap<String, String> {
retval.insert(
"stdout_log_level".to_string(),
"
#log level for stdout: Critical, Error, Warning, Info, Debug, Trace
#log level for stdout: Error, Warning, Info, Debug, Trace
".to_string(),
);
@ -381,7 +380,7 @@ fn comments() -> HashMap<String, String> {
retval.insert(
"file_log_level".to_string(),
"
#log level for file: Critical, Error, Warning, Info, Debug, Trace
#log level for file: Error, Warning, Info, Debug, Trace
".to_string(),
);
@ -399,6 +398,14 @@ fn comments() -> HashMap<String, String> {
".to_string(),
);
retval.insert(
"log_max_size".to_string(),
"
#maximum log file size in bytes before performing log rotation
#comment it to disable log rotation
".to_string(),
);
retval
}

View file

@ -107,7 +107,7 @@ fn check_api_secret(api_secret_path: &PathBuf) -> Result<(), ConfigError> {
}
/// Check that the api secret file exists and is valid
pub fn check_api_secret_file() -> Result<(), ConfigError> {
fn check_api_secret_file() -> Result<(), ConfigError> {
let grin_path = get_grin_path()?;
let mut api_secret_path = grin_path.clone();
api_secret_path.push(API_SECRET_FILE_NAME);
@ -233,8 +233,7 @@ impl GlobalConfig {
file.read_to_string(&mut contents)?;
let decoded: Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents);
match decoded {
Ok(mut gc) => {
gc.server.validation_check();
Ok(gc) => {
self.members = Some(gc);
return Ok(self);
}

View file

@ -35,5 +35,5 @@ mod comments;
pub mod config;
pub mod types;
pub use config::{check_api_secret_file, initial_setup_server, initial_setup_wallet};
pub use config::{initial_setup_server, initial_setup_wallet};
pub use types::{ConfigError, ConfigMembers, GlobalConfig, GlobalWalletConfig};

View file

@ -20,7 +20,7 @@ rand = "0.5"
serde = "1"
serde_derive = "1"
siphasher = "0.2"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
chrono = "0.4.4"
grin_keychain = { path = "../keychain" }

View file

@ -297,7 +297,7 @@ where
}
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
// Get the secondary count across the window, in pct (100 * 60 * 2nd_pow_fraction)
let snd_count = 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64;
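The signature change from `&Vec<HeaderInfo>` to `&[HeaderInfo]` is a common Rust idiom: a slice parameter accepts a borrowed `Vec`, an array, or a sub-slice without an extra allocation or conversion. A small standalone illustration with hypothetical types, not code from the diff:

```rust
// Taking &[T] instead of &Vec<T>: callers can pass vectors, arrays or slices.
fn count_secondary(data: &[bool]) -> usize {
    data.iter().filter(|is_secondary| **is_secondary).count()
}

fn main() {
    let window = vec![true, false, true];
    assert_eq!(count_secondary(&window), 2);        // &Vec<bool> coerces to &[bool]
    assert_eq!(count_secondary(&[false, true]), 1); // a plain slice also works
}
```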

View file

@ -20,7 +20,8 @@ use std::collections::HashSet;
use std::fmt;
use std::iter::FromIterator;
use std::mem;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use consensus::{self, reward, REWARD};
use core::committed::{self, Committed};
@ -35,7 +36,7 @@ use global;
use keychain::{self, BlindingFactor};
use pow::{Difficulty, Proof, ProofOfWork};
use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use util::{secp, static_secp_instance, LOGGER};
use util::{secp, static_secp_instance};
/// Errors thrown by Block validation
#[derive(Debug, Clone, Eq, PartialEq, Fail)]
@ -285,7 +286,7 @@ impl BlockHeader {
/// Total difficulty accumulated by the proof of work on this header
pub fn total_difficulty(&self) -> Difficulty {
self.pow.total_difficulty.clone()
self.pow.total_difficulty
}
/// The "overage" to use when verifying the kernel sums.
@ -361,10 +362,7 @@ impl Readable for Block {
body.validate_read(true)
.map_err(|_| ser::Error::CorruptedData)?;
Ok(Block {
header: header,
body: body,
})
Ok(Block { header, body })
}
}
@ -420,16 +418,40 @@ impl Block {
Ok(block)
}
/// Extract tx data from this block as a single aggregate tx.
pub fn aggregate_transaction(
&self,
prev_kernel_offset: BlindingFactor,
) -> Result<Option<Transaction>, Error> {
let inputs = self.inputs().iter().cloned().collect();
let outputs = self
.outputs()
.iter()
.filter(|x| !x.features.contains(OutputFeatures::COINBASE_OUTPUT))
.cloned()
.collect();
let kernels = self
.kernels()
.iter()
.filter(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL))
.cloned()
.collect::<Vec<_>>();
let tx = if kernels.is_empty() {
None
} else {
let tx = Transaction::new(inputs, outputs, kernels)
.with_offset(self.block_kernel_offset(prev_kernel_offset)?);
Some(tx)
};
Ok(tx)
}
/// Hydrate a block from a compact block.
/// Note: caller must validate the block themselves, we do not validate it
/// here.
pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Result<Block, Error> {
trace!(
LOGGER,
"block: hydrate_from: {}, {} txs",
cb.hash(),
txs.len(),
);
trace!("block: hydrate_from: {}, {} txs", cb.hash(), txs.len(),);
let header = cb.header.clone();
@ -469,7 +491,7 @@ impl Block {
/// Build a new empty block from a specified header
pub fn with_header(header: BlockHeader) -> Block {
Block {
header: header,
header,
..Default::default()
}
}
@ -596,6 +618,23 @@ impl Block {
Ok(())
}
fn block_kernel_offset(
&self,
prev_kernel_offset: BlindingFactor,
) -> Result<BlindingFactor, Error> {
let offset = if self.header.total_kernel_offset() == prev_kernel_offset {
// special case when the sum hasn't changed (typically an empty block),
// zero isn't a valid private key but it's a valid blinding factor
BlindingFactor::zero()
} else {
committed::sum_kernel_offsets(
vec![self.header.total_kernel_offset()],
vec![prev_kernel_offset],
)?
};
Ok(offset)
}
/// Validates all the elements in a block that can be checked without
/// additional data. Includes commitment sums and kernels, Merkle
/// trees, reward, etc.
@ -603,7 +642,7 @@ impl Block {
&self,
prev_kernel_offset: &BlindingFactor,
verifier: Arc<RwLock<VerifierCache>>,
) -> Result<(Commitment), Error> {
) -> Result<Commitment, Error> {
self.body.validate(true, verifier)?;
self.verify_kernel_lock_heights()?;
@ -611,19 +650,10 @@ impl Block {
// take the kernel offset for this block (block offset minus previous) and
// verify.body.outputs and kernel sums
let block_kernel_offset = if self.header.total_kernel_offset() == prev_kernel_offset.clone()
{
// special case when the sum hasn't changed (typically an empty block),
// zero isn't a valid private key but it's a valid blinding factor
BlindingFactor::zero()
} else {
committed::sum_kernel_offsets(
vec![self.header.total_kernel_offset()],
vec![prev_kernel_offset.clone()],
)?
};
let (_utxo_sum, kernel_sum) =
self.verify_kernel_sums(self.header.overage(), block_kernel_offset)?;
let (_utxo_sum, kernel_sum) = self.verify_kernel_sums(
self.header.overage(),
self.block_kernel_offset(*prev_kernel_offset)?,
)?;
Ok(kernel_sum)
}
@ -648,7 +678,7 @@ impl Block {
{
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let over_commit = secp.commit_value(reward(self.total_fees()))?;
let out_adjust_sum = secp.commit_sum(

View file

@ -53,8 +53,8 @@ impl Default for BlockSums {
fn default() -> BlockSums {
let zero_commit = secp_static::commit_to_zero_value();
BlockSums {
utxo_sum: zero_commit.clone(),
kernel_sum: zero_commit.clone(),
utxo_sum: zero_commit,
kernel_sum: zero_commit,
}
}
}

View file

@ -66,7 +66,7 @@ pub trait Committed {
// commit to zero built from the offset
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let mut commits = vec![kernel_sum];
if *offset != BlindingFactor::zero() {
let key = offset.secret_key(&secp)?;
@ -90,7 +90,7 @@ pub trait Committed {
if overage != 0 {
let over_commit = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
secp.commit_value(overage_abs).unwrap()
};
@ -144,7 +144,7 @@ pub fn sum_commits(
positive.retain(|x| *x != zero_commit);
negative.retain(|x| *x != zero_commit);
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
Ok(secp.commit_sum(positive, negative)?)
}
@ -156,7 +156,7 @@ pub fn sum_kernel_offsets(
negative: Vec<BlindingFactor>,
) -> Result<BlindingFactor, Error> {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let positive = to_secrets(positive, &secp);
let negative = to_secrets(negative, &secp);

View file

@ -86,7 +86,7 @@ impl MerkleProof {
pub fn from_hex(hex: &str) -> Result<MerkleProof, String> {
let bytes = util::from_hex(hex.to_string()).unwrap();
let res = ser::deserialize(&mut &bytes[..])
.map_err(|_| format!("failed to deserialize a Merkle Proof"))?;
.map_err(|_| "failed to deserialize a Merkle Proof".to_string())?;
Ok(res)
}
@ -102,7 +102,7 @@ impl MerkleProof {
// calculate the peaks once as these are based on overall MMR size
// (and will not change)
let peaks_pos = pmmr::peaks(self.mmr_size);
proof.verify_consume(root, element, node_pos, peaks_pos)
proof.verify_consume(root, element, node_pos, &peaks_pos)
}
/// Consumes the Merkle proof while verifying it.
@ -113,7 +113,7 @@ impl MerkleProof {
root: Hash,
element: &PMMRIndexHashable,
node_pos: u64,
peaks_pos: Vec<u64>,
peaks_pos: &[u64],
) -> Result<(), MerkleProofError> {
let node_hash = if node_pos > self.mmr_size {
element.hash_with_index(self.mmr_size)
@ -123,7 +123,7 @@ impl MerkleProof {
// handle special case of only a single entry in the MMR
// (no siblings to hash together)
if self.path.len() == 0 {
if self.path.is_empty() {
if root == node_hash {
return Ok(());
} else {

View file

@ -42,8 +42,8 @@ where
/// Build a new db backed MMR.
pub fn new(backend: &'a mut B) -> DBPMMR<T, B> {
DBPMMR {
backend,
last_pos: 0,
backend: backend,
_marker: marker::PhantomData,
}
}
@ -52,8 +52,8 @@ where
/// last_pos with the provided db backend.
pub fn at(backend: &'a mut B, last_pos: u64) -> DBPMMR<T, B> {
DBPMMR {
last_pos: last_pos,
backend: backend,
backend,
last_pos,
_marker: marker::PhantomData,
}
}
@ -98,7 +98,7 @@ where
/// Push a new element into the MMR. Computes new related peaks at
/// the same time if applicable.
pub fn push(&mut self, elmt: T) -> Result<u64, String> {
pub fn push(&mut self, elmt: &T) -> Result<u64, String> {
let elmt_pos = self.last_pos + 1;
let mut current_hash = elmt.hash_with_index(elmt_pos - 1);
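Changing `push` to take `&T` means callers such as `apply_header` above no longer need to clone the header just to insert it. A tiny hedged sketch of the borrowing pattern, using a hypothetical `Header` type rather than the real PMMR API:

```rust
#[derive(Clone)]
struct Header {
    height: u64,
}

// Push by reference: the function can hash/serialize the element without taking ownership.
fn push(elmt: &Header) -> u64 {
    elmt.height + 1 // stand-in for computing the new MMR position
}

fn main() {
    let header = Header { height: 41 };
    let pos = push(&header);       // previously: push(header.clone())
    assert_eq!(pos, 42);
    assert_eq!(header.height, 41); // the caller still owns the header
}
```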

View file

@ -22,7 +22,6 @@ use core::merkle_proof::MerkleProof;
use core::pmmr::{Backend, ReadonlyPMMR};
use core::BlockHeader;
use ser::{PMMRIndexHashable, PMMRable};
use util::LOGGER;
/// 64 bits all ones: 0b11111111...1
const ALL_ONES: u64 = u64::MAX;
@ -54,8 +53,8 @@ where
/// Build a new prunable Merkle Mountain Range using the provided backend.
pub fn new(backend: &'a mut B) -> PMMR<T, B> {
PMMR {
backend,
last_pos: 0,
backend: backend,
_marker: marker::PhantomData,
}
}
@ -64,8 +63,8 @@ where
/// last_pos with the provided backend.
pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> {
PMMR {
last_pos: last_pos,
backend: backend,
backend,
last_pos,
_marker: marker::PhantomData,
}
}
@ -91,7 +90,7 @@ where
let rhs = self.bag_the_rhs(peak_pos);
let mut res = peaks(self.last_pos)
.into_iter()
.filter(|x| x < &peak_pos)
.filter(|x| *x < peak_pos)
.filter_map(|x| self.backend.get_from_file(x))
.collect::<Vec<_>>();
res.reverse();
@ -108,7 +107,7 @@ where
pub fn bag_the_rhs(&self, peak_pos: u64) -> Option<Hash> {
let rhs = peaks(self.last_pos)
.into_iter()
.filter(|x| x > &peak_pos)
.filter(|x| *x > peak_pos)
.filter_map(|x| self.backend.get_from_file(x))
.collect::<Vec<_>>();
@ -137,7 +136,7 @@ where
/// Build a Merkle proof for the element at the given position.
pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> {
debug!(LOGGER, "merkle_proof {}, last_pos {}", pos, self.last_pos);
debug!("merkle_proof {}, last_pos {}", pos, self.last_pos);
// check this pos is actually a leaf in the MMR
if !is_leaf(pos) {
@ -146,7 +145,7 @@ where
// check we actually have a hash in the MMR at this pos
self.get_hash(pos)
.ok_or(format!("no element at pos {}", pos))?;
.ok_or_else(|| format!("no element at pos {}", pos))?;
let mmr_size = self.unpruned_size();
@ -384,14 +383,14 @@ where
None => hashes.push_str(&format!("{:>8} ", "??")),
}
}
trace!(LOGGER, "{}", idx);
trace!(LOGGER, "{}", hashes);
trace!("{}", idx);
trace!("{}", hashes);
}
}
/// Prints PMMR statistics to the logs, used for debugging.
pub fn dump_stats(&self) {
debug!(LOGGER, "pmmr: unpruned - {}", self.unpruned_size());
debug!("pmmr: unpruned - {}", self.unpruned_size());
self.backend.dump_stats();
}
@ -418,8 +417,8 @@ where
None => hashes.push_str(&format!("{:>8} ", " .")),
}
}
debug!(LOGGER, "{}", idx);
debug!(LOGGER, "{}", hashes);
debug!("{}", idx);
debug!("{}", hashes);
}
}
}
@ -511,7 +510,7 @@ pub fn peak_map_height(mut pos: u64) -> (u64, u64) {
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap = bitmap << 1;
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;

View file

@ -41,8 +41,8 @@ where
/// Build a new readonly PMMR.
pub fn new(backend: &'a B) -> ReadonlyPMMR<T, B> {
ReadonlyPMMR {
backend,
last_pos: 0,
backend: backend,
_marker: marker::PhantomData,
}
}
@ -51,8 +51,8 @@ where
/// last_pos with the provided backend.
pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR<T, B> {
ReadonlyPMMR {
last_pos: last_pos,
backend: backend,
backend,
last_pos,
_marker: marker::PhantomData,
}
}

View file

@ -43,8 +43,8 @@ where
/// Build a new readonly PMMR.
pub fn new(backend: &'a B) -> RewindablePMMR<T, B> {
RewindablePMMR {
backend,
last_pos: 0,
backend: backend,
_marker: marker::PhantomData,
}
}
@ -53,8 +53,8 @@ where
/// last_pos with the provided backend.
pub fn at(backend: &'a B, last_pos: u64) -> RewindablePMMR<T, B> {
RewindablePMMR {
last_pos: last_pos,
backend: backend,
backend,
last_pos,
_marker: marker::PhantomData,
}
}

View file

@ -17,8 +17,9 @@
use std::cmp::max;
use std::cmp::Ordering;
use std::collections::HashSet;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::{error, fmt};
use util::RwLock;
use consensus::{self, VerifySortOrder};
use core::hash::Hashed;
@ -176,7 +177,7 @@ impl Readable for TxKernel {
let features =
KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
Ok(TxKernel {
features: features,
features,
fee: reader.read_u64()?,
lock_height: reader.read_u64()?,
excess: Commitment::read(reader)?,
@ -197,7 +198,7 @@ impl TxKernel {
pub fn verify(&self) -> Result<(), secp::Error> {
let msg = Message::from_slice(&kernel_sig_msg(self.fee, self.lock_height))?;
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let sig = &self.excess_sig;
// Verify aggsig directly in libsecp
let pubkey = &self.excess.to_pubkey(&secp)?;
@ -229,13 +230,13 @@ impl TxKernel {
/// Builds a new tx kernel with the provided fee.
pub fn with_fee(self, fee: u64) -> TxKernel {
TxKernel { fee: fee, ..self }
TxKernel { fee, ..self }
}
/// Builds a new tx kernel with the provided lock_height.
pub fn with_lock_height(self, lock_height: u64) -> TxKernel {
TxKernel {
lock_height: lock_height,
lock_height,
..self
}
}
@ -355,9 +356,9 @@ impl TransactionBody {
verify_sorted: bool,
) -> Result<TransactionBody, Error> {
let body = TransactionBody {
inputs: inputs,
outputs: outputs,
kernels: kernels,
inputs,
outputs,
kernels,
};
if verify_sorted {
@ -435,7 +436,7 @@ impl TransactionBody {
/// Calculate transaction weight from transaction details
pub fn weight(input_len: usize, output_len: usize, kernel_len: usize) -> u32 {
let mut body_weight = -1 * (input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
let mut body_weight = -(input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
if body_weight < 1 {
body_weight = 1;
}
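A worked example of the weight formula in this hunk (negated input count, plus four per output, plus one per kernel, floored at 1). This only restates the arithmetic of the expression shown above; it is not code from the diff:

```rust
fn weight(input_len: usize, output_len: usize, kernel_len: usize) -> i32 {
    let w = -(input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
    if w < 1 {
        1
    } else {
        w
    }
}

fn main() {
    assert_eq!(weight(2, 1, 1), 3); // -2 + 4 + 1
    assert_eq!(weight(5, 1, 1), 1); // raw weight would be 0, floored at 1
}
```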
@ -553,12 +554,12 @@ impl TransactionBody {
// Find all the outputs that have not had their rangeproofs verified.
let outputs = {
let mut verifier = verifier.write().unwrap();
let mut verifier = verifier.write();
verifier.filter_rangeproof_unverified(&self.outputs)
};
// Now batch verify all those unverified rangeproofs
if outputs.len() > 0 {
if !outputs.is_empty() {
let mut commits = vec![];
let mut proofs = vec![];
for x in &outputs {
@ -570,7 +571,7 @@ impl TransactionBody {
// Find all the kernels that have not yet been verified.
let kernels = {
let mut verifier = verifier.write().unwrap();
let mut verifier = verifier.write();
verifier.filter_kernel_sig_unverified(&self.kernels)
};
@ -583,7 +584,7 @@ impl TransactionBody {
// Cache the successful verification results for the new outputs and kernels.
{
let mut verifier = verifier.write().unwrap();
let mut verifier = verifier.write();
verifier.add_rangeproof_verified(outputs);
verifier.add_kernel_sig_verified(kernels);
}
@ -686,10 +687,7 @@ impl Transaction {
/// Creates a new transaction using this transaction as a template
/// and with the specified offset.
pub fn with_offset(self, offset: BlindingFactor) -> Transaction {
Transaction {
offset: offset,
..self
}
Transaction { offset, ..self }
}
/// Builds a new transaction with the provided inputs added. Existing
@ -911,7 +909,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
// now compute the total kernel offset
let total_kernel_offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let mut positive_key = vec![mk_tx.offset]
.into_iter()
.filter(|x| *x != BlindingFactor::zero())
@ -1071,7 +1069,7 @@ impl Readable for Output {
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
Ok(Output {
features: features,
features,
commit: Commitment::read(reader)?,
proof: RangeProof::read(reader)?,
})
@ -1092,7 +1090,7 @@ impl Output {
/// Validates the range proof using the commitment
pub fn verify_proof(&self) -> Result<(), secp::Error> {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
match secp.verify_bullet_proof(self.commit, self.proof, None) {
Ok(_) => Ok(()),
Err(e) => Err(e),
@ -1105,7 +1103,7 @@ impl Output {
proofs: &Vec<RangeProof>,
) -> Result<(), secp::Error> {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
match secp.verify_bullet_proof_multi(commits.clone(), proofs.clone(), None) {
Ok(_) => Ok(()),
Err(e) => Err(e),
@ -1130,8 +1128,8 @@ impl OutputIdentifier {
/// Build a new output_identifier.
pub fn new(features: OutputFeatures, commit: &Commitment) -> OutputIdentifier {
OutputIdentifier {
features: features,
commit: commit.clone(),
features,
commit: *commit,
}
}
@ -1151,9 +1149,9 @@ impl OutputIdentifier {
/// Converts this identifier to a full output, provided a RangeProof
pub fn into_output(self, proof: RangeProof) -> Output {
Output {
proof,
features: self.features,
commit: self.commit,
proof: proof,
}
}
@ -1195,8 +1193,8 @@ impl Readable for OutputIdentifier {
let features =
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
Ok(OutputIdentifier {
features,
commit: Commitment::read(reader)?,
features: features,
})
}
}

View file

@ -19,7 +19,6 @@ use lru_cache::LruCache;
use core::hash::{Hash, Hashed};
use core::{Output, TxKernel};
use util::LOGGER;
/// Verifier cache for caching expensive verification results.
/// Specifically the following -
@ -28,10 +27,10 @@ use util::LOGGER;
pub trait VerifierCache: Sync + Send {
/// Takes a vec of tx kernels and returns those kernels
/// that have not yet been verified.
fn filter_kernel_sig_unverified(&mut self, kernels: &Vec<TxKernel>) -> Vec<TxKernel>;
fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel>;
/// Takes a vec of tx outputs and returns those outputs
/// that have not yet had their rangeproofs verified.
fn filter_rangeproof_unverified(&mut self, outputs: &Vec<Output>) -> Vec<Output>;
fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output>;
/// Adds a vec of tx kernels to the cache (used in conjunction with the the filter above).
fn add_kernel_sig_verified(&mut self, kernels: Vec<TxKernel>);
/// Adds a vec of outputs to the cache (used in conjunction with the the filter above).
@ -46,9 +45,6 @@ pub struct LruVerifierCache {
rangeproof_verification_cache: LruCache<Hash, bool>,
}
unsafe impl Sync for LruVerifierCache {}
unsafe impl Send for LruVerifierCache {}
impl LruVerifierCache {
/// TODO how big should these caches be?
/// They need to be *at least* large enough to cover a maxed out block.
@ -61,7 +57,7 @@ impl LruVerifierCache {
}
impl VerifierCache for LruVerifierCache {
fn filter_kernel_sig_unverified(&mut self, kernels: &Vec<TxKernel>) -> Vec<TxKernel> {
fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel> {
let res = kernels
.into_iter()
.filter(|x| {
@ -71,8 +67,7 @@ impl VerifierCache for LruVerifierCache {
.unwrap_or(&mut false)
}).cloned()
.collect::<Vec<_>>();
debug!(
LOGGER,
trace!(
"lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}",
kernels.len(),
res.len()
@ -80,7 +75,7 @@ impl VerifierCache for LruVerifierCache {
res
}
fn filter_rangeproof_unverified(&mut self, outputs: &Vec<Output>) -> Vec<Output> {
fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output> {
let res = outputs
.into_iter()
.filter(|x| {
@ -90,8 +85,7 @@ impl VerifierCache for LruVerifierCache {
.unwrap_or(&mut false)
}).cloned()
.collect::<Vec<_>>();
debug!(
LOGGER,
trace!(
"lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}",
outputs.len(),
res.len()

View file

@ -27,7 +27,7 @@ use pow::{self, CuckatooContext, EdgeType, PoWContext};
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
use std::sync::RwLock;
use util::RwLock;
/// Define these here, as they should be developer-set, not really tweakable
/// by users
@ -70,13 +70,21 @@ pub const TESTNET3_INITIAL_DIFFICULTY: u64 = 30000;
/// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such kind of defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Testnet 4 initial block difficulty
/// 1_000 times natural scale factor for cuckatoo29
pub const TESTNET4_INITIAL_DIFFICULTY: u64 = 1_000 * UNIT_DIFFICULTY;
/// Trigger compaction check on average every day for FAST_SYNC_NODE,
/// roll the dice on every block to decide,
/// all blocks lower than (BodyHead.height - CUT_THROUGH_HORIZON) will be removed.
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
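The reworded comment above describes a randomized, per-node compaction check that fires on average once per day's worth of blocks. A hypothetical sketch of such a "roll the dice on every block" check; the constant value and the `rand` usage are illustrative assumptions, not the actual node logic:

```rust
extern crate rand;

use rand::Rng;

// Illustrative only: one compaction check per DAY_HEIGHT blocks on average.
const COMPACTION_CHECK: u64 = 1_440;

fn should_compact() -> bool {
    // Each block, roll a die with 1-in-COMPACTION_CHECK odds.
    rand::thread_rng().gen_range(0, COMPACTION_CHECK) == 0
}

fn main() {
    println!("compact this block? {}", should_compact());
}
```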
/// Types of chain a server can run with, dictates the genesis block and
@ -126,7 +134,7 @@ lazy_static!{
/// Set the mining mode
pub fn set_mining_mode(mode: ChainTypes) {
let mut param_ref = CHAIN_TYPE.write().unwrap();
let mut param_ref = CHAIN_TYPE.write();
*param_ref = mode;
}
@ -150,7 +158,7 @@ pub fn pow_type() -> PoWContextTypes {
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
@ -163,7 +171,7 @@ pub fn min_edge_bits() -> u8 {
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
@ -174,7 +182,7 @@ pub fn base_edge_bits() -> u8 {
/// The proofsize
pub fn proofsize() -> usize {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
@ -184,7 +192,7 @@ pub fn proofsize() -> usize {
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
@ -194,7 +202,7 @@ pub fn coinbase_maturity() -> u64 {
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
@ -207,7 +215,7 @@ pub fn initial_block_difficulty() -> u64 {
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => TESTING_INITIAL_GRAPH_WEIGHT,
ChainTypes::UserTesting => TESTING_INITIAL_GRAPH_WEIGHT,
@ -221,7 +229,7 @@ pub fn initial_graph_weight() -> u32 {
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
ChainTypes::AutomatedTesting => TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => TESTING_CUT_THROUGH_HORIZON,
@ -231,19 +239,19 @@ pub fn cut_through_horizon() -> u32 {
/// Are we in automated testing mode?
pub fn is_automated_testing_mode() -> bool {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
ChainTypes::AutomatedTesting == *param_ref
}
/// Are we in user testing mode?
pub fn is_user_testing_mode() -> bool {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
ChainTypes::UserTesting == *param_ref
}
/// Are we in production mode (a live public network)?
pub fn is_production_mode() -> bool {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
ChainTypes::Testnet1 == *param_ref
|| ChainTypes::Testnet2 == *param_ref
|| ChainTypes::Testnet3 == *param_ref
@ -256,7 +264,7 @@ pub fn is_production_mode() -> bool {
/// as the genesis block POW solution turns out to be the same for every new
/// block chain at the moment
pub fn get_genesis_nonce() -> u64 {
let param_ref = CHAIN_TYPE.read().unwrap();
let param_ref = CHAIN_TYPE.read();
match *param_ref {
// won't make a difference
ChainTypes::AutomatedTesting => 0,

View file

@ -38,7 +38,7 @@ extern crate serde;
extern crate serde_derive;
extern crate siphasher;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate failure;
#[macro_use]

View file

@ -78,19 +78,19 @@ where
}
}
pub fn set_header_nonce(header: Vec<u8>, nonce: Option<u32>) -> Result<[u64; 4], Error> {
pub fn set_header_nonce(header: &[u8], nonce: Option<u32>) -> Result<[u64; 4], Error> {
if let Some(n) = nonce {
let len = header.len();
let mut header = header.clone();
let mut header = header.to_owned();
header.truncate(len - mem::size_of::<u32>());
header.write_u32::<LittleEndian>(n)?;
create_siphash_keys(header)
create_siphash_keys(&header)
} else {
create_siphash_keys(header)
create_siphash_keys(&header)
}
}
pub fn create_siphash_keys(header: Vec<u8>) -> Result<[u64; 4], Error> {
pub fn create_siphash_keys(header: &[u8]) -> Result<[u64; 4], Error> {
let h = blake2b(32, &[], &header);
let hb = h.as_bytes();
let mut rdr = Cursor::new(hb);
@ -163,7 +163,7 @@ where
/// Reset the main keys used for siphash from the header and nonce
pub fn reset_header_nonce(&mut self, header: Vec<u8>, nonce: Option<u32>) -> Result<(), Error> {
self.siphash_keys = set_header_nonce(header, nonce)?;
self.siphash_keys = set_header_nonce(&header, nonce)?;
Ok(())
}
@ -175,7 +175,7 @@ where
);
let mut masked = hash_u64 & self.edge_mask.to_u64().ok_or(ErrorKind::IntegerCast)?;
if shift {
masked = masked << 1;
masked <<= 1;
masked |= uorv;
}
Ok(T::from(masked).ok_or(ErrorKind::IntegerCast)?)

View file

@ -54,14 +54,14 @@ where
pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
let max_nodes = 2 * to_u64!(max_edges);
Ok(Graph {
max_edges: max_edges,
max_nodes: max_nodes,
max_edges,
max_nodes,
max_sols,
proof_size,
links: vec![],
adj_list: vec![],
visited: Bitmap::create(),
max_sols: max_sols,
solutions: vec![],
proof_size: proof_size,
nil: T::max_value(),
})
}
@ -241,7 +241,7 @@ where
/// Simple implementation of algorithm
pub fn find_cycles_iter<'a, I>(&mut self, iter: I) -> Result<Vec<Proof>, Error>
pub fn find_cycles_iter<I>(&mut self, iter: I) -> Result<Vec<Proof>, Error>
where
I: Iterator<Item = u64>,
{
@ -260,7 +260,7 @@ where
for s in &self.graph.solutions {
self.verify_impl(&s)?;
}
if self.graph.solutions.len() == 0 {
if self.graph.solutions.is_empty() {
Err(ErrorKind::NoSolution)?
} else {
Ok(self.graph.solutions.clone())

View file

@ -77,7 +77,7 @@ where
let params = CuckooParams::new(edge_bits, proof_size)?;
let num_nodes = 2 * params.num_edges as usize;
Ok(CuckooContext {
params: params,
params,
graph: vec![T::zero(); num_nodes],
_max_sols: max_sols,
})
@ -190,7 +190,7 @@ where
cycle.insert(Edge { u: us[0], v: vs[0] });
while nu != 0 {
// u's in even position; v's in odd
nu = nu - 1;
nu -= 1;
cycle.insert(Edge {
u: us[((nu + 1) & !1) as usize],
v: us[(nu | 1) as usize],
@ -214,11 +214,11 @@ where
cycle.remove(&edge);
}
}
return if n == self.params.proof_size {
if n == self.params.proof_size {
Ok(sol)
} else {
Err(ErrorKind::NoCycle)?
};
}
}
/// Searches for a solution (simple implementation)

View file

@ -85,7 +85,7 @@ impl From<ErrorKind> for Error {
impl From<Context<ErrorKind>> for Error {
fn from(inner: Context<ErrorKind>) -> Error {
Error { inner: inner }
Error { inner }
}
}

View file

@ -37,7 +37,7 @@ impl Lean {
// edge bitmap, before trimming all of them are on
let mut edges = Bitmap::create_with_capacity(params.num_edges as u32);
edges.flip_inplace(0..params.num_edges.into());
edges.flip_inplace(0..params.num_edges);
Lean { params, edges }
}

View file

@ -77,7 +77,7 @@ pub fn mine_genesis_block() -> Result<Block, Error> {
}
// total_difficulty on the genesis header *is* the difficulty of that block
let genesis_difficulty = gen.header.pow.total_difficulty.clone();
let genesis_difficulty = gen.header.pow.total_difficulty;
let sz = global::min_edge_bits();
let proof_size = global::proofsize();

View file

@ -62,7 +62,7 @@ pub fn siphash24(v: &[u64; 4], nonce: u64) -> u64 {
round!();
round!();
return v0 ^ v1 ^ v2 ^ v3;
v0 ^ v1 ^ v2 ^ v3
}
#[cfg(test)]

View file

@ -80,11 +80,6 @@ impl Difficulty {
Difficulty { num: max(num, 1) }
}
/// Compute difficulty scaling factor for graph defined by 2 * 2^edge_bits * edge_bits bits
pub fn scale(edge_bits: u8) -> u64 {
(2 << (edge_bits - global::base_edge_bits()) as u64) * (edge_bits as u64)
}
/// Computes the difficulty from a hash. Divides the maximum target by the
/// provided hash and applies the Cuck(at)oo size adjustment factor (see
/// https://lists.launchpad.net/mimblewimble/msg00494.html).

View file

@ -92,10 +92,7 @@ impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::IOErr(ref e, _) => e,
Error::UnexpectedData {
expected: _,
received: _,
} => "unexpected data",
Error::UnexpectedData { .. } => "unexpected data",
Error::CorruptedData => "corrupted data",
Error::TooLargeReadErr => "too large read",
Error::ConsensusError(_) => "consensus error (sort order)",
@ -231,13 +228,13 @@ where
/// Deserializes a Readeable from any std::io::Read implementation.
pub fn deserialize<T: Readable>(source: &mut Read) -> Result<T, Error> {
let mut reader = BinReader { source: source };
let mut reader = BinReader { source };
T::read(&mut reader)
}
/// Serializes a Writeable into any std::io::Write implementation.
pub fn serialize<W: Writeable>(sink: &mut Write, thing: &W) -> Result<(), Error> {
let mut writer = BinWriter { sink: sink };
let mut writer = BinWriter { sink };
thing.write(&mut writer)
}
@ -319,9 +316,7 @@ impl Readable for Commitment {
fn read(reader: &mut Reader) -> Result<Commitment, Error> {
let a = reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE)?;
let mut c = [0; PEDERSEN_COMMITMENT_SIZE];
for i in 0..PEDERSEN_COMMITMENT_SIZE {
c[i] = a[i];
}
c[..PEDERSEN_COMMITMENT_SIZE].clone_from_slice(&a[..PEDERSEN_COMMITMENT_SIZE]);
Ok(Commitment(c))
}
}
@ -368,9 +363,7 @@ impl Readable for RangeProof {
fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
let p = reader.read_limited_vec(MAX_PROOF_SIZE)?;
let mut a = [0; MAX_PROOF_SIZE];
for i in 0..p.len() {
a[i] = p[i];
}
a[..p.len()].clone_from_slice(&p[..]);
Ok(RangeProof {
proof: a,
plen: p.len(),
@ -388,9 +381,7 @@ impl Readable for Signature {
fn read(reader: &mut Reader) -> Result<Signature, Error> {
let a = reader.read_fixed_bytes(AGG_SIGNATURE_SIZE)?;
let mut c = [0; AGG_SIGNATURE_SIZE];
for i in 0..AGG_SIGNATURE_SIZE {
c[i] = a[i];
}
c[..AGG_SIGNATURE_SIZE].clone_from_slice(&a[..AGG_SIGNATURE_SIZE]);
Ok(Signature::from_raw_data(&c).unwrap())
}
}
@ -577,81 +568,81 @@ pub trait AsFixedBytes: Sized + AsRef<[u8]> {
impl<'a> AsFixedBytes for &'a [u8] {
fn len(&self) -> usize {
return 1;
1
}
}
impl AsFixedBytes for Vec<u8> {
fn len(&self) -> usize {
return self.len();
self.len()
}
}
impl AsFixedBytes for [u8; 1] {
fn len(&self) -> usize {
return 1;
1
}
}
impl AsFixedBytes for [u8; 2] {
fn len(&self) -> usize {
return 2;
2
}
}
impl AsFixedBytes for [u8; 4] {
fn len(&self) -> usize {
return 4;
4
}
}
impl AsFixedBytes for [u8; 6] {
fn len(&self) -> usize {
return 6;
6
}
}
impl AsFixedBytes for [u8; 8] {
fn len(&self) -> usize {
return 8;
8
}
}
impl AsFixedBytes for [u8; 20] {
fn len(&self) -> usize {
return 20;
20
}
}
impl AsFixedBytes for [u8; 32] {
fn len(&self) -> usize {
return 32;
32
}
}
impl AsFixedBytes for String {
fn len(&self) -> usize {
return self.len();
self.len()
}
}
impl AsFixedBytes for ::core::hash::Hash {
fn len(&self) -> usize {
return 32;
32
}
}
impl AsFixedBytes for ::util::secp::pedersen::RangeProof {
fn len(&self) -> usize {
return self.plen;
self.plen
}
}
impl AsFixedBytes for ::util::secp::Signature {
fn len(&self) -> usize {
return 64;
64
}
}
impl AsFixedBytes for ::util::secp::pedersen::Commitment {
fn len(&self) -> usize {
return PEDERSEN_COMMITMENT_SIZE;
PEDERSEN_COMMITMENT_SIZE
}
}
impl AsFixedBytes for BlindingFactor {
fn len(&self) -> usize {
return SECRET_KEY_SIZE;
SECRET_KEY_SIZE
}
}
impl AsFixedBytes for ::keychain::Identifier {
fn len(&self) -> usize {
return IDENTIFIER_SIZE;
IDENTIFIER_SIZE
}
}

View file

@ -18,8 +18,9 @@ extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::time::Instant;
use util::RwLock;
pub mod common;

View file

@ -480,18 +480,21 @@ fn secondary_pow_scale() {
// difficulty block
hi.is_secondary = false;
assert_eq!(
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect::<Vec<_>>()),
147
);
// all secondary on 90%, factor should go down a bit
hi.is_secondary = true;
assert_eq!(
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect::<Vec<_>>()),
94
);
// all secondary on 1%, factor should go down to bound (divide by 2)
assert_eq!(
secondary_pow_scaling(890_000, &(0..window).map(|_| hi.clone()).collect()),
secondary_pow_scaling(
890_000,
&(0..window).map(|_| hi.clone()).collect::<Vec<_>>()
),
49
);
// same as above, testing lowest bound
@ -510,7 +513,7 @@ fn secondary_pow_scale() {
&(0..(window / 10))
.map(|_| primary_hi.clone())
.chain((0..(window * 9 / 10)).map(|_| hi.clone()))
.collect()
.collect::<Vec<_>>()
),
94
);
@ -521,7 +524,7 @@ fn secondary_pow_scale() {
&(0..(window / 20))
.map(|_| primary_hi.clone())
.chain((0..(window * 95 / 100)).map(|_| hi.clone()))
.collect()
.collect::<Vec<_>>()
),
94
);
@ -532,7 +535,7 @@ fn secondary_pow_scale() {
&(0..(window * 6 / 10))
.map(|_| primary_hi.clone())
.chain((0..(window * 4 / 10)).map(|_| hi.clone()))
.collect()
.collect::<Vec<_>>()
),
84
);

View file

@ -18,7 +18,8 @@ extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
pub mod common;
@ -350,7 +351,7 @@ fn blind_tx() {
let Output { proof, .. } = btx.outputs()[0];
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let info = secp.range_proof_info(proof);
assert!(info.min == 0);

View file

@ -18,7 +18,8 @@ extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
pub mod common;
@ -48,20 +49,20 @@ fn test_verifier_cache_rangeproofs() {
// Check our output is not verified according to the cache.
{
let mut cache = cache.write().unwrap();
let mut cache = cache.write();
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
assert_eq!(unverified, vec![out]);
}
// Add our output to the cache.
{
let mut cache = cache.write().unwrap();
let mut cache = cache.write();
cache.add_rangeproof_verified(vec![out]);
}
// Check it shows as verified according to the cache.
{
let mut cache = cache.write().unwrap();
let mut cache = cache.write();
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
assert_eq!(unverified, vec![]);
}

View file

@ -170,10 +170,6 @@ Receives a transaction, modifying the slate accordingly (which can then be sent
* **Sample Call:**
```javascript
var coinbase_data = {
fees: 0,
height: 123456
}
$.ajax({
url: "/v1/wallet/foreign/build_coinbase",
dataType: "json",

View file

@ -390,10 +390,6 @@ Send a transaction either directly by http or file (then display the slate)
* **Sample Call:**
```javascript
var coinbase_data = {
fees: 0,
height: 123456
}
$.ajax({
url: "/v1/wallet/owner/issue_send_tx",
dataType: "json",
@ -506,10 +502,6 @@ Builds the complete transaction and sends it to a grin node for propagation.
* **Sample Call:**
```javascript
var coinbase_data = {
fees: 0,
height: 123456
}
$.ajax({
url: "/v1/wallet/owner/finalize_tx",
dataType: "json",
@ -555,10 +547,6 @@ Roll back a transaction and all associated outputs with a given transaction id T
* **Sample Call:**
```javascript
var coinbase_data = {
fees: 0,
height: 123456
}
$.ajax({
url: "/v1/wallet/owner/cancel_tx?id=3",
dataType: "json",
@ -569,6 +557,83 @@ Roll back a transaction and all associated outputs with a given transaction id T
});
```
### POST Post Tx
Push a new transaction to the connected node's transaction pool. Add `?fluff` at the end of the URL to bypass the Dandelion relay.
* **URL**
/v1/wallet/owner/post_tx
* **Method:**
`POST`
* **URL Params**
None
* **Data Params**
**Required:** A transaction slate in JSON.
| Field | Type | Description |
|:----------------------|:---------|:--------------------------------------------------------------------------|
| num_participants | number | The number of participants intended to take part in this transaction |
| id | number | Unique transaction ID, selected by sender |
| tx | object | The core transaction data (inputs, outputs, kernels and kernel offset) |
| - offset | []number | The kernel "offset" k2, excess is k1G after splitting the key k = k1 + k2 |
| - body | object | The transaction body - inputs/outputs/kernels |
| - - inputs | []object | List of inputs spent by the transaction |
| - - - features | object | The features of the output being spent |
| - - - - bits | number | Representation of the features in bits |
| - - - commit | []number | The commit referencing the output being spent |
| - - outputs | []object | List of outputs the transaction produces |
| - - - features | object | Options for an output's structure or use |
| - - - - bits | number | Representation of the features in bits |
| - - - commit | []number | The homomorphic commitment representing the output amount |
| - - - proof | []number | A proof that the commitment is in the right range |
| - - kernels | []object | List of kernels that make up this transaction (usually a single kernel) |
| - - - features | object | Options for a kernel's structure or use |
| - - - - bits | number | Representation of the features in bits |
| - - - fee | number | Fee originally included in the transaction this proof is for |
| - - - lock_height | number | The max lock_height of all inputs to this transaction |
| - - - excess | []number | Remainder of the sum of all transaction commitments |
| - - - excess_sig | []number | The signature proving the excess is a valid public key (signs the tx fee) |
| amount | number | Base amount (excluding fee) |
| fee | number | Fee amount |
| height | number | Block height for the transaction |
| lock_height | number | Lock height |
| participant_data | object | Participant data |
| - id | number | Id of participant in the transaction. (For now, 0=sender, 1=rec) |
| - public_blind_excess | []number | Public key corresponding to private blinding factor |
| - public_nonce | []number | Public key corresponding to private nonce |
| - part_sig | []number | Public partial signature |
* **Success Response:**
* **Code:** 200
* **Error Response:**
* **Code:** 400
* **Sample Call:**
```javascript
$.ajax({
url: "/v1/wallet/owner/post_tx",
dataType: "json",
type : "POST",
success : function(r) {
console.log(r);
},
data: {
file: tx.json
},
});
```
### POST Issue Burn Tx
Issue a burn TX.
@ -600,10 +665,6 @@ Issue a burn TX.
* **Sample Call:**
```javascript
var coinbase_data = {
fees: 0,
height: 123456
}
$.ajax({
url: "/v1/wallet/owner/issue_burn_tx",
dataType: "json",

68
doc/wallet/tls-setup.md Normal file
View file

@ -0,0 +1,68 @@
# Wallet TLS setup
## What you need
* A server with a static IP address (e.g. `3.3.3.3`)
* Ownership of a domain name (`example.com`)
* DNS configuration for this IP (`grin1.example.com` -> `3.3.3.3`)
If you don't have a static IP you may want to consider a service like DynDNS that supports dynamic IP resolution; that case is not covered by this guide, but all of the following steps still apply.
If you don't have a domain name it is possible to get a TLS certificate for a bare IP address, but you have to pay for it (so it may be cheaper to buy a domain name) and few certificate providers support it.
## I have a TLS certificate already
Uncomment and update the following lines in wallet config (by default `~/.grin/grin-wallet.toml`):
```
tls_certificate_file = "/path/to/my/certificate/fullchain.pem"
tls_certificate_key = "/path/to/my/certificate/privkey.pem"
```
Make sure your user has read access to the files (see below for how to do that). Restart the wallet. When you (or someone else) send grins to this wallet the destination (`-d` option) must start with `https://`, not `http://`.
## I don't have a TLS certificate
You can get one for free from [Let's Encrypt](https://letsencrypt.org/). To simplify the process we will use `certbot`.
### Install certbot
Go to the [Certbot home page](https://certbot.eff.org/), choose `None of the above` for the software and your OS (e.g. `Ubuntu 18.04`, which is used as the example here). You will be redirected to a page with instructions like the [steps for Ubuntu](https://certbot.eff.org/lets-encrypt/ubuntubionic-other). Follow the instructions in the `Install` section. As a result you should have `certbot` installed.
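On Ubuntu 18.04 the install boils down to roughly the following (check the certbot page for the current steps, as the packaging changes over time):
```
sudo apt-get update
sudo apt-get install software-properties-common
sudo add-apt-repository ppa:certbot/certbot
sudo apt-get update
sudo apt-get install certbot
```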
### Obtain certificate
If you have experience with `certbot` feel free to use any type of challenge. This guide covers the simplest case, the HTTP challenge. For this you need a web server listening on port `80`, which in the simplest case means running it as root. We will use the standalone server provided by certbot. **Make sure you have port 80 open.**
```
sudo certbot certonly --standalone -d grin1.example.com
```
It will ask you some questions; as a result you should see something like:
```
Congratulations! Your certificate and chain have been saved at:
/etc/letsencrypt/live/grin1.example.com/fullchain.pem
Your key file has been saved at:
/etc/letsencrypt/live/grin1.example.com/privkey.pem
Your cert will expire on 2019-01-16. To obtain a new or tweaked
version of this certificate in the future, simply run certbot
again. To non-interactively renew *all* of your certificates, run
"certbot renew"
```
### Change permissions
Now you have the certificate files, but only the root user can read them. We run grin as the `ubuntu` user. There are different ways to fix this; the simplest is to create a group that has access to the `/etc/letsencrypt` directory and add our user to it.
```
$ sudo groupadd tls-cert
$ sudo usermod -a -G tls-cert ubuntu
$ sudo chgrp -R tls-cert /etc/letsencrypt
$ sudo chmod -R g=rX /etc/letsencrypt
$ sudo chmod 2755 /etc/letsencrypt
```
The last step (the setgid bit) is needed for renewal: it makes sure that any new files created under `/etc/letsencrypt` inherit the same group ownership.
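As an optional sanity check you can inspect the directory afterwards; with the commands above the mode and group should look like this (the link count, size and date are placeholders and will differ on your system):
```
$ ls -ld /etc/letsencrypt
drwxr-sr-x 7 root tls-cert 4096 Oct 18 12:00 /etc/letsencrypt
```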
### Update wallet config
Refer to `I have a TLS certificate already`, because you have one now. Use the following values:
```
tls_certificate_file = "/etc/letsencrypt/live/grin1.example.com/fullchain.pem"
tls_certificate_key = "/etc/letsencrypt/live/grin1.example.com/privkey.pem"
```
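Let's Encrypt certificates expire after 90 days. As the certbot output above notes, `certbot renew` refreshes them; if your certbot package did not already install a renewal timer, a root cron entry is one way to automate it (a sketch only, adjust the schedule to taste):
```
# run weekly from root's crontab; --quiet suppresses output when nothing is due
0 3 * * 1 certbot renew --quiet
```
The wallet most likely needs a restart after renewal to pick up the new certificate files.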

View file

@ -9,7 +9,7 @@ publish = false
byteorder = "1"
blake2-rfc = "0.2"
rand = "0.5"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
serde = "1"
serde_derive = "1"
serde_json = "1"

View file

@ -24,10 +24,10 @@ extern crate serde;
extern crate serde_derive;
extern crate digest;
extern crate hmac;
extern crate log;
extern crate ripemd160;
extern crate serde_json;
extern crate sha2;
extern crate slog;
extern crate uuid;
mod base58;

View file

@ -244,7 +244,7 @@ impl Add for BlindingFactor {
//
fn add(self, other: BlindingFactor) -> Self::Output {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let secp = secp.lock();
let keys = vec![self, other]
.into_iter()
.filter(|x| *x != BlindingFactor::zero())

View file

@ -15,7 +15,7 @@ num = "0.1"
rand = "0.5"
serde = "1"
serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
chrono = { version = "0.4.4", features = ["serde"] }
grin_core = { path = "../core" }

View file

@ -24,13 +24,13 @@ use std::fs::File;
use std::io::{self, Read, Write};
use std::mem::size_of;
use std::net::TcpStream;
use std::sync::{mpsc, Arc, RwLock};
use std::sync::{mpsc, Arc};
use std::{cmp, thread, time};
use util::RwLock;
use core::ser;
use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type};
use types::Error;
use util::LOGGER;
/// A trait to be implemented in order to receive messages from the
/// connection. Allows providing an optional response.
@ -144,7 +144,6 @@ impl<'a> Response<'a> {
pub const SEND_CHANNEL_CAP: usize = 10;
// TODO count sent and received
pub struct Tracker {
/// Bytes we've sent.
pub sent_bytes: Arc<RwLock<u64>>,
@ -168,9 +167,8 @@ impl Tracker {
self.send_channel.try_send(buf)?;
// Increase sent bytes counter
if let Ok(mut sent_bytes) = self.sent_bytes.write() {
*sent_bytes += buf_len as u64;
}
let mut sent_bytes = self.sent_bytes.write();
*sent_bytes += buf_len as u64;
Ok(())
}
@ -234,14 +232,14 @@ fn poll<H>(
if let Some(h) = try_break!(error_tx, read_header(conn, None)) {
let msg = Message::from_header(h, conn);
trace!(
LOGGER,
"Received message header, type {:?}, len {}.",
msg.header.msg_type,
msg.header.msg_len
);
// Increase received bytes counter
if let Ok(mut received_bytes) = received_bytes.write() {
{
let mut received_bytes = received_bytes.write();
let header_size = size_of::<MsgHeader>() as u64;
*received_bytes += header_size + msg.header.msg_len;
}
@ -275,7 +273,6 @@ fn poll<H>(
// check the close channel
if let Ok(_) = close_rx.try_recv() {
debug!(
LOGGER,
"Connection close with {} initiated by us",
conn.peer_addr()
.map(|a| a.to_string())

View file

@ -14,7 +14,8 @@
use std::collections::VecDeque;
use std::net::{SocketAddr, TcpStream};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chrono::prelude::*;
use rand::{thread_rng, Rng};
@ -24,7 +25,6 @@ use core::pow::Difficulty;
use msg::{read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT};
use peer::Peer;
use types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo};
use util::LOGGER;
const NONCES_CAP: usize = 100;
@ -114,7 +114,6 @@ impl Handshake {
}
debug!(
LOGGER,
"Connected! Cumulative {} offered from {:?} {:?} {:?}",
shake.total_difficulty.to_num(),
peer_info.addr,
@ -146,7 +145,7 @@ impl Handshake {
});
} else {
// check the nonce to see if we are trying to connect to ourselves
let nonces = self.nonces.read().unwrap();
let nonces = self.nonces.read();
if nonces.contains(&hand.nonce) {
return Err(Error::PeerWithSelf);
}
@ -185,7 +184,7 @@ impl Handshake {
};
write_message(conn, shake, Type::Shake)?;
trace!(LOGGER, "Success handshake with {}.", peer_info.addr);
trace!("Success handshake with {}.", peer_info.addr);
// when more than one protocol version is supported, choosing should go here
Ok(peer_info)
@ -195,7 +194,7 @@ impl Handshake {
fn next_nonce(&self) -> u64 {
let nonce = thread_rng().gen();
let mut nonces = self.nonces.write().unwrap();
let mut nonces = self.nonces.write();
nonces.push_back(nonce);
if nonces.len() >= NONCES_CAP {
nonces.pop_front();

View file

@ -37,7 +37,7 @@ extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
mod conn;

View file

@ -26,7 +26,6 @@ use core::pow::Difficulty;
use core::ser::{self, Readable, Reader, Writeable, Writer};
use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};
use util::LOGGER;
/// Current latest version of the protocol
pub const PROTOCOL_VERSION: u32 = 1;
@ -207,8 +206,8 @@ pub fn read_header(conn: &mut TcpStream, msg_type: Option<Type>) -> Result<MsgHe
// TODO 4x the limits for now to leave ourselves space to change things
if header.msg_len > max_len * 4 {
error!(
LOGGER,
"Too large read {}, had {}, wanted {}.", header.msg_type as u8, max_len, header.msg_len
"Too large read {}, had {}, wanted {}.",
header.msg_type as u8, max_len, header.msg_len
);
return Err(Error::Serialization(ser::Error::TooLargeReadErr));
}

View file

@ -14,7 +14,8 @@
use std::fs::File;
use std::net::{SocketAddr, TcpStream};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chrono::prelude::{DateTime, Utc};
use conn;
@ -27,7 +28,6 @@ use protocol::Protocol;
use types::{
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
};
use util::LOGGER;
const MAX_TRACK_SIZE: usize = 30;
@ -103,8 +103,8 @@ impl Peer {
if let Some(ref denied) = config.peers_deny {
if denied.contains(&peer) {
debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly denied", peer_addr
"checking peer allowed/denied: {:?} explicitly denied",
peer_addr
);
return true;
}
@ -112,14 +112,14 @@ impl Peer {
if let Some(ref allowed) = config.peers_allow {
if allowed.contains(&peer) {
debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly allowed", peer_addr
"checking peer allowed/denied: {:?} explicitly allowed",
peer_addr
);
return false;
} else {
debug!(
LOGGER,
"checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr
"checking peer allowed/denied: {:?} not explicitly allowed, denying",
peer_addr
);
return true;
}
@ -137,12 +137,12 @@ impl Peer {
/// Whether this peer has been banned.
pub fn is_banned(&self) -> bool {
State::Banned == *self.state.read().unwrap()
State::Banned == *self.state.read()
}
/// Whether this peer is stuck on sync.
pub fn is_stuck(&self) -> (bool, Difficulty) {
let peer_live_info = self.info.live_info.read().unwrap();
let peer_live_info = self.info.live_info.read();
let now = Utc::now().timestamp_millis();
// if last updated difficulty is 2 hours ago, we're sure this peer is a stuck node.
if now > peer_live_info.stuck_detector.timestamp_millis() + global::STUCK_PEER_KICK_TIME {
@ -155,9 +155,8 @@ impl Peer {
/// Number of bytes sent to the peer
pub fn sent_bytes(&self) -> Option<u64> {
if let Some(ref tracker) = self.connection {
if let Ok(sent_bytes) = tracker.sent_bytes.read() {
return Some(*sent_bytes);
}
let sent_bytes = tracker.sent_bytes.read();
return Some(*sent_bytes);
}
None
}
@ -165,16 +164,15 @@ impl Peer {
/// Number of bytes received from the peer
pub fn received_bytes(&self) -> Option<u64> {
if let Some(ref tracker) = self.connection {
if let Ok(received_bytes) = tracker.received_bytes.read() {
return Some(*received_bytes);
}
let received_bytes = tracker.received_bytes.read();
return Some(*received_bytes);
}
None
}
/// Set this peer status to banned
pub fn set_banned(&self) {
*self.state.write().unwrap() = State::Banned;
*self.state.write() = State::Banned;
}
/// Send a ping to the remote peer, providing our local difficulty and
@ -199,13 +197,10 @@ impl Peer {
.unwrap()
.send(ban_reason_msg, msg::Type::BanReason)
{
Ok(_) => debug!(
LOGGER,
"Sent ban reason {:?} to {}", ban_reason, self.info.addr
),
Ok(_) => debug!("Sent ban reason {:?} to {}", ban_reason, self.info.addr),
Err(e) => error!(
LOGGER,
"Could not send ban reason {:?} to {}: {:?}", ban_reason, self.info.addr, e
"Could not send ban reason {:?} to {}: {:?}",
ban_reason, self.info.addr, e
),
};
}
@ -214,7 +209,7 @@ impl Peer {
/// if the remote peer is known to already have the block.
pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) {
trace!(LOGGER, "Send block {} to {}", b.hash(), self.info.addr);
trace!("Send block {} to {}", b.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -222,7 +217,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Suppress block send {} to {} (already seen)",
b.hash(),
self.info.addr,
@ -233,12 +227,7 @@ impl Peer {
pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) {
trace!(
LOGGER,
"Send compact block {} to {}",
b.hash(),
self.info.addr
);
trace!("Send compact block {} to {}", b.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -246,7 +235,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Suppress compact block send {} to {} (already seen)",
b.hash(),
self.info.addr,
@ -257,7 +245,7 @@ impl Peer {
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> {
if !self.tracking_adapter.has(bh.hash()) {
debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr);
debug!("Send header {} to {}", bh.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -265,7 +253,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Suppress header send {} to {} (already seen)",
bh.hash(),
self.info.addr,
@ -278,7 +265,7 @@ impl Peer {
/// dropped if the remote peer is known to already have the transaction.
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> {
if !self.tracking_adapter.has(tx.hash()) {
debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr);
debug!("Send tx {} to {}", tx.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -286,7 +273,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Not sending tx {} to {} (already seen)",
tx.hash(),
self.info.addr
@ -299,7 +285,7 @@ impl Peer {
/// Note: tracking adapter is ignored for stem transactions (while under
/// embargo).
pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
debug!(LOGGER, "Send (stem) tx {} to {}", tx.hash(), self.info.addr);
debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -317,10 +303,7 @@ impl Peer {
/// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(
LOGGER,
"Requesting block {} from peer {}.", h, self.info.addr
);
debug!("Requesting block {} from peer {}.", h, self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -329,10 +312,7 @@ impl Peer {
/// Sends a request for a specific compact block by hash
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(
LOGGER,
"Requesting compact block {} from {}", h, self.info.addr
);
debug!("Requesting compact block {} from {}", h, self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -340,7 +320,7 @@ impl Peer {
}
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
debug!(LOGGER, "Asking {} for more peers.", self.info.addr);
debug!("Asking {} for more peers.", self.info.addr);
self.connection.as_ref().unwrap().send(
&GetPeerAddrs {
capabilities: capab,
@ -351,8 +331,8 @@ impl Peer {
pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
debug!(
LOGGER,
"Asking {} for txhashset archive at {} {}.", self.info.addr, height, hash
"Asking {} for txhashset archive at {} {}.",
self.info.addr, height, hash
);
self.connection.as_ref().unwrap().send(
&TxHashSetRequest { hash, height },
@ -369,7 +349,7 @@ impl Peer {
match self.connection.as_ref().unwrap().error_channel.try_recv() {
Ok(Error::Serialization(e)) => {
let need_stop = {
let mut state = self.state.write().unwrap();
let mut state = self.state.write();
if State::Banned != *state {
*state = State::Disconnected;
true
@ -379,8 +359,8 @@ impl Peer {
};
if need_stop {
debug!(
LOGGER,
"Client {} corrupted, will disconnect ({:?}).", self.info.addr, e
"Client {} corrupted, will disconnect ({:?}).",
self.info.addr, e
);
self.stop();
}
@ -388,7 +368,7 @@ impl Peer {
}
Ok(e) => {
let need_stop = {
let mut state = self.state.write().unwrap();
let mut state = self.state.write();
if State::Disconnected != *state {
*state = State::Disconnected;
true
@ -397,13 +377,13 @@ impl Peer {
}
};
if need_stop {
debug!(LOGGER, "Client {} connection lost: {:?}", self.info.addr, e);
debug!("Client {} connection lost: {:?}", self.info.addr, e);
self.stop();
}
false
}
Err(_) => {
let state = self.state.read().unwrap();
let state = self.state.read();
State::Connected == *state
}
}
@ -427,14 +407,14 @@ impl TrackingAdapter {
}
fn has(&self, hash: Hash) -> bool {
let known = self.known.read().unwrap();
let known = self.known.read();
// may become too slow, an ordered set (by timestamp for eviction) may
// end up being a better choice
known.contains(&hash)
}
fn push(&self, hash: Hash) {
let mut known = self.known.write().unwrap();
let mut known = self.known.write();
if known.len() > MAX_TRACK_SIZE {
known.truncate(MAX_TRACK_SIZE);
}

View file

@ -15,15 +15,17 @@
use std::collections::HashMap;
use std::fs::File;
use std::net::SocketAddr;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use rand::{thread_rng, Rng};
use chrono::prelude::*;
use chrono::Duration;
use core::core;
use core::core::hash::{Hash, Hashed};
use core::global;
use core::pow::Difficulty;
use util::LOGGER;
use peer::Peer;
use store::{PeerData, PeerStore, State};
@ -67,14 +69,15 @@ impl Peers {
flags: State::Healthy,
last_banned: 0,
ban_reason: ReasonForBan::None,
last_connected: Utc::now().timestamp(),
};
addr = peer.info.addr.clone();
}
debug!(LOGGER, "Saving newly connected peer {}.", addr);
debug!("Saving newly connected peer {}.", addr);
self.save_peer(&peer_data)?;
{
let mut peers = self.peers.write().unwrap();
let mut peers = self.peers.write();
peers.insert(addr, peer.clone());
}
Ok(())
@ -88,27 +91,26 @@ impl Peers {
Some(peer) => {
// Clear the map and add new relay
let dandelion_relay = &self.dandelion_relay;
dandelion_relay.write().unwrap().clear();
dandelion_relay.write().clear();
dandelion_relay
.write()
.unwrap()
.insert(Utc::now().timestamp(), peer.clone());
debug!(
LOGGER,
"Successfully updated Dandelion relay to: {}", peer.info.addr
"Successfully updated Dandelion relay to: {}",
peer.info.addr
);
}
None => debug!(LOGGER, "Could not update dandelion relay"),
None => debug!("Could not update dandelion relay"),
};
}
// Get the dandelion relay
pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<Peer>> {
self.dandelion_relay.read().unwrap().clone()
self.dandelion_relay.read().clone()
}
pub fn is_known(&self, addr: &SocketAddr) -> bool {
self.peers.read().unwrap().contains_key(addr)
self.peers.read().contains_key(addr)
}
/// Get vec of peers we are currently connected to.
@ -116,7 +118,6 @@ impl Peers {
let mut res = self
.peers
.read()
.unwrap()
.values()
.filter(|p| p.is_connected())
.cloned()
@ -136,14 +137,13 @@ impl Peers {
/// Get a peer we're connected to by address.
pub fn get_connected_peer(&self, addr: &SocketAddr) -> Option<Arc<Peer>> {
self.peers.read().unwrap().get(addr).map(|p| p.clone())
self.peers.read().get(addr).map(|p| p.clone())
}
/// Number of peers we're currently connected to.
pub fn peer_count(&self) -> u32 {
self.peers
.read()
.unwrap()
.values()
.filter(|x| x.is_connected())
.count() as u32
@ -168,37 +168,11 @@ impl Peers {
max_peers
}
// Return vec of connected peers that currently advertise more work
// (total_difficulty) than we do and are also full archival nodes.
pub fn more_work_archival_peers(&self) -> Vec<Arc<Peer>> {
let peers = self.connected_peers();
if peers.len() == 0 {
return vec![];
}
let total_difficulty = self.total_difficulty();
let mut max_peers = peers
.into_iter()
.filter(|x| {
x.info.total_difficulty() > total_difficulty
&& x.info.capabilities.contains(Capabilities::FULL_HIST)
}).collect::<Vec<_>>();
thread_rng().shuffle(&mut max_peers);
max_peers
}
/// Returns single random peer with more work than us.
pub fn more_work_peer(&self) -> Option<Arc<Peer>> {
self.more_work_peers().pop()
}
/// Returns single random archival peer with more work than us.
pub fn more_work_archival_peer(&self) -> Option<Arc<Peer>> {
self.more_work_archival_peers().pop()
}
/// Return vec of connected peers that currently have the most worked
/// branch, showing the highest total difficulty.
pub fn most_work_peers(&self) -> Vec<Arc<Peer>> {
@ -240,11 +214,11 @@ impl Peers {
/// Ban a peer, disconnecting it if we're currently connected
pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
if let Err(e) = self.update_state(*peer_addr, State::Banned) {
error!(LOGGER, "Couldn't ban {}: {:?}", peer_addr, e);
error!("Couldn't ban {}: {:?}", peer_addr, e);
}
if let Some(peer) = self.get_connected_peer(peer_addr) {
debug!(LOGGER, "Banning peer {}", peer_addr);
debug!("Banning peer {}", peer_addr);
// setting peer status will get it removed at the next clean_peer
peer.send_ban_reason(ban_reason);
peer.set_banned();
@ -258,13 +232,13 @@ impl Peers {
Ok(_) => {
if self.is_banned(*peer_addr) {
if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e);
error!("Couldn't unban {}: {:?}", peer_addr, e);
}
} else {
error!(LOGGER, "Couldn't unban {}: peer is not banned", peer_addr);
error!("Couldn't unban {}: peer is not banned", peer_addr);
}
}
Err(e) => error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e),
Err(e) => error!("Couldn't unban {}: {:?}", peer_addr, e),
};
}
@ -280,7 +254,7 @@ impl Peers {
match inner(&p) {
Ok(true) => count += 1,
Ok(false) => (),
Err(e) => debug!(LOGGER, "Error sending {} to peer: {:?}", obj_name, e),
Err(e) => debug!("Error sending {} to peer: {:?}", obj_name, e),
}
if count >= num_peers {
@ -299,7 +273,6 @@ impl Peers {
let num_peers = self.config.peer_max_count();
let count = self.broadcast("compact block", num_peers, |p| p.send_compact_block(b));
debug!(
LOGGER,
"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
b.hash(),
b.header.pow.total_difficulty,
@ -317,7 +290,6 @@ impl Peers {
let num_peers = self.config.peer_min_preferred_count();
let count = self.broadcast("header", num_peers, |p| p.send_header(bh));
debug!(
LOGGER,
"broadcast_header: {}, {} at {}, to {} peers, done.",
bh.hash(),
bh.pow.total_difficulty,
@ -330,7 +302,7 @@ impl Peers {
pub fn broadcast_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
let dandelion_relay = self.get_dandelion_relay();
if dandelion_relay.is_empty() {
debug!(LOGGER, "No dandelion relay, updating.");
debug!("No dandelion relay, updating.");
self.update_dandelion_relay();
}
// If still return an error, let the caller handle this as they see fit.
@ -341,10 +313,7 @@ impl Peers {
for relay in dandelion_relay.values() {
if relay.is_connected() {
if let Err(e) = relay.send_stem_transaction(tx) {
debug!(
LOGGER,
"Error sending stem transaction to peer relay: {:?}", e
);
debug!("Error sending stem transaction to peer relay: {:?}", e);
}
}
}
@ -360,7 +329,6 @@ impl Peers {
let num_peers = self.config.peer_min_preferred_count();
let count = self.broadcast("transaction", num_peers, |p| p.send_transaction(tx));
trace!(
LOGGER,
"broadcast_transaction: {}, to {} peers, done.",
tx.hash(),
count,
@ -370,7 +338,7 @@ impl Peers {
/// Ping all our connected peers. Always automatically expects a pong back
/// or disconnects. This acts as a liveness test.
pub fn check_all(&self, total_difficulty: Difficulty, height: u64) {
let peers_map = self.peers.read().unwrap();
let peers_map = self.peers.read();
for p in peers_map.values() {
if p.is_connected() {
let _ = p.send_ping(total_difficulty, height);
@ -417,17 +385,17 @@ impl Peers {
let mut rm = vec![];
// build a list of peers to be cleaned up
for peer in self.peers.read().unwrap().values() {
for peer in self.peers.read().values() {
if peer.is_banned() {
debug!(LOGGER, "clean_peers {:?}, peer banned", peer.info.addr);
debug!("clean_peers {:?}, peer banned", peer.info.addr);
rm.push(peer.clone());
} else if !peer.is_connected() {
debug!(LOGGER, "clean_peers {:?}, not connected", peer.info.addr);
debug!("clean_peers {:?}, not connected", peer.info.addr);
rm.push(peer.clone());
} else {
let (stuck, diff) = peer.is_stuck();
if stuck && diff < self.adapter.total_difficulty() {
debug!(LOGGER, "clean_peers {:?}, stuck peer", peer.info.addr);
debug!("clean_peers {:?}, stuck peer", peer.info.addr);
peer.stop();
let _ = self.update_state(peer.info.addr, State::Defunct);
rm.push(peer.clone());
@ -437,7 +405,7 @@ impl Peers {
// now clean up peer map based on the list to remove
{
let mut peers = self.peers.write().unwrap();
let mut peers = self.peers.write();
for p in rm {
peers.remove(&p.info.addr);
}
@ -463,13 +431,13 @@ impl Peers {
// now remove them taking a short-lived write lock each time
// maybe better to take write lock once and remove them all?
for x in addrs.iter().take(excess_count) {
let mut peers = self.peers.write().unwrap();
let mut peers = self.peers.write();
peers.remove(x);
}
}
pub fn stop(&self) {
let mut peers = self.peers.write().unwrap();
let mut peers = self.peers.write();
for (_, peer) in peers.drain() {
peer.stop();
}
@ -478,6 +446,31 @@ impl Peers {
pub fn enough_peers(&self) -> bool {
self.connected_peers().len() >= self.config.peer_min_preferred_count() as usize
}
/// Removes those peers that seem to have expired
pub fn remove_expired(&self) {
let now = Utc::now();
// Delete defunct peers from storage
let _ = self.store.delete_peers(|peer| {
let diff = now - Utc.timestamp(peer.last_connected, 0);
let should_remove = peer.flags == State::Defunct
&& diff > Duration::seconds(global::PEER_EXPIRATION_REMOVE_TIME);
if should_remove {
debug!(
"removing peer {:?}: last connected {} days {} hours {} minutes ago.",
peer.addr,
diff.num_days(),
diff.num_hours(),
diff.num_minutes()
);
}
should_remove
});
}
}
impl ChainAdapter for Peers {
@ -499,8 +492,8 @@ impl ChainAdapter for Peers {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(
LOGGER,
"Received a bad block {} from {}, the peer will be banned", hash, peer_addr
"Received a bad block {} from {}, the peer will be banned",
hash, peer_addr
);
self.ban_peer(&peer_addr, ReasonForBan::BadBlock);
false
@ -515,10 +508,8 @@ impl ChainAdapter for Peers {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(
LOGGER,
"Received a bad compact block {} from {}, the peer will be banned",
hash,
&peer_addr
hash, &peer_addr
);
self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock);
false
@ -568,8 +559,8 @@ impl ChainAdapter for Peers {
fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) {
debug!(
LOGGER,
"Received a bad txhashset data from {}, the peer will be banned", &peer_addr
"Received a bad txhashset data from {}, the peer will be banned",
&peer_addr
);
self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet);
false
@ -594,17 +585,13 @@ impl NetAdapter for Peers {
/// addresses.
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
trace!(
LOGGER,
"find_peer_addrs: {} healthy peers picked",
peers.len()
);
trace!("find_peer_addrs: {} healthy peers picked", peers.len());
map_vec!(peers, |p| p.addr)
}
/// A list of peers has been received from one of our peers.
fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
trace!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
trace!("Received {} peer addrs, saving.", peer_addrs.len());
for pa in peer_addrs {
if let Ok(e) = self.exists_peer(pa) {
if e {
@ -618,9 +605,10 @@ impl NetAdapter for Peers {
flags: State::Healthy,
last_banned: 0,
ban_reason: ReasonForBan::None,
last_connected: Utc::now().timestamp(),
};
if let Err(e) = self.save_peer(&peer) {
error!(LOGGER, "Could not save received peer address: {:?}", e);
error!("Could not save received peer address: {:?}", e);
}
}
}

View file

@ -30,7 +30,6 @@ use msg::{
TxHashSetArchive, TxHashSetRequest, Type,
};
use types::{Error, NetAdapter};
use util::LOGGER;
pub struct Protocol {
adapter: Arc<NetAdapter>,
@ -52,10 +51,8 @@ impl MessageHandler for Protocol {
// banned peers up correctly?
if adapter.is_banned(self.addr.clone()) {
debug!(
LOGGER,
"handler: consume: peer {:?} banned, received: {:?}, dropping.",
self.addr,
msg.header.msg_type,
self.addr, msg.header.msg_type,
);
return Ok(None);
}
@ -82,14 +79,14 @@ impl MessageHandler for Protocol {
Type::BanReason => {
let ban_reason: BanReason = msg.body()?;
error!(LOGGER, "handle_payload: BanReason {:?}", ban_reason);
error!("handle_payload: BanReason {:?}", ban_reason);
Ok(None)
}
Type::Transaction => {
debug!(
LOGGER,
"handle_payload: received tx: msg_len: {}", msg.header.msg_len
"handle_payload: received tx: msg_len: {}",
msg.header.msg_len
);
let tx: core::Transaction = msg.body()?;
adapter.transaction_received(tx, false);
@ -98,8 +95,8 @@ impl MessageHandler for Protocol {
Type::StemTransaction => {
debug!(
LOGGER,
"handle_payload: received stem tx: msg_len: {}", msg.header.msg_len
"handle_payload: received stem tx: msg_len: {}",
msg.header.msg_len
);
let tx: core::Transaction = msg.body()?;
adapter.transaction_received(tx, true);
@ -109,7 +106,6 @@ impl MessageHandler for Protocol {
Type::GetBlock => {
let h: Hash = msg.body()?;
trace!(
LOGGER,
"handle_payload: Getblock: {}, msg_len: {}",
h,
msg.header.msg_len,
@ -124,8 +120,8 @@ impl MessageHandler for Protocol {
Type::Block => {
debug!(
LOGGER,
"handle_payload: received block: msg_len: {}", msg.header.msg_len
"handle_payload: received block: msg_len: {}",
msg.header.msg_len
);
let b: core::Block = msg.body()?;
@ -145,8 +141,8 @@ impl MessageHandler for Protocol {
Type::CompactBlock => {
debug!(
LOGGER,
"handle_payload: received compact block: msg_len: {}", msg.header.msg_len
"handle_payload: received compact block: msg_len: {}",
msg.header.msg_len
);
let b: core::CompactBlock = msg.body()?;
@ -218,8 +214,8 @@ impl MessageHandler for Protocol {
Type::TxHashSetRequest => {
let sm_req: TxHashSetRequest = msg.body()?;
debug!(
LOGGER,
"handle_payload: txhashset req for {} at {}", sm_req.hash, sm_req.height
"handle_payload: txhashset req for {} at {}",
sm_req.hash, sm_req.height
);
let txhashset = self.adapter.txhashset_read(sm_req.hash);
@ -244,15 +240,11 @@ impl MessageHandler for Protocol {
Type::TxHashSetArchive => {
let sm_arch: TxHashSetArchive = msg.body()?;
debug!(
LOGGER,
"handle_payload: txhashset archive for {} at {}. size={}",
sm_arch.hash,
sm_arch.height,
sm_arch.bytes,
sm_arch.hash, sm_arch.height, sm_arch.bytes,
);
if !self.adapter.txhashset_receive_ready() {
error!(
LOGGER,
"handle_payload: txhashset archive received but SyncStatus not on TxHashsetDownload",
);
return Err(Error::BadMessage);
@ -284,14 +276,13 @@ impl MessageHandler for Protocol {
if let Err(e) = save_txhashset_to_file(tmp.clone()) {
error!(
LOGGER,
"handle_payload: txhashset archive save to file fail. err={:?}", e
"handle_payload: txhashset archive save to file fail. err={:?}",
e
);
return Err(e);
}
trace!(
LOGGER,
"handle_payload: txhashset archive save to file {:?} success",
tmp,
);
@ -302,18 +293,15 @@ impl MessageHandler for Protocol {
.txhashset_write(sm_arch.hash, tmp_zip, self.addr);
debug!(
LOGGER,
"handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}",
sm_arch.hash,
sm_arch.height,
res
sm_arch.hash, sm_arch.height, res
);
Ok(None)
}
_ => {
debug!(LOGGER, "unknown message type {:?}", msg.header.msg_type);
debug!("unknown message type {:?}", msg.header.msg_type);
Ok(None)
}
}
@ -341,12 +329,8 @@ fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error>
let max_size = min_size + 6;
if average_header_size < min_size as u64 || average_header_size > max_size as u64 {
debug!(
LOGGER,
"headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}",
total_headers,
average_header_size,
min_size,
max_size,
total_headers, average_header_size, min_size, max_size,
);
return Err(Error::Connection(io::Error::new(
io::ErrorKind::InvalidData,

View file

@ -30,7 +30,6 @@ use peer::Peer;
use peers::Peers;
use store::PeerStore;
use types::{Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, TxHashSetRead};
use util::LOGGER;
/// P2P server implementation, handling bootstrapping to find and connect to
/// peers, receiving connections from other peers and keep track of all of them.
@ -50,35 +49,12 @@ impl Server {
/// Creates a new idle p2p server with no peers
pub fn new(
db_env: Arc<lmdb::Environment>,
mut capab: Capabilities,
capab: Capabilities,
config: P2PConfig,
adapter: Arc<ChainAdapter>,
genesis: Hash,
stop: Arc<AtomicBool>,
_archive_mode: bool,
block_1_hash: Option<Hash>,
) -> Result<Server, Error> {
// In the case of an archive node, check that we do have the first block.
// In case of first sync we do not perform this check.
if capab.contains(Capabilities::FULL_HIST) && adapter.total_height() > 0 {
// Check that we have block 1
match block_1_hash {
Some(hash) => match adapter.get_block(hash) {
Some(_) => debug!(LOGGER, "Full block 1 found, archive capabilities confirmed"),
None => {
debug!(
LOGGER,
"Full block 1 not found, archive capabilities disabled"
);
capab.remove(Capabilities::FULL_HIST);
}
},
None => {
debug!(LOGGER, "Block 1 not found, archive capabilities disabled");
capab.remove(Capabilities::FULL_HIST);
}
}
}
Ok(Server {
config: config.clone(),
capabilities: capab,
@ -102,12 +78,7 @@ impl Server {
Ok((stream, peer_addr)) => {
if !self.check_banned(&stream) {
if let Err(e) = self.handle_new_peer(stream) {
warn!(
LOGGER,
"Error accepting peer {}: {:?}",
peer_addr.to_string(),
e
);
warn!("Error accepting peer {}: {:?}", peer_addr.to_string(), e);
}
}
}
@ -115,7 +86,7 @@ impl Server {
// nothing to do, will retry in next iteration
}
Err(e) => {
warn!(LOGGER, "Couldn't establish new client connection: {:?}", e);
warn!("Couldn't establish new client connection: {:?}", e);
}
}
if self.stop.load(Ordering::Relaxed) {
@ -130,10 +101,7 @@ impl Server {
/// we're already connected to the provided address.
pub fn connect(&self, addr: &SocketAddr) -> Result<Arc<Peer>, Error> {
if Peer::is_denied(&self.config, &addr) {
debug!(
LOGGER,
"connect_peer: peer {} denied, not connecting.", addr
);
debug!("connect_peer: peer {} denied, not connecting.", addr);
return Err(Error::ConnectionClose);
}
@ -148,12 +116,11 @@ impl Server {
if let Some(p) = self.peers.get_connected_peer(addr) {
// if we're already connected to the addr, just return the peer
trace!(LOGGER, "connect_peer: already connected {}", addr);
trace!("connect_peer: already connected {}", addr);
return Ok(p);
}
trace!(
LOGGER,
"connect_peer: on {}:{}. connecting to {}",
self.config.host,
self.config.port,
@ -179,12 +146,8 @@ impl Server {
}
Err(e) => {
debug!(
LOGGER,
"connect_peer: on {}:{}. Could not connect to {}: {:?}",
self.config.host,
self.config.port,
addr,
e
self.config.host, self.config.port, addr, e
);
Err(Error::Connection(e))
}
@ -211,9 +174,9 @@ impl Server {
// peer has been banned, go away!
if let Ok(peer_addr) = stream.peer_addr() {
if self.peers.is_banned(peer_addr) {
debug!(LOGGER, "Peer {} banned, refusing connection.", peer_addr);
debug!("Peer {} banned, refusing connection.", peer_addr);
if let Err(e) = stream.shutdown(Shutdown::Both) {
debug!(LOGGER, "Error shutting down conn: {:?}", e);
debug!("Error shutting down conn: {:?}", e);
}
return true;
}

View file

@ -26,7 +26,6 @@ use core::ser::{self, Readable, Reader, Writeable, Writer};
use grin_store::{self, option_to_not_found, to_key, Error};
use msg::SockAddr;
use types::{Capabilities, ReasonForBan};
use util::LOGGER;
const STORE_SUBPATH: &'static str = "peers";
@ -58,6 +57,8 @@ pub struct PeerData {
pub last_banned: i64,
/// The reason for the ban
pub ban_reason: ReasonForBan,
/// Time when we last connected to this peer.
pub last_connected: i64,
}
impl Writeable for PeerData {
@ -69,7 +70,8 @@ impl Writeable for PeerData {
[write_bytes, &self.user_agent],
[write_u8, self.flags as u8],
[write_i64, self.last_banned],
[write_i32, self.ban_reason as i32]
[write_i32, self.ban_reason as i32],
[write_i64, self.last_connected]
);
Ok(())
}
@ -80,18 +82,27 @@ impl Readable for PeerData {
let addr = SockAddr::read(reader)?;
let (capab, ua, fl, lb, br) =
ser_multiread!(reader, read_u32, read_vec, read_u8, read_i64, read_i32);
let lc = reader.read_i64();
// this only works because each PeerData is read in its own vector and this
// is the last data element
let last_connected = if let Err(_) = lc {
Utc::now().timestamp()
} else {
lc.unwrap()
};
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?;
let last_banned = lb;
let ban_reason = ReasonForBan::from_i32(br).ok_or(ser::Error::CorruptedData)?;
match State::from_u8(fl) {
Some(flags) => Ok(PeerData {
addr: addr.0,
capabilities: capabilities,
user_agent: user_agent,
capabilities,
user_agent,
flags: flags,
last_banned: last_banned,
ban_reason: ban_reason,
last_banned: lb,
ban_reason,
last_connected,
}),
None => Err(ser::Error::CorruptedData),
}
@ -111,7 +122,7 @@ impl PeerStore {
}
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
debug!(LOGGER, "save_peer: {:?} marked {:?}", p.addr, p.flags);
debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);
let batch = self.db.batch()?;
batch.put_ser(&peer_key(p.addr)[..], p)?;
@ -172,6 +183,33 @@ impl PeerStore {
batch.put_ser(&peer_key(peer.addr)[..], &peer)?;
batch.commit()
}
/// Deletes peers from the storage that satisfy some condition `predicate`
pub fn delete_peers<F>(&self, predicate: F) -> Result<(), Error>
where
F: Fn(&PeerData) -> bool,
{
let mut to_remove = vec![];
for x in self.all_peers() {
if predicate(&x) {
to_remove.push(x)
}
}
// Delete peers in single batch
if !to_remove.is_empty() {
let batch = self.db.batch()?;
for peer in to_remove {
batch.delete(&peer_key(peer.addr)[..])?;
}
batch.commit()?;
}
Ok(())
}
}
fn peer_key(peer_addr: SocketAddr) -> Vec<u8> {

View file

@ -17,7 +17,8 @@ use std::fs::File;
use std::io;
use std::net::{IpAddr, SocketAddr};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chrono::prelude::*;
@ -131,7 +132,7 @@ impl Default for P2PConfig {
P2PConfig {
host: ipaddr,
port: 13414,
capabilities: Capabilities::FAST_SYNC_NODE,
capabilities: Capabilities::FULL_NODE,
seeding_type: Seeding::default(),
seeds: None,
peers_allow: None,
@ -192,26 +193,27 @@ impl Default for Seeding {
}
bitflags! {
/// Options for what type of interaction a peer supports
#[derive(Serialize, Deserialize)]
pub struct Capabilities: u32 {
/// We don't know (yet) what the peer can do.
const UNKNOWN = 0b00000000;
/// Full archival node, has the whole history without any pruning.
const FULL_HIST = 0b00000001;
/// Can provide block headers and the TxHashSet for some recent-enough
/// height.
const TXHASHSET_HIST = 0b00000010;
/// Can provide a list of healthy peers
const PEER_LIST = 0b00000100;
/// Options for what type of interaction a peer supports
#[derive(Serialize, Deserialize)]
pub struct Capabilities: u32 {
/// We don't know (yet) what the peer can do.
const UNKNOWN = 0b00000000;
/// Can provide full history of headers back to genesis
/// (for at least one arbitrary fork).
const HEADER_HIST = 0b00000001;
/// Can provide block headers and the TxHashSet for some recent-enough
/// height.
const TXHASHSET_HIST = 0b00000010;
/// Can provide a list of healthy peers
const PEER_LIST = 0b00000100;
const FAST_SYNC_NODE = Capabilities::TXHASHSET_HIST.bits
| Capabilities::PEER_LIST.bits;
const FULL_NODE = Capabilities::FULL_HIST.bits
| Capabilities::TXHASHSET_HIST.bits
| Capabilities::PEER_LIST.bits;
}
/// All nodes right now are "full nodes".
/// Some nodes internally may maintain longer block histories (archival_mode)
/// but we do not advertise this to other nodes.
const FULL_NODE = Capabilities::HEADER_HIST.bits
| Capabilities::TXHASHSET_HIST.bits
| Capabilities::PEER_LIST.bits;
}
}
/// Types of connection
@ -258,23 +260,23 @@ pub struct PeerInfo {
impl PeerInfo {
/// The current total_difficulty of the peer.
pub fn total_difficulty(&self) -> Difficulty {
self.live_info.read().unwrap().total_difficulty
self.live_info.read().total_difficulty
}
/// The current height of the peer.
pub fn height(&self) -> u64 {
self.live_info.read().unwrap().height
self.live_info.read().height
}
/// Time of last_seen for this peer (via ping/pong).
pub fn last_seen(&self) -> DateTime<Utc> {
self.live_info.read().unwrap().last_seen
self.live_info.read().last_seen
}
/// Update the total_difficulty, height and last_seen of the peer.
/// Takes a write lock on the live_info.
pub fn update(&self, height: u64, total_difficulty: Difficulty) {
let mut live_info = self.live_info.write().unwrap();
let mut live_info = self.live_info.write();
if total_difficulty != live_info.total_difficulty {
live_info.stuck_detector = Utc::now();
}

View file

@ -58,8 +58,6 @@ fn peer_handshake() {
net_adapter.clone(),
Hash::from_vec(&vec![]),
Arc::new(AtomicBool::new(false)),
false,
None,
).unwrap(),
);

View file

@ -10,7 +10,7 @@ blake2-rfc = "0.2"
rand = "0.5"
serde = "1"
serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
chrono = "0.4.4"
grin_core = { path = "../core" }

View file

@ -30,7 +30,7 @@ extern crate serde;
#[macro_use] // Needed for Serialize/Deserialize. The compiler complaining here is a bug.
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
mod pool;

View file

@ -16,7 +16,8 @@
//! Used for both the txpool and stempool layers in the pool.
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use core::consensus;
use core::core::hash::{Hash, Hashed};
@ -25,7 +26,6 @@ use core::core::transaction;
use core::core::verifier_cache::VerifierCache;
use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
use util::LOGGER;
// max weight leaving minimum space for a coinbase
const MAX_MINEABLE_WEIGHT: usize =
@ -48,8 +48,8 @@ impl Pool {
) -> Pool {
Pool {
entries: vec![],
blockchain: chain.clone(),
verifier_cache: verifier_cache.clone(),
blockchain: chain,
verifier_cache,
name,
}
}
@ -74,34 +74,34 @@ impl Pool {
&self,
hash: Hash,
nonce: u64,
kern_ids: &Vec<ShortId>,
kern_ids: &[ShortId],
) -> (Vec<Transaction>, Vec<ShortId>) {
let mut rehashed = HashMap::new();
let mut txs = vec![];
let mut found_ids = vec![];
// Rehash all entries in the pool using short_ids based on provided hash and nonce.
for x in &self.entries {
'outer: for x in &self.entries {
for k in x.tx.kernels() {
// rehash each kernel to calculate the block specific short_id
let short_id = k.short_id(&hash, nonce);
rehashed.insert(short_id, x.tx.hash());
if kern_ids.contains(&short_id) {
txs.push(x.tx.clone());
found_ids.push(short_id);
}
if found_ids.len() == kern_ids.len() {
break 'outer;
}
}
}
// Retrive the txs from the pool by the set of unique hashes.
let hashes: HashSet<_> = rehashed.values().collect();
let txs = hashes.into_iter().filter_map(|x| self.get_tx(*x)).collect();
// Calculate the missing ids based on the ids passed in
// and the ids that successfully matched txs.
let matched_ids: HashSet<_> = rehashed.keys().collect();
let all_ids: HashSet<_> = kern_ids.iter().collect();
let missing_ids = all_ids
.difference(&matched_ids)
.map(|x| *x)
.cloned()
.collect();
(txs, missing_ids)
txs.dedup();
(
txs,
kern_ids
.into_iter()
.filter(|id| !found_ids.contains(id))
.cloned()
.collect(),
)
}
/// Take pool transactions, filtering and ordering them in a way that's
@ -133,8 +133,7 @@ impl Pool {
// Iteratively apply the txs to the current chain state,
// rejecting any that do not result in a valid state.
// Return a vec of all the valid txs.
let block_sums = self.blockchain.get_block_sums(&header.hash())?;
let txs = self.validate_raw_txs(flat_txs, None, &header, &block_sums)?;
let txs = self.validate_raw_txs(flat_txs, None, &header)?;
Ok(txs)
}
@ -159,8 +158,7 @@ impl Pool {
extra_tx: Option<Transaction>,
header: &BlockHeader,
) -> Result<Vec<Transaction>, PoolError> {
let block_sums = self.blockchain.get_block_sums(&header.hash())?;
let valid_txs = self.validate_raw_txs(txs, extra_tx, header, &block_sums)?;
let valid_txs = self.validate_raw_txs(txs, extra_tx, header)?;
Ok(valid_txs)
}
@ -173,10 +171,10 @@ impl Pool {
}
// Transition the specified pool entries to the new state.
pub fn transition_to_state(&mut self, txs: &Vec<Transaction>, state: PoolEntryState) {
for x in self.entries.iter_mut() {
pub fn transition_to_state(&mut self, txs: &[Transaction], state: PoolEntryState) {
for x in &mut self.entries {
if txs.contains(&x.tx) {
x.state = state.clone();
x.state = state;
}
}
}
@ -190,18 +188,6 @@ impl Pool {
extra_txs: Vec<Transaction>,
header: &BlockHeader,
) -> Result<(), PoolError> {
debug!(
LOGGER,
"pool [{}]: add_to_pool: {}, {:?}, inputs: {}, outputs: {}, kernels: {} (at block {})",
self.name,
entry.tx.hash(),
entry.src,
entry.tx.inputs().len(),
entry.tx.outputs().len(),
entry.tx.kernels().len(),
header.hash(),
);
// Combine all the txs from the pool with any extra txs provided.
let mut txs = self.all_transactions();
@ -228,6 +214,17 @@ impl Pool {
// Validate aggregated tx against a known chain state.
self.validate_raw_tx(&agg_tx, header)?;
debug!(
"add_to_pool [{}]: {} ({}), in/out/kern: {}/{}/{}, pool: {} (at block {})",
self.name,
entry.tx.hash(),
entry.src.debug_name,
entry.tx.inputs().len(),
entry.tx.outputs().len(),
entry.tx.kernels().len(),
self.size(),
header.hash(),
);
// If we get here successfully then we can safely add the entry to the pool.
self.entries.push(entry);
@ -239,8 +236,14 @@ impl Pool {
tx: &Transaction,
header: &BlockHeader,
) -> Result<BlockSums, PoolError> {
let block_sums = self.blockchain.get_block_sums(&header.hash())?;
let new_sums = self.apply_txs_to_block_sums(&block_sums, vec![tx.clone()], header)?;
tx.validate(self.verifier_cache.clone())?;
// Validate the tx against current chain state.
// Check all inputs are in the current UTXO set.
// Check all outputs are unique in current UTXO set.
self.blockchain.validate_tx(tx)?;
let new_sums = self.apply_tx_to_block_sums(tx, header)?;
Ok(new_sums)
}
@ -249,7 +252,6 @@ impl Pool {
txs: Vec<Transaction>,
extra_tx: Option<Transaction>,
header: &BlockHeader,
block_sums: &BlockSums,
) -> Result<Vec<Transaction>, PoolError> {
let mut valid_txs = vec![];
@ -260,10 +262,12 @@ impl Pool {
};
candidate_txs.extend(valid_txs.clone());
candidate_txs.push(tx.clone());
if self
.apply_txs_to_block_sums(&block_sums, candidate_txs, header)
.is_ok()
{
// Build a single aggregate tx from candidate txs.
let agg_tx = transaction::aggregate(candidate_txs)?;
// We know the tx is valid if the entire aggregate tx is valid.
if self.validate_raw_tx(&agg_tx, header).is_ok() {
valid_txs.push(tx);
}
}
@ -271,28 +275,20 @@ impl Pool {
Ok(valid_txs)
}
fn apply_txs_to_block_sums(
fn apply_tx_to_block_sums(
&self,
block_sums: &BlockSums,
txs: Vec<Transaction>,
tx: &Transaction,
header: &BlockHeader,
) -> Result<BlockSums, PoolError> {
// Build a single aggregate tx and validate it.
let tx = transaction::aggregate(txs)?;
tx.validate(self.verifier_cache.clone())?;
// Validate the tx against current chain state.
// Check all inputs are in the current UTXO set.
// Check all outputs are unique in current UTXO set.
self.blockchain.validate_tx(&tx)?;
let overage = tx.overage();
let offset = (header.total_kernel_offset() + tx.offset)?;
let block_sums = self.blockchain.get_block_sums(&header.hash())?;
// Verify the kernel sums for the block_sums with the new tx applied,
// accounting for overage and offset.
let (utxo_sum, kernel_sum) =
(block_sums.clone(), &tx as &Committed).verify_kernel_sums(overage, offset)?;
(block_sums, tx as &Committed).verify_kernel_sums(overage, offset)?;
Ok(BlockSums {
utxo_sum,
@ -314,7 +310,7 @@ impl Pool {
}
for x in existing_entries {
let _ = self.add_to_pool(x.clone(), extra_txs.clone(), header);
let _ = self.add_to_pool(x, extra_txs.clone(), header);
}
Ok(())
@ -355,20 +351,7 @@ impl Pool {
tx_buckets
}
// Filter txs in the pool based on the latest block.
// Reject any txs where we see a matching tx kernel in the block.
// Also reject any txs where we see a conflicting tx,
// where an input is spent in a different tx.
fn remaining_transactions(&self, block: &Block) -> Vec<Transaction> {
self.entries
.iter()
.filter(|x| !x.tx.kernels().iter().any(|y| block.kernels().contains(y)))
.filter(|x| !x.tx.inputs().iter().any(|y| block.inputs().contains(y)))
.map(|x| x.tx.clone())
.collect()
}
pub fn find_matching_transactions(&self, kernels: Vec<TxKernel>) -> Vec<Transaction> {
pub fn find_matching_transactions(&self, kernels: &[TxKernel]) -> Vec<Transaction> {
// While the inputs outputs can be cut-through the kernel will stay intact
// In order to deaggregate tx we look for tx with the same kernel
let mut found_txs = vec![];
@ -378,7 +361,7 @@ impl Pool {
// Check each transaction in the pool
for entry in &self.entries {
let entry_kernel_set = entry.tx.kernels().iter().cloned().collect::<HashSet<_>>();
let entry_kernel_set = entry.tx.kernels().iter().collect::<HashSet<_>>();
if entry_kernel_set.is_subset(&kernel_set) {
found_txs.push(entry.tx.clone());
}
@ -388,10 +371,15 @@ impl Pool {
/// Quick reconciliation step - we can evict any txs in the pool where
/// inputs or kernels intersect with the block.
pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
let candidate_txs = self.remaining_transactions(block);
self.entries.retain(|x| candidate_txs.contains(&x.tx));
Ok(())
pub fn reconcile_block(&mut self, block: &Block) {
// Filter txs in the pool based on the latest block.
// Reject any txs where we see a matching tx kernel in the block.
// Also reject any txs where we see a conflicting tx,
// where an input is spent in a different tx.
self.entries.retain(|x| {
!x.tx.kernels().iter().any(|y| block.kernels().contains(y))
&& !x.tx.inputs().iter().any(|y| block.inputs().contains(y))
});
}
pub fn size(&self) -> usize {

View file

@ -17,7 +17,9 @@
//! resulting tx pool can be added to the current chain state to produce a
//! valid chain state.
use std::sync::{Arc, RwLock};
use std::collections::VecDeque;
use std::sync::Arc;
use util::RwLock;
use chrono::prelude::Utc;
@ -28,6 +30,9 @@ use core::core::{transaction, Block, BlockHeader, Transaction};
use pool::Pool;
use types::{BlockChain, PoolAdapter, PoolConfig, PoolEntry, PoolEntryState, PoolError, TxSource};
// Cache this many txs to handle a potential fork and re-org.
const REORG_CACHE_SIZE: usize = 100;
/// Transaction pool implementation.
pub struct TransactionPool {
/// Pool Config
@ -36,6 +41,8 @@ pub struct TransactionPool {
pub txpool: Pool,
/// Our Dandelion "stempool".
pub stempool: Pool,
/// Cache of previous txs in case of a re-org.
pub reorg_cache: Arc<RwLock<VecDeque<PoolEntry>>>,
/// The blockchain
pub blockchain: Arc<BlockChain>,
pub verifier_cache: Arc<RwLock<VerifierCache>>,
@ -53,8 +60,13 @@ impl TransactionPool {
) -> TransactionPool {
TransactionPool {
config,
txpool: Pool::new(chain.clone(), verifier_cache.clone(), format!("txpool")),
stempool: Pool::new(chain.clone(), verifier_cache.clone(), format!("stempool")),
txpool: Pool::new(chain.clone(), verifier_cache.clone(), "txpool".to_string()),
stempool: Pool::new(
chain.clone(),
verifier_cache.clone(),
"stempool".to_string(),
),
reorg_cache: Arc::new(RwLock::new(VecDeque::new())),
blockchain: chain,
verifier_cache,
adapter,
@ -68,13 +80,23 @@ impl TransactionPool {
fn add_to_stempool(&mut self, entry: PoolEntry, header: &BlockHeader) -> Result<(), PoolError> {
// Add tx to stempool (passing in all txs from txpool to validate against).
self.stempool
.add_to_pool(entry.clone(), self.txpool.all_transactions(), header)?;
.add_to_pool(entry, self.txpool.all_transactions(), header)?;
// Note: we do not notify the adapter here,
// we let the dandelion monitor handle this.
Ok(())
}
fn add_to_reorg_cache(&mut self, entry: PoolEntry) -> Result<(), PoolError> {
let mut cache = self.reorg_cache.write();
cache.push_back(entry);
if cache.len() > REORG_CACHE_SIZE {
cache.pop_front();
}
debug!("added tx to reorg_cache: size now {}", cache.len());
Ok(())
}
fn add_to_txpool(
&mut self,
mut entry: PoolEntry,
@ -82,9 +104,7 @@ impl TransactionPool {
) -> Result<(), PoolError> {
// First deaggregate the tx based on current txpool txs.
if entry.tx.kernels().len() > 1 {
let txs = self
.txpool
.find_matching_transactions(entry.tx.kernels().clone());
let txs = self.txpool.find_matching_transactions(entry.tx.kernels());
if !txs.is_empty() {
let tx = transaction::deaggregate(entry.tx, txs)?;
tx.validate(self.verifier_cache.clone())?;
@ -96,8 +116,10 @@ impl TransactionPool {
// We now need to reconcile the stempool based on the new state of the txpool.
// Some stempool txs may no longer be valid and we need to evict them.
let txpool_tx = self.txpool.aggregate_transaction()?;
self.stempool.reconcile(txpool_tx, header)?;
{
let txpool_tx = self.txpool.aggregate_transaction()?;
self.stempool.reconcile(txpool_tx, header)?;
}
self.adapter.tx_accepted(&entry.tx);
Ok(())
@ -123,7 +145,7 @@ impl TransactionPool {
// Make sure the transaction is valid before anything else.
tx.validate(self.verifier_cache.clone())
.map_err(|e| PoolError::InvalidTx(e))?;
.map_err(PoolError::InvalidTx)?;
// Check the tx lock_time is valid based on current chain state.
self.blockchain.verify_tx_lock_height(&tx)?;
@ -135,28 +157,46 @@ impl TransactionPool {
state: PoolEntryState::Fresh,
src,
tx_at: Utc::now(),
tx: tx.clone(),
tx,
};
if stem {
// TODO - what happens to txs in the stempool in a re-org scenario?
self.add_to_stempool(entry, header)?;
} else {
self.add_to_txpool(entry, header)?;
self.add_to_txpool(entry.clone(), header)?;
self.add_to_reorg_cache(entry)?;
}
Ok(())
}
fn reconcile_reorg_cache(&mut self, header: &BlockHeader) -> Result<(), PoolError> {
let entries = self.reorg_cache.read().iter().cloned().collect::<Vec<_>>();
debug!("reconcile_reorg_cache: size: {} ...", entries.len());
for entry in entries {
let _ = &self.add_to_txpool(entry.clone(), header);
}
debug!("reconcile_reorg_cache: ... done.");
Ok(())
}
/// Reconcile the transaction pool (both txpool and stempool) against the
/// provided block.
pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
// First reconcile the txpool.
self.txpool.reconcile_block(block)?;
self.txpool.reconcile_block(block);
self.txpool.reconcile(None, &block.header)?;
// Then reconcile the stempool, accounting for the txpool txs.
let txpool_tx = self.txpool.aggregate_transaction()?;
self.stempool.reconcile_block(block)?;
self.stempool.reconcile(txpool_tx, &block.header)?;
// Take our "reorg_cache" and see if this block means
// we need to (re)add old txs due to a fork and re-org.
self.reconcile_reorg_cache(&block.header)?;
// Now reconcile our stempool, accounting for the updated txpool txs.
self.stempool.reconcile_block(block);
{
let txpool_tx = self.txpool.aggregate_transaction()?;
self.stempool.reconcile(txpool_tx, &block.header)?;
}
Ok(())
}
@ -168,7 +208,7 @@ impl TransactionPool {
&self,
hash: Hash,
nonce: u64,
kern_ids: &Vec<ShortId>,
kern_ids: &[ShortId],
) -> (Vec<Transaction>, Vec<ShortId>) {
self.txpool.retrieve_transactions(hash, nonce, kern_ids)
}

View file

@ -18,6 +18,7 @@
use chrono::prelude::{DateTime, Utc};
use core::consensus;
use core::core::block;
use core::core::committed;
use core::core::hash::Hash;
use core::core::transaction::{self, Transaction};
@ -128,7 +129,7 @@ pub struct PoolEntry {
}
/// The possible states a pool entry can be in.
#[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PoolEntryState {
/// A new entry, not yet processed.
Fresh,
@ -163,6 +164,8 @@ pub struct TxSource {
pub enum PoolError {
/// An invalid pool entry caused by underlying tx validation error
InvalidTx(transaction::Error),
/// An invalid pool entry caused by underlying block validation error
InvalidBlock(block::Error),
/// Underlying keychain error.
Keychain(keychain::Error),
/// Underlying "committed" error.
@ -192,6 +195,12 @@ impl From<transaction::Error> for PoolError {
}
}
impl From<block::Error> for PoolError {
fn from(e: block::Error) -> PoolError {
PoolError::InvalidBlock(e)
}
}
impl From<keychain::Error> for PoolError {
fn from(e: keychain::Error) -> PoolError {
PoolError::Keychain(e)

View file

@ -25,7 +25,8 @@ extern crate rand;
pub mod common;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use core::core::verifier_cache::LruVerifierCache;
use core::core::{Block, BlockHeader, Transaction};
@ -80,7 +81,7 @@ fn test_transaction_pool_block_building() {
let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
// Add the three root txs to the pool.
write_pool
@ -105,7 +106,7 @@ fn test_transaction_pool_block_building() {
}
let txs = {
let read_pool = pool.read().unwrap();
let read_pool = pool.read();
read_pool.prepare_mineable_transactions().unwrap()
};
// children should have been aggregated into parents
@ -123,7 +124,7 @@ fn test_transaction_pool_block_building() {
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
assert_eq!(write_pool.total_size(), 0);

View file

@ -25,7 +25,8 @@ extern crate rand;
pub mod common;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use core::core::{Block, BlockHeader};
@ -127,7 +128,7 @@ fn test_transaction_pool_block_reconciliation() {
// First we add the above transactions to the pool.
// All should be accepted.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
assert_eq!(write_pool.total_size(), 0);
for tx in &txs_to_add {
@ -165,13 +166,13 @@ fn test_transaction_pool_block_reconciliation() {
// Check the pool still contains everything we expect at this point.
{
let write_pool = pool.write().unwrap();
let write_pool = pool.write();
assert_eq!(write_pool.total_size(), txs_to_add.len());
}
// And reconcile the pool with this latest block.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
assert_eq!(write_pool.total_size(), 4);

View file

@ -25,7 +25,8 @@ extern crate rand;
pub mod common;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use common::*;
use core::core::hash::Hash;
@ -83,7 +84,7 @@ fn test_coinbase_maturity() {
let pool = RwLock::new(test_setup(chain, verifier_cache));
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
let tx = test_transaction(&keychain, vec![50], vec![49]);
match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) {
Err(PoolError::ImmatureCoinbase) => {}

View file

@ -28,7 +28,8 @@ extern crate rand;
use std::collections::HashSet;
use std::fs;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use core::core::hash::{Hash, Hashed};
use core::core::verifier_cache::VerifierCache;
@ -98,7 +99,7 @@ impl ChainAdapter {
batch.commit().unwrap();
{
let mut utxo = self.utxo.write().unwrap();
let mut utxo = self.utxo.write();
for x in block.inputs() {
utxo.remove(&x.commitment());
}
@ -129,7 +130,7 @@ impl BlockChain for ChainAdapter {
}
fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
let utxo = self.utxo.read().unwrap();
let utxo = self.utxo.read();
for x in tx.outputs() {
if utxo.contains(&x.commitment()) {

View file

@ -25,7 +25,8 @@ extern crate rand;
pub mod common;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use common::*;
use core::core::verifier_cache::LruVerifierCache;
@ -72,7 +73,7 @@ fn test_the_transaction_pool() {
// Add this tx to the pool (stem=false, direct to txpool).
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
write_pool
.add_to_pool(test_source(), initial_tx, false, &header)
.unwrap();
@ -86,7 +87,7 @@ fn test_the_transaction_pool() {
// Take a write lock and add a couple of tx entries to the pool.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
// Check we have a single initial tx in the pool.
assert_eq!(write_pool.total_size(), 1);
@ -110,7 +111,7 @@ fn test_the_transaction_pool() {
// This will fail during tx aggregation due to duplicate outputs and duplicate
// kernels.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
assert!(
write_pool
.add_to_pool(test_source(), tx1.clone(), true, &header)
@ -122,7 +123,7 @@ fn test_the_transaction_pool() {
// tx).
{
let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
assert!(
write_pool
.add_to_pool(test_source(), tx1a, true, &header)
@ -133,7 +134,7 @@ fn test_the_transaction_pool() {
// Test adding a tx attempting to spend a non-existent output.
{
let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
assert!(
write_pool
.add_to_pool(test_source(), bad_tx, true, &header)
@ -147,7 +148,7 @@ fn test_the_transaction_pool() {
// to be immediately stolen via a "replay" tx.
{
let tx = test_transaction(&keychain, vec![900], vec![498]);
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
assert!(
write_pool
.add_to_pool(test_source(), tx, true, &header)
@ -157,7 +158,7 @@ fn test_the_transaction_pool() {
// Confirm the tx pool correctly identifies an invalid tx (already spent).
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
assert!(
write_pool
@ -171,7 +172,7 @@ fn test_the_transaction_pool() {
// Check we can take some entries from the stempool and "fluff" them into the
// txpool. This also exercises multi-kernel txs.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
let agg_tx = write_pool
.stempool
.aggregate_transaction()
@ -189,7 +190,7 @@ fn test_the_transaction_pool() {
// We will do this by adding a new tx to the pool
// that is a superset of a tx already in the pool.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
// tx1 and tx2 are already in the txpool (in aggregated form)
@ -210,7 +211,7 @@ fn test_the_transaction_pool() {
// Check we cannot "double spend" an output spent in a previous block.
// We use the initial coinbase output here for convenience.
{
let mut write_pool = pool.write().unwrap();
let mut write_pool = pool.write();
let double_spend_tx =
{ test_transaction_spending_coinbase(&keychain, &header, vec![1000]) };

View file

@ -13,8 +13,8 @@ hyper-staticfile = "0.3"
itertools = "0.7"
lmdb-zero = "0.4.4"
rand = "0.5"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
serde = "1"
log = "0.4"
serde_derive = "1"
serde_json = "1"
chrono = "0.4.4"
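Dropping `slog` in favour of `log = "0.4"` here is what drives every `debug!(LOGGER, ...)` → `debug!(...)` rewrite later in this diff: the `log` facade's macros locate the global logger themselves, so no handle is threaded through. A rough sketch of the calling-convention change (the backend initialisation lives elsewhere and is not shown here):

// Illustrative only; with no backend installed, debug! is simply a no-op.
#[macro_use]
extern crate log;

fn main() {
    let tx_hash = "0a1b2c";
    // Old slog style needed an explicit logger handle:
    //     debug!(LOGGER, "Received tx {}", tx_hash);
    // The log facade finds the global logger on its own:
    debug!("Received tx {}", tx_hash);
}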

View file

@ -17,9 +17,10 @@
use std::fs::File;
use std::net::SocketAddr;
use std::sync::{Arc, RwLock, Weak};
use std::sync::{Arc, Weak};
use std::thread;
use std::time::Instant;
use util::RwLock;
use chain::{self, ChainAdapter, Options};
use chrono::prelude::{DateTime, Utc};
@ -34,7 +35,7 @@ use p2p;
use pool;
use rand::prelude::*;
use store;
use util::{OneTime, LOGGER};
use util::OneTime;
/// Implementation of the NetAdapter for the blockchain. Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool
@ -73,7 +74,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let header = self.chain().head_header().unwrap();
debug!(
LOGGER,
"Received tx {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
tx_hash,
tx.inputs().len(),
@ -82,18 +82,17 @@ impl p2p::ChainAdapter for NetToChainAdapter {
);
let res = {
let mut tx_pool = self.tx_pool.write().unwrap();
let mut tx_pool = self.tx_pool.write();
tx_pool.add_to_pool(source, tx, stem, &header)
};
if let Err(e) = res {
debug!(LOGGER, "Transaction {} rejected: {:?}", tx_hash, e);
debug!("Transaction {} rejected: {:?}", tx_hash, e);
}
}
fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
debug!(
LOGGER,
"Received block {} at {} from {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
b.hash(),
b.header.height,
@ -108,7 +107,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = cb.hash();
debug!(
LOGGER,
"Received compact_block {} at {} from {}, outputs: {}, kernels: {}, kern_ids: {}, going to process.",
bhash,
cb.header.height,
@ -124,7 +122,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
match core::Block::hydrate_from(cb, vec![]) {
Ok(block) => self.process_block(block, addr),
Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cb_hash, e);
debug!("Invalid hydrated block {}: {}", cb_hash, e);
return false;
}
}
@ -134,17 +132,16 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.chain()
.process_block_header(&cb.header, self.chain_opts())
{
debug!(LOGGER, "Invalid compact block header {}: {}", cb_hash, e);
debug!("Invalid compact block header {}: {}", cb_hash, e);
return !e.is_bad_data();
}
let (txs, missing_short_ids) = {
let tx_pool = self.tx_pool.read().unwrap();
let tx_pool = self.tx_pool.read();
tx_pool.retrieve_transactions(cb.hash(), cb.nonce, cb.kern_ids())
};
debug!(
LOGGER,
"adapter: txs from tx pool - {}, (unknown kern_ids: {})",
txs.len(),
missing_short_ids.len(),
@ -158,7 +155,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let block = match core::Block::hydrate_from(cb.clone(), txs) {
Ok(block) => block,
Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cb.hash(), e);
debug!("Invalid hydrated block {}: {}", cb.hash(), e);
return false;
}
};
@ -168,29 +165,22 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.validate(&prev.total_kernel_offset, self.verifier_cache.clone())
.is_ok()
{
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
debug!("adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr)
} else {
if self.sync_state.status() == SyncStatus::NoSync {
debug!(
LOGGER,
"adapter: block invalid after hydration, requesting full block"
);
debug!("adapter: block invalid after hydration, requesting full block");
self.request_block(&cb.header, &addr);
true
} else {
debug!(
LOGGER,
"adapter: block invalid after hydration, ignoring it, cause still syncing"
);
true
}
}
} else {
debug!(
LOGGER,
"adapter: failed to retrieve previous block header (still syncing?)"
);
debug!("adapter: failed to retrieve previous block header (still syncing?)");
true
}
}
@ -199,8 +189,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
let bhash = bh.hash();
debug!(
LOGGER,
"Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
"Received block header {} at {} from {}, going to process.",
bhash, bh.height, addr,
);
// pushing the new block header through the header chain pipeline
@ -208,16 +198,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let res = self.chain().process_block_header(&bh, self.chain_opts());
if let &Err(ref e) = &res {
debug!(
LOGGER,
"Block header {} refused by chain: {:?}",
bhash,
e.kind()
);
debug!("Block header {} refused by chain: {:?}", bhash, e.kind());
if e.is_bad_data() {
debug!(
LOGGER,
"header_received: {} is a bad header, resetting header head", bhash
"header_received: {} is a bad header, resetting header head",
bhash
);
let _ = self.chain().reset_head();
return false;
@ -238,7 +223,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) -> bool {
info!(
LOGGER,
"Received block headers {:?} from {}",
bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
addr,
@ -251,7 +235,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
// try to add headers to our header chain
let res = self.chain().sync_block_headers(&bhs, self.chain_opts());
if let &Err(ref e) = &res {
debug!(LOGGER, "Block headers refused by chain: {:?}", e);
debug!("Block headers refused by chain: {:?}", e);
if e.is_bad_data() {
return false;
@ -261,14 +245,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
debug!(LOGGER, "locate_headers: {:?}", locator,);
debug!("locate_headers: {:?}", locator,);
let header = match self.find_common_header(locator) {
Some(header) => header,
None => return vec![],
};
debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),);
debug!("locate_headers: common header: {:?}", header.hash(),);
// looks like we know one, getting as many following headers as allowed
let hh = header.height;
@ -280,18 +264,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
Err(e) => match e.kind() {
chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
_ => {
error!(LOGGER, "Could not build header locator: {:?}", e);
error!("Could not build header locator: {:?}", e);
return vec![];
}
},
}
}
debug!(
LOGGER,
"locate_headers: returning headers: {}",
headers.len(),
);
debug!("locate_headers: returning headers: {}", headers.len(),);
headers
}
@ -316,10 +296,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
reader: read,
}),
Err(e) => {
warn!(
LOGGER,
"Couldn't produce txhashset data for block {}: {:?}", h, e
);
warn!("Couldn't produce txhashset data for block {}: {:?}", h, e);
None
}
}
@ -366,12 +343,12 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.chain()
.txhashset_write(h, txhashset_data, self.sync_state.as_ref())
{
error!(LOGGER, "Failed to save txhashset archive: {}", e);
error!("Failed to save txhashset archive: {}", e);
let is_good_data = !e.is_bad_data();
self.sync_state.set_sync_error(types::Error::Chain(e));
is_good_data
} else {
info!(LOGGER, "Received valid txhashset data for {}.", h);
info!("Received valid txhashset data for {}.", h);
true
}
}
@ -446,7 +423,7 @@ impl NetToChainAdapter {
self.find_common_header(locator[1..].to_vec())
}
_ => {
error!(LOGGER, "Could not build header locator: {:?}", e);
error!("Could not build header locator: {:?}", e);
None
}
},
@ -456,14 +433,11 @@ impl NetToChainAdapter {
// pushing the new block through the chain pipeline
// remembering to reset the head if we have a bad block
fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
if !self.archive_mode {
// We cannot process blocks earlier than the horizon so check for this here.
{
let head = self.chain().head().unwrap();
// we have a fast sync'd node and are sent a block older than our horizon,
// only sync can do something with that
if b.header.height < head
.height
.saturating_sub(global::cut_through_horizon() as u64)
{
let horizon = head.height.saturating_sub(global::cut_through_horizon() as u64);
if b.header.height < horizon {
return true;
}
}
@ -478,8 +452,8 @@ impl NetToChainAdapter {
}
Err(ref e) if e.is_bad_data() => {
debug!(
LOGGER,
"adapter: process_block: {} is a bad block, resetting head", bhash
"adapter: process_block: {} is a bad block, resetting head",
bhash
);
let _ = self.chain().reset_head();
@ -494,14 +468,13 @@ impl NetToChainAdapter {
chain::ErrorKind::Orphan => {
// make sure we did not miss the parent block
if !self.chain().is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
debug!("adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr)
}
true
}
_ => {
debug!(
LOGGER,
"adapter: process_block: block {} refused by chain: {}",
bhash,
e.kind()
@ -526,8 +499,8 @@ impl NetToChainAdapter {
let now = Instant::now();
debug!(
LOGGER,
"adapter: process_block: ***** validating full chain state at {}", bhash,
"adapter: process_block: ***** validating full chain state at {}",
bhash,
);
self.chain()
@ -535,7 +508,6 @@ impl NetToChainAdapter {
.expect("chain validation failed, hard stop");
debug!(
LOGGER,
"adapter: process_block: ***** done validating full chain state, took {}s",
now.elapsed().as_secs(),
);
@ -557,7 +529,7 @@ impl NetToChainAdapter {
.name("compactor".to_string())
.spawn(move || {
if let Err(e) = chain.compact() {
error!(LOGGER, "Could not compact chain: {:?}", e);
error!("Could not compact chain: {:?}", e);
}
});
}
@ -591,23 +563,19 @@ impl NetToChainAdapter {
match self.chain().block_exists(h) {
Ok(false) => match self.peers().get_connected_peer(addr) {
None => debug!(
LOGGER,
"send_block_request_to_peer: can't send request to peer {:?}, not connected",
addr
),
Some(peer) => {
if let Err(e) = f(&peer, h) {
error!(LOGGER, "send_block_request_to_peer: failed: {:?}", e)
error!("send_block_request_to_peer: failed: {:?}", e)
}
}
},
Ok(true) => debug!(
LOGGER,
"send_block_request_to_peer: block {} already known", h
),
Ok(true) => debug!("send_block_request_to_peer: block {} already known", h),
Err(e) => error!(
LOGGER,
"send_block_request_to_peer: failed to check block exists: {:?}", e
"send_block_request_to_peer: failed to check block exists: {:?}",
e
),
}
}
@ -638,11 +606,10 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
return;
}
debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());
debug!("adapter: block_accepted: {:?}", b.hash());
if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
if let Err(e) = self.tx_pool.write().reconcile_block(b) {
error!(
LOGGER,
"Pool could not update itself at block {}: {:?}",
b.hash(),
e,

View file

@ -15,11 +15,11 @@
//! Server stat collection types, to be used by tests, logging or GUI/TUI
//! to collect information about server status
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::time::SystemTime;
use util::RwLock;
use core::pow::Difficulty;
use core::consensus::graph_weight;
use chrono::prelude::*;
@ -31,8 +31,6 @@ use p2p;
/// and populated when required
#[derive(Clone)]
pub struct ServerStateInfo {
/// whether we're in a state of waiting for peers at startup
pub awaiting_peers: Arc<AtomicBool>,
/// Stratum stats
pub stratum_stats: Arc<RwLock<StratumStats>>,
}
@ -40,7 +38,6 @@ pub struct ServerStateInfo {
impl Default for ServerStateInfo {
fn default() -> ServerStateInfo {
ServerStateInfo {
awaiting_peers: Arc::new(AtomicBool::new(false)),
stratum_stats: Arc::new(RwLock::new(StratumStats::default())),
}
}
@ -57,8 +54,6 @@ pub struct ServerStats {
pub header_head: chain::Tip,
/// Whether we're currently syncing
pub sync_status: SyncStatus,
/// Whether we're awaiting peers
pub awaiting_peers: bool,
/// Handle to current stratum server stats
pub stratum_stats: StratumStats,
/// Peer stats
@ -163,8 +158,7 @@ pub struct PeerStats {
impl StratumStats {
/// Calculate network hashrate
pub fn network_hashrate(&self) -> f64 {
42.0 * (self.network_difficulty as f64 / Difficulty::scale(self.edge_bits as u8) as f64)
/ 60.0
42.0 * (self.network_difficulty as f64 / graph_weight(self.edge_bits as u8) as f64) / 60.0
}
}
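The estimate keeps the same shape after swapping `Difficulty::scale` for `graph_weight`: 42 * (network_difficulty / graph_weight(edge_bits)) / 60, i.e. difficulty normalised by the graph weight, spread over what is presumably the 60-second target block interval. With purely hypothetical numbers:

// Hypothetical numbers, only to make the arithmetic of the estimate concrete.
fn network_hashrate(network_difficulty: u64, graph_weight: u64) -> f64 {
    42.0 * (network_difficulty as f64 / graph_weight as f64) / 60.0
}

fn main() {
    // difficulty 120_000 against a graph weight of 4_000:
    // 42 * 30 / 60 = 21 graphs per second.
    assert_eq!(network_hashrate(120_000, 4_000), 21.0);
}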

View file

@ -14,7 +14,8 @@
//! Server types
use std::convert::From;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use api;
use chain;
@ -24,7 +25,6 @@ use core::{core, pow};
use p2p;
use pool;
use store;
use util::LOGGER;
use wallet;
/// Error type wrapping underlying module errors.
@ -44,6 +44,8 @@ pub enum Error {
Wallet(wallet::Error),
/// Error originating from the cuckoo miner
Cuckoo(pow::Error),
/// Error originating from the transaction pool.
Pool(pool::PoolError),
}
impl From<core::block::Error> for Error {
@ -87,6 +89,12 @@ impl From<wallet::Error> for Error {
}
}
impl From<pool::PoolError> for Error {
fn from(e: pool::PoolError) -> Error {
Error::Pool(e)
}
}
/// Type of seeding the server will use to find other peers on the network.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ChainValidationMode {
@ -160,28 +168,6 @@ pub struct ServerConfig {
pub stratum_mining_config: Option<StratumServerConfig>,
}
impl ServerConfig {
/// Configuration items validation check
pub fn validation_check(&mut self) {
// check [server.p2p_config.capabilities] with 'archive_mode' in [server]
if let Some(archive) = self.archive_mode {
// note: slog not available before config loaded, only print here.
if archive != self
.p2p_config
.capabilities
.contains(p2p::Capabilities::FULL_HIST)
{
// if conflict, 'archive_mode' wins
self.p2p_config
.capabilities
.toggle(p2p::Capabilities::FULL_HIST);
}
}
// todo: other checks if needed
}
}
impl Default for ServerConfig {
fn default() -> ServerConfig {
ServerConfig {
@ -250,6 +236,9 @@ pub enum SyncStatus {
Initial,
/// Not syncing
NoSync,
/// Not enough peers to do anything yet, boolean indicates whether
/// we should wait at all or ignore and start ASAP
AwaitingPeers(bool),
/// Downloading block headers
HeaderSync {
current_height: u64,
@ -297,12 +286,12 @@ impl SyncState {
/// Whether the current state matches any active syncing operation.
/// Note: This includes our "initial" state.
pub fn is_syncing(&self) -> bool {
*self.current.read().unwrap() != SyncStatus::NoSync
*self.current.read() != SyncStatus::NoSync
}
/// Current syncing status
pub fn status(&self) -> SyncStatus {
*self.current.read().unwrap()
*self.current.read()
}
/// Update the syncing status
@ -311,12 +300,9 @@ impl SyncState {
return;
}
let mut status = self.current.write().unwrap();
let mut status = self.current.write();
debug!(
LOGGER,
"sync_state: sync_status: {:?} -> {:?}", *status, new_status,
);
debug!("sync_state: sync_status: {:?} -> {:?}", *status, new_status,);
*status = new_status;
}
@ -324,7 +310,7 @@ impl SyncState {
/// Update txhashset downloading progress
pub fn update_txhashset_download(&self, new_status: SyncStatus) -> bool {
if let SyncStatus::TxHashsetDownload { .. } = new_status {
let mut status = self.current.write().unwrap();
let mut status = self.current.write();
*status = new_status;
true
} else {
@ -334,7 +320,7 @@ impl SyncState {
/// Communicate sync error
pub fn set_sync_error(&self, error: Error) {
*self.sync_error.write().unwrap() = Some(error);
*self.sync_error.write() = Some(error);
}
/// Get sync error
@ -344,7 +330,7 @@ impl SyncState {
/// Clear sync error
pub fn clear_sync_error(&self) {
*self.sync_error.write().unwrap() = None;
*self.sync_error.write() = None;
}
}
@ -354,7 +340,7 @@ impl chain::TxHashsetWriteStatus for SyncState {
}
fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
let mut status = self.current.write().unwrap();
let mut status = self.current.write();
match *status {
SyncStatus::TxHashsetValidation {
kernels,

View file

@ -15,15 +15,15 @@
use chrono::prelude::Utc;
use rand::{thread_rng, Rng};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use util::RwLock;
use core::core::hash::Hashed;
use core::core::transaction;
use core::core::verifier_cache::VerifierCache;
use pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, TxSource};
use util::LOGGER;
/// A process to monitor transactions in the stempool.
/// With Dandelion, transaction can be broadcasted in stem or fluff phase.
@ -39,7 +39,7 @@ pub fn monitor_transactions(
verifier_cache: Arc<RwLock<VerifierCache>>,
stop: Arc<AtomicBool>,
) {
debug!(LOGGER, "Started Dandelion transaction monitor.");
debug!("Started Dandelion transaction monitor.");
let _ = thread::Builder::new()
.name("dandelion".to_string())
@ -57,26 +57,26 @@ pub fn monitor_transactions(
// Aggregate them up to give a single (valid) aggregated tx and propagate it
// to the next Dandelion relay along the stem.
if process_stem_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem with stem phase.");
error!("dand_mon: Problem with stem phase.");
}
// Step 2: find all "ToFluff" entries in stempool from last run.
// Aggregate them up to give a single (valid) aggregated tx and (re)add it
// to our pool with stem=false (which will then broadcast it).
if process_fluff_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem with fluff phase.");
error!("dand_mon: Problem with fluff phase.");
}
// Step 3: now find all "Fresh" entries in stempool since last run.
// Coin flip for each (90/10) and label them as either "ToStem" or "ToFluff".
// We will process these in the next run (waiting patience secs).
if process_fresh_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem processing fresh pool entries.");
error!("dand_mon: Problem processing fresh pool entries.");
}
// Step 4: now find all expired entries based on embargo timer.
if process_expired_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem processing fresh pool entries.");
error!("dand_mon: Problem processing fresh pool entries.");
}
}
});
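Step 3 above labels each fresh stempool entry with a 90/10 coin flip between stemming and fluffing. A tiny self-contained sketch of that labelling rule, with the random draw stubbed out as a parameter so the sketch carries no extra dependencies:

// Illustrative only: 90/10 stem/fluff labelling, random draw replaced by a parameter.
#[derive(Debug, PartialEq)]
enum Label {
    ToStem,
    ToFluff,
}

fn label_entry(random_percent: u32) -> Label {
    if random_percent < 90 {
        Label::ToStem
    } else {
        Label::ToFluff
    }
}

fn main() {
    assert_eq!(label_entry(42), Label::ToStem); // ~90% of draws
    assert_eq!(label_entry(95), Label::ToFluff); // ~10% of draws
}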
@ -86,14 +86,20 @@ fn process_stem_phase(
tx_pool: Arc<RwLock<TransactionPool>>,
verifier_cache: Arc<RwLock<VerifierCache>>,
) -> Result<(), PoolError> {
let mut tx_pool = tx_pool.write().unwrap();
let mut tx_pool = tx_pool.write();
let header = tx_pool.chain_head()?;
let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
let stem_txs = tx_pool
.stempool
.get_transactions_in_state(PoolEntryState::ToStem);
if stem_txs.is_empty() {
return Ok(());
}
let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
let stem_txs = tx_pool
.stempool
.select_valid_transactions(stem_txs, txpool_tx, &header)?;
@ -102,21 +108,14 @@ fn process_stem_phase(
.transition_to_state(&stem_txs, PoolEntryState::Stemmed);
if stem_txs.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} txs for stemming.",
stem_txs.len()
);
debug!("dand_mon: Found {} txs for stemming.", stem_txs.len());
let agg_tx = transaction::aggregate(stem_txs)?;
agg_tx.validate(verifier_cache.clone())?;
let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
if res.is_err() {
debug!(
LOGGER,
"dand_mon: Unable to propagate stem tx. No relay, fluffing instead."
);
debug!("dand_mon: Unable to propagate stem tx. No relay, fluffing instead.");
let src = TxSource {
debug_name: "no_relay".to_string(),
@ -133,14 +132,20 @@ fn process_fluff_phase(
tx_pool: Arc<RwLock<TransactionPool>>,
verifier_cache: Arc<RwLock<VerifierCache>>,
) -> Result<(), PoolError> {
let mut tx_pool = tx_pool.write().unwrap();
let mut tx_pool = tx_pool.write();
let header = tx_pool.chain_head()?;
let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
let stem_txs = tx_pool
.stempool
.get_transactions_in_state(PoolEntryState::ToFluff);
if stem_txs.is_empty() {
return Ok(());
}
let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
let stem_txs = tx_pool
.stempool
.select_valid_transactions(stem_txs, txpool_tx, &header)?;
@ -149,11 +154,7 @@ fn process_fluff_phase(
.transition_to_state(&stem_txs, PoolEntryState::Fluffed);
if stem_txs.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} txs for fluffing.",
stem_txs.len()
);
debug!("dand_mon: Found {} txs for fluffing.", stem_txs.len());
let agg_tx = transaction::aggregate(stem_txs)?;
agg_tx.validate(verifier_cache.clone())?;
@ -172,7 +173,7 @@ fn process_fresh_entries(
dandelion_config: DandelionConfig,
tx_pool: Arc<RwLock<TransactionPool>>,
) -> Result<(), PoolError> {
let mut tx_pool = tx_pool.write().unwrap();
let mut tx_pool = tx_pool.write();
let mut rng = thread_rng();
@ -185,7 +186,6 @@ fn process_fresh_entries(
if fresh_entries.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} fresh entries in stempool.",
fresh_entries.len()
);
@ -212,31 +212,23 @@ fn process_expired_entries(
let mut expired_entries = vec![];
{
let tx_pool = tx_pool.read().unwrap();
let tx_pool = tx_pool.read();
for entry in tx_pool
.stempool
.entries
.iter()
.filter(|x| x.tx_at.timestamp() < cutoff)
{
debug!(
LOGGER,
"dand_mon: Embargo timer expired for {:?}",
entry.tx.hash()
);
debug!("dand_mon: Embargo timer expired for {:?}", entry.tx.hash());
expired_entries.push(entry.clone());
}
}
if expired_entries.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} expired txs.",
expired_entries.len()
);
debug!("dand_mon: Found {} expired txs.", expired_entries.len());
{
let mut tx_pool = tx_pool.write().unwrap();
let mut tx_pool = tx_pool.write();
let header = tx_pool.chain_head()?;
for entry in expired_entries {
@ -245,11 +237,8 @@ fn process_expired_entries(
identifier: "?.?.?.?".to_string(),
};
match tx_pool.add_to_pool(src, entry.tx, false, &header) {
Ok(_) => debug!(
LOGGER,
"dand_mon: embargo expired, fluffed tx successfully."
),
Err(e) => debug!(LOGGER, "dand_mon: Failed to fluff expired tx - {:?}", e),
Ok(_) => debug!("dand_mon: embargo expired, fluffed tx successfully."),
Err(e) => debug!("dand_mon: Failed to fluff expired tx - {:?}", e),
};
}
}

View file

@ -12,9 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Mining plugin manager, using the cuckoo-miner crate to provide
//! a mining worker implementation
//!
//! Seeds a server with initial peers on first start and keeps monitoring
//! peer counts to connect to more if needed. Seeding strategy is
//! configurable with either no peers, a user-defined list or a preset
//! list of DNS records (the default).
use chrono::prelude::Utc;
use chrono::{Duration, MIN_DATE};
@ -27,7 +28,6 @@ use std::{cmp, io, str, thread, time};
use p2p;
use p2p::ChainAdapter;
use pool::DandelionConfig;
use util::LOGGER;
// DNS Seeds with contact email associated
const DNS_SEEDS: &'static [&'static str] = &[
@ -60,10 +60,18 @@ pub fn connect_and_monitor(
);
let mut prev = MIN_DATE.and_hms(0, 0, 0);
let mut prev_expire_check = MIN_DATE.and_hms(0, 0, 0);
let mut prev_ping = Utc::now();
let mut start_attempt = 0;
while !stop.load(Ordering::Relaxed) {
// Check for and remove expired peers from the storage
if Utc::now() - prev_expire_check > Duration::hours(1) {
peers.remove_expired();
prev_expire_check = Utc::now();
}
// make several attempts to get peers as quickly as possible
// with exponential backoff
if Utc::now() - prev > Duration::seconds(cmp::min(20, 1 << start_attempt)) {
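With `start_attempt` incrementing once per attempt (as the surrounding loop suggests), the capped backoff `cmp::min(20, 1 << start_attempt)` yields waits of 1, 2, 4, 8, 16 and then a steady 20 seconds between peer-connection attempts. A standalone check of that schedule:

// Standalone check of the capped exponential backoff schedule.
fn backoff_secs(attempt: u32) -> i64 {
    std::cmp::min(20, 1i64 << attempt)
}

fn main() {
    let schedule: Vec<i64> = (0..7).map(backoff_secs).collect();
    assert_eq!(schedule, vec![1, 2, 4, 8, 16, 20, 20]);
}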
@ -111,6 +119,7 @@ fn monitor_peers(
let mut healthy_count = 0;
let mut banned_count = 0;
let mut defuncts = vec![];
for x in peers.all_peers() {
match x.flags {
p2p::State::Banned => {
@ -119,8 +128,8 @@ fn monitor_peers(
if interval >= config.ban_window() {
peers.unban_peer(&x.addr);
debug!(
LOGGER,
"monitor_peers: unbanned {} after {} seconds", x.addr, interval
"monitor_peers: unbanned {} after {} seconds",
x.addr, interval
);
} else {
banned_count += 1;
@ -132,7 +141,6 @@ fn monitor_peers(
}
debug!(
LOGGER,
"monitor_peers: on {}:{}, {} connected ({} most_work). \
all {} = {} healthy + {} banned + {} defunct",
config.host,
@ -158,8 +166,8 @@ fn monitor_peers(
let mut connected_peers: Vec<SocketAddr> = vec![];
for p in peers.connected_peers() {
debug!(
LOGGER,
"monitor_peers: {}:{} ask {} for more peers", config.host, config.port, p.info.addr,
"monitor_peers: {}:{} ask {} for more peers",
config.host, config.port, p.info.addr,
);
let _ = p.send_peer_request(capabilities);
connected_peers.push(p.info.addr)
@ -178,7 +186,7 @@ fn monitor_peers(
}
}
}
None => debug!(LOGGER, "monitor_peers: no preferred peers"),
None => debug!("monitor_peers: no preferred peers"),
}
// take a random defunct peer and mark it healthy: over a long period any
@ -197,8 +205,8 @@ fn monitor_peers(
);
for p in new_peers.iter().filter(|p| !peers.is_known(&p.addr)) {
debug!(
LOGGER,
"monitor_peers: on {}:{}, queue to soon try {}", config.host, config.port, p.addr,
"monitor_peers: on {}:{}, queue to soon try {}",
config.host, config.port, p.addr,
);
tx.send(p.addr).unwrap();
}
@ -208,13 +216,13 @@ fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionCon
// Dandelion Relay Updater
let dandelion_relay = peers.get_dandelion_relay();
if dandelion_relay.is_empty() {
debug!(LOGGER, "monitor_peers: no dandelion relay updating");
debug!("monitor_peers: no dandelion relay updating");
peers.update_dandelion_relay();
} else {
for last_added in dandelion_relay.keys() {
let dandelion_interval = Utc::now().timestamp() - last_added;
if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
debug!(LOGGER, "monitor_peers: updating expired dandelion relay");
debug!("monitor_peers: updating expired dandelion relay");
peers.update_dandelion_relay();
}
}
@ -230,7 +238,7 @@ fn connect_to_seeds_and_preferred_peers(
peers_preferred_list: Option<Vec<SocketAddr>>,
) {
// check if we have some peers in db
let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_HIST, 100);
let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_NODE, 100);
// if so, get their addresses, otherwise use our seeds
let mut peer_addrs = if peers.len() > 3 {
@ -242,11 +250,11 @@ fn connect_to_seeds_and_preferred_peers(
// If we have preferred peers add them to the connection
match peers_preferred_list {
Some(mut peers_preferred) => peer_addrs.append(&mut peers_preferred),
None => debug!(LOGGER, "No preferred peers"),
None => debug!("No preferred peers"),
};
if peer_addrs.len() == 0 {
warn!(LOGGER, "No seeds were retrieved.");
warn!("No seeds were retrieved.");
}
// connect to this first set of addresses
@ -311,7 +319,7 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
let mut addresses: Vec<SocketAddr> = vec![];
for dns_seed in DNS_SEEDS {
let temp_addresses = addresses.clone();
debug!(LOGGER, "Retrieving seed nodes from dns {}", dns_seed);
debug!("Retrieving seed nodes from dns {}", dns_seed);
match (dns_seed.to_owned(), 0).to_socket_addrs() {
Ok(addrs) => addresses.append(
&mut (addrs
@ -321,13 +329,10 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
}).filter(|addr| !temp_addresses.contains(addr))
.collect()),
),
Err(e) => debug!(
LOGGER,
"Failed to resolve seed {:?} got error {:?}", dns_seed, e
),
Err(e) => debug!("Failed to resolve seed {:?} got error {:?}", dns_seed, e),
}
}
debug!(LOGGER, "Retrieved seed addresses: {:?}", addresses);
debug!("Retrieved seed addresses: {:?}", addresses);
addresses
})
}

View file

@ -18,8 +18,9 @@
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::{thread, time};
use util::RwLock;
use api;
use chain;
@ -27,8 +28,7 @@ use common::adapters::{
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
};
use common::stats::{DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats};
use common::types::{Error, ServerConfig, StratumServerConfig, SyncState};
use core::core::hash::Hashed;
use common::types::{Error, ServerConfig, StratumServerConfig, SyncState, SyncStatus};
use core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use core::{consensus, genesis, global, pow};
use grin::{dandelion_monitor, seed, sync};
@ -38,7 +38,6 @@ use p2p;
use pool;
use store;
use util::file::get_first_line;
use util::LOGGER;
/// Grin server holding internal structures.
pub struct Server {
@ -79,7 +78,7 @@ impl Server {
if let Some(s) = enable_stratum_server {
if s {
{
let mut stratum_stats = serv.state_info.stratum_stats.write().unwrap();
let mut stratum_stats = serv.state_info.stratum_stats.write();
stratum_stats.is_enabled = true;
}
serv.start_stratum_server(c.clone());
@ -111,18 +110,6 @@ impl Server {
Some(b) => b,
};
// If archive mode is enabled then the flags should contain the FULL_HIST flag
if archive_mode && !config
.p2p_config
.capabilities
.contains(p2p::Capabilities::FULL_HIST)
{
config
.p2p_config
.capabilities
.insert(p2p::Capabilities::FULL_HIST);
}
let stop = Arc::new(AtomicBool::new(false));
// Shared cache for verification results.
@ -155,7 +142,7 @@ impl Server {
global::ChainTypes::Mainnet => genesis::genesis_testnet2(), //TODO: Fix, obviously
};
info!(LOGGER, "Starting server, genesis block: {}", genesis.hash());
info!("Starting server, genesis block: {}", genesis.hash());
let db_env = Arc::new(store::new_env(config.db_root.clone()));
let shared_chain = Arc::new(chain::Chain::init(
@ -170,8 +157,6 @@ impl Server {
pool_adapter.set_chain(shared_chain.clone());
let awaiting_peers = Arc::new(AtomicBool::new(false));
let net_adapter = Arc::new(NetToChainAdapter::new(
sync_state.clone(),
archive_mode,
@ -181,11 +166,6 @@ impl Server {
config.clone(),
));
let block_1_hash = match shared_chain.get_header_by_height(1) {
Ok(header) => Some(header.hash()),
Err(_) => None,
};
let peer_db_env = Arc::new(store::new_named_env(config.db_root.clone(), "peer".into()));
let p2p_server = Arc::new(p2p::Server::new(
peer_db_env,
@ -194,8 +174,6 @@ impl Server {
net_adapter.clone(),
genesis.hash(),
stop.clone(),
archive_mode,
block_1_hash,
)?);
chain_adapter.init(p2p_server.peers.clone());
pool_net_adapter.init(p2p_server.peers.clone());
@ -204,10 +182,7 @@ impl Server {
if config.p2p_config.seeding_type.clone() != p2p::Seeding::Programmatic {
let seeder = match config.p2p_config.seeding_type.clone() {
p2p::Seeding::None => {
warn!(
LOGGER,
"No seed configured, will stay solo until connected to"
);
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => {
@ -234,18 +209,13 @@ impl Server {
// Defaults to None (optional) in config file.
// This translates to false here so we do not skip by default.
let skip_sync_wait = match config.skip_sync_wait {
None => false,
Some(b) => b,
};
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
sync::run_sync(
sync_state.clone(),
awaiting_peers.clone(),
p2p_server.peers.clone(),
shared_chain.clone(),
skip_sync_wait,
archive_mode,
stop.clone(),
);
@ -254,7 +224,7 @@ impl Server {
.name("p2p-server".to_string())
.spawn(move || p2p_inner.listen());
info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr);
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
api::start_rest_apis(
config.api_http_addr.clone(),
@ -265,10 +235,7 @@ impl Server {
None,
);
info!(
LOGGER,
"Starting dandelion monitor: {}", &config.api_http_addr
);
info!("Starting dandelion monitor: {}", &config.api_http_addr);
dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
@ -276,7 +243,7 @@ impl Server {
stop.clone(),
);
warn!(LOGGER, "Grin server started.");
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
@ -285,7 +252,6 @@ impl Server {
verifier_cache,
sync_state,
state_info: ServerStateInfo {
awaiting_peers: awaiting_peers,
..Default::default()
},
stop,
@ -335,7 +301,7 @@ impl Server {
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(&self, wallet_listener_url: Option<String>, stop: Arc<AtomicBool>) {
info!(LOGGER, "start_test_miner - start",);
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
@ -388,8 +354,7 @@ impl Server {
/// other
/// consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
let stratum_stats = self.state_info.stratum_stats.read().unwrap().clone();
let awaiting_peers = self.state_info.awaiting_peers.load(Ordering::Relaxed);
let stratum_stats = self.state_info.stratum_stats.read().clone();
// Fill out stats on our current difficulty calculation
// TODO: check the overhead of calculating this again isn't too much
@ -406,7 +371,6 @@ impl Server {
let mut last_time = last_blocks[0].timestamp;
let tip_height = self.chain.head().unwrap().height as i64;
let earliest_block_height = tip_height as i64 - last_blocks.len() as i64;
let mut i = 1;
let diff_entries: Vec<DiffBlock> = last_blocks
@ -414,7 +378,7 @@ impl Server {
.skip(1)
.map(|n| {
let dur = n.timestamp - last_time;
let height = earliest_block_height + i + 1;
let height = earliest_block_height + i;
i += 1;
last_time = n.timestamp;
DiffBlock {
@ -450,7 +414,6 @@ impl Server {
head: self.head(),
header_head: self.header_head(),
sync_status: self.sync_state.status(),
awaiting_peers: awaiting_peers,
stratum_stats: stratum_stats,
peer_stats: peer_stats,
diff_stats: diff_stats,
@ -466,6 +429,6 @@ impl Server {
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<AtomicBool>) {
stop.store(true, Ordering::Relaxed);
info!(LOGGER, "stop_test_miner - stop",);
info!("stop_test_miner - stop",);
}
}

View file

@ -22,7 +22,6 @@ use common::types::{SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed, ZERO_HASH};
use core::global;
use p2p;
use util::LOGGER;
pub struct BodySync {
chain: Arc<chain::Chain>,
@ -87,14 +86,13 @@ impl BodySync {
fn body_sync(&mut self) {
let horizon = global::cut_through_horizon() as u64;
let body_head: chain::Tip = self.chain.head().unwrap();
let header_head: chain::Tip = self.chain.header_head().unwrap();
let sync_head: chain::Tip = self.chain.get_sync_head().unwrap();
let body_head = self.chain.head().unwrap();
let header_head = self.chain.header_head().unwrap();
let sync_head = self.chain.get_sync_head().unwrap();
self.reset();
debug!(
LOGGER,
"body_sync: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}",
body_head.last_block_h,
body_head.height,
@ -123,15 +121,16 @@ impl BodySync {
}
hashes.reverse();
if oldest_height < header_head.height.saturating_sub(horizon) {
debug!("body_sync: cannot sync full blocks earlier than horizon.");
return;
}
let peers = self.peers.more_work_peers();
// if we have 5 peers to sync from then ask for 50 blocks total (peer_count *
// 10); max will be 80 if all 8 peers are advertising more work.
// Also, if the chain is already saturated with orphans, throttle.
let peers = if oldest_height < header_head.height.saturating_sub(horizon) {
self.peers.more_work_archival_peers()
} else {
self.peers.more_work_peers()
};
let block_count = cmp::min(
cmp::min(100, peers.len() * p2p::SEND_CHANNEL_CAP),
chain::MAX_ORPHAN_SIZE.saturating_sub(self.chain.orphans_len()) + 1,
@ -148,7 +147,6 @@ impl BodySync {
if hashes_to_get.len() > 0 {
debug!(
LOGGER,
"block_sync: {}/{} requesting blocks {:?} from {} peers",
body_head.height,
header_head.height,
@ -161,7 +159,7 @@ impl BodySync {
for hash in hashes_to_get.clone() {
if let Some(peer) = peers_iter.next() {
if let Err(e) = peer.send_block_request(*hash) {
debug!(LOGGER, "Skipped request to {}: {:?}", peer.info.addr, e);
debug!("Skipped request to {}: {:?}", peer.info.addr, e);
} else {
self.body_sync_hashes.push(hash.clone());
}
@ -199,7 +197,6 @@ impl BodySync {
.filter(|x| !self.chain.get_block(*x).is_ok() && !self.chain.is_orphan(*x))
.collect::<Vec<_>>();
debug!(
LOGGER,
"body_sync: {}/{} blocks received, and no more in 200ms",
self.body_sync_hashes.len() - hashes_not_get.len(),
self.body_sync_hashes.len(),
@ -210,7 +207,6 @@ impl BodySync {
None => {
if Utc::now() - self.sync_start_ts > Duration::seconds(5) {
debug!(
LOGGER,
"body_sync: 0/{} blocks received in 5s",
self.body_sync_hashes.len(),
);

View file

@ -20,7 +20,6 @@ use chain;
use common::types::{Error, SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed};
use p2p::{self, Peer};
use util::LOGGER;
pub struct HeaderSync {
sync_state: Arc<SyncState>,
@ -55,12 +54,11 @@ impl HeaderSync {
let enable_header_sync = match status {
SyncStatus::BodySync { .. } | SyncStatus::HeaderSync { .. } => true,
SyncStatus::NoSync | SyncStatus::Initial => {
SyncStatus::NoSync | SyncStatus::Initial | SyncStatus::AwaitingPeers(_) => {
// Reset sync_head to header_head on transition to HeaderSync,
// but ONLY on initial transition to HeaderSync state.
let sync_head = self.chain.get_sync_head().unwrap();
debug!(
LOGGER,
"sync: initial transition to HeaderSync. sync_head: {} at {}, reset to: {} at {}",
sync_head.hash(),
sync_head.height,
@ -104,7 +102,7 @@ impl HeaderSync {
// always enable header sync on initial state transition from NoSync / Initial
let force_sync = match self.sync_state.status() {
SyncStatus::NoSync | SyncStatus::Initial => true,
SyncStatus::NoSync | SyncStatus::Initial | SyncStatus::AwaitingPeers(_) => true,
_ => false,
};
@ -141,8 +139,8 @@ impl HeaderSync {
fn request_headers(&mut self, peer: &Peer) {
if let Ok(locator) = self.get_locator() {
debug!(
LOGGER,
"sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator,
"sync: request_headers: asking {} for headers, {:?}",
peer.info.addr, locator,
);
let _ = peer.send_header_request(locator);
@ -165,7 +163,7 @@ impl HeaderSync {
self.history_locators.clear();
}
debug!(LOGGER, "sync: locator heights : {:?}", heights);
debug!("sync: locator heights : {:?}", heights);
let mut locator: Vec<Hash> = vec![];
let mut current = self.chain.get_block_header(&tip.last_block_h);
@ -237,7 +235,7 @@ impl HeaderSync {
}
}
debug!(LOGGER, "sync: locator heights': {:?}", new_heights);
debug!("sync: locator heights': {:?}", new_heights);
// shrink history_locators properly
if heights.len() > 1 {
@ -258,14 +256,13 @@ impl HeaderSync {
}
}
debug!(
LOGGER,
"sync: history locators: len={}, shrunk={}",
self.history_locators.len(),
shrunk_size
);
}
debug!(LOGGER, "sync: locator: {:?}", locator);
debug!("sync: locator: {:?}", locator);
Ok(locator)
}

View file

@ -21,7 +21,6 @@ use common::types::{Error, SyncState, SyncStatus};
use core::core::hash::Hashed;
use core::global;
use p2p::{self, Peer};
use util::LOGGER;
/// Fast sync has 3 "states":
/// * syncing headers
@ -33,7 +32,6 @@ pub struct StateSync {
sync_state: Arc<SyncState>,
peers: Arc<p2p::Peers>,
chain: Arc<chain::Chain>,
archive_mode: bool,
prev_fast_sync: Option<DateTime<Utc>>,
fast_sync_peer: Option<Arc<Peer>>,
@ -44,13 +42,11 @@ impl StateSync {
sync_state: Arc<SyncState>,
peers: Arc<p2p::Peers>,
chain: Arc<chain::Chain>,
archive_mode: bool,
) -> StateSync {
StateSync {
sync_state,
peers,
chain,
archive_mode,
prev_fast_sync: None,
fast_sync_peer: None,
}
@ -65,8 +61,8 @@ impl StateSync {
head: &chain::Tip,
highest_height: u64,
) -> bool {
let need_state_sync = !self.archive_mode
&& highest_height.saturating_sub(head.height) > global::cut_through_horizon() as u64;
let need_state_sync =
highest_height.saturating_sub(head.height) > global::cut_through_horizon() as u64;
if !need_state_sync {
return false;
}
@ -76,11 +72,8 @@ impl StateSync {
// check sync error
{
let clone = self.sync_state.sync_error();
if let Some(ref sync_error) = *clone.read().unwrap() {
error!(
LOGGER,
"fast_sync: error = {:?}. restart fast sync", sync_error
);
if let Some(ref sync_error) = *clone.read() {
error!("fast_sync: error = {:?}. restart fast sync", sync_error);
sync_need_restart = true;
}
drop(clone);
@ -92,8 +85,8 @@ impl StateSync {
if !peer.is_connected() {
sync_need_restart = true;
info!(
LOGGER,
"fast_sync: peer connection lost: {:?}. restart", peer.info.addr,
"fast_sync: peer connection lost: {:?}. restart",
peer.info.addr,
);
}
}
@ -110,10 +103,7 @@ impl StateSync {
if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() {
if download_timeout {
error!(
LOGGER,
"fast_sync: TxHashsetDownload status timeout in 10 minutes!"
);
error!("fast_sync: TxHashsetDownload status timeout in 10 minutes!");
self.sync_state
.set_sync_error(Error::P2P(p2p::Error::Timeout));
}
@ -168,7 +158,6 @@ impl StateSync {
}
let bhash = txhashset_head.hash();
debug!(
LOGGER,
"fast_sync: before txhashset request, header head: {} / {}, txhashset_head: {} / {}",
header_head.height,
header_head.last_block_h,
@ -176,7 +165,7 @@ impl StateSync {
bhash
);
if let Err(e) = peer.send_txhashset_request(txhashset_head.height, bhash) {
error!(LOGGER, "fast_sync: send_txhashset_request err! {:?}", e);
error!("fast_sync: send_txhashset_request err! {:?}", e);
return Err(e);
}
return Ok(peer.clone());

View file

@ -23,184 +23,188 @@ use core::pow::Difficulty;
use grin::sync::body_sync::BodySync;
use grin::sync::header_sync::HeaderSync;
use grin::sync::state_sync::StateSync;
use p2p::{self, Peers};
use util::LOGGER;
use p2p;
pub fn run_sync(
sync_state: Arc<SyncState>,
awaiting_peers: Arc<AtomicBool>,
peers: Arc<p2p::Peers>,
chain: Arc<chain::Chain>,
skip_sync_wait: bool,
archive_mode: bool,
stop: Arc<AtomicBool>,
) {
let _ = thread::Builder::new()
.name("sync".to_string())
.spawn(move || {
sync_loop(
sync_state,
awaiting_peers,
peers,
chain,
skip_sync_wait,
archive_mode,
stop,
)
let runner = SyncRunner::new(sync_state, peers, chain, stop);
runner.sync_loop();
});
}
fn wait_for_min_peers(
awaiting_peers: Arc<AtomicBool>,
peers: Arc<p2p::Peers>,
chain: Arc<chain::Chain>,
skip_sync_wait: bool,
) {
// Initial sleep to give us time to peer with some nodes.
// Note: Even if we have "skip_sync_wait" we need to wait a
// short period of time for tests to do the right thing.
let wait_secs = if skip_sync_wait { 3 } else { 30 };
let head = chain.head().unwrap();
awaiting_peers.store(true, Ordering::Relaxed);
let mut n = 0;
const MIN_PEERS: usize = 3;
loop {
let wp = peers.more_work_peers();
// exit loop when:
// * we have more than MIN_PEERS more_work peers
// * we are synced already, e.g. grin was quickly restarted
// * timeout
if wp.len() > MIN_PEERS
|| (wp.len() == 0 && peers.enough_peers() && head.total_difficulty > Difficulty::zero())
|| n > wait_secs
{
break;
}
thread::sleep(time::Duration::from_secs(1));
n += 1;
}
awaiting_peers.store(false, Ordering::Relaxed);
}
/// Starts the syncing loop, just spawns two threads that loop forever
fn sync_loop(
pub struct SyncRunner {
sync_state: Arc<SyncState>,
awaiting_peers: Arc<AtomicBool>,
peers: Arc<p2p::Peers>,
chain: Arc<chain::Chain>,
skip_sync_wait: bool,
archive_mode: bool,
stop: Arc<AtomicBool>,
) {
// Wait for connections to reach at least MIN_PEERS
wait_for_min_peers(awaiting_peers, peers.clone(), chain.clone(), skip_sync_wait);
// Our 3 main sync stages
let mut header_sync = HeaderSync::new(sync_state.clone(), peers.clone(), chain.clone());
let mut body_sync = BodySync::new(sync_state.clone(), peers.clone(), chain.clone());
let mut state_sync = StateSync::new(
sync_state.clone(),
peers.clone(),
chain.clone(),
archive_mode,
);
// Highest height seen on the network, generally useful for a fast test on
// whether some sync is needed
let mut highest_height = 0;
// Main syncing loop
while !stop.load(Ordering::Relaxed) {
thread::sleep(time::Duration::from_millis(10));
// check whether syncing is generally needed, when we compare our state with others
let (syncing, most_work_height) =
needs_syncing(sync_state.as_ref(), peers.clone(), chain.clone());
if most_work_height > 0 {
// we can occasionally get a most work height of 0 if read locks fail
highest_height = most_work_height;
}
// quick short-circuit (and a decent sleep) if no syncing is needed
if !syncing {
sync_state.update(SyncStatus::NoSync);
thread::sleep(time::Duration::from_secs(10));
continue;
}
// if syncing is needed
let head = chain.head().unwrap();
let header_head = chain.header_head().unwrap();
// run each sync stage, each of them deciding whether they're needed
// except for body sync that only runs if state sync is off or done
header_sync.check_run(&header_head, highest_height);
if !state_sync.check_run(&header_head, &head, highest_height) {
body_sync.check_run(&head, highest_height);
}
}
}
/// Whether we're currently syncing the chain or we're fully caught up and
/// just receiving blocks through gossip.
fn needs_syncing(
sync_state: &SyncState,
peers: Arc<Peers>,
chain: Arc<chain::Chain>,
) -> (bool, u64) {
let local_diff = chain.head().unwrap().total_difficulty;
let peer = peers.most_work_peer();
let is_syncing = sync_state.is_syncing();
let mut most_work_height = 0;
impl SyncRunner {
fn new(
sync_state: Arc<SyncState>,
peers: Arc<p2p::Peers>,
chain: Arc<chain::Chain>,
stop: Arc<AtomicBool>,
) -> SyncRunner {
SyncRunner {
sync_state,
peers,
chain,
stop,
}
}
// if we're already syncing, we're caught up if no peer has a higher
// difficulty than us
if is_syncing {
if let Some(peer) = peer {
most_work_height = peer.info.height();
if peer.info.total_difficulty() <= local_diff {
let ch = chain.head().unwrap();
info!(
LOGGER,
"synchronized at {} @ {} [{}]",
local_diff.to_num(),
ch.height,
ch.last_block_h
);
fn wait_for_min_peers(&self) {
// Initial sleep to give us time to peer with some nodes.
// Note: Even if we skip the peer wait we still need to wait a
// short period of time for tests to do the right thing.
let wait_secs = if let SyncStatus::AwaitingPeers(true) = self.sync_state.status() {
30
} else {
3
};
let _ = chain.reset_head();
return (false, most_work_height);
let head = self.chain.head().unwrap();
let mut n = 0;
const MIN_PEERS: usize = 3;
loop {
let wp = self.peers.more_work_peers();
// exit loop when:
// * we have more than MIN_PEERS more_work peers
// * we are synced already, e.g. grin was quickly restarted
// * timeout
if wp.len() > MIN_PEERS
|| (wp.len() == 0
&& self.peers.enough_peers()
&& head.total_difficulty > Difficulty::zero())
|| n > wait_secs
{
break;
}
thread::sleep(time::Duration::from_secs(1));
n += 1;
}
}
/// Starts the syncing loop, just spawns two threads that loop forever
fn sync_loop(&self) {
// Wait for connections to reach at least MIN_PEERS
self.wait_for_min_peers();
// Our 3 main sync stages
let mut header_sync = HeaderSync::new(
self.sync_state.clone(),
self.peers.clone(),
self.chain.clone(),
);
let mut body_sync = BodySync::new(
self.sync_state.clone(),
self.peers.clone(),
self.chain.clone(),
);
let mut state_sync = StateSync::new(
self.sync_state.clone(),
self.peers.clone(),
self.chain.clone(),
);
// Highest height seen on the network, generally useful for a fast test on
// whether some sync is needed
let mut highest_height = 0;
// Main syncing loop
while !self.stop.load(Ordering::Relaxed) {
thread::sleep(time::Duration::from_millis(10));
// check whether syncing is generally needed, when we compare our state with others
let (syncing, most_work_height) = self.needs_syncing();
if most_work_height > 0 {
// we can occasionally get a most work height of 0 if read locks fail
highest_height = most_work_height;
}
// quick short-circuit (and a decent sleep) if no syncing is needed
if !syncing {
self.sync_state.update(SyncStatus::NoSync);
thread::sleep(time::Duration::from_secs(10));
continue;
}
// if syncing is needed
let head = self.chain.head().unwrap();
let header_head = self.chain.header_head().unwrap();
// run each sync stage, each of them deciding whether they're needed
// except for body sync that only runs if state sync is off or done
header_sync.check_run(&header_head, highest_height);
if !state_sync.check_run(&header_head, &head, highest_height) {
body_sync.check_run(&head, highest_height);
}
}
}
/// Whether we're currently syncing the chain or we're fully caught up and
/// just receiving blocks through gossip.
fn needs_syncing(&self) -> (bool, u64) {
let local_diff = self.chain.head().unwrap().total_difficulty;
let peer = self.peers.most_work_peer();
let is_syncing = self.sync_state.is_syncing();
let mut most_work_height = 0;
// if we're already syncing, we're caught up if no peer has a higher
// difficulty than us
if is_syncing {
if let Some(peer) = peer {
most_work_height = peer.info.height();
if peer.info.total_difficulty() <= local_diff {
let ch = self.chain.head().unwrap();
info!(
"synchronized at {} @ {} [{}]",
local_diff.to_num(),
ch.height,
ch.last_block_h
);
let _ = self.chain.reset_head();
return (false, most_work_height);
}
} else {
warn!("sync: no peers available, disabling sync");
return (false, 0);
}
} else {
warn!(LOGGER, "sync: no peers available, disabling sync");
return (false, 0);
}
} else {
if let Some(peer) = peer {
most_work_height = peer.info.height();
if let Some(peer) = peer {
most_work_height = peer.info.height();
// sum the last 5 difficulties to give us the threshold
let threshold = chain
.difficulty_iter()
.map(|x| x.difficulty)
.take(5)
.fold(Difficulty::zero(), |sum, val| sum + val);
// sum the last 5 difficulties to give us the threshold
let threshold = self
.chain
.difficulty_iter()
.map(|x| x.difficulty)
.take(5)
.fold(Difficulty::zero(), |sum, val| sum + val);
let peer_diff = peer.info.total_difficulty();
if peer_diff > local_diff.clone() + threshold.clone() {
info!(
LOGGER,
"sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync",
local_diff,
peer_diff,
threshold,
);
return (true, most_work_height);
let peer_diff = peer.info.total_difficulty();
if peer_diff > local_diff.clone() + threshold.clone() {
info!(
"sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync",
local_diff,
peer_diff,
threshold,
);
return (true, most_work_height);
}
}
}
(is_syncing, most_work_height)
}
(is_syncing, most_work_height)
}
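The sync trigger itself is unchanged by the refactor into `SyncRunner`: a peer has to be ahead of us by more than the sum of our last five block difficulties before sync is enabled, otherwise we stay in gossip mode. A small worked example with invented numbers:

// Invented numbers, just to make the sync trigger rule concrete.
fn needs_sync(local_diff: u64, peer_diff: u64, last_five: &[u64; 5]) -> bool {
    let threshold: u64 = last_five.iter().sum();
    peer_diff > local_diff + threshold
}

fn main() {
    let last_five = [10, 11, 9, 12, 10]; // threshold = 52
    assert!(!needs_sync(1_000, 1_050, &last_five)); // within threshold: keep gossiping
    assert!(needs_sync(1_000, 1_060, &last_five)); // more than 52 ahead: start syncing
}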

View file

@ -35,7 +35,7 @@ extern crate serde;
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate grin_api as api;

View file

@ -17,9 +17,10 @@
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use rand::{thread_rng, Rng};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use util::RwLock;
use chain;
use common::types::Error;
@ -27,7 +28,7 @@ use core::core::verifier_cache::VerifierCache;
use core::{consensus, core, ser};
use keychain::{ExtKeychain, Identifier, Keychain};
use pool;
use util::{self, LOGGER};
use util;
use wallet::{self, BlockFees};
// Ensure a block suitable for mining is built and returned
@ -54,24 +55,22 @@ pub fn get_block(
self::Error::Chain(c) => match c.kind() {
chain::ErrorKind::DuplicateCommitment(_) => {
debug!(
LOGGER,
"Duplicate commit for potential coinbase detected. Trying next derivation."
);
}
_ => {
error!(LOGGER, "Chain Error: {}", c);
error!("Chain Error: {}", c);
}
},
self::Error::Wallet(_) => {
error!(
LOGGER,
"Error building new block: Can't connect to wallet listener at {:?}; will retry",
wallet_listener_url.as_ref().unwrap()
);
thread::sleep(Duration::from_secs(wallet_retry_interval));
}
ae => {
warn!(LOGGER, "Error building new block: {:?}. Retrying.", ae);
warn!("Error building new block: {:?}. Retrying.", ae);
}
}
thread::sleep(Duration::from_millis(100));
@ -106,15 +105,10 @@ fn build_block(
// Determine the difficulty our block should be at.
// Note: do not keep the difficulty_iter in scope (it has an active batch).
let difficulty = consensus::next_difficulty(1, chain.difficulty_iter());
let difficulty = consensus::next_difficulty(head.height + 1, chain.difficulty_iter());
// extract current transaction from the pool
// TODO - we have a lot of unwrap() going on in this fn...
let txs = tx_pool
.read()
.unwrap()
.prepare_mineable_transactions()
.unwrap();
let txs = tx_pool.read().prepare_mineable_transactions()?;
// build the coinbase and the block itself
let fees = txs.iter().map(|tx| tx.fee()).sum();
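The tx_pool change above also swaps panicking unwrap() calls for the ? operator. A small, self-contained illustration (the types are placeholders, not the pool's real API) of the difference in failure behaviour:

fn prepare(mineable: Result<Vec<u64>, String>) -> Result<usize, String> {
	// before: let txs = mineable.unwrap();  // panics the mining thread on Err
	// after:
	let txs = mineable?; // hands the Err back to the caller instead
	Ok(txs.len())
}

fn main() {
	assert_eq!(prepare(Ok(vec![1, 2, 3])), Ok(3));
	assert_eq!(prepare(Err("pool busy".to_string())), Err("pool busy".to_string()));
}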
@ -137,7 +131,6 @@ fn build_block(
let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num();
debug!(
LOGGER,
"Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
b.inputs().len(),
b.outputs().len(),
@ -162,10 +155,7 @@ fn build_block(
//Some other issue, possibly duplicate kernel
_ => {
error!(
LOGGER,
"Error setting txhashset root to build a block: {:?}", e
);
error!("Error setting txhashset root to build a block: {:?}", e);
Err(Error::Chain(
chain::ErrorKind::Other(format!("{:?}", e)).into(),
))
@ -179,7 +169,7 @@ fn build_block(
/// Probably only want to do this when testing.
///
fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
warn!(LOGGER, "Burning block fees: {:?}", block_fees);
warn!("Burning block fees: {:?}", block_fees);
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let (out, kernel) =
@ -212,7 +202,7 @@ fn get_coinbase(
..block_fees
};
debug!(LOGGER, "get_coinbase: {:?}", block_fees);
debug!("get_coinbase: {:?}", block_fees);
return Ok((output, kernel, block_fees));
}
}

View file

@ -21,9 +21,10 @@ use serde_json::Value;
use std::error::Error;
use std::io::{BufRead, ErrorKind, Write};
use std::net::{TcpListener, TcpStream};
use std::sync::{Arc, Mutex, RwLock};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use std::{cmp, thread};
use util::{Mutex, RwLock};
use chain;
use common::stats::{StratumStats, WorkerStats};
@ -34,7 +35,7 @@ use core::{pow, ser};
use keychain;
use mining::mine_block;
use pool;
use util::{self, LOGGER};
use util;
// ----------------------------------------
// http://www.jsonrpc.org/specification
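Throughout these files, .lock().unwrap(), .read().unwrap() and .write().unwrap() become plain .lock(), .read() and .write() once the std locks are replaced by the util wrappers imported above. A self-contained, std-only sketch of a wrapper in the same spirit (the actual grin_util type is not shown in this diff and may simply re-export another lock implementation):

use std::sync::{Mutex as StdMutex, MutexGuard};

struct Mutex<T>(StdMutex<T>);

impl<T> Mutex<T> {
	fn new(value: T) -> Self {
		Mutex(StdMutex::new(value))
	}

	fn lock<'a>(&'a self) -> MutexGuard<'a, T> {
		// std's lock() yields Result<MutexGuard, PoisonError>; absorbing the
		// poison case here is what lets callers write `workers.lock().push(..)`
		self.0.lock().unwrap_or_else(|poisoned| poisoned.into_inner())
	}
}

fn main() {
	let workers = Mutex::new(Vec::new());
	workers.lock().push("worker-1"); // no `.unwrap()` at the call site
	assert_eq!(workers.lock().len(), 1);
}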
@ -113,7 +114,6 @@ fn accept_workers(
match stream {
Ok(stream) => {
warn!(
LOGGER,
"(Server ID: {}) New connection: {}",
id,
stream.peer_addr().unwrap()
@ -122,22 +122,19 @@ fn accept_workers(
.set_nonblocking(true)
.expect("set_nonblocking call failed");
let mut worker = Worker::new(worker_id.to_string(), BufStream::new(stream));
workers.lock().unwrap().push(worker);
workers.lock().push(worker);
// stats for this worker (worker stat objects are added and updated but never
// removed)
let mut worker_stats = WorkerStats::default();
worker_stats.is_connected = true;
worker_stats.id = worker_id.to_string();
worker_stats.pow_difficulty = 1; // XXX TODO
let mut stratum_stats = stratum_stats.write().unwrap();
let mut stratum_stats = stratum_stats.write();
stratum_stats.worker_stats.push(worker_stats);
worker_id = worker_id + 1;
}
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error accepting connection: {:?}", id, e
);
warn!("(Server ID: {}) Error accepting connection: {:?}", id, e);
}
}
}
@ -184,8 +181,8 @@ impl Worker {
}
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
"(Server ID: {}) Error in connection with stratum client: {}",
self.id, e
);
self.error = true;
return None;
@ -205,16 +202,16 @@ impl Worker {
Ok(_) => {}
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
"(Server ID: {}) Error in connection with stratum client: {}",
self.id, e
);
self.error = true;
}
},
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
"(Server ID: {}) Error in connection with stratum client: {}",
self.id, e
);
self.error = true;
return;
@ -285,7 +282,7 @@ impl StratumServer {
// Handle an RPC request message from the worker(s)
fn handle_rpc_requests(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) {
let mut workers_l = self.workers.lock().unwrap();
let mut workers_l = self.workers.lock();
for num in 0..workers_l.len() {
match workers_l[num].read_message() {
Some(the_message) => {
@ -295,7 +292,6 @@ impl StratumServer {
Err(e) => {
// not a valid JSON RpcRequest - disconnect the worker
warn!(
LOGGER,
"(Server ID: {}) Failed to parse JSONRpc: {} - {:?}",
self.id,
e.description(),
@ -306,7 +302,7 @@ impl StratumServer {
}
};
let mut stratum_stats = stratum_stats.write().unwrap();
let mut stratum_stats = stratum_stats.write();
let worker_stats_id = stratum_stats
.worker_stats
.iter()
@ -408,11 +404,8 @@ impl StratumServer {
let job_template = self.build_block_template();
let response = serde_json::to_value(&job_template).unwrap();
debug!(
LOGGER,
"(Server ID: {}) sending block {} with id {} to single worker",
self.id,
job_template.height,
job_template.job_id,
self.id, job_template.height, job_template.job_id,
);
return Ok(response);
}
@ -451,8 +444,8 @@ impl StratumServer {
if params.height != self.current_block_versions.last().unwrap().header.height {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Share at height {} submitted too late", self.id, params.height,
"(Server ID: {}) Share at height {} submitted too late",
self.id, params.height,
);
worker_stats.num_stale += 1;
let e = RpcError {
@ -466,11 +459,8 @@ impl StratumServer {
if b.is_none() {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: invalid job_id {}",
self.id,
params.height,
params.job_id,
self.id, params.height, params.job_id,
);
worker_stats.num_rejected += 1;
let e = RpcError {
@ -490,11 +480,8 @@ impl StratumServer {
if share_difficulty < self.minimum_share_difficulty {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Share rejected due to low difficulty: {}/{}",
self.id,
share_difficulty,
self.minimum_share_difficulty,
self.id, share_difficulty, self.minimum_share_difficulty,
);
worker_stats.num_rejected += 1;
let e = RpcError {
@ -510,7 +497,6 @@ impl StratumServer {
if let Err(e) = res {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: {}: {}",
self.id,
params.height,
@ -527,15 +513,14 @@ impl StratumServer {
share_is_block = true;
// Log message to make it obvious we found a block
warn!(
LOGGER,
"(Server ID: {}) Solution Found for block {} - Yay!!!", self.id, params.height
"(Server ID: {}) Solution Found for block {} - Yay!!!",
self.id, params.height
);
} else {
// Do some validation but dont submit
if !pow::verify_size(&b.header, b.header.pow.proof.edge_bits).is_ok() {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}",
self.id,
params.height,
@ -556,7 +541,6 @@ impl StratumServer {
Some(login) => login.clone(),
};
info!(
LOGGER,
"(Server ID: {}) Got share for block: hash {}, height {}, nonce {}, difficulty {}/{}, submitted by {}",
self.id,
b.hash(),
@ -582,18 +566,16 @@ impl StratumServer {
// Purge dead/sick workers - remove all workers marked in error state
fn clean_workers(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) -> usize {
let mut start = 0;
let mut workers_l = self.workers.lock().unwrap();
let mut workers_l = self.workers.lock();
loop {
for num in start..workers_l.len() {
if workers_l[num].error == true {
warn!(
LOGGER,
"(Server ID: {}) Dropping worker: {}",
self.id,
workers_l[num].id;
);
"(Server ID: {}) Dropping worker: {}",
self.id, workers_l[num].id
);
// Update worker stats
let mut stratum_stats = stratum_stats.write().unwrap();
let mut stratum_stats = stratum_stats.write();
let worker_stats_id = stratum_stats
.worker_stats
.iter()
@ -607,7 +589,7 @@ impl StratumServer {
start = num + 1;
}
if start >= workers_l.len() {
let mut stratum_stats = stratum_stats.write().unwrap();
let mut stratum_stats = stratum_stats.write();
stratum_stats.num_workers = workers_l.len();
return stratum_stats.num_workers;
}
@ -630,16 +612,13 @@ impl StratumServer {
};
let job_request_json = serde_json::to_string(&job_request).unwrap();
debug!(
LOGGER,
"(Server ID: {}) sending block {} with id {} to stratum clients",
self.id,
job_template.height,
job_template.job_id,
self.id, job_template.height, job_template.job_id,
);
// Push the new block to all connected clients
// NOTE: We do not give a unique nonce (should we?) so miners need
// to choose one for themselves
let mut workers_l = self.workers.lock().unwrap();
let mut workers_l = self.workers.lock();
for num in 0..workers_l.len() {
workers_l[num].write_message(job_request_json.clone());
}
@ -658,11 +637,8 @@ impl StratumServer {
sync_state: Arc<SyncState>,
) {
info!(
LOGGER,
"(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}",
self.id,
edge_bits,
proof_size
self.id, edge_bits, proof_size
);
self.sync_state = sync_state;
@ -691,13 +667,12 @@ impl StratumServer {
// We have started
{
let mut stratum_stats = stratum_stats.write().unwrap();
let mut stratum_stats = stratum_stats.write();
stratum_stats.is_running = true;
stratum_stats.edge_bits = edge_bits as u16;
}
warn!(
LOGGER,
"Stratum server started on {}",
self.config.stratum_server_addr.clone().unwrap()
);
@ -753,7 +728,7 @@ impl StratumServer {
deadline = Utc::now().timestamp() + attempt_time_per_block as i64;
{
let mut stratum_stats = stratum_stats.write().unwrap();
let mut stratum_stats = stratum_stats.write();
stratum_stats.block_height = new_block.header.height;
stratum_stats.network_difficulty = self.current_difficulty;
}
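For context on the stratum_stats.write() calls above, a minimal std-only sketch of the shared-stats pattern: several threads mutate one stats value behind Arc<RwLock<..>> (using std's RwLock here, so the Result is still unwrapped; the field name is illustrative):

use std::sync::{Arc, RwLock};
use std::thread;

#[derive(Default)]
struct StratumStatsSketch {
	num_workers: usize,
}

fn main() {
	let stats = Arc::new(RwLock::new(StratumStatsSketch::default()));
	let handles: Vec<_> = (0..4)
		.map(|_| {
			let stats = stats.clone();
			thread::spawn(move || {
				// std's RwLock returns a Result, hence the unwrap here
				stats.write().unwrap().num_workers += 1;
			})
		}).collect();
	for h in handles {
		h.join().unwrap();
	}
	assert_eq!(stats.read().unwrap().num_workers, 4);
}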

View file

@ -19,7 +19,8 @@
use chrono::prelude::Utc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use util::RwLock;
use chain;
use common::types::StratumServerConfig;
@ -30,7 +31,6 @@ use core::global;
use core::pow::PoWContext;
use mining::mine_block;
use pool;
use util::LOGGER;
pub struct Miner {
config: StratumServerConfig,
@ -84,7 +84,6 @@ impl Miner {
let deadline = Utc::now().timestamp() + attempt_time_per_block as i64;
debug!(
LOGGER,
"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
self.debug_output_id,
global::min_edge_bits(),
@ -115,10 +114,8 @@ impl Miner {
}
debug!(
LOGGER,
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
self.debug_output_id, iter_count
);
false
}
@ -127,8 +124,8 @@ impl Miner {
/// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, wallet_listener_url: Option<String>) {
info!(
LOGGER,
"(Server ID: {}) Starting test miner loop.", self.debug_output_id
"(Server ID: {}) Starting test miner loop.",
self.debug_output_id
);
// iteration, we keep the returned derivation to provide it back when
@ -136,7 +133,7 @@ impl Miner {
let mut key_id = None;
while !self.stop.load(Ordering::Relaxed) {
trace!(LOGGER, "in miner loop. key_id: {:?}", key_id);
trace!("in miner loop. key_id: {:?}", key_id);
// get the latest chain state and build a block on top of it
let head = self.chain.head_header().unwrap();
@ -160,7 +157,6 @@ impl Miner {
// we found a solution, push our block through the chain processing pipeline
if sol {
info!(
LOGGER,
"(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id,
b.hash()
@ -168,26 +164,21 @@ impl Miner {
let res = self.chain.process_block(b, chain::Options::MINE);
if let Err(e) = res {
error!(
LOGGER,
"(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id,
e
self.debug_output_id, e
);
}
trace!(LOGGER, "resetting key_id in miner to None");
trace!("resetting key_id in miner to None");
key_id = None;
} else {
debug!(
LOGGER,
"setting pubkey in miner to pubkey from block_fees - {:?}", block_fees
"setting pubkey in miner to pubkey from block_fees - {:?}",
block_fees
);
key_id = block_fees.key_id();
}
}
info!(
LOGGER,
"(Server ID: {}) test miner exit.", self.debug_output_id
);
info!("(Server ID: {}) test miner exit.", self.debug_output_id);
}
}
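The test miner's run_loop above polls a shared stop flag each iteration. A standalone sketch of that stop-flag pattern, with a trivial counter standing in for the proof-of-work attempt:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
	let stop = Arc::new(AtomicBool::new(false));
	let stop_child = stop.clone();
	let miner = thread::spawn(move || {
		let mut attempts: u64 = 0;
		while !stop_child.load(Ordering::Relaxed) {
			attempts += 1; // stand-in for one proof-of-work attempt
			thread::sleep(Duration::from_millis(1));
		}
		attempts
	});
	thread::sleep(Duration::from_millis(20));
	stop.store(true, Ordering::Relaxed);
	println!("miner stopped after {} attempts", miner.join().unwrap());
}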

View file

@ -25,8 +25,6 @@ use std::env;
use std::io::Error;
use std::thread;
use util::LOGGER;
/// Future returned from `MainService`.
enum MainFuture {
Root,
@ -94,10 +92,7 @@ pub fn start_webwallet_server() {
let server = Server::bind(&addr)
.serve(|| future::ok::<_, Error>(MainService::new()))
.map_err(|e| eprintln!("server error: {}", e));
warn!(
LOGGER,
"Grin Web-Wallet Application is running at http://{}/", addr
);
warn!("Grin Web-Wallet Application is running at http://{}/", addr);
rt::run(server);
});
}

View file

@ -13,7 +13,7 @@
// limitations under the License.
#[macro_use]
extern crate slog;
extern crate log;
extern crate grin_api as api;
extern crate grin_chain as chain;
@ -26,18 +26,20 @@ extern crate grin_wallet as wallet;
mod framework;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::{thread, time};
use util::Mutex;
use core::global::{self, ChainTypes};
use framework::{LocalServerContainer, LocalServerContainerConfig};
use util::{init_test_logger, LOGGER};
use util::init_test_logger;
#[test]
fn simple_server_wallet() {
init_test_logger();
info!(LOGGER, "starting simple_server_wallet");
info!("starting simple_server_wallet");
let test_name_dir = "test_servers";
core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
// Run a separate coinbase wallet for coinbase transactions
@ -52,7 +54,7 @@ fn simple_server_wallet() {
));
let _ = thread::spawn(move || {
let mut w = coinbase_wallet.lock().unwrap();
let mut w = coinbase_wallet.lock();
w.run_wallet(0);
});
@ -81,11 +83,11 @@ fn simple_server_wallet() {
let base_addr = server_config.base_addr;
let api_server_port = server_config.api_server_port;
warn!(LOGGER, "Testing chain handler");
warn!("Testing chain handler");
let tip = get_tip(&base_addr, api_server_port);
assert!(tip.is_ok());
warn!(LOGGER, "Testing status handler");
warn!("Testing status handler");
let status = get_status(&base_addr, api_server_port);
assert!(status.is_ok());
@ -96,7 +98,7 @@ fn simple_server_wallet() {
current_tip = get_tip(&base_addr, api_server_port).unwrap();
}
warn!(LOGGER, "Testing block handler");
warn!("Testing block handler");
let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height.is_ok());
let last_block_by_height_compact =
@ -110,7 +112,7 @@ fn simple_server_wallet() {
get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash_compact.is_ok());
warn!(LOGGER, "Testing chain output handler");
warn!("Testing chain output handler");
let start_height = 0;
let end_height = current_tip.height;
let outputs_by_height =
@ -122,7 +124,7 @@ fn simple_server_wallet() {
let outputs_by_ids2 = get_outputs_by_ids2(&base_addr, api_server_port, ids.clone());
assert!(outputs_by_ids2.is_ok());
warn!(LOGGER, "Testing txhashset handler");
warn!("Testing txhashset handler");
let roots = get_txhashset_roots(&base_addr, api_server_port);
assert!(roots.is_ok());
let last_10_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 0);
@ -146,7 +148,7 @@ fn simple_server_wallet() {
#[test]
fn test_p2p() {
init_test_logger();
info!(LOGGER, "starting test_p2p");
info!("starting test_p2p");
global::set_mining_mode(ChainTypes::AutomatedTesting);
let test_name_dir = "test_servers";
@ -187,7 +189,7 @@ fn test_p2p() {
thread::sleep(time::Duration::from_millis(2000));
// Starting tests
warn!(LOGGER, "Starting P2P Tests");
warn!("Starting P2P Tests");
let base_addr = server_config_one.base_addr;
let api_server_port = server_config_one.api_server_port;

View file

@ -13,7 +13,7 @@
// limitations under the License.
#[macro_use]
extern crate slog;
extern crate log;
extern crate grin_api as api;
extern crate grin_chain as chain;
@ -27,10 +27,9 @@ extern crate grin_wallet as wallet;
mod framework;
use framework::{LocalServerContainer, LocalServerContainerConfig};
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::{thread, time};
use util::LOGGER;
use util::Mutex;
/// Start 1 node mining, 1 non mining node and two wallets.
/// Then send a transaction from one wallet to another and propagate it a stem
@ -56,12 +55,12 @@ fn test_dandelion_timeout() {
let coinbase_wallet = Arc::new(Mutex::new(
LocalServerContainer::new(coinbase_config).unwrap(),
));
let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };
let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
let _ = thread::spawn(move || {
let mut w = coinbase_wallet.lock().unwrap();
let mut w = coinbase_wallet.lock();
w.run_wallet(0);
});
@ -71,12 +70,12 @@ fn test_dandelion_timeout() {
recp_config.wallet_port = 20002;
let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
let target_wallet_cloned = target_wallet.clone();
let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };
let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
//Start up a second wallet, to receive
let _ = thread::spawn(move || {
let mut w = target_wallet_cloned.lock().unwrap();
let mut w = target_wallet_cloned.lock();
w.run_wallet(0);
});
@ -135,7 +134,7 @@ fn test_dandelion_timeout() {
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
}
warn!(LOGGER, "Sending 50 Grins to recipient wallet");
warn!("Sending 50 Grins to recipient wallet");
// Sending stem transaction
LocalServerContainer::send_amount_to(

View file

@ -25,8 +25,9 @@ extern crate blake2_rfc as blake2;
use std::default::Default;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::{fs, thread, time};
use util::Mutex;
use framework::keychain::Keychain;
use wallet::{HTTPWalletClient, LMDBBackend, WalletConfig};
@ -532,7 +533,7 @@ impl LocalServerContainerPool {
thread::sleep(time::Duration::from_millis(2000));
}
let server_ref = s.run_server(run_length);
return_container_ref.lock().unwrap().push(server_ref);
return_container_ref.lock().push(server_ref);
});
// Not a big fan of sleeping hack here, but there appears to be a
// concurrency issue when creating files in rocksdb that causes
@ -575,7 +576,7 @@ impl LocalServerContainerPool {
}
pub fn stop_all_servers(servers: Arc<Mutex<Vec<servers::Server>>>) {
let locked_servs = servers.lock().unwrap();
let locked_servs = servers.lock();
for s in locked_servs.deref() {
s.stop();
}

View file

@ -21,19 +21,19 @@ extern crate grin_servers as servers;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
#[macro_use]
extern crate slog;
extern crate log;
mod framework;
use std::default::Default;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::{thread, time};
use util::Mutex;
use core::core::hash::Hashed;
use core::global::{self, ChainTypes};
use util::LOGGER;
use wallet::controller;
use wallet::libtx::slate::Slate;
use wallet::libwallet::types::{WalletBackend, WalletInst};
@ -242,7 +242,7 @@ fn simulate_block_propagation() {
thread::sleep(time::Duration::from_millis(1_000));
time_spent += 1;
if time_spent >= 30 {
info!(LOGGER, "simulate_block_propagation - fail on timeout",);
info!("simulate_block_propagation - fail on timeout",);
break;
}
@ -284,7 +284,6 @@ fn simulate_full_sync() {
// Get the current header from s1.
let s1_header = s1.chain.head_header().unwrap();
info!(
LOGGER,
"simulate_full_sync - s1 header head: {} at {}",
s1_header.hash(),
s1_header.height
@ -297,7 +296,6 @@ fn simulate_full_sync() {
time_spent += 1;
if time_spent >= 30 {
info!(
LOGGER,
"sync fail. s2.head().height: {}, s1_header.height: {}",
s2.head().height,
s1_header.height
@ -355,7 +353,6 @@ fn simulate_fast_sync() {
total_wait += 1;
if total_wait >= 30 {
error!(
LOGGER,
"simulate_fast_sync test fail on timeout! s2 height: {}, s1 height: {}",
s2.head().height,
s1_header.height,

View file

@ -23,7 +23,7 @@ extern crate grin_wallet as wallet;
extern crate bufstream;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
mod framework;
@ -38,7 +38,6 @@ use std::sync::Arc;
use std::{thread, time};
use core::global::{self, ChainTypes};
use util::LOGGER;
use framework::{config, stratum_config};
@ -77,7 +76,7 @@ fn basic_stratum_server() {
}
// As this stream falls out of scope it will be disconnected
}
info!(LOGGER, "stratum server connected");
info!("stratum server connected");
// Create a few new worker connections
let mut workers = vec![];
@ -89,7 +88,7 @@ fn basic_stratum_server() {
workers.push(stream);
}
assert!(workers.len() == 5);
info!(LOGGER, "workers length verification ok");
info!("workers length verification ok");
// Simulate a worker lost connection
workers.remove(4);
@ -118,7 +117,7 @@ fn basic_stratum_server() {
assert!(false);
}
}
info!(LOGGER, "a few stratum JSONRpc commands verification ok");
info!("a few stratum JSONRpc commands verification ok");
// keepalive - expected "ok" result
let mut response = String::new();
@ -129,7 +128,7 @@ fn basic_stratum_server() {
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
let _st = workers[2].read_line(&mut response);
assert_eq!(response.as_str(), ok_resp);
info!(LOGGER, "keepalive test ok");
info!("keepalive test ok");
// "doesnotexist" - error expected
let mut response = String::new();
@ -140,7 +139,7 @@ fn basic_stratum_server() {
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
let _st = workers[3].read_line(&mut response);
assert_eq!(response.as_str(), ok_resp);
info!(LOGGER, "worker doesnotexist test ok");
info!("worker doesnotexist test ok");
// Verify stratum server and worker stats
let stats = s.get_server_stats().unwrap();
@ -148,18 +147,18 @@ fn basic_stratum_server() {
assert_eq!(stats.stratum_stats.num_workers, 4); // 5 - 1 = 4
assert_eq!(stats.stratum_stats.worker_stats[5].is_connected, false); // worker was removed
assert_eq!(stats.stratum_stats.worker_stats[1].is_connected, true);
info!(LOGGER, "stratum server and worker stats verification ok");
info!("stratum server and worker stats verification ok");
// Start mining blocks
let stop = Arc::new(AtomicBool::new(false));
s.start_test_miner(None, stop.clone());
info!(LOGGER, "test miner started");
info!("test miner started");
// This test is supposed to complete in 3 seconds,
// so let's set a timeout on 10s to avoid infinite waiting happened in Travis-CI.
let _handler = thread::spawn(|| {
thread::sleep(time::Duration::from_secs(10));
error!(LOGGER, "basic_stratum_server test fail on timeout!");
error!("basic_stratum_server test fail on timeout!");
thread::sleep(time::Duration::from_millis(100));
process::exit(1);
});
@ -177,12 +176,12 @@ fn basic_stratum_server() {
let _st = workers[2].read_line(&mut jobtemplate);
let job_template: Value = serde_json::from_str(&jobtemplate).unwrap();
assert_eq!(job_template["method"], expected);
info!(LOGGER, "blocks broadcasting to workers test ok");
info!("blocks broadcasting to workers test ok");
// Verify stratum server and worker stats
let stats = s.get_server_stats().unwrap();
assert_eq!(stats.stratum_stats.num_workers, 3); // 5 - 2 = 3
assert_eq!(stats.stratum_stats.worker_stats[2].is_connected, false); // worker was removed
assert_ne!(stats.stratum_stats.block_height, 1);
info!(LOGGER, "basic_stratum_server test done and ok.");
info!("basic_stratum_server test done and ok.");
}

View file

@ -13,7 +13,7 @@
// limitations under the License.
#[macro_use]
extern crate slog;
extern crate log;
extern crate grin_api as api;
extern crate grin_chain as chain;
@ -27,10 +27,9 @@ extern crate grin_wallet as wallet;
mod framework;
use framework::{LocalServerContainer, LocalServerContainerConfig};
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::{thread, time};
use util::LOGGER;
use util::Mutex;
/// Start 1 node mining and two wallets, then send a few
/// transactions from one to the other
@ -55,11 +54,11 @@ fn basic_wallet_transactions() {
let coinbase_wallet = Arc::new(Mutex::new(
LocalServerContainer::new(coinbase_config).unwrap(),
));
let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };
let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
let _ = thread::spawn(move || {
let mut w = coinbase_wallet.lock().unwrap();
let mut w = coinbase_wallet.lock();
w.run_wallet(0);
});
@ -69,11 +68,11 @@ fn basic_wallet_transactions() {
recp_config.wallet_port = 20002;
let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
let target_wallet_cloned = target_wallet.clone();
let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };
let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
//Start up a second wallet, to receive
let _ = thread::spawn(move || {
let mut w = target_wallet_cloned.lock().unwrap();
let mut w = target_wallet_cloned.lock();
w.run_wallet(0);
});
@ -104,7 +103,7 @@ fn basic_wallet_transactions() {
coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
}
warn!(LOGGER, "Sending 50 Grins to recipient wallet");
warn!("Sending 50 Grins to recipient wallet");
LocalServerContainer::send_amount_to(
&coinbase_wallet_config,
"50.00",
@ -124,10 +123,7 @@ fn basic_wallet_transactions() {
println!("Recipient wallet info: {:?}", recipient_info);
assert!(recipient_info.amount_currently_spendable == 50000000000);
warn!(
LOGGER,
"Sending many small transactions to recipient wallet"
);
warn!("Sending many small transactions to recipient wallet");
for _i in 0..10 {
LocalServerContainer::send_amount_to(
&coinbase_wallet_config,

View file

@ -29,7 +29,6 @@ use core::global;
use p2p::Seeding;
use servers;
use tui::ui;
use util::LOGGER;
/// wrap below to allow UI to clean up on stop
fn start_server(config: servers::ServerConfig) {
@ -37,9 +36,9 @@ fn start_server(config: servers::ServerConfig) {
// Just kill process for now, otherwise the process
// hangs around until sigint because the API server
// currently has no shutdown facility
warn!(LOGGER, "Shutting down...");
warn!("Shutting down...");
thread::sleep(Duration::from_millis(1000));
warn!(LOGGER, "Shutdown complete.");
warn!("Shutdown complete.");
exit(0);
}
@ -47,7 +46,7 @@ fn start_server_tui(config: servers::ServerConfig) {
// Run the UI controller.. here for now for simplicity to access
// everything it might need
if config.run_tui.is_some() && config.run_tui.unwrap() {
warn!(LOGGER, "Starting GRIN in UI mode...");
warn!("Starting GRIN in UI mode...");
servers::Server::start(config, |serv: Arc<servers::Server>| {
let running = Arc::new(AtomicBool::new(true));
let _ = thread::Builder::new()
@ -60,7 +59,7 @@ fn start_server_tui(config: servers::ServerConfig) {
});
}).unwrap();
} else {
warn!(LOGGER, "Starting GRIN w/o UI...");
warn!("Starting GRIN w/o UI...");
servers::Server::start(config, |serv: Arc<servers::Server>| {
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
@ -70,7 +69,7 @@ fn start_server_tui(config: servers::ServerConfig) {
while running.load(Ordering::SeqCst) {
thread::sleep(Duration::from_secs(1));
}
warn!(LOGGER, "Received SIGINT (Ctrl+C) or SIGTERM (kill).");
warn!("Received SIGINT (Ctrl+C) or SIGTERM (kill).");
serv.stop();
}).unwrap();
}
@ -170,8 +169,8 @@ pub fn server_command(server_args: Option<&ArgMatches>, mut global_config: Globa
}
});
match daemonize.start() {
Ok(_) => info!(LOGGER, "Grin server successfully started."),
Err(e) => error!(LOGGER, "Error starting: {}", e),
Ok(_) => info!("Grin server successfully started."),
Err(e) => error!("Error starting: {}", e),
}
}
("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"),

View file

@ -18,9 +18,10 @@ use std::io::Read;
use std::path::PathBuf;
/// Wallet commands processing
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::time::Duration;
use std::{process, thread};
use util::Mutex;
use clap::ArgMatches;
@ -34,7 +35,6 @@ use grin_wallet::{
use keychain;
use servers::start_webwallet_server;
use util::file::get_first_line;
use util::LOGGER;
pub fn _init_wallet_seed(wallet_config: WalletConfig) {
if let Err(_) = WalletSeed::from_file(&wallet_config) {
@ -72,7 +72,7 @@ pub fn instantiate_wallet(
println!("Error starting wallet: {}", e);
process::exit(0);
});
info!(LOGGER, "Using LMDB Backend for wallet");
info!("Using LMDB Backend for wallet");
Box::new(db_wallet)
}
@ -106,7 +106,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
// Generate the initial wallet seed if we are running "wallet init".
if let ("init", Some(_)) = wallet_args.subcommand() {
WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");
info!(LOGGER, "Wallet seed file created");
info!("Wallet seed file created");
let client =
HTTPWalletClient::new(&wallet_config.check_node_api_http_addr, node_api_secret);
let _: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> =
@ -116,7 +116,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
e, wallet_config
);
});
info!(LOGGER, "Wallet database backend created");
info!("Wallet database backend created");
// give logging thread a moment to catch up
thread::sleep(Duration::from_millis(200));
// we are done here with creating the wallet, so just return
@ -267,7 +267,6 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let slate = match result {
Ok(s) => {
info!(
LOGGER,
"Tx created: {} grin to {} (strategy '{}')",
core::amount_to_hr_string(amount, false),
dest,
@ -276,7 +275,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
s
}
Err(e) => {
error!(LOGGER, "Tx not created: {:?}", e);
error!("Tx not created: {:?}", e);
match e.kind() {
// user errors, don't backtrace
libwallet::ErrorKind::NotEnoughFunds { .. } => {}
@ -284,7 +283,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
libwallet::ErrorKind::FeeExceedsAmount { .. } => {}
_ => {
// otherwise give full dump
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
error!("Backtrace: {}", e.backtrace().unwrap());
}
};
panic!();
@ -293,18 +292,18 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_tx(&slate, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Tx sent",);
info!("Tx sent",);
Ok(())
}
Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e);
error!("Tx not sent: {:?}", e);
Err(e)
}
}
} else {
error!(
LOGGER,
"HTTP Destination should start with http://: or https://: {}", dest
"HTTP Destination should start with http://: or https://: {}",
dest
);
panic!();
}
@ -320,7 +319,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
).expect("Send failed");
Ok(())
} else {
error!(LOGGER, "unsupported payment method: {}", method);
error!("unsupported payment method: {}", method);
panic!();
}
}
@ -353,11 +352,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_tx(&slate, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Tx sent");
info!("Tx sent");
Ok(())
}
Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e);
error!("Tx not sent: {:?}", e);
Err(e)
}
}
@ -438,7 +437,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
("repost", Some(repost_args)) => {
let tx_id: u32 = match repost_args.value_of("id") {
None => {
error!(LOGGER, "Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
error!("Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
panic!();
}
Some(tx) => match tx.parse() {
@ -455,11 +454,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_stored_tx(tx_id, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Reposted transaction at {}", tx_id);
info!("Reposted transaction at {}", tx_id);
Ok(())
}
Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e);
error!("Transaction reposting failed: {}", e);
Err(e)
}
}
@ -468,11 +467,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.dump_stored_tx(tx_id, true, f);
match result {
Ok(_) => {
warn!(LOGGER, "Dumped transaction data for tx {} to {}", tx_id, f);
warn!("Dumped transaction data for tx {} to {}", tx_id, f);
Ok(())
}
Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e);
error!("Transaction reposting failed: {}", e);
Err(e)
}
}
@ -487,11 +486,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.cancel_tx(tx_id);
match result {
Ok(_) => {
info!(LOGGER, "Transaction {} Cancelled", tx_id);
info!("Transaction {} Cancelled", tx_id);
Ok(())
}
Err(e) => {
error!(LOGGER, "TX Cancellation failed: {}", e);
error!("TX Cancellation failed: {}", e);
Err(e)
}
}
@ -500,12 +499,12 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.restore();
match result {
Ok(_) => {
info!(LOGGER, "Wallet restore complete",);
info!("Wallet restore complete",);
Ok(())
}
Err(e) => {
error!(LOGGER, "Wallet restore failed: {:?}", e);
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
error!("Wallet restore failed: {:?}", e);
error!("Backtrace: {}", e.backtrace().unwrap());
Err(e)
}
}

View file

@ -24,7 +24,7 @@ extern crate daemonize;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate term;
extern crate grin_api as api;
@ -45,7 +45,7 @@ use clap::{App, Arg, SubCommand};
use config::config::{SERVER_CONFIG_FILE_NAME, WALLET_CONFIG_FILE_NAME};
use core::global;
use util::{init_logger, LOGGER};
use util::init_logger;
// include build information
pub mod built_info {
@ -73,9 +73,9 @@ pub fn info_strings() -> (String, String, String) {
fn log_build_info() {
let (basic_info, detailed_info, deps) = info_strings();
info!(LOGGER, "{}", basic_info);
debug!(LOGGER, "{}", detailed_info);
trace!(LOGGER, "{}", deps);
info!("{}", basic_info);
debug!("{}", detailed_info);
trace!("{}", deps);
}
fn main() {
@ -378,7 +378,6 @@ fn main() {
l.tui_running = Some(false);
init_logger(Some(l));
warn!(
LOGGER,
"Using wallet configuration file at {}",
w.config_file_path.as_ref().unwrap().to_str().unwrap()
);
@ -399,12 +398,11 @@ fn main() {
global::set_mining_mode(s.members.as_mut().unwrap().server.clone().chain_type);
if let Some(file_path) = &s.config_file_path {
info!(
LOGGER,
"Using configuration file at {}",
file_path.to_str().unwrap()
);
} else {
info!(LOGGER, "Node configuration file not found, using default");
info!("Node configuration file not found, using default");
}
node_config = Some(s);
}

View file

@ -87,91 +87,88 @@ impl TUIStatusListener for TUIStatusView {
fn update(c: &mut Cursive, stats: &ServerStats) {
//find and update here as needed
let basic_status = {
	match stats.sync_status {
		SyncStatus::Initial => "Initializing".to_string(),
		SyncStatus::NoSync => "Running".to_string(),
		SyncStatus::AwaitingPeers(_) => "Waiting for peers".to_string(),
		SyncStatus::HeaderSync {
			current_height,
			highest_height,
		} => {
			let percent = if highest_height == 0 {
				0
			} else {
				current_height * 100 / highest_height
			};
			format!("Downloading headers: {}%, step 1/4", percent)
		}
		SyncStatus::TxHashsetDownload {
			start_time,
			downloaded_size,
			total_size,
		} => {
			if total_size > 0 {
				let percent = if total_size > 0 {
					downloaded_size * 100 / total_size
				} else {
					0
				};
				let start = start_time.timestamp_nanos();
				let fin = Utc::now().timestamp_nanos();
				let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;
				format!("Downloading {}(MB) chain state for fast sync: {}% at {:.1?}(kB/s), step 2/4",
					total_size / 1_000_000,
					percent,
					if dur_ms > 1.0f64 { downloaded_size as f64 / dur_ms as f64 } else { 0f64 },
				)
			} else {
				let start = start_time.timestamp_millis();
				let fin = Utc::now().timestamp_millis();
				let dur_secs = (fin - start) / 1000;
				format!("Downloading chain state for fast sync. Waiting remote peer to start: {}s, step 2/4",
					dur_secs,
				)
			}
		}
		SyncStatus::TxHashsetSetup => {
			"Preparing chain state for validation, step 3/4".to_string()
		}
		SyncStatus::TxHashsetValidation {
			kernels,
			kernel_total,
			rproofs,
			rproof_total,
		} => {
			// 10% of overall progress is attributed to kernel validation
			// 90% to range proofs (which are much longer)
			let mut percent = if kernel_total > 0 {
				kernels * 10 / kernel_total
			} else {
				0
			};
			percent += if rproof_total > 0 {
				rproofs * 90 / rproof_total
			} else {
				0
			};
			format!("Validating chain state: {}%, step 3/4", percent)
		}
		SyncStatus::TxHashsetSave => {
			"Finalizing chain state for fast sync, step 3/4".to_string()
		}
		SyncStatus::BodySync {
			current_height,
			highest_height,
		} => {
			let percent = if highest_height == 0 {
				0
			} else {
				current_height * 100 / highest_height
			};
			format!("Downloading blocks: {}%, step 4/4", percent)
		}
	}
};
/*let basic_mining_config_status = {
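The progress strings above guard every percentage against a zero denominator and weight kernel validation at 10% against 90% for range proofs. A tiny illustrative helper capturing both rules (integer division, so results may round down slightly compared to the in-place formula):

fn percent(done: u64, total: u64) -> u64 {
	if total == 0 { 0 } else { done * 100 / total }
}

fn validation_progress(kernels: u64, kernel_total: u64, rproofs: u64, rproof_total: u64) -> u64 {
	// kernels contribute 10% of the bar, range proofs the remaining 90%
	percent(kernels, kernel_total) / 10 + percent(rproofs, rproof_total) * 9 / 10
}

fn main() {
	// e.g. half the kernels and half the range proofs validated => 50%
	assert_eq!(validation_progress(50, 100, 45, 90), 50);
}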

View file

@ -37,7 +37,6 @@ use servers::Server;
use tui::constants::ROOT_STACK;
use tui::types::{TUIStatusListener, UIMessage};
use tui::{menu, mining, peers, status, version};
use util::LOGGER;
use built_info;
@ -172,7 +171,7 @@ impl Controller {
let mut next_stat_update = Utc::now().timestamp() + stat_update_interval;
while self.ui.step() {
if !running.load(Ordering::SeqCst) {
warn!(LOGGER, "Received SIGINT (Ctrl+C).");
warn!("Received SIGINT (Ctrl+C).");
server.stop();
self.ui.stop();
}

Some files were not shown because too many files have changed in this diff.