mirror of https://github.com/mimblewimble/grin.git
synced 2025-01-21 03:21:08 +03:00

Merge branch 'master' into unitdiff

This commit is contained in: commit a42d66efff

131 changed files with 1942 additions and 2372 deletions

Cargo.lock (generated)
@@ -19,6 +19,11 @@ dependencies = [
  "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "antidote"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "arc-swap"
 version = "0.3.2"
@@ -315,6 +320,11 @@ dependencies = [
  "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "crossbeam"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "crossbeam-channel"
 version = "0.2.6"
@@ -666,10 +676,10 @@ dependencies = [
  "grin_util 0.4.0",
  "grin_wallet 0.4.0",
  "humansize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "reqwest 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "tar 0.4.17 (registry+https://github.com/rust-lang/crates.io-index)",
  "term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -691,13 +701,13 @@ dependencies = [
  "hyper 0.12.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "hyper-rustls 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "ring 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustls 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-rustls 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -722,11 +732,11 @@ dependencies = [
  "grin_util 0.4.0",
  "grin_wallet 0.4.0",
  "lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -760,6 +770,7 @@ dependencies = [
  "grin_util 0.4.0",
  "grin_wallet 0.4.0",
  "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "num 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-bigint 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -767,7 +778,6 @@ dependencies = [
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -779,13 +789,13 @@ dependencies = [
  "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "grin_util 0.4.0",
  "hmac 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "ripemd160 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
  "sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -802,12 +812,12 @@ dependencies = [
  "grin_store 0.4.0",
  "grin_util 0.4.0",
  "lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -822,10 +832,10 @@ dependencies = [
  "grin_store 0.4.0",
  "grin_util 0.4.0",
  "grin_wallet 0.4.0",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -852,11 +862,11 @@ dependencies = [
  "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -873,11 +883,11 @@ dependencies = [
  "grin_util 0.4.0",
  "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -888,13 +898,13 @@ dependencies = [
  "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log4rs 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "secp256k1zkp 0.7.1 (git+https://github.com/mimblewimble/rust-secp256k1-zkp?tag=grin_integration_28)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog-term 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "walkdir 2.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "zip 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -916,12 +926,12 @@ dependencies = [
  "grin_store 0.4.0",
  "grin_util 0.4.0",
  "hyper 0.12.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "prettytable-rs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1076,17 +1086,6 @@ dependencies = [
  "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "isatty"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "itertools"
 version = "0.7.8"
@@ -1194,6 +1193,11 @@ name = "linked-hash-map"
 version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "linked-hash-map"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "lmdb-zero"
 version = "0.4.4"
@@ -1228,6 +1232,36 @@ version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "log-mdc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "log4rs"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "flate2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log-mdc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde-value 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_yaml 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1592,6 +1626,15 @@ dependencies = [
  "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "ordered-float"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "owning_ref"
 version = "0.3.3"
@@ -2026,6 +2069,15 @@ name = "serde"
 version = "1.0.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "serde-value"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ordered-float 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "serde_derive"
 version = "1.0.80"
@@ -2057,6 +2109,17 @@ dependencies = [
  "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "serde_yaml"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
+ "yaml-rust 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "sha2"
 version = "0.7.1"
@@ -2087,33 +2150,6 @@ name = "slab"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
-[[package]]
-name = "slog"
-version = "2.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "slog-async"
-version = "2.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "slog-term"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "isatty 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "smallvec"
 version = "0.6.5"
@@ -2173,11 +2209,6 @@ dependencies = [
  "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "take_mut"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
 [[package]]
 name = "tar"
 version = "0.4.17"
@@ -2247,6 +2278,16 @@ dependencies = [
  "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "thread-id"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "thread_local"
 version = "0.3.6"
@@ -2473,11 +2514,24 @@ dependencies = [
  "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "traitobject"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "try-lock"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "typemap"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unsafe-any 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "typenum"
 version = "1.10.0"
@@ -2540,6 +2594,14 @@ dependencies = [
  "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "unsafe-any"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "untrusted"
 version = "0.6.2"
@@ -2712,6 +2774,14 @@ name = "xi-unicode"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "yaml-rust"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "zip"
 version = "0.4.2"
@@ -2728,6 +2798,7 @@ dependencies = [
 "checksum adler32 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7e522997b529f05601e05166c07ed17789691f562762c7f3b987263d2dedee5c"
 "checksum aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "68f56c7353e5a9547cbd76ed90f7bb5ffc3ba09d4ea9bd1d8c06c8b1142eeb5a"
 "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
+"checksum antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5"
 "checksum arc-swap 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f344c31716d7f1afc56f8cc08163f7d1826b223924c04b89b0a533459d5f99f"
 "checksum argon2rs 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3f67b0b6a86dae6e67ff4ca2b6201396074996379fba2b92ff649126f37cb392"
 "checksum array-macro 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8b1b1a00de235e9f2cc0e650423dc249d875c116a5934188c08fdd0c02d840ef"
@@ -2764,6 +2835,7 @@ dependencies = [
 "checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb"
 "checksum croaring 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "38961600edf0408acc371eb2359901f5ed2e634dde8537fc9a05e88fb26cde0e"
 "checksum croaring-sys 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "48a344ef01931b3106d6083c08fefc16e2b0b9d2de695a49a7437e56607cfda1"
+"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
 "checksum crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7b85741761b7f160bc5e7e0c14986ef685b7f8bf9b7ad081c60c604bb4649827"
 "checksum crossbeam-deque 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3486aefc4c0487b9cb52372c97df0a48b8c249514af1ee99703bf70d2f2ceda1"
 "checksum crossbeam-epoch 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "30fecfcac6abfef8771151f8be4abc9e4edc112c2bcb233314cafde2680536e9"
@@ -2816,7 +2888,6 @@ dependencies = [
 "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e"
 "checksum indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08173ba1e906efb6538785a8844dd496f5d34f0a2d88038e95195172fc667220"
 "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
-"checksum isatty 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e31a8281fc93ec9693494da65fbf28c0c2aa60a2eaec25dc58e2f31952e95edc"
 "checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450"
 "checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
 "checksum jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf83704f4e79979a424d1082dd2c1e52683058056c9280efa19ac5f6bc9033c"
@@ -2830,10 +2901,13 @@ dependencies = [
 "checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
 "checksum libz-sys 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "4401fe74560a0d46fce3464625ac8aa7a79d291dd28cee021d18852d5191c280"
 "checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939"
+"checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e"
 "checksum lmdb-zero 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "13416eee745b087c22934f35f1f24da22da41ba2a5ce197143d168ce055cc58d"
 "checksum lock_api 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "775751a3e69bde4df9b38dd00a1b5d6ac13791e4223d4a0506577f0dd27cfb7a"
 "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
 "checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f"
+"checksum log-mdc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7"
+"checksum log4rs 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "25e0fc8737a634116a2deb38d821e4400ed16ce9dcb0d628a978d399260f5902"
 "checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21"
 "checksum maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08cbb6b4fef96b6d77bfc40ec491b1690c779e77b05cd9f07f787ed376fd4c43"
 "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
@@ -2873,6 +2947,7 @@ dependencies = [
 "checksum openssl 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = "5af9e83eb3c51ee806387d26a43056f3246d865844caa6dd704d2ba7e831c264"
 "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de"
 "checksum openssl-sys 0.9.38 (registry+https://github.com/rust-lang/crates.io-index)" = "ff3d1b390ab1b9700f682ad95a30dc9c0f40dd212ca57266012cfc678b0e365a"
+"checksum ordered-float 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7eb5259643245d3f292c7a146b2df53bba24d7eab159410e648eb73dc164669d"
 "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
 "checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"
 "checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5"
@@ -2925,16 +3000,15 @@ dependencies = [
 "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 "checksum serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "15c141fc7027dd265a47c090bf864cf62b42c4d228bbcf4e51a0c9e2b0d3f7ef"
+"checksum serde-value 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "52903ade2290cbd61a0937a66a268f26cebf246e3ddd7964a8babb297111fb0d"
 "checksum serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "225de307c6302bec3898c51ca302fc94a7a1697ef0845fcee6448f33c032249c"
 "checksum serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)" = "43344e7ce05d0d8280c5940cabb4964bea626aa58b1ec0e8c73fa2a8512a38ce"
 "checksum serde_urlencoded 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aaed41d9fb1e2f587201b863356590c90c1157495d811430a0c0325fe8169650"
+"checksum serde_yaml 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "980f5cc4e92ba24ba471b6a7b3df17d5b7b2c16fb1900a1aa0a79062320b16c4"
 "checksum sha2 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9eb6be24e4c23a84d7184280d2722f7f2731fcdd4a9d886efbfe4413e4847ea0"
 "checksum signal-hook 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7ca1f1c0ed6c8beaab713ad902c041e4f09d06e1b4bb74c5fc553c078ed0110"
 "checksum siphasher 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0b8de496cf83d4ed58b6be86c3a275b8602f6ffe98d3024a869e124147a9a3ac"
 "checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d"
-"checksum slog 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09e4f1d0276ac7d448d98db16f0dab0220c24d4842d88ce4dad4b306fa234f1d"
-"checksum slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e544d16c6b230d84c866662fe55e31aacfca6ae71e6fc49ae9a311cb379bfc2f"
-"checksum slog-term 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5951a808c40f419922ee014c15b6ae1cd34d963538b57d8a4778b9ca3fff1e0b"
 "checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d"
 "checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
 "checksum string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00caf261d6f90f588f8450b8e1230fa0d5be49ee6140fdfbcb55335aff350970"
@@ -2943,7 +3017,6 @@ dependencies = [
 "checksum syn 0.14.9 (registry+https://github.com/rust-lang/crates.io-index)" = "261ae9ecaa397c42b960649561949d69311f08eeaea86a65696e6e46517cf741"
 "checksum syn 0.15.12 (registry+https://github.com/rust-lang/crates.io-index)" = "34ab9797e47d24cb76b8dc4d24ff36807018c7cc549c4cba050b068be0c586b0"
 "checksum synstructure 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85bb9b7550d063ea184027c9b8c20ac167cd36d3e06b3a40bceb9d746dc1a7b7"
-"checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
 "checksum tar 0.4.17 (registry+https://github.com/rust-lang/crates.io-index)" = "83b0d14b53dbfd62681933fadd651e815f99e6084b649e049ab99296e05ab3de"
 "checksum tempfile 3.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "55c1195ef8513f3273d55ff59fe5da6940287a0d7a98331254397f464833675b"
 "checksum term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6b677dd1e8214ea1ef4297f85dbcbed8e8cdddb561040cc998ca2551c37561"
@@ -2951,6 +3024,7 @@ dependencies = [
 "checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
 "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
 "checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6"
+"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
 "checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
 "checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b"
 "checksum tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6e93c78d23cc61aa245a8acd2c4a79c4d7fa7fb5c3ca90d5737029f043a84895"
@@ -2970,7 +3044,9 @@ dependencies = [
 "checksum tokio-udp 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "da941144b816d0dcda4db3a1ba87596e4df5e860a72b70783fe435891f80601c"
 "checksum tokio-uds 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22e3aa6d1fcc19e635418dc0a30ab5bd65d347973d6f43f1a37bf8d9d1335fc9"
 "checksum toml 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "4a2ecc31b0351ea18b3fe11274b8db6e4d82bce861bbb22e6dbed40417902c65"
+"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079"
 "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382"
+"checksum typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "653be63c80a3296da5551e1bfd2cca35227e13cdd08c6668903ae2f4f77aa1f6"
 "checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169"
 "checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
 "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33"
@@ -2981,6 +3057,7 @@ dependencies = [
 "checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526"
 "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
 "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
+"checksum unsafe-any 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f30360d7979f5e9c6e6cea48af192ea8fab4afb3cf72597154b8f08935bc9c7f"
 "checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f"
 "checksum url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2a321979c09843d272956e73700d12c4e7d3d92b2ee112b31548aef0d4efc5a6"
 "checksum utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd70f467df6810094968e2fce0ee1bd0e87157aceb026a8c083bcf5e25b9efe4"
@@ -3005,4 +3082,5 @@ dependencies = [
 "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
 "checksum xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c"
 "checksum xi-unicode 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "12ea8eda4b1eb72f02d148402e23832d56a33f55d8c1b2d5bcdde91d79d47cb1"
+"checksum yaml-rust 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "95acf0db5515d07da9965ec0e0ba6cc2d825e2caeb7303b66ca441729801254e"
 "checksum zip 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "36b9e08fb518a65cf7e08a1e482573eb87a2f4f8c6619316612a3c1f162fe822"
@@ -24,7 +24,7 @@ humansize = "1.1.0"
 daemonize = "0.3"
 serde = "1"
 serde_json = "1"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
 term = "0.5"
 
 grin_api = { path = "./api" }
@@ -15,7 +15,7 @@ ring = "0.13"
 serde = "1"
 serde_derive = "1"
 serde_json = "1"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
 tokio = "0.1.7"
 tokio-core = "0.1.17"
 tokio-tcp = "0.1"
@@ -14,7 +14,8 @@
 
 use std::collections::HashMap;
 use std::net::SocketAddr;
-use std::sync::{Arc, RwLock, Weak};
+use std::sync::{Arc, Weak};
+use util::RwLock;
 
 use failure::ResultExt;
 use futures::future::ok;
@@ -36,7 +37,6 @@ use types::*;
 use url::form_urlencoded;
 use util;
 use util::secp::pedersen::Commitment;
-use util::LOGGER;
 use web::*;
 
 // All handlers use `Weak` references instead of `Arc` to avoid cycles that
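The comment closing this hunk is the design rule for the whole handlers module: handlers hold Weak references and upgrade them per call, so the API layer never keeps the chain, peers or pool alive on its own. A minimal sketch of that pattern follows; the types and error handling are illustrative stand-ins, not the actual grin handler code.

use std::sync::{Arc, Weak};

struct Chain; // stand-in for the shared resource owned elsewhere

struct OutputHandler {
    chain: Weak<Chain>,
}

impl OutputHandler {
    fn handle(&self) -> Result<(), String> {
        // Upgrade only for the duration of the request; if the owner already
        // dropped the chain, fail the call instead of extending its lifetime.
        let chain: Arc<Chain> = self.chain.upgrade().ok_or("chain has been dropped")?;
        let _ = &chain; // ... read from the chain here ...
        Ok(())
    }
}

fn main() {
    let chain = Arc::new(Chain);
    let handler = OutputHandler { chain: Arc::downgrade(&chain) };
    handler.handle().unwrap();
}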
@@ -205,12 +205,8 @@ impl OutputHandler {
 }
 
 debug!(
-LOGGER,
 "outputs_block_batch: {}-{}, {:?}, {:?}",
-start_height,
-end_height,
-commitments,
-include_rp,
+start_height, end_height, commitments, include_rp,
 );
 
 let mut return_vec = vec![];
@@ -695,7 +691,7 @@ struct PoolInfoHandler {
 impl Handler for PoolInfoHandler {
 fn get(&self, _req: Request<Body>) -> ResponseFuture {
 let pool_arc = w(&self.tx_pool);
-let pool = pool_arc.read().unwrap();
+let pool = pool_arc.read();
 
 json_response(&PoolInfo {
 pool_size: pool.total_size(),
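A change repeated throughout these hunks is dropping the .unwrap() after taking a lock: the new util::RwLock re-export is presumably a parking_lot-style lock (an assumption here), whose read() and write() hand back the guard directly instead of a Result that can be poisoned. A small sketch of the difference, using parking_lot directly for illustration (requires the parking_lot crate):

// std::sync::RwLock: poisoning forces a Result, hence the unwrap everywhere.
fn pool_size_std(pool: &std::sync::RwLock<Vec<u64>>) -> usize {
    pool.read().unwrap().len()
}

// parking_lot::RwLock: no poisoning, read() returns the guard directly.
fn pool_size_parking_lot(pool: &parking_lot::RwLock<Vec<u64>>) -> usize {
    pool.read().len()
}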
@@ -744,7 +740,6 @@ impl PoolPushHandler {
 identifier: "?.?.?.?".to_string(),
 };
 info!(
-LOGGER,
 "Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
 tx.hash(),
 tx.inputs().len(),
@@ -753,12 +748,12 @@ impl PoolPushHandler {
 );
 
 // Push to tx pool.
-let mut tx_pool = pool_arc.write().unwrap();
+let mut tx_pool = pool_arc.write();
 let header = tx_pool.blockchain.chain_head().unwrap();
 tx_pool
 .add_to_pool(source, tx, !fluff, &header)
 .map_err(|e| {
-error!(LOGGER, "update_pool: failed with error: {:?}", e);
+error!("update_pool: failed with error: {:?}", e);
 ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
 })
 }),
@@ -807,7 +802,7 @@ pub fn start_rest_apis(
 router.add_middleware(basic_auth_middleware);
 }
 
-info!(LOGGER, "Starting HTTP API server at {}.", addr);
+info!("Starting HTTP API server at {}.", addr);
 let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
 apis.start(socket_addr, router, tls_config).is_ok()
 }
@@ -33,7 +33,7 @@ extern crate serde;
 extern crate serde_derive;
 extern crate serde_json;
 #[macro_use]
-extern crate slog;
+extern crate log;
 extern crate futures;
 extern crate http;
 extern crate hyper_rustls;
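The same swap happens in every crate: slog's macros take the logger as their first argument, while the log facade macros write to a globally installed logger, so the LOGGER handle disappears from every call site. A rough before/after sketch; initialising the global logger with env_logger is an assumption for illustration, not necessarily what grin wires up:

#[macro_use]
extern crate log;
extern crate env_logger;

fn main() {
    // Install a global logger once at startup (env_logger assumed here).
    env_logger::init();

    let addr = "127.0.0.1:3413";
    // before (slog): info!(LOGGER, "Starting HTTP API server at {}.", addr);
    // after (log): no logger handle threaded through the call.
    info!("Starting HTTP API server at {}.", addr);
}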
@@ -33,7 +33,6 @@ use std::sync::Arc;
 use std::{io, thread};
 use tokio_rustls::ServerConfigExt;
 use tokio_tcp;
-use util::LOGGER;
 
 /// Errors that can be returned by an ApiEndpoint implementation.
 #[derive(Debug)]
@@ -243,13 +242,10 @@ impl ApiServer {
 // TODO re-enable stop after investigation
 //let tx = mem::replace(&mut self.shutdown_sender, None).unwrap();
 //tx.send(()).expect("Failed to stop API server");
-info!(LOGGER, "API server has been stoped");
+info!("API server has been stoped");
 true
 } else {
-error!(
-LOGGER,
-"Can't stop API server, it's not running or doesn't spport stop operation"
-);
+error!("Can't stop API server, it's not running or doesn't spport stop operation");
 false
 }
 }
@@ -263,7 +259,7 @@ impl Handler for LoggingMiddleware {
 req: Request<Body>,
 mut handlers: Box<Iterator<Item = HandlerObj>>,
 ) -> ResponseFuture {
-debug!(LOGGER, "REST call: {} {}", req.method(), req.uri().path());
+debug!("REST call: {} {}", req.method(), req.uri().path());
 handlers.next().unwrap().call(req, handlers)
 }
 }
@@ -12,7 +12,7 @@ lmdb-zero = "0.4.4"
 failure = "0.1"
 failure_derive = "0.1"
 croaring = "0.3"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
 serde = "1"
 serde_derive = "1"
 chrono = "0.4.4"
@@ -18,8 +18,9 @@
 use std::collections::HashMap;
 use std::fs::File;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::time::{Duration, Instant};
+use util::RwLock;
 
 use lmdb;
 use lru_cache::LruCache;
@@ -37,7 +38,6 @@ use store;
 use txhashset;
 use types::{ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus};
 use util::secp::pedersen::{Commitment, RangeProof};
-use util::LOGGER;
 
 /// Orphan pool size is limited by MAX_ORPHAN_SIZE
 pub const MAX_ORPHAN_SIZE: usize = 200;
@@ -75,7 +75,7 @@ impl OrphanBlockPool {
 }
 
 fn len(&self) -> usize {
-let orphans = self.orphans.read().unwrap();
+let orphans = self.orphans.read();
 orphans.len()
 }
 
@@ -84,8 +84,8 @@ impl OrphanBlockPool {
 }
 
 fn add(&self, orphan: Orphan) {
-let mut orphans = self.orphans.write().unwrap();
-let mut height_idx = self.height_idx.write().unwrap();
+let mut orphans = self.orphans.write();
+let mut height_idx = self.height_idx.write();
 {
 let height_hashes = height_idx
 .entry(orphan.block.header.height)
@@ -125,15 +125,15 @@ impl OrphanBlockPool {
 /// Get an orphan from the pool indexed by the hash of its parent, removing
 /// it at the same time, preventing clone
 fn remove_by_height(&self, height: &u64) -> Option<Vec<Orphan>> {
-let mut orphans = self.orphans.write().unwrap();
-let mut height_idx = self.height_idx.write().unwrap();
+let mut orphans = self.orphans.write();
+let mut height_idx = self.height_idx.write();
 height_idx
 .remove(height)
 .map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
 }
 
 pub fn contains(&self, hash: &Hash) -> bool {
-let orphans = self.orphans.read().unwrap();
+let orphans = self.orphans.read();
 orphans.contains_key(hash)
 }
 }
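The orphan pool touched above keeps two maps behind its locks: the orphans themselves keyed by block hash, plus a height index so remove_by_height can drain every orphan waiting at a given height in one pass. A stripped-down sketch of that shape, with simplified field and type names rather than the actual grin definitions:

use std::collections::HashMap;

type Hash = [u8; 32];

struct Orphan {
    height: u64,
}

#[derive(Default)]
struct OrphanPool {
    orphans: HashMap<Hash, Orphan>,      // orphan blocks keyed by their hash
    height_idx: HashMap<u64, Vec<Hash>>, // hashes of the orphans at each height
}

impl OrphanPool {
    fn add(&mut self, hash: Hash, orphan: Orphan) {
        self.height_idx.entry(orphan.height).or_insert_with(Vec::new).push(hash);
        self.orphans.insert(hash, orphan);
    }

    // Mirror of remove_by_height above: take the hashes recorded at this
    // height, then pull the matching orphans out of the main map.
    fn remove_by_height(&mut self, height: u64) -> Option<Vec<Orphan>> {
        let hashes = self.height_idx.remove(&height)?;
        Some(hashes.iter().filter_map(|h| self.orphans.remove(h)).collect())
    }
}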
@ -183,7 +183,6 @@ impl Chain {
|
||||||
|
|
||||||
let head = store.head()?;
|
let head = store.head()?;
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"Chain init: {} @ {} [{}]",
|
"Chain init: {} @ {} [{}]",
|
||||||
head.total_difficulty.to_num(),
|
head.total_difficulty.to_num(),
|
||||||
head.height,
|
head.height,
|
||||||
|
@ -221,7 +220,7 @@ impl Chain {
|
||||||
fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
|
fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
|
||||||
let maybe_new_head: Result<Option<Tip>, Error>;
|
let maybe_new_head: Result<Option<Tip>, Error>;
|
||||||
{
|
{
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let batch = self.store.batch()?;
|
let batch = self.store.batch()?;
|
||||||
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
|
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
|
||||||
|
|
||||||
|
@ -235,7 +234,7 @@ impl Chain {
|
||||||
let add_to_hash_cache = |hash: Hash| {
|
let add_to_hash_cache = |hash: Hash| {
|
||||||
// only add to hash cache below if block is definitively accepted
|
// only add to hash cache below if block is definitively accepted
|
||||||
// or rejected
|
// or rejected
|
||||||
let mut cache = self.block_hashes_cache.write().unwrap();
|
let mut cache = self.block_hashes_cache.write();
|
||||||
cache.insert(hash, true);
|
cache.insert(hash, true);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -260,7 +259,6 @@ impl Chain {
|
||||||
&self.orphans.add(orphan);
|
&self.orphans.add(orphan);
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"process_block: orphan: {:?}, # orphans {}{}",
|
"process_block: orphan: {:?}, # orphans {}{}",
|
||||||
block_hash,
|
block_hash,
|
||||||
self.orphans.len(),
|
self.orphans.len(),
|
||||||
|
@ -274,7 +272,6 @@ impl Chain {
|
||||||
}
|
}
|
||||||
ErrorKind::Unfit(ref msg) => {
|
ErrorKind::Unfit(ref msg) => {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"Block {} at {} is unfit at this time: {}",
|
"Block {} at {} is unfit at this time: {}",
|
||||||
b.hash(),
|
b.hash(),
|
||||||
b.header.height,
|
b.header.height,
|
||||||
|
@ -284,7 +281,6 @@ impl Chain {
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
info!(
|
info!(
|
||||||
LOGGER,
|
|
||||||
"Rejected block {} at {}: {:?}",
|
"Rejected block {} at {}: {:?}",
|
||||||
b.hash(),
|
b.hash(),
|
||||||
b.header.height,
|
b.header.height,
|
||||||
|
@ -299,7 +295,7 @@ impl Chain {
|
||||||
|
|
||||||
/// Process a block header received during "header first" propagation.
|
/// Process a block header received during "header first" propagation.
|
||||||
pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
|
pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let batch = self.store.batch()?;
|
let batch = self.store.batch()?;
|
||||||
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
|
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
|
||||||
pipe::process_block_header(bh, &mut ctx)?;
|
pipe::process_block_header(bh, &mut ctx)?;
|
||||||
|
@ -315,7 +311,7 @@ impl Chain {
|
||||||
headers: &Vec<BlockHeader>,
|
headers: &Vec<BlockHeader>,
|
||||||
opts: Options,
|
opts: Options,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let batch = self.store.batch()?;
|
let batch = self.store.batch()?;
|
||||||
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
|
let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
|
||||||
|
|
||||||
|
@ -359,7 +355,6 @@ impl Chain {
|
||||||
// Is there an orphan in our orphans that we can now process?
|
// Is there an orphan in our orphans that we can now process?
|
||||||
loop {
|
loop {
|
||||||
trace!(
|
trace!(
|
||||||
LOGGER,
|
|
||||||
"check_orphans: at {}, # orphans {}",
|
"check_orphans: at {}, # orphans {}",
|
||||||
height,
|
height,
|
||||||
self.orphans.len(),
|
self.orphans.len(),
|
||||||
|
@ -372,7 +367,6 @@ impl Chain {
|
||||||
let orphans_len = orphans.len();
|
let orphans_len = orphans.len();
|
||||||
for (i, orphan) in orphans.into_iter().enumerate() {
|
for (i, orphan) in orphans.into_iter().enumerate() {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"check_orphans: get block {} at {}{}",
|
"check_orphans: get block {} at {}{}",
|
||||||
orphan.block.hash(),
|
orphan.block.hash(),
|
||||||
height,
|
height,
|
||||||
|
@ -401,7 +395,6 @@ impl Chain {
|
||||||
|
|
||||||
if initial_height != height {
|
if initial_height != height {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
|
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
|
||||||
height - initial_height,
|
height - initial_height,
|
||||||
initial_height,
|
initial_height,
|
||||||
|
@ -417,7 +410,7 @@ impl Chain {
|
||||||
/// current chain state, specifically the current winning (valid, most
|
/// current chain state, specifically the current winning (valid, most
|
||||||
/// work) fork.
|
/// work) fork.
|
||||||
pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, Error> {
|
pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let res = txhashset.is_unspent(output_ref);
|
let res = txhashset.is_unspent(output_ref);
|
||||||
match res {
|
match res {
|
||||||
Err(e) => Err(e),
|
Err(e) => Err(e),
|
||||||
|
@ -427,7 +420,7 @@ impl Chain {
|
||||||
|
|
||||||
/// Validate the tx against the current UTXO set.
|
/// Validate the tx against the current UTXO set.
|
||||||
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
|
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
|
||||||
let txhashset = self.txhashset.read().unwrap();
|
let txhashset = self.txhashset.read();
|
||||||
txhashset::utxo_view(&txhashset, |utxo| {
|
txhashset::utxo_view(&txhashset, |utxo| {
|
||||||
utxo.validate_tx(tx)?;
|
utxo.validate_tx(tx)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -443,7 +436,7 @@ impl Chain {
|
||||||
/// that has not yet sufficiently matured.
|
/// that has not yet sufficiently matured.
|
||||||
pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
|
pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
|
||||||
let height = self.next_block_height()?;
|
let height = self.next_block_height()?;
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
txhashset::extending_readonly(&mut txhashset, |extension| {
|
txhashset::extending_readonly(&mut txhashset, |extension| {
|
||||||
extension.verify_coinbase_maturity(&tx.inputs(), height)?;
|
extension.verify_coinbase_maturity(&tx.inputs(), height)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -470,7 +463,7 @@ impl Chain {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
|
|
||||||
// Now create an extension from the txhashset and validate against the
|
// Now create an extension from the txhashset and validate against the
|
||||||
// latest block header. Rewind the extension to the specified header to
|
// latest block header. Rewind the extension to the specified header to
|
||||||
|
@ -485,7 +478,7 @@ impl Chain {
|
||||||
/// Sets the txhashset roots on a brand new block by applying the block on
|
/// Sets the txhashset roots on a brand new block by applying the block on
|
||||||
/// the current txhashset state.
|
/// the current txhashset state.
|
||||||
pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
|
pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let (prev_root, roots, sizes) =
|
let (prev_root, roots, sizes) =
|
||||||
txhashset::extending_readonly(&mut txhashset, |extension| {
|
txhashset::extending_readonly(&mut txhashset, |extension| {
|
||||||
if is_fork {
|
if is_fork {
|
||||||
|
@ -526,7 +519,7 @@ impl Chain {
|
||||||
output: &OutputIdentifier,
|
output: &OutputIdentifier,
|
||||||
block_header: &BlockHeader,
|
block_header: &BlockHeader,
|
||||||
) -> Result<MerkleProof, Error> {
|
) -> Result<MerkleProof, Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
|
|
||||||
let merkle_proof = txhashset::extending_readonly(&mut txhashset, |extension| {
|
let merkle_proof = txhashset::extending_readonly(&mut txhashset, |extension| {
|
||||||
extension.rewind(&block_header)?;
|
extension.rewind(&block_header)?;
|
||||||
|
@ -539,13 +532,13 @@ impl Chain {
|
||||||
/// Return a merkle proof valid for the current output pmmr state at the
|
/// Return a merkle proof valid for the current output pmmr state at the
|
||||||
/// given pos
|
/// given pos
|
||||||
pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, String> {
|
pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, String> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
txhashset.merkle_proof(commit)
|
txhashset.merkle_proof(commit)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns current txhashset roots
|
/// Returns current txhashset roots
|
||||||
pub fn get_txhashset_roots(&self) -> TxHashSetRoots {
|
pub fn get_txhashset_roots(&self) -> TxHashSetRoots {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
txhashset.roots()
|
txhashset.roots()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -560,7 +553,7 @@ impl Chain {
|
||||||
// to rewind after receiving the txhashset zip.
|
// to rewind after receiving the txhashset zip.
|
||||||
let header = self.get_block_header(&h)?;
|
let header = self.get_block_header(&h)?;
|
||||||
{
|
{
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
txhashset::extending_readonly(&mut txhashset, |extension| {
|
txhashset::extending_readonly(&mut txhashset, |extension| {
|
||||||
extension.rewind(&header)?;
|
extension.rewind(&header)?;
|
||||||
extension.snapshot()?;
|
extension.snapshot()?;
|
||||||
|
@ -588,7 +581,6 @@ impl Chain {
|
||||||
txhashset: &txhashset::TxHashSet,
|
txhashset: &txhashset::TxHashSet,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
|
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -605,8 +597,8 @@ impl Chain {
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
"chain: validate_kernel_history: validated kernel root on {} headers",
|
||||||
"chain: validate_kernel_history: validated kernel root on {} headers", count,
|
count,
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -617,7 +609,7 @@ impl Chain {
|
||||||
/// have an MMR we can safely rewind based on the headers received from a peer.
|
/// have an MMR we can safely rewind based on the headers received from a peer.
|
||||||
/// TODO - think about how to optimize this.
|
/// TODO - think about how to optimize this.
|
||||||
pub fn rebuild_sync_mmr(&self, head: &Tip) -> Result<(), Error> {
|
pub fn rebuild_sync_mmr(&self, head: &Tip) -> Result<(), Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let mut batch = self.store.batch()?;
|
let mut batch = self.store.batch()?;
|
||||||
txhashset::sync_extending(&mut txhashset, &mut batch, |extension| {
|
txhashset::sync_extending(&mut txhashset, &mut batch, |extension| {
|
||||||
extension.rebuild(head, &self.genesis)?;
|
extension.rebuild(head, &self.genesis)?;
|
||||||
|
@ -681,10 +673,7 @@ impl Chain {
|
||||||
self.validate_kernel_history(&header, &txhashset)?;
|
self.validate_kernel_history(&header, &txhashset)?;
|
||||||
|
|
||||||
// all good, prepare a new batch and update all the required records
|
// all good, prepare a new batch and update all the required records
|
||||||
debug!(
|
debug!("chain: txhashset_write: rewinding a 2nd time (writeable)");
|
||||||
LOGGER,
|
|
||||||
"chain: txhashset_write: rewinding a 2nd time (writeable)"
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut batch = self.store.batch()?;
|
let mut batch = self.store.batch()?;
|
||||||
|
|
||||||
|
@ -708,10 +697,7 @@ impl Chain {
|
||||||
Ok(())
|
Ok(())
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
debug!(
|
debug!("chain: txhashset_write: finished validating and rebuilding");
|
||||||
LOGGER,
|
|
||||||
"chain: txhashset_write: finished validating and rebuilding"
|
|
||||||
);
|
|
||||||
|
|
||||||
status.on_save();
|
status.on_save();
|
||||||
|
|
||||||
|
@ -726,21 +712,15 @@ impl Chain {
|
||||||
// Commit all the changes to the db.
|
// Commit all the changes to the db.
|
||||||
batch.commit()?;
|
batch.commit()?;
|
||||||
|
|
||||||
debug!(
|
debug!("chain: txhashset_write: finished committing the batch (head etc.)");
|
||||||
LOGGER,
|
|
||||||
"chain: txhashset_write: finished committing the batch (head etc.)"
|
|
||||||
);
|
|
||||||
|
|
||||||
// Replace the chain txhashset with the newly built one.
|
// Replace the chain txhashset with the newly built one.
|
||||||
{
|
{
|
||||||
let mut txhashset_ref = self.txhashset.write().unwrap();
|
let mut txhashset_ref = self.txhashset.write();
|
||||||
*txhashset_ref = txhashset;
|
*txhashset_ref = txhashset;
|
||||||
}
|
}
|
||||||
|
|
||||||
debug!(
|
debug!("chain: txhashset_write: replaced our txhashset with the new one");
|
||||||
LOGGER,
|
|
||||||
"chain: txhashset_write: replaced our txhashset with the new one"
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check for any orphan blocks and process them based on the new chain state.
|
// Check for any orphan blocks and process them based on the new chain state.
|
||||||
self.check_orphans(header.height + 1);
|
self.check_orphans(header.height + 1);
|
||||||
|
@@ -749,33 +729,11 @@
 Ok(())
 }
 
-/// Triggers chain compaction, cleaning up some unnecessary historical
-/// information. We introduce a chain depth called horizon, which is
-/// typically in the range of a couple days. Before that horizon, this
-/// method will:
-///
-/// * compact the MMRs data files and flushing the corresponding remove logs
-/// * delete old records from the k/v store (older blocks, indexes, etc.)
-///
-/// This operation can be resource intensive and takes some time to execute.
-/// Meanwhile, the chain will not be able to accept new blocks. It should
-/// therefore be called judiciously.
-pub fn compact(&self) -> Result<(), Error> {
-if self.archive_mode {
-debug!(
-LOGGER,
-"Blockchain compaction disabled, node running in archive mode."
-);
-return Ok(());
-}
-
-debug!(LOGGER, "Starting blockchain compaction.");
-// Compact the txhashset via the extension.
+fn compact_txhashset(&self) -> Result<(), Error> {
+debug!("Starting blockchain compaction.");
 {
-let mut txhashset = self.txhashset.write().unwrap();
+let mut txhashset = self.txhashset.write();
 txhashset.compact()?;
-
-// print out useful debug info after compaction
 txhashset::extending_readonly(&mut txhashset, |extension| {
 extension.dump_output_pmmr();
 Ok(())
@@ -784,23 +742,33 @@
 
 // Now check we can still successfully validate the chain state after
 // compacting, shouldn't be necessary once all of this is well-oiled
-debug!(LOGGER, "Validating state after compaction.");
+debug!("Validating state after compaction.");
 self.validate(true)?;
+Ok(())
+}
 
-// we need to be careful here in testing as 20 blocks is not that long
-// in wall clock time
-let horizon = global::cut_through_horizon() as u64;
-let head = self.head()?;
-
-if head.height <= horizon {
+/// Cleanup old blocks from the db.
+/// Determine the cutoff height from the horizon and the current block height.
+/// *Only* runs if we are not in archive mode.
+fn compact_blocks_db(&self) -> Result<(), Error> {
+if self.archive_mode {
 return Ok(());
 }
 
+let horizon = global::cut_through_horizon() as u64;
+let head = self.head()?;
+
+let cutoff = head.height.saturating_sub(horizon);
+
 debug!(
-LOGGER,
-"Compaction remove blocks older than {}.",
-head.height - horizon
+"chain: compact_blocks_db: head height: {}, horizon: {}, cutoff: {}",
+head.height, horizon, cutoff,
 );
+
+if cutoff == 0 {
+return Ok(());
+}
+
 let mut count = 0;
 let batch = self.store.batch()?;
 let mut current = batch.get_header_by_height(head.height - horizon - 1)?;
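To make the new cutoff logic concrete: with the horizon assumed at 10 for illustration (the real cut_through_horizon() value is a chain parameter), a head at height 8 gives a cutoff of 0 via saturating_sub and the function returns early, while a head at height 100 gives a cutoff of 90, so blocks below that height become candidates for removal. A tiny check of that arithmetic:

fn cutoff(head_height: u64, horizon: u64) -> u64 {
    head_height.saturating_sub(horizon)
}

fn main() {
    // young chain: nothing old enough to remove, compact_blocks_db bails out on 0
    assert_eq!(cutoff(8, 10), 0);
    // mature chain: everything below height 90 is past the horizon
    assert_eq!(cutoff(100, 10), 90);
}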
@@ -830,25 +798,40 @@ impl Chain {
 }
 }
 batch.commit()?;
-debug!(LOGGER, "Compaction removed {} blocks, done.", count);
+debug!("chain: compact_blocks_db: removed {} blocks.", count);
+Ok(())
+}
+
+/// Triggers chain compaction.
+///
+/// * compacts the txhashset based on current prune_list
+/// * removes historical blocks and associated data from the db (unless archive mode)
+///
+pub fn compact(&self) -> Result<(), Error> {
+self.compact_txhashset()?;
+
+if !self.archive_mode {
+self.compact_blocks_db()?;
+}
+
 Ok(())
 }
 
 /// returns the last n nodes inserted into the output sum tree
 pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
-let mut txhashset = self.txhashset.write().unwrap();
+let mut txhashset = self.txhashset.write();
 txhashset.last_n_output(distance)
 }
 
 /// as above, for rangeproofs
 pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
-let mut txhashset = self.txhashset.write().unwrap();
+let mut txhashset = self.txhashset.write();
 txhashset.last_n_rangeproof(distance)
 }
 
 /// as above, for kernels
 pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
-let mut txhashset = self.txhashset.write().unwrap();
+let mut txhashset = self.txhashset.write();
 txhashset.last_n_kernel(distance)
 }
 
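With the split above, compact() stays the only public entry point: it always compacts the txhashset and only prunes the block db when the node is not in archive mode. A hedged sketch of how a caller might drive it; the periodic trigger and the error type are illustrative, not grin's actual scheduling code:

// Stand-in trait for the refactored Chain API shown above.
trait Compactable {
    fn compact(&self) -> Result<(), String>;
}

// Run compaction every `interval` accepted blocks; the policy is assumed.
fn maybe_compact<C: Compactable>(chain: &C, height: u64, interval: u64) {
    if interval > 0 && height % interval == 0 {
        if let Err(e) = chain.compact() {
            eprintln!("compaction failed: {}", e);
        }
    }
}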
@ -858,7 +841,7 @@ impl Chain {
|
||||||
start_index: u64,
|
start_index: u64,
|
||||||
max: u64,
|
max: u64,
|
||||||
) -> Result<(u64, u64, Vec<Output>), Error> {
|
) -> Result<(u64, u64, Vec<Output>), Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let max_index = txhashset.highest_output_insertion_index();
|
let max_index = txhashset.highest_output_insertion_index();
|
||||||
let outputs = txhashset.outputs_by_insertion_index(start_index, max);
|
let outputs = txhashset.outputs_by_insertion_index(start_index, max);
|
||||||
let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
|
let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
|
||||||
|
@ -945,7 +928,7 @@ impl Chain {
|
||||||
&self,
|
&self,
|
||||||
output_ref: &OutputIdentifier,
|
output_ref: &OutputIdentifier,
|
||||||
) -> Result<BlockHeader, Error> {
|
) -> Result<BlockHeader, Error> {
|
||||||
let mut txhashset = self.txhashset.write().unwrap();
|
let mut txhashset = self.txhashset.write();
|
||||||
let (_, pos) = txhashset.is_unspent(output_ref)?;
|
let (_, pos) = txhashset.is_unspent(output_ref)?;
|
||||||
let mut min = 1;
|
let mut min = 1;
|
||||||
let mut max = {
|
let mut max = {
|
||||||
|
@ -1051,7 +1034,6 @@ fn setup_head(
|
||||||
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
|
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
|
||||||
{
|
{
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"chain: init: building (missing) block sums for {} @ {}",
|
"chain: init: building (missing) block sums for {} @ {}",
|
||||||
header.height,
|
header.height,
|
||||||
header.hash()
|
header.hash()
|
||||||
|
@ -1072,7 +1054,6 @@ fn setup_head(
|
||||||
}
|
}
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"chain: init: rewinding and validating before we start... {} at {}",
|
"chain: init: rewinding and validating before we start... {} at {}",
|
||||||
header.hash(),
|
header.hash(),
|
||||||
header.height,
|
header.height,
|
||||||
|
@ -1109,7 +1090,7 @@ fn setup_head(
|
||||||
// Save the block_sums to the db for use later.
|
// Save the block_sums to the db for use later.
|
||||||
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
|
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
|
||||||
|
|
||||||
info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash());
|
info!("chain: init: saved genesis: {:?}", genesis.hash());
|
||||||
}
|
}
|
||||||
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
|
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
|
||||||
};
|
};
|
||||||
|
|
|
@ -30,7 +30,7 @@ extern crate serde;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate serde_derive;
|
extern crate serde_derive;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate slog;
|
extern crate log;
|
||||||
extern crate chrono;
|
extern crate chrono;
|
||||||
extern crate failure;
|
extern crate failure;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
|
|
|
@ -14,7 +14,8 @@
|
||||||
|
|
||||||
//! Implementation of the chain block acceptance (or refusal) pipeline.
|
//! Implementation of the chain block acceptance (or refusal) pipeline.
|
||||||
|
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
use chrono::prelude::Utc;
|
use chrono::prelude::Utc;
|
||||||
use chrono::Duration;
|
use chrono::Duration;
|
||||||
|
@ -34,7 +35,6 @@ use grin_store;
|
||||||
use store;
|
use store;
|
||||||
use txhashset;
|
use txhashset;
|
||||||
use types::{Options, Tip};
|
use types::{Options, Tip};
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
/// Contextual information required to process a new block and either reject or
|
/// Contextual information required to process a new block and either reject or
|
||||||
/// accept it.
|
/// accept it.
|
||||||
|
@ -70,7 +70,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
|
||||||
// spend resources reading the full block when its header is invalid
|
// spend resources reading the full block when its header is invalid
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
|
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
|
||||||
b.hash(),
|
b.hash(),
|
||||||
b.header.height,
|
b.header.height,
|
||||||
|
@ -167,7 +166,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
trace!(
|
trace!(
|
||||||
LOGGER,
|
|
||||||
"pipe: process_block: {} at {} is valid, save and append.",
|
"pipe: process_block: {} at {} is valid, save and append.",
|
||||||
b.hash(),
|
b.hash(),
|
||||||
b.header.height,
|
b.header.height,
|
||||||
|
@ -189,7 +187,6 @@ pub fn sync_block_headers(
|
||||||
) -> Result<Option<Tip>, Error> {
|
) -> Result<Option<Tip>, Error> {
|
||||||
if let Some(header) = headers.first() {
|
if let Some(header) = headers.first() {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"pipe: sync_block_headers: {} headers from {} at {}",
|
"pipe: sync_block_headers: {} headers from {} at {}",
|
||||||
headers.len(),
|
headers.len(),
|
||||||
header.hash(),
|
header.hash(),
|
||||||
|
@ -250,7 +247,6 @@ pub fn sync_block_headers(
|
||||||
/// it.
|
/// it.
|
||||||
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
|
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"pipe: process_block_header: {} at {}",
|
"pipe: process_block_header: {} at {}",
|
||||||
header.hash(),
|
header.hash(),
|
||||||
header.height,
|
header.height,
|
||||||
|
@ -288,7 +284,7 @@ fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(),
|
||||||
/// Keeps duplicates from the network in check.
|
/// Keeps duplicates from the network in check.
|
||||||
/// Checks against the cache of recently processed block hashes.
|
/// Checks against the cache of recently processed block hashes.
|
||||||
fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
|
fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
|
||||||
let mut cache = ctx.block_hashes_cache.write().unwrap();
|
let mut cache = ctx.block_hashes_cache.write();
|
||||||
if cache.contains_key(&header.hash()) {
|
if cache.contains_key(&header.hash()) {
|
||||||
return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
|
return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
|
||||||
}
|
}
|
||||||
|
@ -355,8 +351,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
|
||||||
// check version, enforces scheduled hard fork
|
// check version, enforces scheduled hard fork
|
||||||
if !consensus::valid_header_version(header.height, header.version) {
|
if !consensus::valid_header_version(header.height, header.version) {
|
||||||
error!(
|
error!(
|
||||||
LOGGER,
|
"Invalid block header version received ({}), maybe update Grin?",
|
||||||
"Invalid block header version received ({}), maybe update Grin?", header.version
|
header.version
|
||||||
);
|
);
|
||||||
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
|
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
|
||||||
}
|
}
|
||||||
|
@ -377,8 +373,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
|
||||||
let edge_bits = header.pow.edge_bits();
|
let edge_bits = header.pow.edge_bits();
|
||||||
if !(ctx.pow_verifier)(header, edge_bits).is_ok() {
|
if !(ctx.pow_verifier)(header, edge_bits).is_ok() {
|
||||||
error!(
|
error!(
|
||||||
LOGGER,
|
"pipe: error validating header with cuckoo edge_bits {}",
|
||||||
"pipe: error validating header with cuckoo edge_bits {}", edge_bits
|
edge_bits
|
||||||
);
|
);
|
||||||
return Err(ErrorKind::InvalidPow.into());
|
return Err(ErrorKind::InvalidPow.into());
|
||||||
}
|
}
|
||||||
|
@ -433,7 +429,6 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
|
||||||
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
|
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
|
||||||
if target_difficulty != next_header_info.difficulty {
|
if target_difficulty != next_header_info.difficulty {
|
||||||
info!(
|
info!(
|
||||||
LOGGER,
|
|
||||||
"validate_header: header target difficulty {} != {}",
|
"validate_header: header target difficulty {} != {}",
|
||||||
target_difficulty.to_num(),
|
target_difficulty.to_num(),
|
||||||
next_header_info.difficulty.to_num()
|
next_header_info.difficulty.to_num()
|
||||||
|
@ -553,8 +548,8 @@ fn update_head(b: &Block, ctx: &BlockContext) -> Result<Option<Tip>, Error> {
|
||||||
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
"pipe: head updated to {} at {}",
|
||||||
"pipe: head updated to {} at {}", tip.last_block_h, tip.height
|
tip.last_block_h, tip.height
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(Some(tip))
|
Ok(Some(tip))
|
||||||
|
@ -574,7 +569,7 @@ fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Er
|
||||||
batch
|
batch
|
||||||
.save_sync_head(&tip)
|
.save_sync_head(&tip)
|
||||||
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
|
||||||
debug!(LOGGER, "sync head {} @ {}", bh.hash(), bh.height);
|
debug!("sync head {} @ {}", bh.hash(), bh.height);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -588,8 +583,8 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
|
||||||
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
"pipe: header_head updated to {} at {}",
|
||||||
"pipe: header_head updated to {} at {}", tip.last_block_h, tip.height
|
tip.last_block_h, tip.height
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(Some(tip))
|
Ok(Some(tip))
|
||||||
|
@ -621,7 +616,6 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
|
||||||
let forked_header = ext.batch.get_block_header(¤t)?;
|
let forked_header = ext.batch.get_block_header(¤t)?;
|
||||||
|
|
||||||
trace!(
|
trace!(
|
||||||
LOGGER,
|
|
||||||
"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
|
"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
|
||||||
forked_header.height,
|
forked_header.height,
|
||||||
forked_header.hash(),
|
forked_header.hash(),
|
||||||
|
@ -632,11 +626,7 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
|
||||||
// Rewind the txhashset state back to the block where we forked from the most work chain.
|
// Rewind the txhashset state back to the block where we forked from the most work chain.
|
||||||
ext.rewind(&forked_header)?;
|
ext.rewind(&forked_header)?;
|
||||||
|
|
||||||
trace!(
|
trace!("rewind_and_apply_fork: blocks on fork: {:?}", fork_hashes,);
|
||||||
LOGGER,
|
|
||||||
"rewind_and_apply_fork: blocks on fork: {:?}",
|
|
||||||
fork_hashes,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Now re-apply all blocks on this fork.
|
// Now re-apply all blocks on this fork.
|
||||||
for (_, h) in fork_hashes {
|
for (_, h) in fork_hashes {
|
||||||
|
|
|
@ -14,7 +14,8 @@
|
||||||
|
|
||||||
//! Implements storage primitives required by the chain
|
//! Implements storage primitives required by the chain
|
||||||
|
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
use croaring::Bitmap;
|
use croaring::Bitmap;
|
||||||
use lmdb;
|
use lmdb;
|
||||||
|
@ -96,7 +97,7 @@ impl ChainStore {
|
||||||
|
|
||||||
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
|
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
|
||||||
{
|
{
|
||||||
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
|
let mut block_sums_cache = self.block_sums_cache.write();
|
||||||
|
|
||||||
// cache hit - return the value from the cache
|
// cache hit - return the value from the cache
|
||||||
if let Some(block_sums) = block_sums_cache.get_mut(h) {
|
if let Some(block_sums) = block_sums_cache.get_mut(h) {
|
||||||
|
@ -112,7 +113,7 @@ impl ChainStore {
|
||||||
// cache miss - so adding to the cache for next time
|
// cache miss - so adding to the cache for next time
|
||||||
if let Ok(block_sums) = block_sums {
|
if let Ok(block_sums) = block_sums {
|
||||||
{
|
{
|
||||||
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
|
let mut block_sums_cache = self.block_sums_cache.write();
|
||||||
block_sums_cache.insert(*h, block_sums.clone());
|
block_sums_cache.insert(*h, block_sums.clone());
|
||||||
}
|
}
|
||||||
Ok(block_sums)
|
Ok(block_sums)
|
||||||
|
@ -123,7 +124,7 @@ impl ChainStore {
|
||||||
|
|
||||||
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
|
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
|
||||||
{
|
{
|
||||||
let mut header_cache = self.header_cache.write().unwrap();
|
let mut header_cache = self.header_cache.write();
|
||||||
|
|
||||||
// cache hit - return the value from the cache
|
// cache hit - return the value from the cache
|
||||||
if let Some(header) = header_cache.get_mut(h) {
|
if let Some(header) = header_cache.get_mut(h) {
|
||||||
|
@ -140,7 +141,7 @@ impl ChainStore {
|
||||||
// cache miss - so adding to the cache for next time
|
// cache miss - so adding to the cache for next time
|
||||||
if let Ok(header) = header {
|
if let Ok(header) = header {
|
||||||
{
|
{
|
||||||
let mut header_cache = self.header_cache.write().unwrap();
|
let mut header_cache = self.header_cache.write();
|
||||||
header_cache.insert(*h, header.clone());
|
header_cache.insert(*h, header.clone());
|
||||||
}
|
}
|
||||||
Ok(header)
|
Ok(header)
|
||||||
|
@ -310,7 +311,7 @@ impl<'a> Batch<'a> {
|
||||||
let hash = header.hash();
|
let hash = header.hash();
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut header_cache = self.header_cache.write().unwrap();
|
let mut header_cache = self.header_cache.write();
|
||||||
header_cache.insert(hash, header.clone());
|
header_cache.insert(hash, header.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -350,7 +351,7 @@ impl<'a> Batch<'a> {
|
||||||
|
|
||||||
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
|
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
|
||||||
{
|
{
|
||||||
let mut header_cache = self.header_cache.write().unwrap();
|
let mut header_cache = self.header_cache.write();
|
||||||
|
|
||||||
// cache hit - return the value from the cache
|
// cache hit - return the value from the cache
|
||||||
if let Some(header) = header_cache.get_mut(h) {
|
if let Some(header) = header_cache.get_mut(h) {
|
||||||
|
@ -367,7 +368,7 @@ impl<'a> Batch<'a> {
|
||||||
// cache miss - so adding to the cache for next time
|
// cache miss - so adding to the cache for next time
|
||||||
if let Ok(header) = header {
|
if let Ok(header) = header {
|
||||||
{
|
{
|
||||||
let mut header_cache = self.header_cache.write().unwrap();
|
let mut header_cache = self.header_cache.write();
|
||||||
header_cache.insert(*h, header.clone());
|
header_cache.insert(*h, header.clone());
|
||||||
}
|
}
|
||||||
Ok(header)
|
Ok(header)
|
||||||
|
@ -390,7 +391,7 @@ impl<'a> Batch<'a> {
|
||||||
|
|
||||||
pub fn save_block_sums(&self, h: &Hash, sums: &BlockSums) -> Result<(), Error> {
|
pub fn save_block_sums(&self, h: &Hash, sums: &BlockSums) -> Result<(), Error> {
|
||||||
{
|
{
|
||||||
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
|
let mut block_sums_cache = self.block_sums_cache.write();
|
||||||
block_sums_cache.insert(*h, sums.clone());
|
block_sums_cache.insert(*h, sums.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -400,7 +401,7 @@ impl<'a> Batch<'a> {
|
||||||
|
|
||||||
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
|
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
|
||||||
{
|
{
|
||||||
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
|
let mut block_sums_cache = self.block_sums_cache.write();
|
||||||
|
|
||||||
// cache hit - return the value from the cache
|
// cache hit - return the value from the cache
|
||||||
if let Some(block_sums) = block_sums_cache.get_mut(h) {
|
if let Some(block_sums) = block_sums_cache.get_mut(h) {
|
||||||
|
@ -416,7 +417,7 @@ impl<'a> Batch<'a> {
|
||||||
// cache miss - so adding to the cache for next time
|
// cache miss - so adding to the cache for next time
|
||||||
if let Ok(block_sums) = block_sums {
|
if let Ok(block_sums) = block_sums {
|
||||||
{
|
{
|
||||||
let mut block_sums_cache = self.block_sums_cache.write().unwrap();
|
let mut block_sums_cache = self.block_sums_cache.write();
|
||||||
block_sums_cache.insert(*h, block_sums.clone());
|
block_sums_cache.insert(*h, block_sums.clone());
|
||||||
}
|
}
|
||||||
Ok(block_sums)
|
Ok(block_sums)
|
||||||
|
@ -511,7 +512,7 @@ impl<'a> Batch<'a> {
|
||||||
self.save_block_input_bitmap(&block.hash(), &bitmap)?;
|
self.save_block_input_bitmap(&block.hash(), &bitmap)?;
|
||||||
|
|
||||||
// Finally cache it locally for use later.
|
// Finally cache it locally for use later.
|
||||||
let mut cache = self.block_input_bitmap_cache.write().unwrap();
|
let mut cache = self.block_input_bitmap_cache.write();
|
||||||
cache.insert(block.hash(), bitmap.serialize());
|
cache.insert(block.hash(), bitmap.serialize());
|
||||||
|
|
||||||
Ok(bitmap)
|
Ok(bitmap)
|
||||||
|
@ -519,7 +520,7 @@ impl<'a> Batch<'a> {
|
||||||
|
|
||||||
pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
|
pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
|
||||||
{
|
{
|
||||||
let mut cache = self.block_input_bitmap_cache.write().unwrap();
|
let mut cache = self.block_input_bitmap_cache.write();
|
||||||
|
|
||||||
// cache hit - return the value from the cache
|
// cache hit - return the value from the cache
|
||||||
if let Some(bytes) = cache.get_mut(bh) {
|
if let Some(bytes) = cache.get_mut(bh) {
|
||||||
|
|
|
@ -40,7 +40,7 @@ use grin_store::types::prune_noop;
|
||||||
use store::{Batch, ChainStore};
|
use store::{Batch, ChainStore};
|
||||||
use txhashset::{RewindableKernelView, UTXOView};
|
use txhashset::{RewindableKernelView, UTXOView};
|
||||||
use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
|
use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
|
||||||
use util::{file, secp_static, zip, LOGGER};
|
use util::{file, secp_static, zip};
|
||||||
|
|
||||||
const HEADERHASHSET_SUBDIR: &'static str = "header";
|
const HEADERHASHSET_SUBDIR: &'static str = "header";
|
||||||
const TXHASHSET_SUBDIR: &'static str = "txhashset";
|
const TXHASHSET_SUBDIR: &'static str = "txhashset";
|
||||||
|
@ -328,7 +328,7 @@ where
|
||||||
// we explicitly rewind the extension.
|
// we explicitly rewind the extension.
|
||||||
let header = batch.head_header()?;
|
let header = batch.head_header()?;
|
||||||
|
|
||||||
trace!(LOGGER, "Starting new txhashset (readonly) extension.");
|
trace!("Starting new txhashset (readonly) extension.");
|
||||||
|
|
||||||
let res = {
|
let res = {
|
||||||
let mut extension = Extension::new(trees, &batch, header);
|
let mut extension = Extension::new(trees, &batch, header);
|
||||||
|
@ -340,14 +340,14 @@ where
|
||||||
inner(&mut extension)
|
inner(&mut extension)
|
||||||
};
|
};
|
||||||
|
|
||||||
trace!(LOGGER, "Rollbacking txhashset (readonly) extension.");
|
trace!("Rollbacking txhashset (readonly) extension.");
|
||||||
|
|
||||||
trees.header_pmmr_h.backend.discard();
|
trees.header_pmmr_h.backend.discard();
|
||||||
trees.output_pmmr_h.backend.discard();
|
trees.output_pmmr_h.backend.discard();
|
||||||
trees.rproof_pmmr_h.backend.discard();
|
trees.rproof_pmmr_h.backend.discard();
|
||||||
trees.kernel_pmmr_h.backend.discard();
|
trees.kernel_pmmr_h.backend.discard();
|
||||||
|
|
||||||
trace!(LOGGER, "TxHashSet (readonly) extension done.");
|
trace!("TxHashSet (readonly) extension done.");
|
||||||
|
|
||||||
res
|
res
|
||||||
}
|
}
|
||||||
|
@ -423,7 +423,7 @@ where
|
||||||
// index saving can be undone
|
// index saving can be undone
|
||||||
let child_batch = batch.child()?;
|
let child_batch = batch.child()?;
|
||||||
{
|
{
|
||||||
trace!(LOGGER, "Starting new txhashset extension.");
|
trace!("Starting new txhashset extension.");
|
||||||
|
|
||||||
// TODO - header_mmr may be out ahead via the header_head
|
// TODO - header_mmr may be out ahead via the header_head
|
||||||
// TODO - do we need to handle this via an explicit rewind on the header_mmr?
|
// TODO - do we need to handle this via an explicit rewind on the header_mmr?
|
||||||
|
@ -436,10 +436,7 @@ where
|
||||||
|
|
||||||
match res {
|
match res {
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
debug!(
|
debug!("Error returned, discarding txhashset extension: {}", e);
|
||||||
LOGGER,
|
|
||||||
"Error returned, discarding txhashset extension: {}", e
|
|
||||||
);
|
|
||||||
trees.header_pmmr_h.backend.discard();
|
trees.header_pmmr_h.backend.discard();
|
||||||
trees.output_pmmr_h.backend.discard();
|
trees.output_pmmr_h.backend.discard();
|
||||||
trees.rproof_pmmr_h.backend.discard();
|
trees.rproof_pmmr_h.backend.discard();
|
||||||
|
@ -448,13 +445,13 @@ where
|
||||||
}
|
}
|
||||||
Ok(r) => {
|
Ok(r) => {
|
||||||
if rollback {
|
if rollback {
|
||||||
trace!(LOGGER, "Rollbacking txhashset extension. sizes {:?}", sizes);
|
trace!("Rollbacking txhashset extension. sizes {:?}", sizes);
|
||||||
 	trees.header_pmmr_h.backend.discard();
 	trees.output_pmmr_h.backend.discard();
 	trees.rproof_pmmr_h.backend.discard();
 	trees.kernel_pmmr_h.backend.discard();
 } else {
-	trace!(LOGGER, "Committing txhashset extension. sizes {:?}", sizes);
+	trace!("Committing txhashset extension. sizes {:?}", sizes);
 	child_batch.commit()?;
 	trees.header_pmmr_h.backend.sync()?;
 	trees.output_pmmr_h.backend.sync()?;

@@ -466,7 +463,7 @@ where
 	trees.kernel_pmmr_h.last_pos = sizes.3;
 }
-trace!(LOGGER, "TxHashSet extension done.");
+trace!("TxHashSet extension done.");
 Ok(r)

@@ -497,7 +494,7 @@ where
 // index saving can be undone
 let child_batch = batch.child()?;
 {
-	trace!(LOGGER, "Starting new txhashset sync_head extension.");
+	trace!("Starting new txhashset sync_head extension.");
 	let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
 	let mut extension = HeaderExtension::new(pmmr, &child_batch, header);

@@ -510,31 +507,23 @@ where
 match res {
 	Err(e) => {
 		debug!(
-			LOGGER,
-			"Error returned, discarding txhashset sync_head extension: {}", e
+			"Error returned, discarding txhashset sync_head extension: {}",
+			e
 		);
 		trees.sync_pmmr_h.backend.discard();
 		Err(e)
 	}
 	Ok(r) => {
 		if rollback {
-			trace!(
-				LOGGER,
-				"Rollbacking txhashset sync_head extension. size {:?}",
-				size
-			);
+			trace!("Rollbacking txhashset sync_head extension. size {:?}", size);
 			trees.sync_pmmr_h.backend.discard();
 		} else {
-			trace!(
-				LOGGER,
-				"Committing txhashset sync_head extension. size {:?}",
-				size
-			);
+			trace!("Committing txhashset sync_head extension. size {:?}", size);
 			child_batch.commit()?;
 			trees.sync_pmmr_h.backend.sync()?;
 			trees.sync_pmmr_h.last_pos = size;
 		}
-		trace!(LOGGER, "TxHashSet sync_head extension done.");
+		trace!("TxHashSet sync_head extension done.");
 		Ok(r)
 	}
 }

@@ -564,7 +553,7 @@ where
 // index saving can be undone
 let child_batch = batch.child()?;
 {
-	trace!(LOGGER, "Starting new txhashset header extension.");
+	trace!("Starting new txhashset header extension.");
 	let pmmr = DBPMMR::at(
 		&mut trees.header_pmmr_h.backend,
 		trees.header_pmmr_h.last_pos,

@@ -579,31 +568,23 @@ where
 match res {
 	Err(e) => {
 		debug!(
-			LOGGER,
-			"Error returned, discarding txhashset header extension: {}", e
+			"Error returned, discarding txhashset header extension: {}",
+			e
 		);
 		trees.header_pmmr_h.backend.discard();
 		Err(e)
 	}
 	Ok(r) => {
 		if rollback {
-			trace!(
-				LOGGER,
-				"Rollbacking txhashset header extension. size {:?}",
-				size
-			);
+			trace!("Rollbacking txhashset header extension. size {:?}", size);
 			trees.header_pmmr_h.backend.discard();
 		} else {
-			trace!(
-				LOGGER,
-				"Committing txhashset header extension. size {:?}",
-				size
-			);
+			trace!("Committing txhashset header extension. size {:?}", size);
 			child_batch.commit()?;
 			trees.header_pmmr_h.backend.sync()?;
 			trees.header_pmmr_h.last_pos = size;
 		}
-		trace!(LOGGER, "TxHashSet header extension done.");
+		trace!("TxHashSet header extension done.");
 		Ok(r)
 	}
 }

@@ -643,9 +624,7 @@ impl<'a> HeaderExtension<'a> {
 /// This may be either the header MMR or the sync MMR depending on the
 /// extension.
 pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
-	self.pmmr
-		.push(header.clone())
-		.map_err(&ErrorKind::TxHashSetErr)?;
+	self.pmmr.push(&header).map_err(&ErrorKind::TxHashSetErr)?;
 	self.header = header.clone();
 	Ok(())
 }

@@ -654,7 +633,6 @@ impl<'a> HeaderExtension<'a> {
 /// Note the close relationship between header height and insertion index.
 pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
 	debug!(
-		LOGGER,
 		"Rewind header extension to {} at {}",
 		header.hash(),
 		header.height

@@ -675,7 +653,7 @@ impl<'a> HeaderExtension<'a> {
 /// Used when rebuilding the header MMR by reapplying all headers
 /// including the genesis block header.
 pub fn truncate(&mut self) -> Result<(), Error> {
-	debug!(LOGGER, "Truncating header extension.");
+	debug!("Truncating header extension.");
 	self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?;
 	Ok(())
 }

@@ -689,7 +667,6 @@ impl<'a> HeaderExtension<'a> {
 /// Requires *all* header hashes to be iterated over in ascending order.
 pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> {
 	debug!(
-		LOGGER,
 		"About to rebuild header extension from {:?} to {:?}.",
 		genesis.hash(),
 		head.last_block_h,

@@ -712,7 +689,6 @@ impl<'a> HeaderExtension<'a> {
 if header_hashes.len() > 0 {
 	debug!(
-		LOGGER,
 		"Re-applying {} headers to extension, from {:?} to {:?}.",
 		header_hashes.len(),
 		header_hashes.first().unwrap(),

@@ -983,7 +959,7 @@ impl<'a> Extension<'a> {
 fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
 	self.header_pmmr
-		.push(header.clone())
+		.push(&header)
 		.map_err(&ErrorKind::TxHashSetErr)?;
 	Ok(())
 }

@@ -995,10 +971,7 @@ impl<'a> Extension<'a> {
 /// We need the hash of each sibling pos from the pos up to the peak
 /// including the sibling leaf node which may have been removed.
 pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
-	debug!(
-		LOGGER,
-		"txhashset: merkle_proof: output: {:?}", output.commit,
-	);
+	debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
 	// then calculate the Merkle Proof based on the known pos
 	let pos = self.batch.get_output_pos(&output.commit)?;
 	let merkle_proof = self

@@ -1027,12 +1000,7 @@ impl<'a> Extension<'a> {
 /// Rewinds the MMRs to the provided block, rewinding to the last output pos
 /// and last kernel pos of that block.
 pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
-	debug!(
-		LOGGER,
-		"Rewind to header {} at {}",
-		header.hash(),
-		header.height,
-	);
+	debug!("Rewind to header {} at {}", header.hash(), header.height,);

 	// We need to build bitmaps of added and removed output positions
 	// so we can correctly rewind all operations applied to the output MMR

@@ -1067,11 +1035,8 @@ impl<'a> Extension<'a> {
 rewind_rm_pos: &Bitmap,
 ) -> Result<(), Error> {
 	debug!(
-		LOGGER,
 		"txhashset: rewind_to_pos: header {}, output {}, kernel {}",
-		header_pos,
-		output_pos,
-		kernel_pos,
+		header_pos, output_pos, kernel_pos,
 	);

 	self.header_pmmr

@@ -1191,7 +1156,6 @@ impl<'a> Extension<'a> {
 debug!(
-	LOGGER,
 	"txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s",
 	self.header_pmmr.unpruned_size(),
 	self.output_pmmr.unpruned_size(),

@@ -1270,22 +1234,22 @@ impl<'a> Extension<'a> {
 /// Dumps the output MMR.
 /// We use this after compacting for visual confirmation that it worked.
 pub fn dump_output_pmmr(&self) {
-	debug!(LOGGER, "-- outputs --");
+	debug!("-- outputs --");
 	self.output_pmmr.dump_from_file(false);
-	debug!(LOGGER, "--");
+	debug!("--");
 	self.output_pmmr.dump_stats();
-	debug!(LOGGER, "-- end of outputs --");
+	debug!("-- end of outputs --");
 }

 /// Dumps the state of the 3 sum trees to stdout for debugging. Short
 /// version only prints the Output tree.
 pub fn dump(&self, short: bool) {
-	debug!(LOGGER, "-- outputs --");
+	debug!("-- outputs --");
 	self.output_pmmr.dump(short);
 	if !short {
-		debug!(LOGGER, "-- range proofs --");
+		debug!("-- range proofs --");
 		self.rproof_pmmr.dump(short);
-		debug!(LOGGER, "-- kernels --");
+		debug!("-- kernels --");
 		self.kernel_pmmr.dump(short);
 	}
 }

@@ -1318,7 +1282,6 @@ impl<'a> Extension<'a> {
 debug!(
-	LOGGER,
 	"txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
 	kern_count,
 	self.kernel_pmmr.unpruned_size(),

@@ -1353,8 +1316,8 @@ impl<'a> Extension<'a> {
 commits.clear();
 proofs.clear();
 debug!(
-	LOGGER,
-	"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count,
+	"txhashset: verify_rangeproofs: verified {} rangeproofs",
+	proof_count,
 );

@@ -1370,13 +1333,12 @@ impl<'a> Extension<'a> {
 commits.clear();
 proofs.clear();
 debug!(
-	LOGGER,
-	"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count,
+	"txhashset: verify_rangeproofs: verified {} rangeproofs",
+	proof_count,
 );
 }

 debug!(
-	LOGGER,
 	"txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
 	proof_count,
 	self.rproof_pmmr.unpruned_size(),

@@ -1452,10 +1414,7 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 // Removing unexpected directories if needed
 if !dir_difference.is_empty() {
-	debug!(
-		LOGGER,
-		"Unexpected folder(s) found in txhashset folder, removing."
-	);
+	debug!("Unexpected folder(s) found in txhashset folder, removing.");
 	for diff in dir_difference {
 		let diff_path = txhashset_path.join(diff);
 		file::delete(diff_path)?;

@@ -1492,7 +1451,6 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 if !difference.is_empty() {
 	debug!(
-		LOGGER,
 		"Unexpected file(s) found in txhashset subfolder {:?}, removing.",
 		&subdirectory_path
 	);

@@ -1520,10 +1478,8 @@ pub fn input_pos_to_rewind(
 if head_header.height < block_header.height {
 	debug!(
-		LOGGER,
 		"input_pos_to_rewind: {} < {}, nothing to rewind",
-		head_header.height,
-		block_header.height
+		head_header.height, block_header.height
 	);
 	return Ok(Bitmap::create());
 }
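The hunks above all follow the same pattern: the explicit LOGGER argument required by the old slog macros is dropped, and the message plus arguments collapse onto the log crate's macro form. A minimal sketch of the new call shape; the function name and the tuple type are illustrative only, not taken from the diff:

    #[macro_use]
    extern crate log;

    fn log_extension_commit(sizes: (u64, u64, u64, u64)) {
        // With the `log` facade the macro takes only the format string and args;
        // the old slog-style call needed the global LOGGER as its first argument.
        trace!("Committing txhashset extension. sizes {:?}", sizes);
    }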
@@ -24,7 +24,8 @@ extern crate rand;
 use chrono::Duration;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain::types::NoopAdapter;
 use chain::Chain;

@@ -23,7 +23,8 @@ extern crate rand;
 use chrono::Duration;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain::types::NoopAdapter;
 use chain::Chain;

@@ -18,12 +18,14 @@ extern crate grin_chain as chain;
 extern crate grin_core as core;
 extern crate grin_keychain as keychain;
 extern crate grin_store as store;
+extern crate grin_util as util;
 extern crate grin_wallet as wallet;
 extern crate rand;

 use chrono::Duration;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain::types::NoopAdapter;
 use chain::ErrorKind;
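The test crates switch from std::sync::RwLock to the re-exported util::RwLock. Assuming the re-export is a parking_lot-style lock, which the rest of the diff suggests since .unwrap() disappears from every lock call, the calling convention changes roughly as below; the counter example is made up for illustration:

    use std::sync::Arc;
    use util::RwLock;

    fn bump(counter: &Arc<RwLock<u64>>) {
        // The guard is returned directly, so there is no Result to unwrap
        // and no lock poisoning to handle.
        let mut n = counter.write();
        *n += 1;
    }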
@@ -168,7 +168,7 @@ fn comments() -> HashMap<String, String> {
 retval.insert(
 	"host".to_string(),
 	"
 #The interface on which to listen.
 #0.0.0.0 will listen on all interfaces, allowing others to interact
 #127.0.0.1 will listen on the local machine only
 ".to_string(),

@@ -213,7 +213,6 @@ fn comments() -> HashMap<String, String> {
 #peer_min_preferred_count = 8

 # 7 = Bit flags for FULL_NODE
-# 6 = Bit flags for FAST_SYNC_NODE
 #This structure needs to be changed internally, to make it more configurable
 ".to_string(),
 );

@@ -367,7 +366,7 @@ fn comments() -> HashMap<String, String> {
 retval.insert(
 	"stdout_log_level".to_string(),
 	"
-#log level for stdout: Critical, Error, Warning, Info, Debug, Trace
+#log level for stdout: Error, Warning, Info, Debug, Trace
 ".to_string(),
 );

@@ -381,7 +380,7 @@ fn comments() -> HashMap<String, String> {
 retval.insert(
 	"file_log_level".to_string(),
 	"
-#log level for file: Critical, Error, Warning, Info, Debug, Trace
+#log level for file: Error, Warning, Info, Debug, Trace
 ".to_string(),
 );

@@ -399,6 +398,14 @@ fn comments() -> HashMap<String, String> {
 ".to_string(),
 );

+retval.insert(
+	"log_max_size".to_string(),
+	"
+#maximum log file size in bytes before performing log rotation
+#comment it to disable log rotation
+".to_string(),
+);
+
 retval
 }
@@ -107,7 +107,7 @@ fn check_api_secret(api_secret_path: &PathBuf) -> Result<(), ConfigError> {
 }

 /// Check that the api secret file exists and is valid
-pub fn check_api_secret_file() -> Result<(), ConfigError> {
+fn check_api_secret_file() -> Result<(), ConfigError> {
 	let grin_path = get_grin_path()?;
 	let mut api_secret_path = grin_path.clone();
 	api_secret_path.push(API_SECRET_FILE_NAME);

@@ -233,8 +233,7 @@ impl GlobalConfig {
 file.read_to_string(&mut contents)?;
 let decoded: Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents);
 match decoded {
-	Ok(mut gc) => {
-		gc.server.validation_check();
+	Ok(gc) => {
 		self.members = Some(gc);
 		return Ok(self);
 	}

@@ -35,5 +35,5 @@ mod comments;
 pub mod config;
 pub mod types;

-pub use config::{check_api_secret_file, initial_setup_server, initial_setup_wallet};
+pub use config::{initial_setup_server, initial_setup_wallet};
 pub use types::{ConfigError, ConfigMembers, GlobalConfig, GlobalWalletConfig};
@@ -20,7 +20,7 @@ rand = "0.5"
 serde = "1"
 serde_derive = "1"
 siphasher = "0.2"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
 chrono = "0.4.4"

 grin_keychain = { path = "../keychain" }
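With slog replaced by log = "0.4" in the crate's dependencies, the debug!/trace! macros presumably come from the log facade instead; in a pre-2018-edition crate that usually means importing them at the crate root. The exact wiring is not shown in this diff, so the line below is an assumption:

    // assumed crate-root import for the `log` macros (not shown in the diff)
    #[macro_use]
    extern crate log;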
@@ -297,7 +297,7 @@ where
 /// Factor by which the secondary proof of work difficulty will be adjusted
-pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
+pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
 	// Get the secondary count across the window, in pct (100 * 60 * 2nd_pow_fraction)
 	let snd_count = 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64;
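Taking &[HeaderInfo] instead of &Vec<HeaderInfo> lets callers pass any contiguous sequence; a Vec still works through deref coercion. A generic, self-contained illustration of the same signature change (the names here are illustrative, not from the diff):

    // A slice parameter accepts vectors, arrays and sub-slices without copying.
    fn average(values: &[u64]) -> u64 {
        if values.is_empty() {
            return 0;
        }
        values.iter().sum::<u64>() / values.len() as u64
    }

    fn main() {
        let v = vec![10u64, 20, 30];
        let a = [10u64, 20, 30];
        assert_eq!(average(&v), average(&a)); // &Vec<u64> coerces to &[u64]
        assert_eq!(average(&v[..2]), 15);     // sub-slices work too
    }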
@@ -20,7 +20,8 @@ use std::collections::HashSet;
 use std::fmt;
 use std::iter::FromIterator;
 use std::mem;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use consensus::{self, reward, REWARD};
 use core::committed::{self, Committed};

@@ -35,7 +36,7 @@ use global;
 use keychain::{self, BlindingFactor};
 use pow::{Difficulty, Proof, ProofOfWork};
 use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
-use util::{secp, static_secp_instance, LOGGER};
+use util::{secp, static_secp_instance};

 /// Errors thrown by Block validation
 #[derive(Debug, Clone, Eq, PartialEq, Fail)]

@@ -285,7 +286,7 @@ impl BlockHeader {
 /// Total difficulty accumulated by the proof of work on this header
 pub fn total_difficulty(&self) -> Difficulty {
-	self.pow.total_difficulty.clone()
+	self.pow.total_difficulty
 }

@@ -361,10 +362,7 @@ impl Readable for Block {
 body.validate_read(true)
 	.map_err(|_| ser::Error::CorruptedData)?;

-Ok(Block {
-	header: header,
-	body: body,
-})
+Ok(Block { header, body })
 }

@@ -420,16 +418,40 @@ impl Block {
 Ok(block)
 }

+/// Extract tx data from this block as a single aggregate tx.
+pub fn aggregate_transaction(
+	&self,
+	prev_kernel_offset: BlindingFactor,
+) -> Result<Option<Transaction>, Error> {
+	let inputs = self.inputs().iter().cloned().collect();
+	let outputs = self
+		.outputs()
+		.iter()
+		.filter(|x| !x.features.contains(OutputFeatures::COINBASE_OUTPUT))
+		.cloned()
+		.collect();
+	let kernels = self
+		.kernels()
+		.iter()
+		.filter(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL))
+		.cloned()
+		.collect::<Vec<_>>();
+
+	let tx = if kernels.is_empty() {
+		None
+	} else {
+		let tx = Transaction::new(inputs, outputs, kernels)
+			.with_offset(self.block_kernel_offset(prev_kernel_offset)?);
+		Some(tx)
+	};
+	Ok(tx)
+}
+
 /// Hydrate a block from a compact block.
 /// Note: caller must validate the block themselves, we do not validate it
 /// here.
 pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Result<Block, Error> {
-	trace!(
-		LOGGER,
-		"block: hydrate_from: {}, {} txs",
-		cb.hash(),
-		txs.len(),
-	);
+	trace!("block: hydrate_from: {}, {} txs", cb.hash(), txs.len(),);

 	let header = cb.header.clone();

@@ -469,7 +491,7 @@ impl Block {
 /// Build a new empty block from a specified header
 pub fn with_header(header: BlockHeader) -> Block {
 	Block {
-		header: header,
+		header,
 		..Default::default()
 	}
 }

@@ -596,6 +618,23 @@ impl Block {
 Ok(())
 }

+fn block_kernel_offset(
+	&self,
+	prev_kernel_offset: BlindingFactor,
+) -> Result<BlindingFactor, Error> {
+	let offset = if self.header.total_kernel_offset() == prev_kernel_offset {
+		// special case when the sum hasn't changed (typically an empty block),
+		// zero isn't a valid private key but it's a valid blinding factor
+		BlindingFactor::zero()
+	} else {
+		committed::sum_kernel_offsets(
+			vec![self.header.total_kernel_offset()],
+			vec![prev_kernel_offset],
+		)?
+	};
+	Ok(offset)
+}
+
 /// Validates all the elements in a block that can be checked without
 /// additional data. Includes commitment sums and kernels, Merkle
 /// trees, reward, etc.

@@ -603,7 +642,7 @@ impl Block {
 &self,
 prev_kernel_offset: &BlindingFactor,
 verifier: Arc<RwLock<VerifierCache>>,
-) -> Result<(Commitment), Error> {
+) -> Result<Commitment, Error> {
 self.body.validate(true, verifier)?;

 self.verify_kernel_lock_heights()?;

@@ -611,19 +650,10 @@ impl Block {
 // take the kernel offset for this block (block offset minus previous) and
 // verify.body.outputs and kernel sums
-let block_kernel_offset = if self.header.total_kernel_offset() == prev_kernel_offset.clone()
-{
-	// special case when the sum hasn't changed (typically an empty block),
-	// zero isn't a valid private key but it's a valid blinding factor
-	BlindingFactor::zero()
-} else {
-	committed::sum_kernel_offsets(
-		vec![self.header.total_kernel_offset()],
-		vec![prev_kernel_offset.clone()],
-	)?
-};
-let (_utxo_sum, kernel_sum) =
-	self.verify_kernel_sums(self.header.overage(), block_kernel_offset)?;
+let (_utxo_sum, kernel_sum) = self.verify_kernel_sums(
+	self.header.overage(),
+	self.block_kernel_offset(*prev_kernel_offset)?,
+)?;

 Ok(kernel_sum)
 }

@@ -648,7 +678,7 @@ impl Block {
 {
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	let over_commit = secp.commit_value(reward(self.total_fees()))?;

 	let out_adjust_sum = secp.commit_sum(
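The new aggregate_transaction helper returns Ok(None) when the block carries nothing beyond its coinbase, and otherwise folds the non-coinbase inputs, outputs and kernels into one transaction whose offset comes from the new block_kernel_offset helper. A hedged usage sketch; block, prev_offset and process_tx are assumed names, not part of the diff:

    // `block` is a Block, `prev_offset` the previous block's total kernel offset.
    match block.aggregate_transaction(prev_offset)? {
        Some(tx) => process_tx(tx)?, // hypothetical downstream handler
        None => {}                   // empty block: only the coinbase kernel
    }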
@@ -53,8 +53,8 @@ impl Default for BlockSums {
 fn default() -> BlockSums {
 	let zero_commit = secp_static::commit_to_zero_value();
 	BlockSums {
-		utxo_sum: zero_commit.clone(),
-		kernel_sum: zero_commit.clone(),
+		utxo_sum: zero_commit,
+		kernel_sum: zero_commit,
 	}
 }
 }
@@ -66,7 +66,7 @@ pub trait Committed {
 // commit to zero built from the offset
 let kernel_sum_plus_offset = {
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	let mut commits = vec![kernel_sum];
 	if *offset != BlindingFactor::zero() {
 		let key = offset.secret_key(&secp)?;

@@ -90,7 +90,7 @@ pub trait Committed {
 if overage != 0 {
 	let over_commit = {
 		let secp = static_secp_instance();
-		let secp = secp.lock().unwrap();
+		let secp = secp.lock();
 		let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
 		secp.commit_value(overage_abs).unwrap()
 	};

@@ -144,7 +144,7 @@ pub fn sum_commits(
 positive.retain(|x| *x != zero_commit);
 negative.retain(|x| *x != zero_commit);
 let secp = static_secp_instance();
-let secp = secp.lock().unwrap();
+let secp = secp.lock();
 Ok(secp.commit_sum(positive, negative)?)
 }

@@ -156,7 +156,7 @@ pub fn sum_kernel_offsets(
 negative: Vec<BlindingFactor>,
 ) -> Result<BlindingFactor, Error> {
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	let positive = to_secrets(positive, &secp);
 	let negative = to_secrets(negative, &secp);
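The same lock-API change shows up on the shared secp context: static_secp_instance() now appears to hand back a parking_lot-style mutex, so the guard is taken without unwrap(). A sketch of the new shape, assuming the re-exported lock type the diff implies; overage_abs stands in for whatever value the surrounding code commits to:

    let secp = static_secp_instance();
    let secp = secp.lock();                        // guard returned directly, no Result
    let commit = secp.commit_value(overage_abs)?;  // as in the hunk above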
@@ -86,7 +86,7 @@ impl MerkleProof {
 pub fn from_hex(hex: &str) -> Result<MerkleProof, String> {
 	let bytes = util::from_hex(hex.to_string()).unwrap();
 	let res = ser::deserialize(&mut &bytes[..])
-		.map_err(|_| format!("failed to deserialize a Merkle Proof"))?;
+		.map_err(|_| "failed to deserialize a Merkle Proof".to_string())?;
 	Ok(res)
 }

@@ -102,7 +102,7 @@ impl MerkleProof {
 // calculate the peaks once as these are based on overall MMR size
 // (and will not change)
 let peaks_pos = pmmr::peaks(self.mmr_size);
-proof.verify_consume(root, element, node_pos, peaks_pos)
+proof.verify_consume(root, element, node_pos, &peaks_pos)
 }

 /// Consumes the Merkle proof while verifying it.

@@ -113,7 +113,7 @@ impl MerkleProof {
 root: Hash,
 element: &PMMRIndexHashable,
 node_pos: u64,
-peaks_pos: Vec<u64>,
+peaks_pos: &[u64],
 ) -> Result<(), MerkleProofError> {
 	let node_hash = if node_pos > self.mmr_size {
 		element.hash_with_index(self.mmr_size)

@@ -123,7 +123,7 @@ impl MerkleProof {
 // handle special case of only a single entry in the MMR
 // (no siblings to hash together)
-if self.path.len() == 0 {
+if self.path.is_empty() {
 	if root == node_hash {
 		return Ok(());
 	} else {
@@ -42,8 +42,8 @@ where
 /// Build a new db backed MMR.
 pub fn new(backend: &'a mut B) -> DBPMMR<T, B> {
 	DBPMMR {
+		backend,
 		last_pos: 0,
-		backend: backend,
 		_marker: marker::PhantomData,
 	}
 }

@@ -52,8 +52,8 @@ where
 /// last_pos with the provided db backend.
 pub fn at(backend: &'a mut B, last_pos: u64) -> DBPMMR<T, B> {
 	DBPMMR {
-		last_pos: last_pos,
-		backend: backend,
+		backend,
+		last_pos,
 		_marker: marker::PhantomData,
 	}
 }

@@ -98,7 +98,7 @@ where
 /// Push a new element into the MMR. Computes new related peaks at
 /// the same time if applicable.
-pub fn push(&mut self, elmt: T) -> Result<u64, String> {
+pub fn push(&mut self, elmt: &T) -> Result<u64, String> {
 	let elmt_pos = self.last_pos + 1;
 	let mut current_hash = elmt.hash_with_index(elmt_pos - 1);
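DBPMMR::push now borrows the element instead of taking it by value, which is what allows apply_header earlier in the diff to drop its clone. Roughly, as a before/after sketch drawn from those hunks:

    // before: push consumed its argument, so the caller had to clone
    // self.pmmr.push(header.clone()).map_err(&ErrorKind::TxHashSetErr)?;

    // after: push takes &T, so the caller keeps ownership of the header
    self.pmmr.push(&header).map_err(&ErrorKind::TxHashSetErr)?;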
@@ -22,7 +22,6 @@ use core::merkle_proof::MerkleProof;
 use core::pmmr::{Backend, ReadonlyPMMR};
 use core::BlockHeader;
 use ser::{PMMRIndexHashable, PMMRable};
-use util::LOGGER;

 /// 64 bits all ones: 0b11111111...1
 const ALL_ONES: u64 = u64::MAX;

@@ -54,8 +53,8 @@ where
 /// Build a new prunable Merkle Mountain Range using the provided backend.
 pub fn new(backend: &'a mut B) -> PMMR<T, B> {
 	PMMR {
+		backend,
 		last_pos: 0,
-		backend: backend,
 		_marker: marker::PhantomData,
 	}
 }

@@ -64,8 +63,8 @@ where
 /// last_pos with the provided backend.
 pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> {
 	PMMR {
-		last_pos: last_pos,
-		backend: backend,
+		backend,
+		last_pos,
 		_marker: marker::PhantomData,
 	}
 }

@@ -91,7 +90,7 @@ where
 let rhs = self.bag_the_rhs(peak_pos);
 let mut res = peaks(self.last_pos)
 	.into_iter()
-	.filter(|x| x < &peak_pos)
+	.filter(|x| *x < peak_pos)
 	.filter_map(|x| self.backend.get_from_file(x))
 	.collect::<Vec<_>>();
 res.reverse();

@@ -108,7 +107,7 @@ where
 pub fn bag_the_rhs(&self, peak_pos: u64) -> Option<Hash> {
 	let rhs = peaks(self.last_pos)
 		.into_iter()
-		.filter(|x| x > &peak_pos)
+		.filter(|x| *x > peak_pos)
 		.filter_map(|x| self.backend.get_from_file(x))
 		.collect::<Vec<_>>();

@@ -137,7 +136,7 @@ where
 /// Build a Merkle proof for the element at the given position.
 pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> {
-	debug!(LOGGER, "merkle_proof {}, last_pos {}", pos, self.last_pos);
+	debug!("merkle_proof {}, last_pos {}", pos, self.last_pos);

 	// check this pos is actually a leaf in the MMR
 	if !is_leaf(pos) {

@@ -146,7 +145,7 @@ where
 // check we actually have a hash in the MMR at this pos
 self.get_hash(pos)
-	.ok_or(format!("no element at pos {}", pos))?;
+	.ok_or_else(|| format!("no element at pos {}", pos))?;

 let mmr_size = self.unpruned_size();

@@ -384,14 +383,14 @@ where
 None => hashes.push_str(&format!("{:>8} ", "??")),
 }
 }
-trace!(LOGGER, "{}", idx);
-trace!(LOGGER, "{}", hashes);
+trace!("{}", idx);
+trace!("{}", hashes);
 }
 }

 /// Prints PMMR statistics to the logs, used for debugging.
 pub fn dump_stats(&self) {
-	debug!(LOGGER, "pmmr: unpruned - {}", self.unpruned_size());
+	debug!("pmmr: unpruned - {}", self.unpruned_size());
 	self.backend.dump_stats();
 }

@@ -418,8 +417,8 @@ where
 None => hashes.push_str(&format!("{:>8} ", " .")),
 }
 }
-debug!(LOGGER, "{}", idx);
-debug!(LOGGER, "{}", hashes);
+debug!("{}", idx);
+debug!("{}", hashes);
 }
 }
 }

@@ -511,7 +510,7 @@ pub fn peak_map_height(mut pos: u64) -> (u64, u64) {
 let mut peak_size = ALL_ONES >> pos.leading_zeros();
 let mut bitmap = 0;
 while peak_size != 0 {
-	bitmap = bitmap << 1;
+	bitmap <<= 1;
 	if pos >= peak_size {
 		pos -= peak_size;
 		bitmap |= 1;
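The constructor hunks in the PMMR types are all Rust's struct field init shorthand: when a local variable has the same name as the field, `backend: backend` can be written as just `backend`, with no change in behaviour. A self-contained illustration:

    struct Handle {
        last_pos: u64,
    }

    fn at(last_pos: u64) -> Handle {
        // equivalent to Handle { last_pos: last_pos }; the shorthand is idiomatic
        Handle { last_pos }
    }

    fn main() {
        assert_eq!(at(42).last_pos, 42);
    }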
@@ -41,8 +41,8 @@ where
 /// Build a new readonly PMMR.
 pub fn new(backend: &'a B) -> ReadonlyPMMR<T, B> {
 	ReadonlyPMMR {
+		backend,
 		last_pos: 0,
-		backend: backend,
 		_marker: marker::PhantomData,
 	}
 }

@@ -51,8 +51,8 @@ where
 /// last_pos with the provided backend.
 pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR<T, B> {
 	ReadonlyPMMR {
-		last_pos: last_pos,
-		backend: backend,
+		backend,
+		last_pos,
 		_marker: marker::PhantomData,
 	}
 }

@@ -43,8 +43,8 @@ where
 /// Build a new readonly PMMR.
 pub fn new(backend: &'a B) -> RewindablePMMR<T, B> {
 	RewindablePMMR {
+		backend,
 		last_pos: 0,
-		backend: backend,
 		_marker: marker::PhantomData,
 	}
 }

@@ -53,8 +53,8 @@ where
 /// last_pos with the provided backend.
 pub fn at(backend: &'a B, last_pos: u64) -> RewindablePMMR<T, B> {
 	RewindablePMMR {
-		last_pos: last_pos,
-		backend: backend,
+		backend,
+		last_pos,
 		_marker: marker::PhantomData,
 	}
 }
@@ -17,8 +17,9 @@
 use std::cmp::max;
 use std::cmp::Ordering;
 use std::collections::HashSet;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::{error, fmt};
+use util::RwLock;

 use consensus::{self, VerifySortOrder};
 use core::hash::Hashed;

@@ -176,7 +177,7 @@ impl Readable for TxKernel {
 let features =
 	KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
 Ok(TxKernel {
-	features: features,
+	features,
 	fee: reader.read_u64()?,
 	lock_height: reader.read_u64()?,
 	excess: Commitment::read(reader)?,

@@ -197,7 +198,7 @@ impl TxKernel {
 pub fn verify(&self) -> Result<(), secp::Error> {
 	let msg = Message::from_slice(&kernel_sig_msg(self.fee, self.lock_height))?;
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	let sig = &self.excess_sig;
 	// Verify aggsig directly in libsecp
 	let pubkey = &self.excess.to_pubkey(&secp)?;

@@ -229,13 +230,13 @@ impl TxKernel {
 /// Builds a new tx kernel with the provided fee.
 pub fn with_fee(self, fee: u64) -> TxKernel {
-	TxKernel { fee: fee, ..self }
+	TxKernel { fee, ..self }
 }

 /// Builds a new tx kernel with the provided lock_height.
 pub fn with_lock_height(self, lock_height: u64) -> TxKernel {
 	TxKernel {
-		lock_height: lock_height,
+		lock_height,
 		..self
 	}
 }

@@ -355,9 +356,9 @@ impl TransactionBody {
 verify_sorted: bool,
 ) -> Result<TransactionBody, Error> {
 	let body = TransactionBody {
-		inputs: inputs,
-		outputs: outputs,
-		kernels: kernels,
+		inputs,
+		outputs,
+		kernels,
 	};

 	if verify_sorted {

@@ -435,7 +436,7 @@ impl TransactionBody {
 /// Calculate transaction weight from transaction details
 pub fn weight(input_len: usize, output_len: usize, kernel_len: usize) -> u32 {
-	let mut body_weight = -1 * (input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
+	let mut body_weight = -(input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
 	if body_weight < 1 {
 		body_weight = 1;
 	}

@@ -553,12 +554,12 @@ impl TransactionBody {
 // Find all the outputs that have not had their rangeproofs verified.
 let outputs = {
-	let mut verifier = verifier.write().unwrap();
+	let mut verifier = verifier.write();
 	verifier.filter_rangeproof_unverified(&self.outputs)
 };

 // Now batch verify all those unverified rangeproofs
-if outputs.len() > 0 {
+if !outputs.is_empty() {
 	let mut commits = vec![];
 	let mut proofs = vec![];
 	for x in &outputs {

@@ -570,7 +571,7 @@ impl TransactionBody {
 // Find all the kernels that have not yet been verified.
 let kernels = {
-	let mut verifier = verifier.write().unwrap();
+	let mut verifier = verifier.write();
 	verifier.filter_kernel_sig_unverified(&self.kernels)
 };

@@ -583,7 +584,7 @@ impl TransactionBody {
 // Cache the successful verification results for the new outputs and kernels.
 {
-	let mut verifier = verifier.write().unwrap();
+	let mut verifier = verifier.write();
 	verifier.add_rangeproof_verified(outputs);
 	verifier.add_kernel_sig_verified(kernels);
 }

@@ -686,10 +687,7 @@ impl Transaction {
 /// Creates a new transaction using this transaction as a template
 /// and with the specified offset.
 pub fn with_offset(self, offset: BlindingFactor) -> Transaction {
-	Transaction {
-		offset: offset,
-		..self
-	}
+	Transaction { offset, ..self }
 }

 /// Builds a new transaction with the provided inputs added. Existing

@@ -911,7 +909,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
 // now compute the total kernel offset
 let total_kernel_offset = {
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	let mut positive_key = vec![mk_tx.offset]
 		.into_iter()
 		.filter(|x| *x != BlindingFactor::zero())

@@ -1071,7 +1069,7 @@ impl Readable for Output {
 OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

 Ok(Output {
-	features: features,
+	features,
 	commit: Commitment::read(reader)?,
 	proof: RangeProof::read(reader)?,
 })

@@ -1092,7 +1090,7 @@ impl Output {
 /// Validates the range proof using the commitment
 pub fn verify_proof(&self) -> Result<(), secp::Error> {
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	match secp.verify_bullet_proof(self.commit, self.proof, None) {
 		Ok(_) => Ok(()),
 		Err(e) => Err(e),

@@ -1105,7 +1103,7 @@ impl Output {
 proofs: &Vec<RangeProof>,
 ) -> Result<(), secp::Error> {
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	match secp.verify_bullet_proof_multi(commits.clone(), proofs.clone(), None) {
 		Ok(_) => Ok(()),
 		Err(e) => Err(e),

@@ -1130,8 +1128,8 @@ impl OutputIdentifier {
 /// Build a new output_identifier.
 pub fn new(features: OutputFeatures, commit: &Commitment) -> OutputIdentifier {
 	OutputIdentifier {
-		features: features,
-		commit: commit.clone(),
+		features,
+		commit: *commit,
 	}
 }

@@ -1151,9 +1149,9 @@ impl OutputIdentifier {
 /// Converts this identifier to a full output, provided a RangeProof
 pub fn into_output(self, proof: RangeProof) -> Output {
 	Output {
+		proof,
 		features: self.features,
 		commit: self.commit,
-		proof: proof,
 	}
 }

@@ -1195,8 +1193,8 @@ impl Readable for OutputIdentifier {
 let features =
 	OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
 Ok(OutputIdentifier {
+	features,
 	commit: Commitment::read(reader)?,
-	features: features,
 })
 }
 }
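Several of the hunks above are the same field-init shorthand applied to the builder-style constructors; their behaviour is unchanged, each still consumes self and returns an updated copy. A hedged usage sketch of the builders touched here; inputs, outputs, kernels, block_offset and kernel are assumed locals, not taken from the diff:

    let tx = Transaction::new(inputs, outputs, kernels).with_offset(block_offset);
    let kernel = kernel.with_fee(2).with_lock_height(1_000);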
@@ -19,7 +19,6 @@ use lru_cache::LruCache;
 use core::hash::{Hash, Hashed};
 use core::{Output, TxKernel};
-use util::LOGGER;

 /// Verifier cache for caching expensive verification results.
 /// Specifically the following -

@@ -28,10 +27,10 @@ use util::LOGGER;
 pub trait VerifierCache: Sync + Send {
 	/// Takes a vec of tx kernels and returns those kernels
 	/// that have not yet been verified.
-	fn filter_kernel_sig_unverified(&mut self, kernels: &Vec<TxKernel>) -> Vec<TxKernel>;
+	fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel>;
 	/// Takes a vec of tx outputs and returns those outputs
 	/// that have not yet had their rangeproofs verified.
-	fn filter_rangeproof_unverified(&mut self, outputs: &Vec<Output>) -> Vec<Output>;
+	fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output>;
 	/// Adds a vec of tx kernels to the cache (used in conjunction with the the filter above).
 	fn add_kernel_sig_verified(&mut self, kernels: Vec<TxKernel>);
 	/// Adds a vec of outputs to the cache (used in conjunction with the the filter above).

@@ -46,9 +45,6 @@ pub struct LruVerifierCache {
 rangeproof_verification_cache: LruCache<Hash, bool>,
 }

-unsafe impl Sync for LruVerifierCache {}
-unsafe impl Send for LruVerifierCache {}
-
 impl LruVerifierCache {
 /// TODO how big should these caches be?
 /// They need to be *at least* large enough to cover a maxed out block.

@@ -61,7 +57,7 @@ impl LruVerifierCache {
 impl VerifierCache for LruVerifierCache {
-	fn filter_kernel_sig_unverified(&mut self, kernels: &Vec<TxKernel>) -> Vec<TxKernel> {
+	fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel> {
 		let res = kernels
 			.into_iter()
 			.filter(|x| {

@@ -71,8 +67,7 @@ impl VerifierCache for LruVerifierCache {
 .unwrap_or(&mut false)
 }).cloned()
 .collect::<Vec<_>>();
-debug!(
-	LOGGER,
+trace!(
 	"lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}",
 	kernels.len(),
 	res.len()

@@ -80,7 +75,7 @@ impl VerifierCache for LruVerifierCache {
 res
 }

-fn filter_rangeproof_unverified(&mut self, outputs: &Vec<Output>) -> Vec<Output> {
+fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output> {
 	let res = outputs
 		.into_iter()
 		.filter(|x| {

@@ -90,8 +85,7 @@ impl VerifierCache for LruVerifierCache {
 .unwrap_or(&mut false)
 }).cloned()
 .collect::<Vec<_>>();
-debug!(
-	LOGGER,
+trace!(
 	"lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}",
 	outputs.len(),
 	res.len()
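Dropping the unsafe impl Send/Sync appears safe because the cache is only ever shared behind Arc<RwLock<_>> (see the verifier: Arc<RwLock<VerifierCache>> parameter earlier in this diff), so the lock supplies the synchronization and no hand-written promise is needed. A hedged construction sketch; the LruVerifierCache::new() constructor name is assumed here, it is not shown in the diff:

    use std::sync::Arc;
    use util::RwLock;

    fn build_cache() -> Arc<RwLock<LruVerifierCache>> {
        // Shared, thread-safe handle to the cache; the lock provides Send/Sync.
        Arc::new(RwLock::new(LruVerifierCache::new()))
    }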
@@ -27,7 +27,7 @@ use pow::{self, CuckatooContext, EdgeType, PoWContext};
 /// code wherever mining is needed. This should allow for
 /// different sets of parameters for different purposes,
 /// e.g. CI, User testing, production values
-use std::sync::RwLock;
+use util::RwLock;

 /// Define these here, as they should be developer-set, not really tweakable
 /// by users

@@ -70,13 +70,21 @@ pub const TESTNET3_INITIAL_DIFFICULTY: u64 = 30000;
 /// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
 pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;

+/// If a peer's last seen time is 2 weeks ago we will forget such kind of defunct peers.
+const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
+
+/// Constant that expresses defunct peer timeout in seconds to be used in checks.
+pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
+
 /// Testnet 4 initial block difficulty
 /// 1_000 times natural scale factor for cuckatoo29
 pub const TESTNET4_INITIAL_DIFFICULTY: u64 = 1_000 * UNIT_DIFFICULTY;

-/// Trigger compaction check on average every day for FAST_SYNC_NODE,
-/// roll the dice on every block to decide,
-/// all blocks lower than (BodyHead.height - CUT_THROUGH_HORIZON) will be removed.
+/// Trigger compaction check on average every day for all nodes.
+/// Randomized per node - roll the dice on every block to decide.
+/// Will compact the txhashset to remove pruned data.
+/// Will also remove old blocks and associated data from the database.
+/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
 pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;

 /// Types of chain a server can run with, dictates the genesis block and
|
@ -126,7 +134,7 @@ lazy_static!{
|
||||||
|
|
||||||
/// Set the mining mode
|
/// Set the mining mode
|
||||||
pub fn set_mining_mode(mode: ChainTypes) {
|
pub fn set_mining_mode(mode: ChainTypes) {
|
||||||
let mut param_ref = CHAIN_TYPE.write().unwrap();
|
let mut param_ref = CHAIN_TYPE.write();
|
||||||
*param_ref = mode;
|
*param_ref = mode;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
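The recurring `.read().unwrap()` / `.write().unwrap()` → `.read()` / `.write()` change throughout this file follows from swapping `std::sync::RwLock` for the re-exported `util::RwLock`, whose guards are not wrapped in a poisoning `Result`. A small sketch of the difference, assuming (as this diff suggests but does not show) that `util::RwLock` is a parking_lot-style lock:

```rust
// Sketch only: assumes util::RwLock behaves like parking_lot::RwLock,
// i.e. read()/write() return guards directly with no poisoning Result.
extern crate parking_lot;

use parking_lot::RwLock; // std::sync::RwLock would require .unwrap() below
use std::sync::Arc;

#[derive(Clone, Copy, Debug, PartialEq)]
enum ChainTypes {
    AutomatedTesting,
    UserTesting,
}

fn main() {
    let chain_type = Arc::new(RwLock::new(ChainTypes::AutomatedTesting));

    {
        // The guard is returned directly; the extra block releases the
        // write lock as soon as the value has been updated.
        let mut param_ref = chain_type.write();
        *param_ref = ChainTypes::UserTesting;
    }

    let param_ref = chain_type.read();
    assert_eq!(*param_ref, ChainTypes::UserTesting);
}
```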
@ -150,7 +158,7 @@ pub fn pow_type() -> PoWContextTypes {
|
||||||
|
|
||||||
/// The minimum acceptable edge_bits
|
/// The minimum acceptable edge_bits
|
||||||
pub fn min_edge_bits() -> u8 {
|
pub fn min_edge_bits() -> u8 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
|
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
|
||||||
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
|
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
|
||||||
|
@ -163,7 +171,7 @@ pub fn min_edge_bits() -> u8 {
|
||||||
/// while the min_edge_bits can be changed on a soft fork, changing
|
/// while the min_edge_bits can be changed on a soft fork, changing
|
||||||
/// base_edge_bits is a hard fork.
|
/// base_edge_bits is a hard fork.
|
||||||
pub fn base_edge_bits() -> u8 {
|
pub fn base_edge_bits() -> u8 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
|
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
|
||||||
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
|
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
|
||||||
|
@ -174,7 +182,7 @@ pub fn base_edge_bits() -> u8 {
|
||||||
|
|
||||||
/// The proofsize
|
/// The proofsize
|
||||||
pub fn proofsize() -> usize {
|
pub fn proofsize() -> usize {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
|
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
|
||||||
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
|
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
|
||||||
|
@ -184,7 +192,7 @@ pub fn proofsize() -> usize {
|
||||||
|
|
||||||
/// Coinbase maturity for coinbases to be spent
|
/// Coinbase maturity for coinbases to be spent
|
||||||
pub fn coinbase_maturity() -> u64 {
|
pub fn coinbase_maturity() -> u64 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
|
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
|
||||||
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
|
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
|
||||||
|
@ -194,7 +202,7 @@ pub fn coinbase_maturity() -> u64 {
|
||||||
|
|
||||||
/// Initial mining difficulty
|
/// Initial mining difficulty
|
||||||
pub fn initial_block_difficulty() -> u64 {
|
pub fn initial_block_difficulty() -> u64 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
|
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
|
||||||
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
|
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
|
||||||
|
@ -207,7 +215,7 @@ pub fn initial_block_difficulty() -> u64 {
|
||||||
}
|
}
|
||||||
/// Initial mining secondary scale
|
/// Initial mining secondary scale
|
||||||
pub fn initial_graph_weight() -> u32 {
|
pub fn initial_graph_weight() -> u32 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => TESTING_INITIAL_GRAPH_WEIGHT,
|
ChainTypes::AutomatedTesting => TESTING_INITIAL_GRAPH_WEIGHT,
|
||||||
ChainTypes::UserTesting => TESTING_INITIAL_GRAPH_WEIGHT,
|
ChainTypes::UserTesting => TESTING_INITIAL_GRAPH_WEIGHT,
|
||||||
|
@ -221,7 +229,7 @@ pub fn initial_graph_weight() -> u32 {
|
||||||
|
|
||||||
/// Horizon at which we can cut-through and do full local pruning
|
/// Horizon at which we can cut-through and do full local pruning
|
||||||
pub fn cut_through_horizon() -> u32 {
|
pub fn cut_through_horizon() -> u32 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
ChainTypes::AutomatedTesting => TESTING_CUT_THROUGH_HORIZON,
|
ChainTypes::AutomatedTesting => TESTING_CUT_THROUGH_HORIZON,
|
||||||
ChainTypes::UserTesting => TESTING_CUT_THROUGH_HORIZON,
|
ChainTypes::UserTesting => TESTING_CUT_THROUGH_HORIZON,
|
||||||
|
@ -231,19 +239,19 @@ pub fn cut_through_horizon() -> u32 {
|
||||||
|
|
||||||
/// Are we in automated testing mode?
|
/// Are we in automated testing mode?
|
||||||
pub fn is_automated_testing_mode() -> bool {
|
pub fn is_automated_testing_mode() -> bool {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
ChainTypes::AutomatedTesting == *param_ref
|
ChainTypes::AutomatedTesting == *param_ref
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Are we in user testing mode?
|
/// Are we in user testing mode?
|
||||||
pub fn is_user_testing_mode() -> bool {
|
pub fn is_user_testing_mode() -> bool {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
ChainTypes::UserTesting == *param_ref
|
ChainTypes::UserTesting == *param_ref
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Are we in production mode (a live public network)?
|
/// Are we in production mode (a live public network)?
|
||||||
pub fn is_production_mode() -> bool {
|
pub fn is_production_mode() -> bool {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
ChainTypes::Testnet1 == *param_ref
|
ChainTypes::Testnet1 == *param_ref
|
||||||
|| ChainTypes::Testnet2 == *param_ref
|
|| ChainTypes::Testnet2 == *param_ref
|
||||||
|| ChainTypes::Testnet3 == *param_ref
|
|| ChainTypes::Testnet3 == *param_ref
|
||||||
|
@ -256,7 +264,7 @@ pub fn is_production_mode() -> bool {
|
||||||
/// as the genesis block POW solution turns out to be the same for every new
|
/// as the genesis block POW solution turns out to be the same for every new
|
||||||
/// block chain at the moment
|
/// block chain at the moment
|
||||||
pub fn get_genesis_nonce() -> u64 {
|
pub fn get_genesis_nonce() -> u64 {
|
||||||
let param_ref = CHAIN_TYPE.read().unwrap();
|
let param_ref = CHAIN_TYPE.read();
|
||||||
match *param_ref {
|
match *param_ref {
|
||||||
// won't make a difference
|
// won't make a difference
|
||||||
ChainTypes::AutomatedTesting => 0,
|
ChainTypes::AutomatedTesting => 0,
|
||||||
|
|
|
@@ -38,7 +38,7 @@ extern crate serde;
 extern crate serde_derive;
 extern crate siphasher;
 #[macro_use]
-extern crate slog;
+extern crate log;
 extern crate chrono;
 extern crate failure;
 #[macro_use]

@@ -78,19 +78,19 @@ where
 	}
 }

-pub fn set_header_nonce(header: Vec<u8>, nonce: Option<u32>) -> Result<[u64; 4], Error> {
+pub fn set_header_nonce(header: &[u8], nonce: Option<u32>) -> Result<[u64; 4], Error> {
 	if let Some(n) = nonce {
 		let len = header.len();
-		let mut header = header.clone();
+		let mut header = header.to_owned();
 		header.truncate(len - mem::size_of::<u32>());
 		header.write_u32::<LittleEndian>(n)?;
-		create_siphash_keys(header)
+		create_siphash_keys(&header)
 	} else {
-		create_siphash_keys(header)
+		create_siphash_keys(&header)
 	}
 }

-pub fn create_siphash_keys(header: Vec<u8>) -> Result<[u64; 4], Error> {
+pub fn create_siphash_keys(header: &[u8]) -> Result<[u64; 4], Error> {
 	let h = blake2b(32, &[], &header);
 	let hb = h.as_bytes();
 	let mut rdr = Cursor::new(hb);
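Changing the parameter from `Vec<u8>` to `&[u8]` means callers no longer give up ownership just so the function can sometimes mutate a copy; an owned copy is made only on the nonce path, via `to_owned()`. A stand-alone sketch of that "borrow a slice, own a copy only when needed" pattern, using std only (the `append_nonce` helper is hypothetical; the real code writes the nonce with byteorder):

```rust
// Sketch of the pattern used by set_header_nonce above.
use std::mem;

fn append_nonce(header: &[u8], nonce: Option<u32>) -> Vec<u8> {
    match nonce {
        Some(n) => {
            // Own a copy only when the tail actually has to be rewritten.
            let mut header = header.to_owned();
            let len = header.len();
            header.truncate(len - mem::size_of::<u32>());
            header.extend_from_slice(&n.to_le_bytes());
            header
        }
        // No nonce: just hand back an owned copy of the borrowed bytes.
        None => header.to_owned(),
    }
}

fn main() {
    let header = vec![0u8; 36];
    let with_nonce = append_nonce(&header, Some(42));
    assert_eq!(with_nonce.len(), 36);
    assert_eq!(&with_nonce[32..], &42u32.to_le_bytes());
}
```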
@ -163,7 +163,7 @@ where
|
||||||
|
|
||||||
/// Reset the main keys used for siphash from the header and nonce
|
/// Reset the main keys used for siphash from the header and nonce
|
||||||
pub fn reset_header_nonce(&mut self, header: Vec<u8>, nonce: Option<u32>) -> Result<(), Error> {
|
pub fn reset_header_nonce(&mut self, header: Vec<u8>, nonce: Option<u32>) -> Result<(), Error> {
|
||||||
self.siphash_keys = set_header_nonce(header, nonce)?;
|
self.siphash_keys = set_header_nonce(&header, nonce)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -175,7 +175,7 @@ where
|
||||||
);
|
);
|
||||||
let mut masked = hash_u64 & self.edge_mask.to_u64().ok_or(ErrorKind::IntegerCast)?;
|
let mut masked = hash_u64 & self.edge_mask.to_u64().ok_or(ErrorKind::IntegerCast)?;
|
||||||
if shift {
|
if shift {
|
||||||
masked = masked << 1;
|
masked <<= 1;
|
||||||
masked |= uorv;
|
masked |= uorv;
|
||||||
}
|
}
|
||||||
Ok(T::from(masked).ok_or(ErrorKind::IntegerCast)?)
|
Ok(T::from(masked).ok_or(ErrorKind::IntegerCast)?)
|
||||||
|
|
|
@ -54,14 +54,14 @@ where
|
||||||
pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
|
pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
|
||||||
let max_nodes = 2 * to_u64!(max_edges);
|
let max_nodes = 2 * to_u64!(max_edges);
|
||||||
Ok(Graph {
|
Ok(Graph {
|
||||||
max_edges: max_edges,
|
max_edges,
|
||||||
max_nodes: max_nodes,
|
max_nodes,
|
||||||
|
max_sols,
|
||||||
|
proof_size,
|
||||||
links: vec![],
|
links: vec![],
|
||||||
adj_list: vec![],
|
adj_list: vec![],
|
||||||
visited: Bitmap::create(),
|
visited: Bitmap::create(),
|
||||||
max_sols: max_sols,
|
|
||||||
solutions: vec![],
|
solutions: vec![],
|
||||||
proof_size: proof_size,
|
|
||||||
nil: T::max_value(),
|
nil: T::max_value(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -241,7 +241,7 @@ where
|
||||||
|
|
||||||
/// Simple implementation of algorithm
|
/// Simple implementation of algorithm
|
||||||
|
|
||||||
pub fn find_cycles_iter<'a, I>(&mut self, iter: I) -> Result<Vec<Proof>, Error>
|
pub fn find_cycles_iter<I>(&mut self, iter: I) -> Result<Vec<Proof>, Error>
|
||||||
where
|
where
|
||||||
I: Iterator<Item = u64>,
|
I: Iterator<Item = u64>,
|
||||||
{
|
{
|
||||||
|
@ -260,7 +260,7 @@ where
|
||||||
for s in &self.graph.solutions {
|
for s in &self.graph.solutions {
|
||||||
self.verify_impl(&s)?;
|
self.verify_impl(&s)?;
|
||||||
}
|
}
|
||||||
if self.graph.solutions.len() == 0 {
|
if self.graph.solutions.is_empty() {
|
||||||
Err(ErrorKind::NoSolution)?
|
Err(ErrorKind::NoSolution)?
|
||||||
} else {
|
} else {
|
||||||
Ok(self.graph.solutions.clone())
|
Ok(self.graph.solutions.clone())
|
||||||
|
|
|
@ -77,7 +77,7 @@ where
|
||||||
let params = CuckooParams::new(edge_bits, proof_size)?;
|
let params = CuckooParams::new(edge_bits, proof_size)?;
|
||||||
let num_nodes = 2 * params.num_edges as usize;
|
let num_nodes = 2 * params.num_edges as usize;
|
||||||
Ok(CuckooContext {
|
Ok(CuckooContext {
|
||||||
params: params,
|
params,
|
||||||
graph: vec![T::zero(); num_nodes],
|
graph: vec![T::zero(); num_nodes],
|
||||||
_max_sols: max_sols,
|
_max_sols: max_sols,
|
||||||
})
|
})
|
||||||
|
@ -190,7 +190,7 @@ where
|
||||||
cycle.insert(Edge { u: us[0], v: vs[0] });
|
cycle.insert(Edge { u: us[0], v: vs[0] });
|
||||||
while nu != 0 {
|
while nu != 0 {
|
||||||
// u's in even position; v's in odd
|
// u's in even position; v's in odd
|
||||||
nu = nu - 1;
|
nu -= 1;
|
||||||
cycle.insert(Edge {
|
cycle.insert(Edge {
|
||||||
u: us[((nu + 1) & !1) as usize],
|
u: us[((nu + 1) & !1) as usize],
|
||||||
v: us[(nu | 1) as usize],
|
v: us[(nu | 1) as usize],
|
||||||
|
@ -214,11 +214,11 @@ where
|
||||||
cycle.remove(&edge);
|
cycle.remove(&edge);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return if n == self.params.proof_size {
|
if n == self.params.proof_size {
|
||||||
Ok(sol)
|
Ok(sol)
|
||||||
} else {
|
} else {
|
||||||
Err(ErrorKind::NoCycle)?
|
Err(ErrorKind::NoCycle)?
|
||||||
};
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Searches for a solution (simple implementation)
|
/// Searches for a solution (simple implementation)
|
||||||
|
|
|
@ -85,7 +85,7 @@ impl From<ErrorKind> for Error {
|
||||||
|
|
||||||
impl From<Context<ErrorKind>> for Error {
|
impl From<Context<ErrorKind>> for Error {
|
||||||
fn from(inner: Context<ErrorKind>) -> Error {
|
fn from(inner: Context<ErrorKind>) -> Error {
|
||||||
Error { inner: inner }
|
Error { inner }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -37,7 +37,7 @@ impl Lean {
|
||||||
|
|
||||||
// edge bitmap, before trimming all of them are on
|
// edge bitmap, before trimming all of them are on
|
||||||
let mut edges = Bitmap::create_with_capacity(params.num_edges as u32);
|
let mut edges = Bitmap::create_with_capacity(params.num_edges as u32);
|
||||||
edges.flip_inplace(0..params.num_edges.into());
|
edges.flip_inplace(0..params.num_edges);
|
||||||
|
|
||||||
Lean { params, edges }
|
Lean { params, edges }
|
||||||
}
|
}
|
||||||
|
|
|
@ -77,7 +77,7 @@ pub fn mine_genesis_block() -> Result<Block, Error> {
|
||||||
}
|
}
|
||||||
|
|
||||||
// total_difficulty on the genesis header *is* the difficulty of that block
|
// total_difficulty on the genesis header *is* the difficulty of that block
|
||||||
let genesis_difficulty = gen.header.pow.total_difficulty.clone();
|
let genesis_difficulty = gen.header.pow.total_difficulty;
|
||||||
|
|
||||||
let sz = global::min_edge_bits();
|
let sz = global::min_edge_bits();
|
||||||
let proof_size = global::proofsize();
|
let proof_size = global::proofsize();
|
||||||
|
|
|
@@ -62,7 +62,7 @@ pub fn siphash24(v: &[u64; 4], nonce: u64) -> u64 {
 	round!();
 	round!();

-	return v0 ^ v1 ^ v2 ^ v3;
+	v0 ^ v1 ^ v2 ^ v3
 }

 #[cfg(test)]
@@ -80,11 +80,6 @@ impl Difficulty {
 		Difficulty { num: max(num, 1) }
 	}

-	/// Compute difficulty scaling factor for graph defined by 2 * 2^edge_bits * edge_bits bits
-	pub fn scale(edge_bits: u8) -> u64 {
-		(2 << (edge_bits - global::base_edge_bits()) as u64) * (edge_bits as u64)
-	}
-
 	/// Computes the difficulty from a hash. Divides the maximum target by the
 	/// provided hash and applies the Cuck(at)oo size adjustment factor (see
 	/// https://lists.launchpad.net/mimblewimble/msg00494.html).
@@ -92,10 +92,7 @@ impl error::Error for Error {
 	fn description(&self) -> &str {
 		match *self {
 			Error::IOErr(ref e, _) => e,
-			Error::UnexpectedData {
-				expected: _,
-				received: _,
-			} => "unexpected data",
+			Error::UnexpectedData { .. } => "unexpected data",
 			Error::CorruptedData => "corrupted data",
 			Error::TooLargeReadErr => "too large read",
 			Error::ConsensusError(_) => "consensus error (sort order)",
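Matching `Error::UnexpectedData { .. }` replaces spelling out each ignored field as `expected: _, received: _`: the `..` rest pattern ignores all remaining fields at once. A tiny self-contained illustration with a hypothetical error enum:

```rust
// Illustration of the `..` rest pattern used to ignore struct-variant fields.
enum SerError {
    UnexpectedData { expected: Vec<u8>, received: Vec<u8> },
    CorruptedData,
}

fn describe(e: &SerError) -> &'static str {
    match e {
        // `..` skips every field we do not care about in this arm.
        SerError::UnexpectedData { .. } => "unexpected data",
        SerError::CorruptedData => "corrupted data",
    }
}

fn main() {
    let e = SerError::UnexpectedData {
        expected: vec![0],
        received: vec![1],
    };
    assert_eq!(describe(&e), "unexpected data");
}
```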
@@ -231,13 +228,13 @@ where

 /// Deserializes a Readeable from any std::io::Read implementation.
 pub fn deserialize<T: Readable>(source: &mut Read) -> Result<T, Error> {
-	let mut reader = BinReader { source: source };
+	let mut reader = BinReader { source };
 	T::read(&mut reader)
 }

 /// Serializes a Writeable into any std::io::Write implementation.
 pub fn serialize<W: Writeable>(sink: &mut Write, thing: &W) -> Result<(), Error> {
-	let mut writer = BinWriter { sink: sink };
+	let mut writer = BinWriter { sink };
 	thing.write(&mut writer)
 }

@ -319,9 +316,7 @@ impl Readable for Commitment {
|
||||||
fn read(reader: &mut Reader) -> Result<Commitment, Error> {
|
fn read(reader: &mut Reader) -> Result<Commitment, Error> {
|
||||||
let a = reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE)?;
|
let a = reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE)?;
|
||||||
let mut c = [0; PEDERSEN_COMMITMENT_SIZE];
|
let mut c = [0; PEDERSEN_COMMITMENT_SIZE];
|
||||||
for i in 0..PEDERSEN_COMMITMENT_SIZE {
|
c[..PEDERSEN_COMMITMENT_SIZE].clone_from_slice(&a[..PEDERSEN_COMMITMENT_SIZE]);
|
||||||
c[i] = a[i];
|
|
||||||
}
|
|
||||||
Ok(Commitment(c))
|
Ok(Commitment(c))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -368,9 +363,7 @@ impl Readable for RangeProof {
|
||||||
fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
|
fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
|
||||||
let p = reader.read_limited_vec(MAX_PROOF_SIZE)?;
|
let p = reader.read_limited_vec(MAX_PROOF_SIZE)?;
|
||||||
let mut a = [0; MAX_PROOF_SIZE];
|
let mut a = [0; MAX_PROOF_SIZE];
|
||||||
for i in 0..p.len() {
|
a[..p.len()].clone_from_slice(&p[..]);
|
||||||
a[i] = p[i];
|
|
||||||
}
|
|
||||||
Ok(RangeProof {
|
Ok(RangeProof {
|
||||||
proof: a,
|
proof: a,
|
||||||
plen: p.len(),
|
plen: p.len(),
|
||||||
|
@ -388,9 +381,7 @@ impl Readable for Signature {
|
||||||
fn read(reader: &mut Reader) -> Result<Signature, Error> {
|
fn read(reader: &mut Reader) -> Result<Signature, Error> {
|
||||||
let a = reader.read_fixed_bytes(AGG_SIGNATURE_SIZE)?;
|
let a = reader.read_fixed_bytes(AGG_SIGNATURE_SIZE)?;
|
||||||
let mut c = [0; AGG_SIGNATURE_SIZE];
|
let mut c = [0; AGG_SIGNATURE_SIZE];
|
||||||
for i in 0..AGG_SIGNATURE_SIZE {
|
c[..AGG_SIGNATURE_SIZE].clone_from_slice(&a[..AGG_SIGNATURE_SIZE]);
|
||||||
c[i] = a[i];
|
|
||||||
}
|
|
||||||
Ok(Signature::from_raw_data(&c).unwrap())
|
Ok(Signature::from_raw_data(&c).unwrap())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
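The element-by-element copy loops collapse into single `clone_from_slice` calls (clippy's `manual_memcpy` pattern). Both `clone_from_slice` and the `Copy`-specialised `copy_from_slice` panic if the two slices differ in length, which is why the destination is sliced to the source length first. A small stand-alone illustration with arbitrary example sizes:

```rust
// Stand-alone illustration of replacing an index loop with clone_from_slice,
// mirroring the Readable impls above. Sizes here are arbitrary examples.
const PROOF_MAX: usize = 16;

fn main() {
    let p: Vec<u8> = vec![7u8; 10]; // e.g. a proof shorter than the max size
    let mut a = [0u8; PROOF_MAX];

    // Before: explicit loop.
    for i in 0..p.len() {
        a[i] = p[i];
    }

    // After: slice both sides to the same length and copy in one call.
    // clone_from_slice (or copy_from_slice for u8) panics on a length
    // mismatch, hence the `..p.len()` bound on the destination.
    let mut b = [0u8; PROOF_MAX];
    b[..p.len()].clone_from_slice(&p[..]);

    assert_eq!(a, b);
}
```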
@ -577,81 +568,81 @@ pub trait AsFixedBytes: Sized + AsRef<[u8]> {
|
||||||
|
|
||||||
impl<'a> AsFixedBytes for &'a [u8] {
|
impl<'a> AsFixedBytes for &'a [u8] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 1;
|
1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for Vec<u8> {
|
impl AsFixedBytes for Vec<u8> {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return self.len();
|
self.len()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 1] {
|
impl AsFixedBytes for [u8; 1] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 1;
|
1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 2] {
|
impl AsFixedBytes for [u8; 2] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 2;
|
2
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 4] {
|
impl AsFixedBytes for [u8; 4] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 4;
|
4
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 6] {
|
impl AsFixedBytes for [u8; 6] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 6;
|
6
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 8] {
|
impl AsFixedBytes for [u8; 8] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 8;
|
8
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 20] {
|
impl AsFixedBytes for [u8; 20] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 20;
|
20
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for [u8; 32] {
|
impl AsFixedBytes for [u8; 32] {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 32;
|
32
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for String {
|
impl AsFixedBytes for String {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return self.len();
|
self.len()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for ::core::hash::Hash {
|
impl AsFixedBytes for ::core::hash::Hash {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 32;
|
32
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for ::util::secp::pedersen::RangeProof {
|
impl AsFixedBytes for ::util::secp::pedersen::RangeProof {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return self.plen;
|
self.plen
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for ::util::secp::Signature {
|
impl AsFixedBytes for ::util::secp::Signature {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return 64;
|
64
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for ::util::secp::pedersen::Commitment {
|
impl AsFixedBytes for ::util::secp::pedersen::Commitment {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return PEDERSEN_COMMITMENT_SIZE;
|
PEDERSEN_COMMITMENT_SIZE
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for BlindingFactor {
|
impl AsFixedBytes for BlindingFactor {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return SECRET_KEY_SIZE;
|
SECRET_KEY_SIZE
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl AsFixedBytes for ::keychain::Identifier {
|
impl AsFixedBytes for ::keychain::Identifier {
|
||||||
fn len(&self) -> usize {
|
fn len(&self) -> usize {
|
||||||
return IDENTIFIER_SIZE;
|
IDENTIFIER_SIZE
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -18,8 +18,9 @@ extern crate grin_keychain as keychain;
|
||||||
extern crate grin_util as util;
|
extern crate grin_util as util;
|
||||||
extern crate grin_wallet as wallet;
|
extern crate grin_wallet as wallet;
|
||||||
|
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
pub mod common;
|
pub mod common;
|
||||||
|
|
||||||
|
|
|
@ -480,18 +480,21 @@ fn secondary_pow_scale() {
|
||||||
// difficulty block
|
// difficulty block
|
||||||
hi.is_secondary = false;
|
hi.is_secondary = false;
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
|
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect::<Vec<_>>()),
|
||||||
147
|
147
|
||||||
);
|
);
|
||||||
// all secondary on 90%, factor should go down a bit
|
// all secondary on 90%, factor should go down a bit
|
||||||
hi.is_secondary = true;
|
hi.is_secondary = true;
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
|
secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect::<Vec<_>>()),
|
||||||
94
|
94
|
||||||
);
|
);
|
||||||
// all secondary on 1%, factor should go down to bound (divide by 2)
|
// all secondary on 1%, factor should go down to bound (divide by 2)
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
secondary_pow_scaling(890_000, &(0..window).map(|_| hi.clone()).collect()),
|
secondary_pow_scaling(
|
||||||
|
890_000,
|
||||||
|
&(0..window).map(|_| hi.clone()).collect::<Vec<_>>()
|
||||||
|
),
|
||||||
49
|
49
|
||||||
);
|
);
|
||||||
// same as above, testing lowest bound
|
// same as above, testing lowest bound
|
||||||
|
@ -510,7 +513,7 @@ fn secondary_pow_scale() {
|
||||||
&(0..(window / 10))
|
&(0..(window / 10))
|
||||||
.map(|_| primary_hi.clone())
|
.map(|_| primary_hi.clone())
|
||||||
.chain((0..(window * 9 / 10)).map(|_| hi.clone()))
|
.chain((0..(window * 9 / 10)).map(|_| hi.clone()))
|
||||||
.collect()
|
.collect::<Vec<_>>()
|
||||||
),
|
),
|
||||||
94
|
94
|
||||||
);
|
);
|
||||||
|
@ -521,7 +524,7 @@ fn secondary_pow_scale() {
|
||||||
&(0..(window / 20))
|
&(0..(window / 20))
|
||||||
.map(|_| primary_hi.clone())
|
.map(|_| primary_hi.clone())
|
||||||
.chain((0..(window * 95 / 100)).map(|_| hi.clone()))
|
.chain((0..(window * 95 / 100)).map(|_| hi.clone()))
|
||||||
.collect()
|
.collect::<Vec<_>>()
|
||||||
),
|
),
|
||||||
94
|
94
|
||||||
);
|
);
|
||||||
|
@ -532,7 +535,7 @@ fn secondary_pow_scale() {
|
||||||
&(0..(window * 6 / 10))
|
&(0..(window * 6 / 10))
|
||||||
.map(|_| primary_hi.clone())
|
.map(|_| primary_hi.clone())
|
||||||
.chain((0..(window * 4 / 10)).map(|_| hi.clone()))
|
.chain((0..(window * 4 / 10)).map(|_| hi.clone()))
|
||||||
.collect()
|
.collect::<Vec<_>>()
|
||||||
),
|
),
|
||||||
84
|
84
|
||||||
);
|
);
|
||||||
|
|
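The tests now spell out `collect::<Vec<_>>()`. The likely reason is that `secondary_pow_scaling` now takes a slice rather than `&Vec<_>` (in line with the other signature changes in this diff), so `collect()` no longer has a concrete collection type to infer from and needs the turbofish. A tiny illustration of when inference stops being enough (names are illustrative only):

```rust
// Illustration of why collect sometimes needs a turbofish.
fn sum_lengths(items: &[String]) -> usize {
    items.iter().map(|s| s.len()).sum()
}

fn main() {
    // Fine without a turbofish: the binding's type annotation drives inference.
    let words: Vec<String> = (0..3).map(|i| format!("word{}", i)).collect();

    // Passed straight into a &[String] parameter, `collect` has no binding to
    // infer from, so the container type must be named explicitly.
    let total =
        sum_lengths(&(0..3).map(|i| format!("word{}", i)).collect::<Vec<_>>());

    assert_eq!(total, sum_lengths(&words));
}
```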
|
@ -18,7 +18,8 @@ extern crate grin_keychain as keychain;
|
||||||
extern crate grin_util as util;
|
extern crate grin_util as util;
|
||||||
extern crate grin_wallet as wallet;
|
extern crate grin_wallet as wallet;
|
||||||
|
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
pub mod common;
|
pub mod common;
|
||||||
|
|
||||||
|
@ -350,7 +351,7 @@ fn blind_tx() {
|
||||||
let Output { proof, .. } = btx.outputs()[0];
|
let Output { proof, .. } = btx.outputs()[0];
|
||||||
|
|
||||||
let secp = static_secp_instance();
|
let secp = static_secp_instance();
|
||||||
let secp = secp.lock().unwrap();
|
let secp = secp.lock();
|
||||||
let info = secp.range_proof_info(proof);
|
let info = secp.range_proof_info(proof);
|
||||||
|
|
||||||
assert!(info.min == 0);
|
assert!(info.min == 0);
|
||||||
|
|
|
@ -18,7 +18,8 @@ extern crate grin_keychain as keychain;
|
||||||
extern crate grin_util as util;
|
extern crate grin_util as util;
|
||||||
extern crate grin_wallet as wallet;
|
extern crate grin_wallet as wallet;
|
||||||
|
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
pub mod common;
|
pub mod common;
|
||||||
|
|
||||||
|
@ -48,20 +49,20 @@ fn test_verifier_cache_rangeproofs() {
|
||||||
|
|
||||||
// Check our output is not verified according to the cache.
|
// Check our output is not verified according to the cache.
|
||||||
{
|
{
|
||||||
let mut cache = cache.write().unwrap();
|
let mut cache = cache.write();
|
||||||
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
|
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
|
||||||
assert_eq!(unverified, vec![out]);
|
assert_eq!(unverified, vec![out]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add our output to the cache.
|
// Add our output to the cache.
|
||||||
{
|
{
|
||||||
let mut cache = cache.write().unwrap();
|
let mut cache = cache.write();
|
||||||
cache.add_rangeproof_verified(vec![out]);
|
cache.add_rangeproof_verified(vec![out]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check it shows as verified according to the cache.
|
// Check it shows as verified according to the cache.
|
||||||
{
|
{
|
||||||
let mut cache = cache.write().unwrap();
|
let mut cache = cache.write();
|
||||||
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
|
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
|
||||||
assert_eq!(unverified, vec![]);
|
assert_eq!(unverified, vec![]);
|
||||||
}
|
}
|
||||||
|
|
|
@ -170,10 +170,6 @@ Receives a transaction, modifying the slate accordingly (which can then be sent
|
||||||
* **Sample Call:**
|
* **Sample Call:**
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
var coinbase_data = {
|
|
||||||
fees: 0,
|
|
||||||
height: 123456
|
|
||||||
}
|
|
||||||
$.ajax({
|
$.ajax({
|
||||||
url: "/v1/wallet/foreign/build_coinbase",
|
url: "/v1/wallet/foreign/build_coinbase",
|
||||||
dataType: "json",
|
dataType: "json",
|
||||||
|
|
|
@ -390,10 +390,6 @@ Send a transaction either directly by http or file (then display the slate)
|
||||||
* **Sample Call:**
|
* **Sample Call:**
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
var coinbase_data = {
|
|
||||||
fees: 0,
|
|
||||||
height: 123456
|
|
||||||
}
|
|
||||||
$.ajax({
|
$.ajax({
|
||||||
url: "/v1/wallet/owner/issue_send_tx",
|
url: "/v1/wallet/owner/issue_send_tx",
|
||||||
dataType: "json",
|
dataType: "json",
|
||||||
|
@ -506,10 +502,6 @@ Builds the complete transaction and sends it to a grin node for propagation.
|
||||||
* **Sample Call:**
|
* **Sample Call:**
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
var coinbase_data = {
|
|
||||||
fees: 0,
|
|
||||||
height: 123456
|
|
||||||
}
|
|
||||||
$.ajax({
|
$.ajax({
|
||||||
url: "/v1/wallet/owner/finalize_tx",
|
url: "/v1/wallet/owner/finalize_tx",
|
||||||
dataType: "json",
|
dataType: "json",
|
||||||
|
@ -555,10 +547,6 @@ Roll back a transaction and all associated outputs with a given transaction id T
|
||||||
* **Sample Call:**
|
* **Sample Call:**
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
var coinbase_data = {
|
|
||||||
fees: 0,
|
|
||||||
height: 123456
|
|
||||||
}
|
|
||||||
$.ajax({
|
$.ajax({
|
||||||
url: "/v1/wallet/owner/cancel_tx?id=3",
|
url: "/v1/wallet/owner/cancel_tx?id=3",
|
||||||
dataType: "json",
|
dataType: "json",
|
||||||
|
@ -569,6 +557,83 @@ Roll back a transaction and all associated outputs with a given transaction id T
|
||||||
});
|
});
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### POST Post Tx
|
||||||
|
|
||||||
|
Push new transaction to the connected node transaction pool. Add `?fluff` at the end of the URL to bypass Dandelion relay.
|
||||||
|
|
||||||
|
* **URL**
|
||||||
|
|
||||||
|
/v1/wallet/owner/post_tx
|
||||||
|
|
||||||
|
* **Method:**
|
||||||
|
|
||||||
|
`POST`
|
||||||
|
|
||||||
|
* **URL Params**
|
||||||
|
|
||||||
|
None
|
||||||
|
|
||||||
|
* **Data Params**
|
||||||
|
|
||||||
|
**Required:** A transaction slate in JSON.
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|:----------------------|:---------|:--------------------------------------------------------------------------|
|
||||||
|
| num_participants | number | The number of participants intended to take part in this transaction |
|
||||||
|
| id | number | Unique transaction ID, selected by sender |
|
||||||
|
| tx | object | The core transaction data (inputs, outputs, kernels and kernel offset) |
|
||||||
|
| - offset | []number | The kernel "offset" k2, excess is k1G after splitting the key k = k1 + k2 |
|
||||||
|
| - body | object | The transaction body - inputs/outputs/kernels |
|
||||||
|
| - - inputs | []object | List of inputs spent by the transaction |
|
||||||
|
| - - - features | object | The features of the output being spent |
|
||||||
|
| - - - - bits | number | Representation of the features in bits |
|
||||||
|
| - - - commit | []number | The commit referencing the output being spent |
|
||||||
|
| - - outputs | []object | List of outputs the transaction produces |
|
||||||
|
| - - - features | object | Options for an output's structure or use |
|
||||||
|
| - - - - bits | number | Representation of the features in bits |
|
||||||
|
| - - - commit | []number | The homomorphic commitment representing the output amount |
|
||||||
|
| - - - proof | []number | A proof that the commitment is in the right range |
|
||||||
|
| - - kernels | []object | List of kernels that make up this transaction (usually a single kernel) |
|
||||||
|
| - - - features | object | Options for a kernel's structure or use |
|
||||||
|
| - - - - bits | number | Representation of the features in bits |
|
||||||
|
| - - - fee | number | Fee originally included in the transaction this proof is for |
|
||||||
|
| - - - lock_height | number | The max lock_height of all inputs to this transaction |
|
||||||
|
| - - - excess | []number | Remainder of the sum of all transaction commitments |
|
||||||
|
| - - - excess_sig | []number | The signature proving the excess is a valid public key (signs the tx fee) |
|
||||||
|
| amount | number | Base amount (excluding fee) |
|
||||||
|
| fee | number | Fee amount |
|
||||||
|
| height | number | Block height for the transaction |
|
||||||
|
| lock_height | number | Lock height |
|
||||||
|
| participant_data | object | Participant data |
|
||||||
|
| - id | number | Id of participant in the transaction. (For now, 0=sender, 1=rec) |
|
||||||
|
| - public_blind_excess | []number | Public key corresponding to private blinding factor |
|
||||||
|
| - public_nonce | []number | Public key corresponding to private nonce |
|
||||||
|
| - part_sig | []number | Public partial signature |
|
||||||
|
|
||||||
|
* **Success Response:**
|
||||||
|
|
||||||
|
* **Code:** 200
|
||||||
|
|
||||||
|
* **Error Response:**
|
||||||
|
|
||||||
|
* **Code:** 400
|
||||||
|
|
||||||
|
* **Sample Call:**
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
$.ajax({
|
||||||
|
url: "/v1/wallet/owner/post_tx",
|
||||||
|
dataType: "json",
|
||||||
|
type : "POST",
|
||||||
|
success : function(r) {
|
||||||
|
console.log(r);
|
||||||
|
},
|
||||||
|
data: {
|
||||||
|
file: tx.json
|
||||||
|
},
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
### POST Issue Burn Tx
|
### POST Issue Burn Tx
|
||||||
|
|
||||||
Issue a burn TX.
|
Issue a burn TX.
|
||||||
|
@ -600,10 +665,6 @@ Issue a burn TX.
|
||||||
* **Sample Call:**
|
* **Sample Call:**
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
var coinbase_data = {
|
|
||||||
fees: 0,
|
|
||||||
height: 123456
|
|
||||||
}
|
|
||||||
$.ajax({
|
$.ajax({
|
||||||
url: "/v1/wallet/owner/issue_burn_tx",
|
url: "/v1/wallet/owner/issue_burn_tx",
|
||||||
dataType: "json",
|
dataType: "json",
|
||||||
|
|
68	doc/wallet/tls-setup.md	Normal file
@@ -0,0 +1,68 @@
+# Wallet TLS setup
+
+## What you need
+* A server with a static IP address (eg `3.3.3.3`)
+* Ownership of a domain name (`example.com`)
+* DNS configuration for this IP (`grin1.example.com` -> `3.3.3.3`)
+
+If you don't have a static IP you may want to consider services like DynDNS which support dynamic IP resolution. That case is not covered by this guide, but all the following steps apply equally.
+
+If you don't have a domain name it is possible to get a TLS certificate for a bare IP, but you have to pay for it (so it may be cheaper to buy a domain name) and it is rarely supported by certificate providers.
+
+## I have a TLS certificate already
+Uncomment and update the following lines in the wallet config (by default `~/.grin/grin-wallet.toml`):
+
+```
+tls_certificate_file = "/path/to/my/certificate/fullchain.pem"
+tls_certificate_key = "/path/to/my/certificate/privkey.pem"
+```
+
+Make sure your user has read access to the files (see below for how to do it), then restart the wallet. When you (or someone else) send grins to this wallet, the destination (`-d` option) must start with `https://`, not with `http://`.
+
+## I don't have a TLS certificate
+You can get one for free from [Let's Encrypt](https://letsencrypt.org/). To simplify the process we need `certbot`.
+
+### Install certbot
+Go to the [Certbot home page](https://certbot.eff.org/), choose `None of the above` as the software and your OS (eg `Ubuntu 18.04`, which is used as the example here). You will be redirected to a page with instructions like [steps for Ubuntu](https://certbot.eff.org/lets-encrypt/ubuntubionic-other). Follow the instructions in the `Install` section. As a result you should have `certbot` installed.
+
+### Obtain certificate
+If you have experience with `certbot`, feel free to use any type of challenge. This guide covers the simplest case of an HTTP challenge. For this you need a web server listening on port `80`, which in the simplest case requires running it as root. We will use the server provided by certbot. **Make sure you have port 80 open.**
+
+```
+sudo certbot certonly --standalone -d grin1.example.com
+```
+
+It will ask you some questions; as a result you should see something like:
+
+```
+Congratulations! Your certificate and chain have been saved at:
+/etc/letsencrypt/live/grin1.example.com/fullchain.pem
+Your key file has been saved at:
+/etc/letsencrypt/live/grin1.example.com/privkey.pem
+Your cert will expire on 2019-01-16. To obtain a new or tweaked
+version of this certificate in the future, simply run certbot
+again. To non-interactively renew *all* of your certificates, run
+"certbot renew"
+```
+
+### Change permissions
+Now you have the certificate files, but only the root user can read them. We run grin as the `ubuntu` user. There are different ways to fix this; the simplest is to create a group that has access to the `/etc/letsencrypt` directory and add our user to that group.
+
+```
+$ sudo groupadd tls-cert
+$ sudo usermod -a -G tls-cert ubuntu
+$ sudo chgrp -R tls-cert /etc/letsencrypt
+$ sudo chmod -R g=rX /etc/letsencrypt
+$ sudo chmod 2755 /etc/letsencrypt
+```
+
+The last step is needed for renewal; it makes sure that all new files will get the same group ownership.
+
+### Update wallet config
+Refer to `I have a TLS certificate already` above, because now you have one. Use the following values:
+
+```
+tls_certificate_file = "/etc/letsencrypt/live/grin1.example.com/fullchain.pem"
+tls_certificate_key = "/etc/letsencrypt/live/grin1.example.com/privkey.pem"
+```
|
@ -9,7 +9,7 @@ publish = false
|
||||||
byteorder = "1"
|
byteorder = "1"
|
||||||
blake2-rfc = "0.2"
|
blake2-rfc = "0.2"
|
||||||
rand = "0.5"
|
rand = "0.5"
|
||||||
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
|
log = "0.4"
|
||||||
serde = "1"
|
serde = "1"
|
||||||
serde_derive = "1"
|
serde_derive = "1"
|
||||||
serde_json = "1"
|
serde_json = "1"
|
||||||
|
|
|
@ -24,10 +24,10 @@ extern crate serde;
|
||||||
extern crate serde_derive;
|
extern crate serde_derive;
|
||||||
extern crate digest;
|
extern crate digest;
|
||||||
extern crate hmac;
|
extern crate hmac;
|
||||||
|
extern crate log;
|
||||||
extern crate ripemd160;
|
extern crate ripemd160;
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
extern crate sha2;
|
extern crate sha2;
|
||||||
extern crate slog;
|
|
||||||
extern crate uuid;
|
extern crate uuid;
|
||||||
|
|
||||||
mod base58;
|
mod base58;
|
||||||
|
|
|
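The `slog` → `log` switch in the keychain crate (and elsewhere in this commit) is why every macro call in the diff loses its leading `LOGGER` argument: the `log` facade's `error!`/`debug!`/`trace!` macros take only a format string and arguments, and the output sink is whatever logger implementation the binary installs globally. A minimal sketch of that style, using `env_logger` purely as an assumed backend to keep the example self-contained (the grin binaries wire up their own implementation):

```rust
// Minimal sketch of the log-facade style used after the slog -> log switch.
// env_logger is only an assumption here, chosen to make the example runnable.
#[macro_use]
extern crate log;
extern crate env_logger;

fn filter_step(kernels: usize, unverified: usize) {
    // No LOGGER handle threaded through: the macro writes to whichever
    // logger implementation was installed at startup.
    trace!(
        "lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}",
        kernels,
        unverified
    );
}

fn main() {
    env_logger::init();
    filter_step(10, 3);
}
```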
@ -244,7 +244,7 @@ impl Add for BlindingFactor {
|
||||||
//
|
//
|
||||||
fn add(self, other: BlindingFactor) -> Self::Output {
|
fn add(self, other: BlindingFactor) -> Self::Output {
|
||||||
let secp = static_secp_instance();
|
let secp = static_secp_instance();
|
||||||
let secp = secp.lock().unwrap();
|
let secp = secp.lock();
|
||||||
let keys = vec![self, other]
|
let keys = vec![self, other]
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter(|x| *x != BlindingFactor::zero())
|
.filter(|x| *x != BlindingFactor::zero())
|
||||||
|
|
|
@ -15,7 +15,7 @@ num = "0.1"
|
||||||
rand = "0.5"
|
rand = "0.5"
|
||||||
serde = "1"
|
serde = "1"
|
||||||
serde_derive = "1"
|
serde_derive = "1"
|
||||||
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
|
log = "0.4"
|
||||||
chrono = { version = "0.4.4", features = ["serde"] }
|
chrono = { version = "0.4.4", features = ["serde"] }
|
||||||
|
|
||||||
grin_core = { path = "../core" }
|
grin_core = { path = "../core" }
|
||||||
|
|
|
@ -24,13 +24,13 @@ use std::fs::File;
|
||||||
use std::io::{self, Read, Write};
|
use std::io::{self, Read, Write};
|
||||||
use std::mem::size_of;
|
use std::mem::size_of;
|
||||||
use std::net::TcpStream;
|
use std::net::TcpStream;
|
||||||
use std::sync::{mpsc, Arc, RwLock};
|
use std::sync::{mpsc, Arc};
|
||||||
use std::{cmp, thread, time};
|
use std::{cmp, thread, time};
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
use core::ser;
|
use core::ser;
|
||||||
use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type};
|
use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type};
|
||||||
use types::Error;
|
use types::Error;
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
/// A trait to be implemented in order to receive messages from the
|
/// A trait to be implemented in order to receive messages from the
|
||||||
/// connection. Allows providing an optional response.
|
/// connection. Allows providing an optional response.
|
||||||
|
@ -144,7 +144,6 @@ impl<'a> Response<'a> {
|
||||||
|
|
||||||
pub const SEND_CHANNEL_CAP: usize = 10;
|
pub const SEND_CHANNEL_CAP: usize = 10;
|
||||||
|
|
||||||
// TODO count sent and received
|
|
||||||
pub struct Tracker {
|
pub struct Tracker {
|
||||||
/// Bytes we've sent.
|
/// Bytes we've sent.
|
||||||
pub sent_bytes: Arc<RwLock<u64>>,
|
pub sent_bytes: Arc<RwLock<u64>>,
|
||||||
|
@ -168,9 +167,8 @@ impl Tracker {
|
||||||
self.send_channel.try_send(buf)?;
|
self.send_channel.try_send(buf)?;
|
||||||
|
|
||||||
// Increase sent bytes counter
|
// Increase sent bytes counter
|
||||||
if let Ok(mut sent_bytes) = self.sent_bytes.write() {
|
let mut sent_bytes = self.sent_bytes.write();
|
||||||
*sent_bytes += buf_len as u64;
|
*sent_bytes += buf_len as u64;
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -234,14 +232,14 @@ fn poll<H>(
|
||||||
if let Some(h) = try_break!(error_tx, read_header(conn, None)) {
|
if let Some(h) = try_break!(error_tx, read_header(conn, None)) {
|
||||||
let msg = Message::from_header(h, conn);
|
let msg = Message::from_header(h, conn);
|
||||||
trace!(
|
trace!(
|
||||||
LOGGER,
|
|
||||||
"Received message header, type {:?}, len {}.",
|
"Received message header, type {:?}, len {}.",
|
||||||
msg.header.msg_type,
|
msg.header.msg_type,
|
||||||
msg.header.msg_len
|
msg.header.msg_len
|
||||||
);
|
);
|
||||||
|
|
||||||
// Increase received bytes counter
|
// Increase received bytes counter
|
||||||
if let Ok(mut received_bytes) = received_bytes.write() {
|
{
|
||||||
|
let mut received_bytes = received_bytes.write();
|
||||||
let header_size = size_of::<MsgHeader>() as u64;
|
let header_size = size_of::<MsgHeader>() as u64;
|
||||||
*received_bytes += header_size + msg.header.msg_len;
|
*received_bytes += header_size + msg.header.msg_len;
|
||||||
}
|
}
|
||||||
|
@ -275,7 +273,6 @@ fn poll<H>(
|
||||||
// check the close channel
|
// check the close channel
|
||||||
if let Ok(_) = close_rx.try_recv() {
|
if let Ok(_) = close_rx.try_recv() {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"Connection close with {} initiated by us",
|
"Connection close with {} initiated by us",
|
||||||
conn.peer_addr()
|
conn.peer_addr()
|
||||||
.map(|a| a.to_string())
|
.map(|a| a.to_string())
|
||||||
|
|
|
@ -14,7 +14,8 @@
|
||||||
|
|
||||||
use std::collections::VecDeque;
|
use std::collections::VecDeque;
|
||||||
use std::net::{SocketAddr, TcpStream};
|
use std::net::{SocketAddr, TcpStream};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
use chrono::prelude::*;
|
use chrono::prelude::*;
|
||||||
use rand::{thread_rng, Rng};
|
use rand::{thread_rng, Rng};
|
||||||
|
@ -24,7 +25,6 @@ use core::pow::Difficulty;
|
||||||
use msg::{read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT};
|
use msg::{read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT};
|
||||||
use peer::Peer;
|
use peer::Peer;
|
||||||
use types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo};
|
use types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo};
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
const NONCES_CAP: usize = 100;
|
const NONCES_CAP: usize = 100;
|
||||||
|
|
||||||
|
@ -114,7 +114,6 @@ impl Handshake {
|
||||||
}
|
}
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"Connected! Cumulative {} offered from {:?} {:?} {:?}",
|
"Connected! Cumulative {} offered from {:?} {:?} {:?}",
|
||||||
shake.total_difficulty.to_num(),
|
shake.total_difficulty.to_num(),
|
||||||
peer_info.addr,
|
peer_info.addr,
|
||||||
|
@ -146,7 +145,7 @@ impl Handshake {
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
// check the nonce to see if we are trying to connect to ourselves
|
// check the nonce to see if we are trying to connect to ourselves
|
||||||
let nonces = self.nonces.read().unwrap();
|
let nonces = self.nonces.read();
|
||||||
if nonces.contains(&hand.nonce) {
|
if nonces.contains(&hand.nonce) {
|
||||||
return Err(Error::PeerWithSelf);
|
return Err(Error::PeerWithSelf);
|
||||||
}
|
}
|
||||||
|
@ -185,7 +184,7 @@ impl Handshake {
|
||||||
};
|
};
|
||||||
|
|
||||||
write_message(conn, shake, Type::Shake)?;
|
write_message(conn, shake, Type::Shake)?;
|
||||||
trace!(LOGGER, "Success handshake with {}.", peer_info.addr);
|
trace!("Success handshake with {}.", peer_info.addr);
|
||||||
|
|
||||||
// when more than one protocol version is supported, choosing should go here
|
// when more than one protocol version is supported, choosing should go here
|
||||||
Ok(peer_info)
|
Ok(peer_info)
|
||||||
|
@ -195,7 +194,7 @@ impl Handshake {
|
||||||
fn next_nonce(&self) -> u64 {
|
fn next_nonce(&self) -> u64 {
|
||||||
let nonce = thread_rng().gen();
|
let nonce = thread_rng().gen();
|
||||||
|
|
||||||
let mut nonces = self.nonces.write().unwrap();
|
let mut nonces = self.nonces.write();
|
||||||
nonces.push_back(nonce);
|
nonces.push_back(nonce);
|
||||||
if nonces.len() >= NONCES_CAP {
|
if nonces.len() >= NONCES_CAP {
|
||||||
nonces.pop_front();
|
nonces.pop_front();
|
||||||
|
|
|
@ -37,7 +37,7 @@ extern crate serde;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate serde_derive;
|
extern crate serde_derive;
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate slog;
|
extern crate log;
|
||||||
extern crate chrono;
|
extern crate chrono;
|
||||||
|
|
||||||
mod conn;
|
mod conn;
|
||||||
|
|
|
@ -26,7 +26,6 @@ use core::pow::Difficulty;
|
||||||
use core::ser::{self, Readable, Reader, Writeable, Writer};
|
use core::ser::{self, Readable, Reader, Writeable, Writer};
|
||||||
|
|
||||||
use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};
|
use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
/// Current latest version of the protocol
|
/// Current latest version of the protocol
|
||||||
pub const PROTOCOL_VERSION: u32 = 1;
|
pub const PROTOCOL_VERSION: u32 = 1;
|
||||||
|
@ -207,8 +206,8 @@ pub fn read_header(conn: &mut TcpStream, msg_type: Option<Type>) -> Result<MsgHe
|
||||||
// TODO 4x the limits for now to leave ourselves space to change things
|
// TODO 4x the limits for now to leave ourselves space to change things
|
||||||
if header.msg_len > max_len * 4 {
|
if header.msg_len > max_len * 4 {
|
||||||
error!(
|
error!(
|
||||||
LOGGER,
|
"Too large read {}, had {}, wanted {}.",
|
||||||
"Too large read {}, had {}, wanted {}.", header.msg_type as u8, max_len, header.msg_len
|
header.msg_type as u8, max_len, header.msg_len
|
||||||
);
|
);
|
||||||
return Err(Error::Serialization(ser::Error::TooLargeReadErr));
|
return Err(Error::Serialization(ser::Error::TooLargeReadErr));
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,7 +14,8 @@
|
||||||
|
|
||||||
use std::fs::File;
|
use std::fs::File;
|
||||||
use std::net::{SocketAddr, TcpStream};
|
use std::net::{SocketAddr, TcpStream};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
use chrono::prelude::{DateTime, Utc};
|
use chrono::prelude::{DateTime, Utc};
|
||||||
use conn;
|
use conn;
|
||||||
|
@ -27,7 +28,6 @@ use protocol::Protocol;
|
||||||
use types::{
|
use types::{
|
||||||
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
|
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
|
||||||
};
|
};
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
const MAX_TRACK_SIZE: usize = 30;
|
const MAX_TRACK_SIZE: usize = 30;
|
||||||
|
|
||||||
|
@ -103,8 +103,8 @@ impl Peer {
|
||||||
if let Some(ref denied) = config.peers_deny {
|
if let Some(ref denied) = config.peers_deny {
|
||||||
if denied.contains(&peer) {
|
if denied.contains(&peer) {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
"checking peer allowed/denied: {:?} explicitly denied",
|
||||||
"checking peer allowed/denied: {:?} explicitly denied", peer_addr
|
peer_addr
|
||||||
);
|
);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -112,14 +112,14 @@ impl Peer {
|
||||||
if let Some(ref allowed) = config.peers_allow {
|
if let Some(ref allowed) = config.peers_allow {
|
||||||
if allowed.contains(&peer) {
|
if allowed.contains(&peer) {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
"checking peer allowed/denied: {:?} explicitly allowed",
|
||||||
"checking peer allowed/denied: {:?} explicitly allowed", peer_addr
|
peer_addr
|
||||||
);
|
);
|
||||||
return false;
|
return false;
|
||||||
} else {
|
} else {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
"checking peer allowed/denied: {:?} not explicitly allowed, denying",
|
||||||
"checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr
|
peer_addr
|
||||||
);
|
);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -137,12 +137,12 @@ impl Peer {
|
||||||
|
|
||||||
/// Whether this peer has been banned.
|
/// Whether this peer has been banned.
|
||||||
pub fn is_banned(&self) -> bool {
|
pub fn is_banned(&self) -> bool {
|
||||||
State::Banned == *self.state.read().unwrap()
|
State::Banned == *self.state.read()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Whether this peer is stuck on sync.
|
/// Whether this peer is stuck on sync.
|
||||||
pub fn is_stuck(&self) -> (bool, Difficulty) {
|
pub fn is_stuck(&self) -> (bool, Difficulty) {
|
||||||
let peer_live_info = self.info.live_info.read().unwrap();
|
let peer_live_info = self.info.live_info.read();
|
||||||
let now = Utc::now().timestamp_millis();
|
let now = Utc::now().timestamp_millis();
|
||||||
// if last updated difficulty is 2 hours ago, we're sure this peer is a stuck node.
|
// if last updated difficulty is 2 hours ago, we're sure this peer is a stuck node.
|
||||||
if now > peer_live_info.stuck_detector.timestamp_millis() + global::STUCK_PEER_KICK_TIME {
|
if now > peer_live_info.stuck_detector.timestamp_millis() + global::STUCK_PEER_KICK_TIME {
|
||||||
|
@ -155,9 +155,8 @@ impl Peer {
|
||||||
/// Number of bytes sent to the peer
|
/// Number of bytes sent to the peer
|
||||||
pub fn sent_bytes(&self) -> Option<u64> {
|
pub fn sent_bytes(&self) -> Option<u64> {
|
||||||
if let Some(ref tracker) = self.connection {
|
if let Some(ref tracker) = self.connection {
|
||||||
-			if let Ok(sent_bytes) = tracker.sent_bytes.read() {
+			let sent_bytes = tracker.sent_bytes.read();
 				return Some(*sent_bytes);
-			}
 		}
 		None
 	}

@@ -165,16 +164,15 @@ impl Peer {
 	/// Number of bytes received from the peer
 	pub fn received_bytes(&self) -> Option<u64> {
 		if let Some(ref tracker) = self.connection {
-			if let Ok(received_bytes) = tracker.received_bytes.read() {
+			let received_bytes = tracker.received_bytes.read();
 				return Some(*received_bytes);
-			}
 		}
 		None
 	}

 	/// Set this peer status to banned
 	pub fn set_banned(&self) {
-		*self.state.write().unwrap() = State::Banned;
+		*self.state.write() = State::Banned;
 	}

 	/// Send a ping to the remote peer, providing our local difficulty and

@@ -199,13 +197,10 @@ impl Peer {
 			.send(ban_reason_msg, msg::Type::BanReason)
 		{
-			Ok(_) => debug!(
-				LOGGER,
-				"Sent ban reason {:?} to {}", ban_reason, self.info.addr
-			),
+			Ok(_) => debug!("Sent ban reason {:?} to {}", ban_reason, self.info.addr),
 			Err(e) => error!(
-				LOGGER,
-				"Could not send ban reason {:?} to {}: {:?}", ban_reason, self.info.addr, e
+				"Could not send ban reason {:?} to {}: {:?}",
+				ban_reason, self.info.addr, e
 			),
 		};
 	}

@@ -214,7 +209,7 @@ impl Peer {
 	/// if the remote peer is known to already have the block.
 	pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
 		if !self.tracking_adapter.has(b.hash()) {
-			trace!(LOGGER, "Send block {} to {}", b.hash(), self.info.addr);
+			trace!("Send block {} to {}", b.hash(), self.info.addr);
 			self.connection

@@ -222,7 +217,6 @@ impl Peer {
 			Ok(true)
 		} else {
 			debug!(
-				LOGGER,
 				"Suppress block send {} to {} (already seen)",
 				b.hash(),
 				self.info.addr,

@@ -233,12 +227,7 @@ impl Peer {
 	pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
 		if !self.tracking_adapter.has(b.hash()) {
-			trace!(
-				LOGGER,
-				"Send compact block {} to {}",
-				b.hash(),
-				self.info.addr
-			);
+			trace!("Send compact block {} to {}", b.hash(), self.info.addr);
 			self.connection

@@ -246,7 +235,6 @@ impl Peer {
 			Ok(true)
 		} else {
 			debug!(
-				LOGGER,
 				"Suppress compact block send {} to {} (already seen)",
 				b.hash(),
 				self.info.addr,

@@ -257,7 +245,7 @@ impl Peer {
 	pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> {
 		if !self.tracking_adapter.has(bh.hash()) {
-			debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr);
+			debug!("Send header {} to {}", bh.hash(), self.info.addr);
 			self.connection

@@ -265,7 +253,6 @@ impl Peer {
 			Ok(true)
 		} else {
 			debug!(
-				LOGGER,
 				"Suppress header send {} to {} (already seen)",
 				bh.hash(),
 				self.info.addr,

@@ -278,7 +265,7 @@ impl Peer {
 	/// dropped if the remote peer is known to already have the transaction.
 	pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> {
 		if !self.tracking_adapter.has(tx.hash()) {
-			debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr);
+			debug!("Send tx {} to {}", tx.hash(), self.info.addr);
 			self.connection

@@ -286,7 +273,6 @@ impl Peer {
 			Ok(true)
 		} else {
 			debug!(
-				LOGGER,
 				"Not sending tx {} to {} (already seen)",
 				tx.hash(),
 				self.info.addr

@@ -299,7 +285,7 @@ impl Peer {
 	/// Note: tracking adapter is ignored for stem transactions (while under
 	/// embargo).
 	pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
-		debug!(LOGGER, "Send (stem) tx {} to {}", tx.hash(), self.info.addr);
+		debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr);
 		self.connection

@@ -317,10 +303,7 @@ impl Peer {
 	/// Sends a request for a specific block by hash
 	pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
-		debug!(
-			LOGGER,
-			"Requesting block {} from peer {}.", h, self.info.addr
-		);
+		debug!("Requesting block {} from peer {}.", h, self.info.addr);
 		self.connection

@@ -329,10 +312,7 @@ impl Peer {
 	/// Sends a request for a specific compact block by hash
 	pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
-		debug!(
-			LOGGER,
-			"Requesting compact block {} from {}", h, self.info.addr
-		);
+		debug!("Requesting compact block {} from {}", h, self.info.addr);
 		self.connection

@@ -340,7 +320,7 @@ impl Peer {
 	pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
-		debug!(LOGGER, "Asking {} for more peers.", self.info.addr);
+		debug!("Asking {} for more peers.", self.info.addr);
 		self.connection.as_ref().unwrap().send(
 			&GetPeerAddrs {
 				capabilities: capab,

@@ -351,8 +331,8 @@ impl Peer {
 	pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
 		debug!(
-			LOGGER,
-			"Asking {} for txhashset archive at {} {}.", self.info.addr, height, hash
+			"Asking {} for txhashset archive at {} {}.",
+			self.info.addr, height, hash
 		);
 		self.connection.as_ref().unwrap().send(
 			&TxHashSetRequest { hash, height },

@@ -369,7 +349,7 @@ impl Peer {
 		match self.connection.as_ref().unwrap().error_channel.try_recv() {
 			Ok(Error::Serialization(e)) => {
 				let need_stop = {
-					let mut state = self.state.write().unwrap();
+					let mut state = self.state.write();
 					if State::Banned != *state {
 						*state = State::Disconnected;
 						true

@@ -379,8 +359,8 @@ impl Peer {
 				};
 				if need_stop {
 					debug!(
-						LOGGER,
-						"Client {} corrupted, will disconnect ({:?}).", self.info.addr, e
+						"Client {} corrupted, will disconnect ({:?}).",
+						self.info.addr, e
 					);
 					self.stop();
 				}

@@ -388,7 +368,7 @@ impl Peer {
 			Ok(e) => {
 				let need_stop = {
-					let mut state = self.state.write().unwrap();
+					let mut state = self.state.write();
 					if State::Disconnected != *state {
 						*state = State::Disconnected;
 						true

@@ -397,13 +377,13 @@ impl Peer {
 				};
 				if need_stop {
-					debug!(LOGGER, "Client {} connection lost: {:?}", self.info.addr, e);
+					debug!("Client {} connection lost: {:?}", self.info.addr, e);
 					self.stop();
 				}
 				false
 			}
 			Err(_) => {
-				let state = self.state.read().unwrap();
+				let state = self.state.read();
 				State::Connected == *state
 			}
 		}

@@ -427,14 +407,14 @@ impl TrackingAdapter {
 	fn has(&self, hash: Hash) -> bool {
-		let known = self.known.read().unwrap();
+		let known = self.known.read();
 		// may become too slow, an ordered set (by timestamp for eviction) may
 		// end up being a better choice
 		known.contains(&hash)
 	}

 	fn push(&self, hash: Hash) {
-		let mut known = self.known.write().unwrap();
+		let mut known = self.known.write();
 		if known.len() > MAX_TRACK_SIZE {
 			known.truncate(MAX_TRACK_SIZE);
 		}
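The recurring change in this file — dropping `.unwrap()` after every `read()` and `write()` — makes sense if the new `util::RwLock` is a parking_lot-style lock whose guards are returned directly rather than wrapped in a poisoning `Result`. A minimal sketch of that idea, using the `parking_lot` crate and a toy struct purely as assumptions for illustration:

```rust
// Sketch: with a non-poisoning lock, callers get the guard directly.
extern crate parking_lot;

use parking_lot::RwLock;

struct ByteCounter {
    sent_bytes: RwLock<u64>,
}

impl ByteCounter {
    fn add(&self, n: u64) {
        // write() blocks until the lock is free and returns the guard directly
        *self.sent_bytes.write() += n;
    }

    fn sent(&self) -> u64 {
        // no Result, so no .unwrap() and no poisoned-lock case to handle
        *self.sent_bytes.read()
    }
}

fn main() {
    let c = ByteCounter { sent_bytes: RwLock::new(0) };
    c.add(42);
    assert_eq!(c.sent(), 42);
}
```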
140  p2p/src/peers.rs

@@ -15,15 +15,17 @@
 use std::collections::HashMap;
 use std::fs::File;
 use std::net::SocketAddr;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use rand::{thread_rng, Rng};

 use chrono::prelude::*;
+use chrono::Duration;
 use core::core;
 use core::core::hash::{Hash, Hashed};
+use core::global;
 use core::pow::Difficulty;
-use util::LOGGER;

 use peer::Peer;
 use store::{PeerData, PeerStore, State};

@@ -67,14 +69,15 @@ impl Peers {
 			flags: State::Healthy,
 			last_banned: 0,
 			ban_reason: ReasonForBan::None,
+			last_connected: Utc::now().timestamp(),
 		};
 		addr = peer.info.addr.clone();
 	}
-	debug!(LOGGER, "Saving newly connected peer {}.", addr);
+	debug!("Saving newly connected peer {}.", addr);
 	self.save_peer(&peer_data)?;

 	{
-		let mut peers = self.peers.write().unwrap();
+		let mut peers = self.peers.write();
 		peers.insert(addr, peer.clone());
 	}
 	Ok(())

@@ -88,27 +91,26 @@ impl Peers {
 		Some(peer) => {
 			// Clear the map and add new relay
 			let dandelion_relay = &self.dandelion_relay;
-			dandelion_relay.write().unwrap().clear();
+			dandelion_relay.write().clear();
 			dandelion_relay
 				.write()
-				.unwrap()
 				.insert(Utc::now().timestamp(), peer.clone());
 			debug!(
-				LOGGER,
-				"Successfully updated Dandelion relay to: {}", peer.info.addr
+				"Successfully updated Dandelion relay to: {}",
+				peer.info.addr
 			);
 		}
-		None => debug!(LOGGER, "Could not update dandelion relay"),
+		None => debug!("Could not update dandelion relay"),
 	};
 }

 // Get the dandelion relay
 pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<Peer>> {
-	self.dandelion_relay.read().unwrap().clone()
+	self.dandelion_relay.read().clone()
 }

 pub fn is_known(&self, addr: &SocketAddr) -> bool {
-	self.peers.read().unwrap().contains_key(addr)
+	self.peers.read().contains_key(addr)
 }

 /// Get vec of peers we are currently connected to.

@@ -116,7 +118,6 @@ impl Peers {
 	let mut res = self
 		.peers
 		.read()
-		.unwrap()
 		.values()
 		.filter(|p| p.is_connected())
 		.cloned()

@@ -136,14 +137,13 @@ impl Peers {
 /// Get a peer we're connected to by address.
 pub fn get_connected_peer(&self, addr: &SocketAddr) -> Option<Arc<Peer>> {
-	self.peers.read().unwrap().get(addr).map(|p| p.clone())
+	self.peers.read().get(addr).map(|p| p.clone())
 }

 /// Number of peers we're currently connected to.
 pub fn peer_count(&self) -> u32 {
 	self.peers
 		.read()
-		.unwrap()
 		.values()
 		.filter(|x| x.is_connected())
 		.count() as u32

@@ -168,37 +168,11 @@ impl Peers {
 	max_peers
 }

-// Return vec of connected peers that currently advertise more work
-// (total_difficulty) than we do and are also full archival nodes.
-pub fn more_work_archival_peers(&self) -> Vec<Arc<Peer>> {
-	let peers = self.connected_peers();
-	if peers.len() == 0 {
-		return vec![];
-	}
-
-	let total_difficulty = self.total_difficulty();
-
-	let mut max_peers = peers
-		.into_iter()
-		.filter(|x| {
-			x.info.total_difficulty() > total_difficulty
-				&& x.info.capabilities.contains(Capabilities::FULL_HIST)
-		}).collect::<Vec<_>>();
-
-	thread_rng().shuffle(&mut max_peers);
-	max_peers
-}
-
 /// Returns single random peer with more work than us.
 pub fn more_work_peer(&self) -> Option<Arc<Peer>> {
 	self.more_work_peers().pop()
 }

-/// Returns single random archival peer with more work than us.
-pub fn more_work_archival_peer(&self) -> Option<Arc<Peer>> {
-	self.more_work_archival_peers().pop()
-}
-
 /// Return vec of connected peers that currently have the most worked
 /// branch, showing the highest total difficulty.
 pub fn most_work_peers(&self) -> Vec<Arc<Peer>> {

@@ -240,11 +214,11 @@ impl Peers {
 /// Ban a peer, disconnecting it if we're currently connected
 pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
 	if let Err(e) = self.update_state(*peer_addr, State::Banned) {
-		error!(LOGGER, "Couldn't ban {}: {:?}", peer_addr, e);
+		error!("Couldn't ban {}: {:?}", peer_addr, e);
 	}

 	if let Some(peer) = self.get_connected_peer(peer_addr) {
-		debug!(LOGGER, "Banning peer {}", peer_addr);
+		debug!("Banning peer {}", peer_addr);
 		// setting peer status will get it removed at the next clean_peer
 		peer.send_ban_reason(ban_reason);
 		peer.set_banned();

@@ -258,13 +232,13 @@ impl Peers {
 		Ok(_) => {
 			if self.is_banned(*peer_addr) {
 				if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
-					error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e);
+					error!("Couldn't unban {}: {:?}", peer_addr, e);
 				}
 			} else {
-				error!(LOGGER, "Couldn't unban {}: peer is not banned", peer_addr);
+				error!("Couldn't unban {}: peer is not banned", peer_addr);
 			}
 		}
-		Err(e) => error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e),
+		Err(e) => error!("Couldn't unban {}: {:?}", peer_addr, e),
 	};
 }

@@ -280,7 +254,7 @@ impl Peers {
 		match inner(&p) {
 			Ok(true) => count += 1,
 			Ok(false) => (),
-			Err(e) => debug!(LOGGER, "Error sending {} to peer: {:?}", obj_name, e),
+			Err(e) => debug!("Error sending {} to peer: {:?}", obj_name, e),
 		}

 		if count >= num_peers {

@@ -299,7 +273,6 @@ impl Peers {
 	let count = self.broadcast("compact block", num_peers, |p| p.send_compact_block(b));
 	debug!(
-		LOGGER,
 		"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
 		b.hash(),
 		b.header.pow.total_difficulty,

@@ -317,7 +290,6 @@ impl Peers {
 	let count = self.broadcast("header", num_peers, |p| p.send_header(bh));
 	debug!(
-		LOGGER,
 		"broadcast_header: {}, {} at {}, to {} peers, done.",
 		bh.hash(),
 		bh.pow.total_difficulty,

@@ -330,7 +302,7 @@ impl Peers {
 	let dandelion_relay = self.get_dandelion_relay();
 	if dandelion_relay.is_empty() {
-		debug!(LOGGER, "No dandelion relay, updating.");
+		debug!("No dandelion relay, updating.");
 		self.update_dandelion_relay();
 	}
 	// If still return an error, let the caller handle this as they see fit.

@@ -341,10 +313,7 @@ impl Peers {
 	for relay in dandelion_relay.values() {
 		if relay.is_connected() {
 			if let Err(e) = relay.send_stem_transaction(tx) {
-				debug!(
-					LOGGER,
-					"Error sending stem transaction to peer relay: {:?}", e
-				);
+				debug!("Error sending stem transaction to peer relay: {:?}", e);
 			}
 		}
 	}

@@ -360,7 +329,6 @@ impl Peers {
 	let count = self.broadcast("transaction", num_peers, |p| p.send_transaction(tx));
 	trace!(
-		LOGGER,
 		"broadcast_transaction: {}, to {} peers, done.",
 		tx.hash(),
 		count,

@@ -370,7 +338,7 @@ impl Peers {
 /// Ping all our connected peers. Always automatically expects a pong back
 /// or disconnects. This acts as a liveness test.
 pub fn check_all(&self, total_difficulty: Difficulty, height: u64) {
-	let peers_map = self.peers.read().unwrap();
+	let peers_map = self.peers.read();
 	for p in peers_map.values() {
 		if p.is_connected() {
 			let _ = p.send_ping(total_difficulty, height);

@@ -417,17 +385,17 @@ impl Peers {
 	// build a list of peers to be cleaned up
-	for peer in self.peers.read().unwrap().values() {
+	for peer in self.peers.read().values() {
 		if peer.is_banned() {
-			debug!(LOGGER, "clean_peers {:?}, peer banned", peer.info.addr);
+			debug!("clean_peers {:?}, peer banned", peer.info.addr);
 			rm.push(peer.clone());
 		} else if !peer.is_connected() {
-			debug!(LOGGER, "clean_peers {:?}, not connected", peer.info.addr);
+			debug!("clean_peers {:?}, not connected", peer.info.addr);
 			rm.push(peer.clone());
 		} else {
 			let (stuck, diff) = peer.is_stuck();
 			if stuck && diff < self.adapter.total_difficulty() {
-				debug!(LOGGER, "clean_peers {:?}, stuck peer", peer.info.addr);
+				debug!("clean_peers {:?}, stuck peer", peer.info.addr);
 				peer.stop();
 				let _ = self.update_state(peer.info.addr, State::Defunct);
 				rm.push(peer.clone());

@@ -437,7 +405,7 @@ impl Peers {
 	// now clean up peer map based on the list to remove
 	{
-		let mut peers = self.peers.write().unwrap();
+		let mut peers = self.peers.write();
 		for p in rm {
 			peers.remove(&p.info.addr);
 		}

@@ -463,13 +431,13 @@ impl Peers {
 	// now remove them taking a short-lived write lock each time
 	// maybe better to take write lock once and remove them all?
 	for x in addrs.iter().take(excess_count) {
-		let mut peers = self.peers.write().unwrap();
+		let mut peers = self.peers.write();
 		peers.remove(x);
 	}
 }

 pub fn stop(&self) {
-	let mut peers = self.peers.write().unwrap();
+	let mut peers = self.peers.write();
 	for (_, peer) in peers.drain() {
 		peer.stop();
 	}

@@ -478,6 +446,31 @@ impl Peers {
 pub fn enough_peers(&self) -> bool {
 	self.connected_peers().len() >= self.config.peer_min_preferred_count() as usize
 }
+
+/// Removes those peers that seem to have expired
+pub fn remove_expired(&self) {
+	let now = Utc::now();
+
+	// Delete defunct peers from storage
+	let _ = self.store.delete_peers(|peer| {
+		let diff = now - Utc.timestamp(peer.last_connected, 0);
+
+		let should_remove = peer.flags == State::Defunct
+			&& diff > Duration::seconds(global::PEER_EXPIRATION_REMOVE_TIME);
+
+		if should_remove {
+			debug!(
+				"removing peer {:?}: last connected {} days {} hours {} minutes ago.",
+				peer.addr,
+				diff.num_days(),
+				diff.num_hours(),
+				diff.num_minutes()
+			);
+		}
+
+		should_remove
+	});
+}
 }

 impl ChainAdapter for Peers {

@@ -499,8 +492,8 @@ impl ChainAdapter for Peers {
 		// if the peer sent us a block that's intrinsically bad
 		// they are either mistaken or malevolent, both of which require a ban
 		debug!(
-			LOGGER,
-			"Received a bad block {} from {}, the peer will be banned", hash, peer_addr
+			"Received a bad block {} from {}, the peer will be banned",
+			hash, peer_addr
 		);
 		self.ban_peer(&peer_addr, ReasonForBan::BadBlock);
 		false

@@ -515,10 +508,8 @@ impl ChainAdapter for Peers {
 		// if the peer sent us a block that's intrinsically bad
 		// they are either mistaken or malevolent, both of which require a ban
 		debug!(
-			LOGGER,
 			"Received a bad compact block {} from {}, the peer will be banned",
-			hash,
-			&peer_addr
+			hash, &peer_addr
 		);
 		self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock);
 		false

@@ -568,8 +559,8 @@ impl ChainAdapter for Peers {
 fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
 	if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) {
 		debug!(
-			LOGGER,
-			"Received a bad txhashset data from {}, the peer will be banned", &peer_addr
+			"Received a bad txhashset data from {}, the peer will be banned",
+			&peer_addr
 		);
 		self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet);
 		false

@@ -594,17 +585,13 @@ impl NetAdapter for Peers {
 /// addresses.
 fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
 	let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
-	trace!(
-		LOGGER,
-		"find_peer_addrs: {} healthy peers picked",
-		peers.len()
-	);
+	trace!("find_peer_addrs: {} healthy peers picked", peers.len());
 	map_vec!(peers, |p| p.addr)
 }

 /// A list of peers has been received from one of our peers.
 fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
-	trace!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
+	trace!("Received {} peer addrs, saving.", peer_addrs.len());
 	for pa in peer_addrs {
 		if let Ok(e) = self.exists_peer(pa) {
 			if e {

@@ -618,9 +605,10 @@ impl NetAdapter for Peers {
 			flags: State::Healthy,
 			last_banned: 0,
 			ban_reason: ReasonForBan::None,
+			last_connected: Utc::now().timestamp(),
 		};
 		if let Err(e) = self.save_peer(&peer) {
-			error!(LOGGER, "Could not save received peer address: {:?}", e);
+			error!("Could not save received peer address: {:?}", e);
 		}
 	}
 }
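The new `remove_expired` pairs a chrono time delta with the predicate-based `delete_peers` added to the store. A minimal standalone sketch of the same filtering logic, with a simplified `PeerRecord` and a hard-coded window standing in for `global::PEER_EXPIRATION_REMOVE_TIME` (both are assumptions made for illustration, not grin's actual types or values):

```rust
extern crate chrono;

use chrono::{Duration, TimeZone, Utc};

#[derive(Debug)]
struct PeerRecord {
    addr: String,
    defunct: bool,
    last_connected: i64, // unix timestamp
}

/// Returns true when a stored peer record should be dropped.
fn should_remove(peer: &PeerRecord, expiration_secs: i64) -> bool {
    // how long ago did we last connect to this peer?
    let diff = Utc::now() - Utc.timestamp(peer.last_connected, 0);
    peer.defunct && diff > Duration::seconds(expiration_secs)
}

fn main() {
    let stale = PeerRecord {
        addr: "10.0.0.1:13414".to_string(),
        defunct: true,
        last_connected: Utc::now().timestamp() - 8 * 24 * 3600,
    };
    // one week, standing in for the real constant
    assert!(should_remove(&stale, 7 * 24 * 3600));
}
```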
p2p/src/protocol.rs

@@ -30,7 +30,6 @@ use msg::{
 	TxHashSetArchive, TxHashSetRequest, Type,
 };
 use types::{Error, NetAdapter};
-use util::LOGGER;

 pub struct Protocol {
 	adapter: Arc<NetAdapter>,

@@ -52,10 +51,8 @@ impl MessageHandler for Protocol {
 		// banned peers up correctly?
 		if adapter.is_banned(self.addr.clone()) {
 			debug!(
-				LOGGER,
 				"handler: consume: peer {:?} banned, received: {:?}, dropping.",
-				self.addr,
-				msg.header.msg_type,
+				self.addr, msg.header.msg_type,
 			);
 			return Ok(None);
 		}

@@ -82,14 +79,14 @@ impl MessageHandler for Protocol {
 		Type::BanReason => {
 			let ban_reason: BanReason = msg.body()?;
-			error!(LOGGER, "handle_payload: BanReason {:?}", ban_reason);
+			error!("handle_payload: BanReason {:?}", ban_reason);
 			Ok(None)
 		}

 		Type::Transaction => {
 			debug!(
-				LOGGER,
-				"handle_payload: received tx: msg_len: {}", msg.header.msg_len
+				"handle_payload: received tx: msg_len: {}",
+				msg.header.msg_len
 			);
 			let tx: core::Transaction = msg.body()?;
 			adapter.transaction_received(tx, false);

@@ -98,8 +95,8 @@ impl MessageHandler for Protocol {
 		Type::StemTransaction => {
 			debug!(
-				LOGGER,
-				"handle_payload: received stem tx: msg_len: {}", msg.header.msg_len
+				"handle_payload: received stem tx: msg_len: {}",
+				msg.header.msg_len
 			);
 			let tx: core::Transaction = msg.body()?;
 			adapter.transaction_received(tx, true);

@@ -109,7 +106,6 @@ impl MessageHandler for Protocol {
 		Type::GetBlock => {
 			let h: Hash = msg.body()?;
 			trace!(
-				LOGGER,
 				"handle_payload: Getblock: {}, msg_len: {}",
 				h,
 				msg.header.msg_len,

@@ -124,8 +120,8 @@ impl MessageHandler for Protocol {
 		Type::Block => {
 			debug!(
-				LOGGER,
-				"handle_payload: received block: msg_len: {}", msg.header.msg_len
+				"handle_payload: received block: msg_len: {}",
+				msg.header.msg_len
 			);
 			let b: core::Block = msg.body()?;

@@ -145,8 +141,8 @@ impl MessageHandler for Protocol {
 		Type::CompactBlock => {
 			debug!(
-				LOGGER,
-				"handle_payload: received compact block: msg_len: {}", msg.header.msg_len
+				"handle_payload: received compact block: msg_len: {}",
+				msg.header.msg_len
 			);
 			let b: core::CompactBlock = msg.body()?;

@@ -218,8 +214,8 @@ impl MessageHandler for Protocol {
 		Type::TxHashSetRequest => {
 			let sm_req: TxHashSetRequest = msg.body()?;
 			debug!(
-				LOGGER,
-				"handle_payload: txhashset req for {} at {}", sm_req.hash, sm_req.height
+				"handle_payload: txhashset req for {} at {}",
+				sm_req.hash, sm_req.height
 			);

 			let txhashset = self.adapter.txhashset_read(sm_req.hash);

@@ -244,15 +240,11 @@ impl MessageHandler for Protocol {
 		Type::TxHashSetArchive => {
 			let sm_arch: TxHashSetArchive = msg.body()?;
 			debug!(
-				LOGGER,
 				"handle_payload: txhashset archive for {} at {}. size={}",
-				sm_arch.hash,
-				sm_arch.height,
-				sm_arch.bytes,
+				sm_arch.hash, sm_arch.height, sm_arch.bytes,
 			);
 			if !self.adapter.txhashset_receive_ready() {
 				error!(
-					LOGGER,
 					"handle_payload: txhashset archive received but SyncStatus not on TxHashsetDownload",
 				);
 				return Err(Error::BadMessage);

@@ -284,14 +276,13 @@ impl MessageHandler for Protocol {
 			if let Err(e) = save_txhashset_to_file(tmp.clone()) {
 				error!(
-					LOGGER,
-					"handle_payload: txhashset archive save to file fail. err={:?}", e
+					"handle_payload: txhashset archive save to file fail. err={:?}",
+					e
 				);
 				return Err(e);
 			}

 			trace!(
-				LOGGER,
 				"handle_payload: txhashset archive save to file {:?} success",
 				tmp,
 			);

@@ -302,18 +293,15 @@ impl MessageHandler for Protocol {
 				.txhashset_write(sm_arch.hash, tmp_zip, self.addr);

 			debug!(
-				LOGGER,
 				"handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}",
-				sm_arch.hash,
-				sm_arch.height,
-				res
+				sm_arch.hash, sm_arch.height, res
 			);

 			Ok(None)
 		}

 		_ => {
-			debug!(LOGGER, "unknown message type {:?}", msg.header.msg_type);
+			debug!("unknown message type {:?}", msg.header.msg_type);
 			Ok(None)
 		}
 	}

@@ -341,12 +329,8 @@ fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error>
 	let max_size = min_size + 6;
 	if average_header_size < min_size as u64 || average_header_size > max_size as u64 {
 		debug!(
-			LOGGER,
 			"headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}",
-			total_headers,
-			average_header_size,
-			min_size,
-			max_size,
+			total_headers, average_header_size, min_size, max_size,
 		);
 		return Err(Error::Connection(io::Error::new(
 			io::ErrorKind::InvalidData,
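Almost every hunk in this file makes the same mechanical change: the leading `LOGGER` argument disappears from the logging macros. That is consistent with moving from `slog`-style macros, which take an explicit logger, to the `log` crate facade, which routes through a globally installed logger. A rough sketch of what call sites look like under the `log` facade; `env_logger` is used here only as an illustrative backend, grin's real logger setup lives elsewhere and is not shown:

```rust
#[macro_use]
extern crate log;
extern crate env_logger;

fn handle_block(hash: &str, addr: &str) {
    // with the `log` facade there is no logger handle to thread through
    debug!("Send block {} to {}", hash, addr);
    trace!("connect_peer: already connected {}", addr);
}

fn main() {
    // any `log`-compatible backend is installed once at startup
    env_logger::init();
    handle_block("000abc", "127.0.0.1:13414");
}
```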
p2p/src/serv.rs

@@ -30,7 +30,6 @@ use peer::Peer;
 use peers::Peers;
 use store::PeerStore;
 use types::{Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, TxHashSetRead};
-use util::LOGGER;

 /// P2P server implementation, handling bootstrapping to find and connect to
 /// peers, receiving connections from other peers and keep track of all of them.

@@ -50,35 +49,12 @@ impl Server {
 	/// Creates a new idle p2p server with no peers
 	pub fn new(
 		db_env: Arc<lmdb::Environment>,
-		mut capab: Capabilities,
+		capab: Capabilities,
 		config: P2PConfig,
 		adapter: Arc<ChainAdapter>,
 		genesis: Hash,
 		stop: Arc<AtomicBool>,
-		_archive_mode: bool,
-		block_1_hash: Option<Hash>,
 	) -> Result<Server, Error> {
-		// In the case of an archive node, check that we do have the first block.
-		// In case of first sync we do not perform this check.
-		if capab.contains(Capabilities::FULL_HIST) && adapter.total_height() > 0 {
-			// Check that we have block 1
-			match block_1_hash {
-				Some(hash) => match adapter.get_block(hash) {
-					Some(_) => debug!(LOGGER, "Full block 1 found, archive capabilities confirmed"),
-					None => {
-						debug!(
-							LOGGER,
-							"Full block 1 not found, archive capabilities disabled"
-						);
-						capab.remove(Capabilities::FULL_HIST);
-					}
-				},
-				None => {
-					debug!(LOGGER, "Block 1 not found, archive capabilities disabled");
-					capab.remove(Capabilities::FULL_HIST);
-				}
-			}
-		}
 		Ok(Server {
 			config: config.clone(),
 			capabilities: capab,

@@ -102,12 +78,7 @@ impl Server {
 		Ok((stream, peer_addr)) => {
 			if !self.check_banned(&stream) {
 				if let Err(e) = self.handle_new_peer(stream) {
-					warn!(
-						LOGGER,
-						"Error accepting peer {}: {:?}",
-						peer_addr.to_string(),
-						e
-					);
+					warn!("Error accepting peer {}: {:?}", peer_addr.to_string(), e);
 				}
 			}
 		}

@@ -115,7 +86,7 @@ impl Server {
 			// nothing to do, will retry in next iteration
 		}
 		Err(e) => {
-			warn!(LOGGER, "Couldn't establish new client connection: {:?}", e);
+			warn!("Couldn't establish new client connection: {:?}", e);
 		}
 	}
 	if self.stop.load(Ordering::Relaxed) {

@@ -130,10 +101,7 @@ impl Server {
 	/// we're already connected to the provided address.
 	pub fn connect(&self, addr: &SocketAddr) -> Result<Arc<Peer>, Error> {
 		if Peer::is_denied(&self.config, &addr) {
-			debug!(
-				LOGGER,
-				"connect_peer: peer {} denied, not connecting.", addr
-			);
+			debug!("connect_peer: peer {} denied, not connecting.", addr);
 			return Err(Error::ConnectionClose);
 		}

@@ -148,12 +116,11 @@ impl Server {
 		if let Some(p) = self.peers.get_connected_peer(addr) {
 			// if we're already connected to the addr, just return the peer
-			trace!(LOGGER, "connect_peer: already connected {}", addr);
+			trace!("connect_peer: already connected {}", addr);
 			return Ok(p);
 		}

 		trace!(
-			LOGGER,
 			"connect_peer: on {}:{}. connecting to {}",
 			self.config.host,
 			self.config.port,

@@ -179,12 +146,8 @@ impl Server {
 		}
 		Err(e) => {
 			debug!(
-				LOGGER,
 				"connect_peer: on {}:{}. Could not connect to {}: {:?}",
-				self.config.host,
-				self.config.port,
-				addr,
-				e
+				self.config.host, self.config.port, addr, e
 			);
 			Err(Error::Connection(e))
 		}

@@ -211,9 +174,9 @@ impl Server {
 	// peer has been banned, go away!
 	if let Ok(peer_addr) = stream.peer_addr() {
 		if self.peers.is_banned(peer_addr) {
-			debug!(LOGGER, "Peer {} banned, refusing connection.", peer_addr);
+			debug!("Peer {} banned, refusing connection.", peer_addr);
 			if let Err(e) = stream.shutdown(Shutdown::Both) {
-				debug!(LOGGER, "Error shutting down conn: {:?}", e);
+				debug!("Error shutting down conn: {:?}", e);
 			}
 			return true;
 		}
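The `check_banned` hunk above keeps the same shape after the logging change: look up the remote address and, if it is banned, close the socket in both directions before the handshake even starts. A standalone sketch of that refusal step using plain `std::net` types; the `is_banned` closure is a stand-in for the real `Peers::is_banned` lookup:

```rust
use std::net::{Shutdown, SocketAddr, TcpStream};

/// Returns true when the incoming connection was refused and shut down.
fn refuse_if_banned<F>(stream: &TcpStream, is_banned: F) -> bool
where
    F: Fn(SocketAddr) -> bool,
{
    if let Ok(peer_addr) = stream.peer_addr() {
        if is_banned(peer_addr) {
            // close both read and write halves; any error is only logged upstream
            let _ = stream.shutdown(Shutdown::Both);
            return true;
        }
    }
    false
}

fn main() {
    // no live socket in this sketch; in the server this runs on each accepted stream
}
```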
p2p/src/store.rs

@@ -26,7 +26,6 @@ use core::ser::{self, Readable, Reader, Writeable, Writer};
 use grin_store::{self, option_to_not_found, to_key, Error};
 use msg::SockAddr;
 use types::{Capabilities, ReasonForBan};
-use util::LOGGER;

 const STORE_SUBPATH: &'static str = "peers";

@@ -58,6 +57,8 @@ pub struct PeerData {
 	pub last_banned: i64,
 	/// The reason for the ban
 	pub ban_reason: ReasonForBan,
+	/// Time when we last connected to this peer.
+	pub last_connected: i64,
 }

 impl Writeable for PeerData {

@@ -69,7 +70,8 @@ impl Writeable for PeerData {
 		[write_bytes, &self.user_agent],
 		[write_u8, self.flags as u8],
 		[write_i64, self.last_banned],
-		[write_i32, self.ban_reason as i32]
+		[write_i32, self.ban_reason as i32],
+		[write_i64, self.last_connected]
 	);
 	Ok(())
 }

@@ -80,18 +82,27 @@ impl Readable for PeerData {
 	let addr = SockAddr::read(reader)?;
 	let (capab, ua, fl, lb, br) =
 		ser_multiread!(reader, read_u32, read_vec, read_u8, read_i64, read_i32);
+	let lc = reader.read_i64();
+	// this only works because each PeerData is read in its own vector and this
+	// is the last data element
+	let last_connected = if let Err(_) = lc {
+		Utc::now().timestamp()
+	} else {
+		lc.unwrap()
+	};
 	let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
 	let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?;
-	let last_banned = lb;
 	let ban_reason = ReasonForBan::from_i32(br).ok_or(ser::Error::CorruptedData)?;

 	match State::from_u8(fl) {
 		Some(flags) => Ok(PeerData {
 			addr: addr.0,
-			capabilities: capabilities,
-			user_agent: user_agent,
+			capabilities,
+			user_agent,
 			flags: flags,
-			last_banned: last_banned,
-			ban_reason: ban_reason,
+			last_banned: lb,
+			ban_reason,
+			last_connected,
 		}),
 		None => Err(ser::Error::CorruptedData),
 	}

@@ -111,7 +122,7 @@ impl PeerStore {
 	pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
-		debug!(LOGGER, "save_peer: {:?} marked {:?}", p.addr, p.flags);
+		debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);

 		let batch = self.db.batch()?;
 		batch.put_ser(&peer_key(p.addr)[..], p)?;

@@ -172,6 +183,33 @@ impl PeerStore {
 		batch.put_ser(&peer_key(peer.addr)[..], &peer)?;
 		batch.commit()
 	}
+
+	/// Deletes peers from the storage that satisfy some condition `predicate`
+	pub fn delete_peers<F>(&self, predicate: F) -> Result<(), Error>
+	where
+		F: Fn(&PeerData) -> bool,
+	{
+		let mut to_remove = vec![];
+
+		for x in self.all_peers() {
+			if predicate(&x) {
+				to_remove.push(x)
+			}
+		}
+
+		// Delete peers in single batch
+		if !to_remove.is_empty() {
+			let batch = self.db.batch()?;
+
+			for peer in to_remove {
+				batch.delete(&peer_key(peer.addr)[..])?;
+			}
+
+			batch.commit()?;
+		}
+
+		Ok(())
+	}
 }

 fn peer_key(peer_addr: SocketAddr) -> Vec<u8> {
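The `Readable` change above appends `last_connected` as the final field and, when an older record ends before it, falls back to "now" instead of failing. As the in-diff comment notes, this trick only works for the last field of a record. A simplified sketch of that fallback with a plain byte cursor; the reader and error types here are placeholders, not grin's `ser` traits:

```rust
use std::io::{Cursor, Read};

/// Read a little-endian i64, or report that the record ended early.
fn read_i64(cur: &mut Cursor<&[u8]>) -> Result<i64, ()> {
    let mut buf = [0u8; 8];
    cur.read_exact(&mut buf).map_err(|_| ())?;
    Ok(i64::from_le_bytes(buf))
}

/// Older records simply stop before this field; default instead of erroring.
fn read_last_connected(cur: &mut Cursor<&[u8]>, now: i64) -> i64 {
    read_i64(cur).unwrap_or(now)
}

fn main() {
    let old_record: &[u8] = &[];                     // field missing entirely
    let new_record = 1_540_000_000i64.to_le_bytes(); // field present

    assert_eq!(read_last_connected(&mut Cursor::new(old_record), 42), 42);
    assert_eq!(
        read_last_connected(&mut Cursor::new(&new_record[..]), 42),
        1_540_000_000
    );
}
```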
p2p/src/types.rs

@@ -17,7 +17,8 @@ use std::fs::File;
 use std::io;
 use std::net::{IpAddr, SocketAddr};
 use std::sync::mpsc;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::*;

@@ -131,7 +132,7 @@ impl Default for P2PConfig {
 	P2PConfig {
 		host: ipaddr,
 		port: 13414,
-		capabilities: Capabilities::FAST_SYNC_NODE,
+		capabilities: Capabilities::FULL_NODE,
 		seeding_type: Seeding::default(),
 		seeds: None,
 		peers_allow: None,

@@ -192,26 +193,27 @@ impl Default for Seeding {
 bitflags! {
 	/// Options for what type of interaction a peer supports
 	#[derive(Serialize, Deserialize)]
 	pub struct Capabilities: u32 {
 		/// We don't know (yet) what the peer can do.
 		const UNKNOWN = 0b00000000;
-		/// Full archival node, has the whole history without any pruning.
-		const FULL_HIST = 0b00000001;
+		/// Can provide full history of headers back to genesis
+		/// (for at least one arbitrary fork).
+		const HEADER_HIST = 0b00000001;
 		/// Can provide block headers and the TxHashSet for some recent-enough
 		/// height.
 		const TXHASHSET_HIST = 0b00000010;
 		/// Can provide a list of healthy peers
 		const PEER_LIST = 0b00000100;

-		const FAST_SYNC_NODE = Capabilities::TXHASHSET_HIST.bits
-			| Capabilities::PEER_LIST.bits;
-
-		const FULL_NODE = Capabilities::FULL_HIST.bits
+		/// All nodes right now are "full nodes".
+		/// Some nodes internally may maintain longer block histories (archival_mode)
+		/// but we do not advertise this to other nodes.
+		const FULL_NODE = Capabilities::HEADER_HIST.bits
 			| Capabilities::TXHASHSET_HIST.bits
 			| Capabilities::PEER_LIST.bits;
 	}
 }

 /// Types of connection

@@ -258,23 +260,23 @@ pub struct PeerInfo {
 impl PeerInfo {
 	/// The current total_difficulty of the peer.
 	pub fn total_difficulty(&self) -> Difficulty {
-		self.live_info.read().unwrap().total_difficulty
+		self.live_info.read().total_difficulty
 	}

 	/// The current height of the peer.
 	pub fn height(&self) -> u64 {
-		self.live_info.read().unwrap().height
+		self.live_info.read().height
 	}

 	/// Time of last_seen for this peer (via ping/pong).
 	pub fn last_seen(&self) -> DateTime<Utc> {
-		self.live_info.read().unwrap().last_seen
+		self.live_info.read().last_seen
 	}

 	/// Update the total_difficulty, height and last_seen of the peer.
 	/// Takes a write lock on the live_info.
 	pub fn update(&self, height: u64, total_difficulty: Difficulty) {
-		let mut live_info = self.live_info.write().unwrap();
+		let mut live_info = self.live_info.write();
 		if total_difficulty != live_info.total_difficulty {
 			live_info.stuck_detector = Utc::now();
 		}
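The capability flags above are plain bit positions, and `FULL_NODE` is simply the union of the three individual bits, so a node advertising `FULL_NODE` automatically satisfies a check for any one of them. A small sketch of the same composition with the `bitflags` crate; the constant names mirror the diff, while the crate version and the surrounding program are assumptions made to keep the example compilable:

```rust
#[macro_use]
extern crate bitflags;

bitflags! {
    /// What a remote peer advertises it can serve us.
    pub struct Capabilities: u32 {
        const UNKNOWN        = 0b0000_0000;
        const HEADER_HIST    = 0b0000_0001;
        const TXHASHSET_HIST = 0b0000_0010;
        const PEER_LIST      = 0b0000_0100;

        // composite flag: a full node advertises all three of the above
        const FULL_NODE = Capabilities::HEADER_HIST.bits
            | Capabilities::TXHASHSET_HIST.bits
            | Capabilities::PEER_LIST.bits;
    }
}

fn main() {
    let capab = Capabilities::FULL_NODE;
    assert!(capab.contains(Capabilities::PEER_LIST));
    assert!(!Capabilities::HEADER_HIST.contains(Capabilities::TXHASHSET_HIST));
}
```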
p2p/tests/peer_handshake.rs

@@ -58,8 +58,6 @@ fn peer_handshake() {
 			net_adapter.clone(),
 			Hash::from_vec(&vec![]),
 			Arc::new(AtomicBool::new(false)),
-			false,
-			None,
 		).unwrap(),
 	);
pool/Cargo.toml

@@ -10,7 +10,7 @@ blake2-rfc = "0.2"
 rand = "0.5"
 serde = "1"
 serde_derive = "1"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
 chrono = "0.4.4"

 grin_core = { path = "../core" }
pool/src/lib.rs

@@ -30,7 +30,7 @@ extern crate serde;
 #[macro_use] // Needed for Serialize/Deserialize. The compiler complaining here is a bug.
 extern crate serde_derive;
 #[macro_use]
-extern crate slog;
+extern crate log;
 extern crate chrono;

 mod pool;
pool/src/pool.rs
154
pool/src/pool.rs
|
@ -16,7 +16,8 @@
|
||||||
//! Used for both the txpool and stempool layers in the pool.
|
//! Used for both the txpool and stempool layers in the pool.
|
||||||
|
|
||||||
use std::collections::{HashMap, HashSet};
|
use std::collections::{HashMap, HashSet};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::Arc;
|
||||||
|
use util::RwLock;
|
||||||
|
|
||||||
use core::consensus;
|
use core::consensus;
|
||||||
use core::core::hash::{Hash, Hashed};
|
use core::core::hash::{Hash, Hashed};
|
||||||
|
@ -25,7 +26,6 @@ use core::core::transaction;
|
||||||
use core::core::verifier_cache::VerifierCache;
|
use core::core::verifier_cache::VerifierCache;
|
||||||
use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
|
use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
|
||||||
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
|
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
// max weight leaving minimum space for a coinbase
|
// max weight leaving minimum space for a coinbase
|
||||||
const MAX_MINEABLE_WEIGHT: usize =
|
const MAX_MINEABLE_WEIGHT: usize =
|
||||||
|
@ -48,8 +48,8 @@ impl Pool {
|
||||||
) -> Pool {
|
) -> Pool {
|
||||||
Pool {
|
Pool {
|
||||||
entries: vec![],
|
entries: vec![],
|
||||||
blockchain: chain.clone(),
|
blockchain: chain,
|
||||||
verifier_cache: verifier_cache.clone(),
|
verifier_cache,
|
||||||
name,
|
name,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -74,34 +74,34 @@ impl Pool {
|
||||||
&self,
|
&self,
|
||||||
hash: Hash,
|
hash: Hash,
|
||||||
nonce: u64,
|
nonce: u64,
|
||||||
kern_ids: &Vec<ShortId>,
|
kern_ids: &[ShortId],
|
||||||
) -> (Vec<Transaction>, Vec<ShortId>) {
|
) -> (Vec<Transaction>, Vec<ShortId>) {
|
||||||
let mut rehashed = HashMap::new();
|
let mut txs = vec![];
|
||||||
|
let mut found_ids = vec![];
|
||||||
|
|
||||||
// Rehash all entries in the pool using short_ids based on provided hash and nonce.
|
// Rehash all entries in the pool using short_ids based on provided hash and nonce.
|
||||||
for x in &self.entries {
|
'outer: for x in &self.entries {
|
||||||
for k in x.tx.kernels() {
|
for k in x.tx.kernels() {
|
||||||
// rehash each kernel to calculate the block specific short_id
|
// rehash each kernel to calculate the block specific short_id
|
||||||
let short_id = k.short_id(&hash, nonce);
|
let short_id = k.short_id(&hash, nonce);
|
||||||
rehashed.insert(short_id, x.tx.hash());
|
if kern_ids.contains(&short_id) {
|
||||||
|
txs.push(x.tx.clone());
|
||||||
|
found_ids.push(short_id);
|
||||||
|
}
|
||||||
|
if found_ids.len() == kern_ids.len() {
|
||||||
|
break 'outer;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
txs.dedup();
|
||||||
// Retrive the txs from the pool by the set of unique hashes.
|
(
|
||||||
let hashes: HashSet<_> = rehashed.values().collect();
|
txs,
|
||||||
let txs = hashes.into_iter().filter_map(|x| self.get_tx(*x)).collect();
|
kern_ids
|
||||||
|
.into_iter()
|
||||||
// Calculate the missing ids based on the ids passed in
|
.filter(|id| !found_ids.contains(id))
|
||||||
// and the ids that successfully matched txs.
|
.cloned()
|
||||||
let matched_ids: HashSet<_> = rehashed.keys().collect();
|
.collect(),
|
||||||
let all_ids: HashSet<_> = kern_ids.iter().collect();
|
)
|
||||||
let missing_ids = all_ids
|
|
||||||
.difference(&matched_ids)
|
|
||||||
.map(|x| *x)
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
(txs, missing_ids)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Take pool transactions, filtering and ordering them in a way that's
|
/// Take pool transactions, filtering and ordering them in a way that's
|
||||||
|
@@ -133,8 +133,7 @@ impl Pool {
         // Iteratively apply the txs to the current chain state,
         // rejecting any that do not result in a valid state.
         // Return a vec of all the valid txs.
-        let block_sums = self.blockchain.get_block_sums(&header.hash())?;
-        let txs = self.validate_raw_txs(flat_txs, None, &header, &block_sums)?;
+        let txs = self.validate_raw_txs(flat_txs, None, &header)?;
         Ok(txs)
     }

@@ -159,8 +158,7 @@ impl Pool {
         extra_tx: Option<Transaction>,
         header: &BlockHeader,
     ) -> Result<Vec<Transaction>, PoolError> {
-        let block_sums = self.blockchain.get_block_sums(&header.hash())?;
-        let valid_txs = self.validate_raw_txs(txs, extra_tx, header, &block_sums)?;
+        let valid_txs = self.validate_raw_txs(txs, extra_tx, header)?;
         Ok(valid_txs)
     }

@@ -173,10 +171,10 @@ impl Pool {
     }

     // Transition the specified pool entries to the new state.
-    pub fn transition_to_state(&mut self, txs: &Vec<Transaction>, state: PoolEntryState) {
-        for x in self.entries.iter_mut() {
+    pub fn transition_to_state(&mut self, txs: &[Transaction], state: PoolEntryState) {
+        for x in &mut self.entries {
             if txs.contains(&x.tx) {
-                x.state = state.clone();
+                x.state = state;
             }
         }
     }
@@ -190,18 +188,6 @@ impl Pool {
         extra_txs: Vec<Transaction>,
         header: &BlockHeader,
     ) -> Result<(), PoolError> {
-        debug!(
-            LOGGER,
-            "pool [{}]: add_to_pool: {}, {:?}, inputs: {}, outputs: {}, kernels: {} (at block {})",
-            self.name,
-            entry.tx.hash(),
-            entry.src,
-            entry.tx.inputs().len(),
-            entry.tx.outputs().len(),
-            entry.tx.kernels().len(),
-            header.hash(),
-        );
-
         // Combine all the txs from the pool with any extra txs provided.
         let mut txs = self.all_transactions();

@@ -228,6 +214,17 @@ impl Pool {
         // Validate aggregated tx against a known chain state.
         self.validate_raw_tx(&agg_tx, header)?;

+        debug!(
+            "add_to_pool [{}]: {} ({}), in/out/kern: {}/{}/{}, pool: {} (at block {})",
+            self.name,
+            entry.tx.hash(),
+            entry.src.debug_name,
+            entry.tx.inputs().len(),
+            entry.tx.outputs().len(),
+            entry.tx.kernels().len(),
+            self.size(),
+            header.hash(),
+        );
         // If we get here successfully then we can safely add the entry to the pool.
         self.entries.push(entry);
@@ -239,8 +236,14 @@ impl Pool {
         tx: &Transaction,
         header: &BlockHeader,
     ) -> Result<BlockSums, PoolError> {
-        let block_sums = self.blockchain.get_block_sums(&header.hash())?;
-        let new_sums = self.apply_txs_to_block_sums(&block_sums, vec![tx.clone()], header)?;
+        tx.validate(self.verifier_cache.clone())?;
+        // Validate the tx against current chain state.
+        // Check all inputs are in the current UTXO set.
+        // Check all outputs are unique in current UTXO set.
+        self.blockchain.validate_tx(tx)?;
+
+        let new_sums = self.apply_tx_to_block_sums(tx, header)?;
         Ok(new_sums)
     }

@@ -249,7 +252,6 @@ impl Pool {
         txs: Vec<Transaction>,
         extra_tx: Option<Transaction>,
         header: &BlockHeader,
-        block_sums: &BlockSums,
     ) -> Result<Vec<Transaction>, PoolError> {
         let mut valid_txs = vec![];

@@ -260,10 +262,12 @@ impl Pool {
             };
             candidate_txs.extend(valid_txs.clone());
             candidate_txs.push(tx.clone());
-            if self
-                .apply_txs_to_block_sums(&block_sums, candidate_txs, header)
-                .is_ok()
-            {
+            // Build a single aggregate tx from candidate txs.
+            let agg_tx = transaction::aggregate(candidate_txs)?;
+            // We know the tx is valid if the entire aggregate tx is valid.
+            if self.validate_raw_tx(&agg_tx, header).is_ok() {
                 valid_txs.push(tx);
             }
         }
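Note: validate_raw_txs above accepts each candidate only if aggregating it with the already-accepted txs still yields a valid aggregate. A minimal sketch of that accept-if-the-aggregate-still-validates loop, with a stand-in weight cap in place of real kernel-sum validation:

#[derive(Clone)]
struct Tx {
    weight: u64,
}

// Stand-in aggregation: just sum the weights.
fn aggregate(txs: &[Tx]) -> Tx {
    Tx { weight: txs.iter().map(|t| t.weight).sum() }
}

// Stand-in validation rule for the sketch.
fn validate(agg: &Tx, max_weight: u64) -> bool {
    agg.weight <= max_weight
}

// Accept candidates one at a time, keeping a tx only if the aggregate of
// everything accepted so far plus this tx still validates.
fn filter_valid(candidates: Vec<Tx>, max_weight: u64) -> Vec<Tx> {
    let mut valid: Vec<Tx> = vec![];
    for tx in candidates {
        let mut attempt = valid.clone();
        attempt.push(tx.clone());
        if validate(&aggregate(&attempt), max_weight) {
            valid.push(tx);
        }
    }
    valid
}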
@@ -271,28 +275,20 @@ impl Pool {
         Ok(valid_txs)
     }

-    fn apply_txs_to_block_sums(
+    fn apply_tx_to_block_sums(
         &self,
-        block_sums: &BlockSums,
-        txs: Vec<Transaction>,
+        tx: &Transaction,
         header: &BlockHeader,
     ) -> Result<BlockSums, PoolError> {
-        // Build a single aggregate tx and validate it.
-        let tx = transaction::aggregate(txs)?;
-        tx.validate(self.verifier_cache.clone())?;
-
-        // Validate the tx against current chain state.
-        // Check all inputs are in the current UTXO set.
-        // Check all outputs are unique in current UTXO set.
-        self.blockchain.validate_tx(&tx)?;
-
         let overage = tx.overage();
         let offset = (header.total_kernel_offset() + tx.offset)?;

+        let block_sums = self.blockchain.get_block_sums(&header.hash())?;
+
         // Verify the kernel sums for the block_sums with the new tx applied,
         // accounting for overage and offset.
         let (utxo_sum, kernel_sum) =
-            (block_sums.clone(), &tx as &Committed).verify_kernel_sums(overage, offset)?;
+            (block_sums, tx as &Committed).verify_kernel_sums(overage, offset)?;

         Ok(BlockSums {
             utxo_sum,

@@ -314,7 +310,7 @@ impl Pool {
     }

         for x in existing_entries {
-            let _ = self.add_to_pool(x.clone(), extra_txs.clone(), header);
+            let _ = self.add_to_pool(x, extra_txs.clone(), header);
         }

         Ok(())
@@ -355,20 +351,7 @@ impl Pool {
         tx_buckets
     }

-    // Filter txs in the pool based on the latest block.
-    // Reject any txs where we see a matching tx kernel in the block.
-    // Also reject any txs where we see a conflicting tx,
-    // where an input is spent in a different tx.
-    fn remaining_transactions(&self, block: &Block) -> Vec<Transaction> {
-        self.entries
-            .iter()
-            .filter(|x| !x.tx.kernels().iter().any(|y| block.kernels().contains(y)))
-            .filter(|x| !x.tx.inputs().iter().any(|y| block.inputs().contains(y)))
-            .map(|x| x.tx.clone())
-            .collect()
-    }
-
-    pub fn find_matching_transactions(&self, kernels: Vec<TxKernel>) -> Vec<Transaction> {
+    pub fn find_matching_transactions(&self, kernels: &[TxKernel]) -> Vec<Transaction> {
         // While the inputs outputs can be cut-through the kernel will stay intact
         // In order to deaggregate tx we look for tx with the same kernel
         let mut found_txs = vec![];

@@ -378,7 +361,7 @@ impl Pool {

         // Check each transaction in the pool
         for entry in &self.entries {
-            let entry_kernel_set = entry.tx.kernels().iter().cloned().collect::<HashSet<_>>();
+            let entry_kernel_set = entry.tx.kernels().iter().collect::<HashSet<_>>();
             if entry_kernel_set.is_subset(&kernel_set) {
                 found_txs.push(entry.tx.clone());
             }

@@ -388,10 +371,15 @@ impl Pool {

     /// Quick reconciliation step - we can evict any txs in the pool where
     /// inputs or kernels intersect with the block.
-    pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
-        let candidate_txs = self.remaining_transactions(block);
-        self.entries.retain(|x| candidate_txs.contains(&x.tx));
-        Ok(())
+    pub fn reconcile_block(&mut self, block: &Block) {
+        // Filter txs in the pool based on the latest block.
+        // Reject any txs where we see a matching tx kernel in the block.
+        // Also reject any txs where we see a conflicting tx,
+        // where an input is spent in a different tx.
+        self.entries.retain(|x| {
+            !x.tx.kernels().iter().any(|y| block.kernels().contains(y))
+                && !x.tx.inputs().iter().any(|y| block.inputs().contains(y))
+        });
     }

     pub fn size(&self) -> usize {
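Note: the new reconcile_block above evicts entries in place with Vec::retain instead of first building a list of surviving transactions. A minimal sketch of the same predicate with stand-in types:

#[derive(PartialEq)]
struct Kernel(u64);
#[derive(PartialEq)]
struct Input(u64);

struct PoolTx {
    kernels: Vec<Kernel>,
    inputs: Vec<Input>,
}

struct Block {
    kernels: Vec<Kernel>,
    inputs: Vec<Input>,
}

// Drop every pool tx whose kernels were confirmed by the block or whose
// inputs were spent by it; everything else stays in the pool.
fn reconcile_block(entries: &mut Vec<PoolTx>, block: &Block) {
    entries.retain(|tx| {
        !tx.kernels.iter().any(|k| block.kernels.contains(k))
            && !tx.inputs.iter().any(|i| block.inputs.contains(i))
    });
}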
@@ -17,7 +17,9 @@
 //! resulting tx pool can be added to the current chain state to produce a
 //! valid chain state.

-use std::sync::{Arc, RwLock};
+use std::collections::VecDeque;
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::Utc;

@@ -28,6 +30,9 @@ use core::core::{transaction, Block, BlockHeader, Transaction};
 use pool::Pool;
 use types::{BlockChain, PoolAdapter, PoolConfig, PoolEntry, PoolEntryState, PoolError, TxSource};

+// Cache this many txs to handle a potential fork and re-org.
+const REORG_CACHE_SIZE: usize = 100;
+
 /// Transaction pool implementation.
 pub struct TransactionPool {
     /// Pool Config

@@ -36,6 +41,8 @@ pub struct TransactionPool {
     pub txpool: Pool,
     /// Our Dandelion "stempool".
     pub stempool: Pool,
+    /// Cache of previous txs in case of a re-org.
+    pub reorg_cache: Arc<RwLock<VecDeque<PoolEntry>>>,
     /// The blockchain
     pub blockchain: Arc<BlockChain>,
     pub verifier_cache: Arc<RwLock<VerifierCache>>,

@@ -53,8 +60,13 @@ impl TransactionPool {
     ) -> TransactionPool {
         TransactionPool {
             config,
-            txpool: Pool::new(chain.clone(), verifier_cache.clone(), format!("txpool")),
-            stempool: Pool::new(chain.clone(), verifier_cache.clone(), format!("stempool")),
+            txpool: Pool::new(chain.clone(), verifier_cache.clone(), "txpool".to_string()),
+            stempool: Pool::new(
+                chain.clone(),
+                verifier_cache.clone(),
+                "stempool".to_string(),
+            ),
+            reorg_cache: Arc::new(RwLock::new(VecDeque::new())),
             blockchain: chain,
             verifier_cache,
             adapter,

@@ -68,13 +80,23 @@ impl TransactionPool {
     fn add_to_stempool(&mut self, entry: PoolEntry, header: &BlockHeader) -> Result<(), PoolError> {
         // Add tx to stempool (passing in all txs from txpool to validate against).
         self.stempool
-            .add_to_pool(entry.clone(), self.txpool.all_transactions(), header)?;
+            .add_to_pool(entry, self.txpool.all_transactions(), header)?;

         // Note: we do not notify the adapter here,
         // we let the dandelion monitor handle this.
         Ok(())
     }

+    fn add_to_reorg_cache(&mut self, entry: PoolEntry) -> Result<(), PoolError> {
+        let mut cache = self.reorg_cache.write();
+        cache.push_back(entry);
+        if cache.len() > REORG_CACHE_SIZE {
+            cache.pop_front();
+        }
+        debug!("added tx to reorg_cache: size now {}", cache.len());
+        Ok(())
+    }
+
     fn add_to_txpool(
         &mut self,
         mut entry: PoolEntry,
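Note: the reorg_cache introduced above is a bounded FIFO of recently accepted pool entries. A minimal sketch of such a cache with a stand-in entry type; REORG_CACHE_SIZE mirrors the constant in the diff, the rest is illustrative:

use std::collections::VecDeque;

const REORG_CACHE_SIZE: usize = 100;

#[derive(Clone)]
struct PoolEntry(u64); // stand-in for the real pool entry

struct ReorgCache {
    entries: VecDeque<PoolEntry>,
}

impl ReorgCache {
    fn new() -> ReorgCache {
        ReorgCache { entries: VecDeque::new() }
    }

    // Remember the newest entry; once over the cap, forget the oldest.
    fn push(&mut self, entry: PoolEntry) {
        self.entries.push_back(entry);
        if self.entries.len() > REORG_CACHE_SIZE {
            self.entries.pop_front();
        }
    }

    // After a new block (possibly on a different fork), replay the cached
    // entries so any tx orphaned by a re-org can be re-validated and re-added.
    fn replay<F: FnMut(&PoolEntry)>(&self, mut readd: F) {
        for entry in &self.entries {
            readd(entry);
        }
    }
}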
@@ -82,9 +104,7 @@ impl TransactionPool {
     ) -> Result<(), PoolError> {
         // First deaggregate the tx based on current txpool txs.
         if entry.tx.kernels().len() > 1 {
-            let txs = self
-                .txpool
-                .find_matching_transactions(entry.tx.kernels().clone());
+            let txs = self.txpool.find_matching_transactions(entry.tx.kernels());
             if !txs.is_empty() {
                 let tx = transaction::deaggregate(entry.tx, txs)?;
                 tx.validate(self.verifier_cache.clone())?;

@@ -96,8 +116,10 @@ impl TransactionPool {

         // We now need to reconcile the stempool based on the new state of the txpool.
         // Some stempool txs may no longer be valid and we need to evict them.
-        let txpool_tx = self.txpool.aggregate_transaction()?;
-        self.stempool.reconcile(txpool_tx, header)?;
+        {
+            let txpool_tx = self.txpool.aggregate_transaction()?;
+            self.stempool.reconcile(txpool_tx, header)?;
+        }

         self.adapter.tx_accepted(&entry.tx);
         Ok(())

@@ -123,7 +145,7 @@ impl TransactionPool {

         // Make sure the transaction is valid before anything else.
         tx.validate(self.verifier_cache.clone())
-            .map_err(|e| PoolError::InvalidTx(e))?;
+            .map_err(PoolError::InvalidTx)?;

         // Check the tx lock_time is valid based on current chain state.
         self.blockchain.verify_tx_lock_height(&tx)?;

@@ -135,28 +157,46 @@ impl TransactionPool {
             state: PoolEntryState::Fresh,
             src,
             tx_at: Utc::now(),
-            tx: tx.clone(),
+            tx,
         };

         if stem {
+            // TODO - what happens to txs in the stempool in a re-org scenario?
             self.add_to_stempool(entry, header)?;
         } else {
-            self.add_to_txpool(entry, header)?;
+            self.add_to_txpool(entry.clone(), header)?;
+            self.add_to_reorg_cache(entry)?;
         }
         Ok(())
     }

+    fn reconcile_reorg_cache(&mut self, header: &BlockHeader) -> Result<(), PoolError> {
+        let entries = self.reorg_cache.read().iter().cloned().collect::<Vec<_>>();
+        debug!("reconcile_reorg_cache: size: {} ...", entries.len());
+        for entry in entries {
+            let _ = &self.add_to_txpool(entry.clone(), header);
+        }
+        debug!("reconcile_reorg_cache: ... done.");
+        Ok(())
+    }
+
     /// Reconcile the transaction pool (both txpool and stempool) against the
     /// provided block.
     pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
         // First reconcile the txpool.
-        self.txpool.reconcile_block(block)?;
+        self.txpool.reconcile_block(block);
         self.txpool.reconcile(None, &block.header)?;

-        // Then reconcile the stempool, accounting for the txpool txs.
-        let txpool_tx = self.txpool.aggregate_transaction()?;
-        self.stempool.reconcile_block(block)?;
-        self.stempool.reconcile(txpool_tx, &block.header)?;
+        // Take our "reorg_cache" and see if this block means
+        // we need to (re)add old txs due to a fork and re-org.
+        self.reconcile_reorg_cache(&block.header)?;
+        // Now reconcile our stempool, accounting for the updated txpool txs.
+        self.stempool.reconcile_block(block);
+        {
+            let txpool_tx = self.txpool.aggregate_transaction()?;
+            self.stempool.reconcile(txpool_tx, &block.header)?;
+        }

         Ok(())
     }

@@ -168,7 +208,7 @@ impl TransactionPool {
         &self,
         hash: Hash,
         nonce: u64,
-        kern_ids: &Vec<ShortId>,
+        kern_ids: &[ShortId],
     ) -> (Vec<Transaction>, Vec<ShortId>) {
         self.txpool.retrieve_transactions(hash, nonce, kern_ids)
     }
@@ -18,6 +18,7 @@
 use chrono::prelude::{DateTime, Utc};

 use core::consensus;
+use core::core::block;
 use core::core::committed;
 use core::core::hash::Hash;
 use core::core::transaction::{self, Transaction};

@@ -128,7 +129,7 @@ pub struct PoolEntry {
 }

 /// The possible states a pool entry can be in.
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum PoolEntryState {
     /// A new entry, not yet processed.
     Fresh,

@@ -163,6 +164,8 @@ pub struct TxSource {
 pub enum PoolError {
     /// An invalid pool entry caused by underlying tx validation error
     InvalidTx(transaction::Error),
+    /// An invalid pool entry caused by underlying block validation error
+    InvalidBlock(block::Error),
     /// Underlying keychain error.
     Keychain(keychain::Error),
     /// Underlying "committed" error.

@@ -192,6 +195,12 @@ impl From<transaction::Error> for PoolError {
     }
 }

+impl From<block::Error> for PoolError {
+    fn from(e: block::Error) -> PoolError {
+        PoolError::InvalidBlock(e)
+    }
+}
+
 impl From<keychain::Error> for PoolError {
     fn from(e: keychain::Error) -> PoolError {
         PoolError::Keychain(e)
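Note: the new From<block::Error> impl above is what lets pool code propagate block validation failures with the ? operator. A minimal sketch of the pattern with stand-in error types:

#[derive(Debug)]
struct BlockError(String);

#[derive(Debug)]
enum PoolError {
    InvalidBlock(BlockError),
}

impl From<BlockError> for PoolError {
    fn from(e: BlockError) -> PoolError {
        PoolError::InvalidBlock(e)
    }
}

fn check_block() -> Result<(), BlockError> {
    Err(BlockError("bad block".to_string()))
}

fn add_to_pool() -> Result<(), PoolError> {
    // `?` uses the From impl above to convert BlockError into PoolError.
    check_block()?;
    Ok(())
}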
@@ -25,7 +25,8 @@ extern crate rand;

 pub mod common;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use core::core::verifier_cache::LruVerifierCache;
 use core::core::{Block, BlockHeader, Transaction};

@@ -80,7 +81,7 @@ fn test_transaction_pool_block_building() {
     let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);

     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();

         // Add the three root txs to the pool.
         write_pool

@@ -105,7 +106,7 @@ fn test_transaction_pool_block_building() {
     }

     let txs = {
-        let read_pool = pool.read().unwrap();
+        let read_pool = pool.read();
         read_pool.prepare_mineable_transactions().unwrap()
     };
     // children should have been aggregated into parents

@@ -123,7 +124,7 @@ fn test_transaction_pool_block_building() {
     // Now reconcile the transaction pool with the new block
     // and check the resulting contents of the pool are what we expect.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         write_pool.reconcile_block(&block).unwrap();

         assert_eq!(write_pool.total_size(), 0);
@@ -25,7 +25,8 @@ extern crate rand;

 pub mod common;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use core::core::{Block, BlockHeader};

@@ -127,7 +128,7 @@ fn test_transaction_pool_block_reconciliation() {
     // First we add the above transactions to the pool.
     // All should be accepted.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         assert_eq!(write_pool.total_size(), 0);

         for tx in &txs_to_add {

@@ -165,13 +166,13 @@ fn test_transaction_pool_block_reconciliation() {

     // Check the pool still contains everything we expect at this point.
     {
-        let write_pool = pool.write().unwrap();
+        let write_pool = pool.write();
         assert_eq!(write_pool.total_size(), txs_to_add.len());
     }

     // And reconcile the pool with this latest block.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         write_pool.reconcile_block(&block).unwrap();

         assert_eq!(write_pool.total_size(), 4);
@@ -25,7 +25,8 @@ extern crate rand;

 pub mod common;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use common::*;
 use core::core::hash::Hash;

@@ -83,7 +84,7 @@ fn test_coinbase_maturity() {
     let pool = RwLock::new(test_setup(chain, verifier_cache));

     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         let tx = test_transaction(&keychain, vec![50], vec![49]);
         match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) {
             Err(PoolError::ImmatureCoinbase) => {}
@@ -28,7 +28,8 @@ extern crate rand;

 use std::collections::HashSet;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use core::core::hash::{Hash, Hashed};
 use core::core::verifier_cache::VerifierCache;

@@ -98,7 +99,7 @@ impl ChainAdapter {
         batch.commit().unwrap();

         {
-            let mut utxo = self.utxo.write().unwrap();
+            let mut utxo = self.utxo.write();
             for x in block.inputs() {
                 utxo.remove(&x.commitment());
             }

@@ -129,7 +130,7 @@ impl BlockChain for ChainAdapter {
     }

     fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
-        let utxo = self.utxo.read().unwrap();
+        let utxo = self.utxo.read();

         for x in tx.outputs() {
             if utxo.contains(&x.commitment()) {
@@ -25,7 +25,8 @@ extern crate rand;

 pub mod common;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use common::*;
 use core::core::verifier_cache::LruVerifierCache;

@@ -72,7 +73,7 @@ fn test_the_transaction_pool() {

     // Add this tx to the pool (stem=false, direct to txpool).
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         write_pool
             .add_to_pool(test_source(), initial_tx, false, &header)
             .unwrap();

@@ -86,7 +87,7 @@ fn test_the_transaction_pool() {

     // Take a write lock and add a couple of tx entries to the pool.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();

         // Check we have a single initial tx in the pool.
         assert_eq!(write_pool.total_size(), 1);

@@ -110,7 +111,7 @@ fn test_the_transaction_pool() {
     // This will fail during tx aggregation due to duplicate outputs and duplicate
     // kernels.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         assert!(
             write_pool
                 .add_to_pool(test_source(), tx1.clone(), true, &header)

@@ -122,7 +123,7 @@ fn test_the_transaction_pool() {
     // tx).
     {
         let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         assert!(
             write_pool
                 .add_to_pool(test_source(), tx1a, true, &header)

@@ -133,7 +134,7 @@ fn test_the_transaction_pool() {
     // Test adding a tx attempting to spend a non-existent output.
     {
         let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         assert!(
             write_pool
                 .add_to_pool(test_source(), bad_tx, true, &header)

@@ -147,7 +148,7 @@ fn test_the_transaction_pool() {
     // to be immediately stolen via a "replay" tx.
     {
         let tx = test_transaction(&keychain, vec![900], vec![498]);
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         assert!(
             write_pool
                 .add_to_pool(test_source(), tx, true, &header)

@@ -157,7 +158,7 @@ fn test_the_transaction_pool() {

     // Confirm the tx pool correctly identifies an invalid tx (already spent).
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         let tx3 = test_transaction(&keychain, vec![500], vec![497]);
         assert!(
             write_pool

@@ -171,7 +172,7 @@ fn test_the_transaction_pool() {
     // Check we can take some entries from the stempool and "fluff" them into the
     // txpool. This also exercises multi-kernel txs.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         let agg_tx = write_pool
             .stempool
             .aggregate_transaction()

@@ -189,7 +190,7 @@ fn test_the_transaction_pool() {
     // We will do this be adding a new tx to the pool
     // that is a superset of a tx already in the pool.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();

         let tx4 = test_transaction(&keychain, vec![800], vec![799]);
         // tx1 and tx2 are already in the txpool (in aggregated form)

@@ -210,7 +211,7 @@ fn test_the_transaction_pool() {
     // Check we cannot "double spend" an output spent in a previous block.
     // We use the initial coinbase output here for convenience.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();

         let double_spend_tx =
             { test_transaction_spending_coinbase(&keychain, &header, vec![1000]) };
@@ -13,8 +13,8 @@ hyper-staticfile = "0.3"
 itertools = "0.7"
 lmdb-zero = "0.4.4"
 rand = "0.5"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
 serde = "1"
+log = "0.4"
 serde_derive = "1"
 serde_json = "1"
 chrono = "0.4.4"
@@ -17,9 +17,10 @@

 use std::fs::File;
 use std::net::SocketAddr;
-use std::sync::{Arc, RwLock, Weak};
+use std::sync::{Arc, Weak};
 use std::thread;
 use std::time::Instant;
+use util::RwLock;

 use chain::{self, ChainAdapter, Options};
 use chrono::prelude::{DateTime, Utc};

@@ -34,7 +35,7 @@ use p2p;
 use pool;
 use rand::prelude::*;
 use store;
-use util::{OneTime, LOGGER};
+use util::OneTime;

 /// Implementation of the NetAdapter for the . Gets notified when new
 /// blocks and transactions are received and forwards to the chain and pool

@@ -73,7 +74,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
         let header = self.chain().head_header().unwrap();

         debug!(
-            LOGGER,
             "Received tx {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
             tx_hash,
             tx.inputs().len(),

@@ -82,18 +82,17 @@ impl p2p::ChainAdapter for NetToChainAdapter {
         );

         let res = {
-            let mut tx_pool = self.tx_pool.write().unwrap();
+            let mut tx_pool = self.tx_pool.write();
             tx_pool.add_to_pool(source, tx, stem, &header)
         };

         if let Err(e) = res {
-            debug!(LOGGER, "Transaction {} rejected: {:?}", tx_hash, e);
+            debug!("Transaction {} rejected: {:?}", tx_hash, e);
         }
     }

     fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
         debug!(
-            LOGGER,
             "Received block {} at {} from {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
             b.hash(),
             b.header.height,

@@ -108,7 +107,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
     fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
         let bhash = cb.hash();
         debug!(
-            LOGGER,
             "Received compact_block {} at {} from {}, outputs: {}, kernels: {}, kern_ids: {}, going to process.",
             bhash,
             cb.header.height,

@@ -124,7 +122,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
             match core::Block::hydrate_from(cb, vec![]) {
                 Ok(block) => self.process_block(block, addr),
                 Err(e) => {
-                    debug!(LOGGER, "Invalid hydrated block {}: {}", cb_hash, e);
+                    debug!("Invalid hydrated block {}: {}", cb_hash, e);
                     return false;
                 }
             }

@@ -134,17 +132,16 @@ impl p2p::ChainAdapter for NetToChainAdapter {
                 .chain()
                 .process_block_header(&cb.header, self.chain_opts())
             {
-                debug!(LOGGER, "Invalid compact block header {}: {}", cb_hash, e);
+                debug!("Invalid compact block header {}: {}", cb_hash, e);
                 return !e.is_bad_data();
             }

             let (txs, missing_short_ids) = {
-                let tx_pool = self.tx_pool.read().unwrap();
+                let tx_pool = self.tx_pool.read();
                 tx_pool.retrieve_transactions(cb.hash(), cb.nonce, cb.kern_ids())
             };

             debug!(
-                LOGGER,
                 "adapter: txs from tx pool - {}, (unknown kern_ids: {})",
                 txs.len(),
                 missing_short_ids.len(),

@@ -158,7 +155,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
             let block = match core::Block::hydrate_from(cb.clone(), txs) {
                 Ok(block) => block,
                 Err(e) => {
-                    debug!(LOGGER, "Invalid hydrated block {}: {}", cb.hash(), e);
+                    debug!("Invalid hydrated block {}: {}", cb.hash(), e);
                     return false;
                 }
             };
@@ -168,29 +165,22 @@ impl p2p::ChainAdapter for NetToChainAdapter {
                 .validate(&prev.total_kernel_offset, self.verifier_cache.clone())
                 .is_ok()
             {
-                debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
+                debug!("adapter: successfully hydrated block from tx pool!");
                 self.process_block(block, addr)
             } else {
                 if self.sync_state.status() == SyncStatus::NoSync {
-                    debug!(
-                        LOGGER,
-                        "adapter: block invalid after hydration, requesting full block"
-                    );
+                    debug!("adapter: block invalid after hydration, requesting full block");
                     self.request_block(&cb.header, &addr);
                     true
                 } else {
                     debug!(
-                        LOGGER,
                         "adapter: block invalid after hydration, ignoring it, cause still syncing"
                     );
                     true
                 }
             }
         } else {
-            debug!(
-                LOGGER,
-                "adapter: failed to retrieve previous block header (still syncing?)"
-            );
+            debug!("adapter: failed to retrieve previous block header (still syncing?)");
             true
         }
     }
@@ -199,8 +189,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
     fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
         let bhash = bh.hash();
         debug!(
-            LOGGER,
-            "Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
+            "Received block header {} at {} from {}, going to process.",
+            bhash, bh.height, addr,
         );

         // pushing the new block header through the header chain pipeline

@@ -208,16 +198,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
         let res = self.chain().process_block_header(&bh, self.chain_opts());

         if let &Err(ref e) = &res {
-            debug!(
-                LOGGER,
-                "Block header {} refused by chain: {:?}",
-                bhash,
-                e.kind()
-            );
+            debug!("Block header {} refused by chain: {:?}", bhash, e.kind());
             if e.is_bad_data() {
                 debug!(
-                    LOGGER,
-                    "header_received: {} is a bad header, resetting header head", bhash
+                    "header_received: {} is a bad header, resetting header head",
+                    bhash
                 );
                 let _ = self.chain().reset_head();
                 return false;

@@ -238,7 +223,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {

     fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) -> bool {
         info!(
-            LOGGER,
             "Received block headers {:?} from {}",
             bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
             addr,

@@ -251,7 +235,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
         // try to add headers to our header chain
         let res = self.chain().sync_block_headers(&bhs, self.chain_opts());
         if let &Err(ref e) = &res {
-            debug!(LOGGER, "Block headers refused by chain: {:?}", e);
+            debug!("Block headers refused by chain: {:?}", e);

             if e.is_bad_data() {
                 return false;

@@ -261,14 +245,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
     }

     fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
-        debug!(LOGGER, "locate_headers: {:?}", locator,);
+        debug!("locate_headers: {:?}", locator,);

         let header = match self.find_common_header(locator) {
             Some(header) => header,
             None => return vec![],
         };

-        debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),);
+        debug!("locate_headers: common header: {:?}", header.hash(),);

         // looks like we know one, getting as many following headers as allowed
         let hh = header.height;

@@ -280,18 +264,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
             Err(e) => match e.kind() {
                 chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
                 _ => {
-                    error!(LOGGER, "Could not build header locator: {:?}", e);
+                    error!("Could not build header locator: {:?}", e);
                     return vec![];
                 }
             },
         }
     }

-        debug!(
-            LOGGER,
-            "locate_headers: returning headers: {}",
-            headers.len(),
-        );
+        debug!("locate_headers: returning headers: {}", headers.len(),);

         headers
     }
@@ -316,10 +296,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
                 reader: read,
             }),
             Err(e) => {
-                warn!(
-                    LOGGER,
-                    "Couldn't produce txhashset data for block {}: {:?}", h, e
-                );
+                warn!("Couldn't produce txhashset data for block {}: {:?}", h, e);
                 None
             }
         }

@@ -366,12 +343,12 @@ impl p2p::ChainAdapter for NetToChainAdapter {
             .chain()
             .txhashset_write(h, txhashset_data, self.sync_state.as_ref())
         {
-            error!(LOGGER, "Failed to save txhashset archive: {}", e);
+            error!("Failed to save txhashset archive: {}", e);
             let is_good_data = !e.is_bad_data();
             self.sync_state.set_sync_error(types::Error::Chain(e));
             is_good_data
         } else {
-            info!(LOGGER, "Received valid txhashset data for {}.", h);
+            info!("Received valid txhashset data for {}.", h);
             true
         }
     }

@@ -446,7 +423,7 @@ impl NetToChainAdapter {
                 self.find_common_header(locator[1..].to_vec())
             }
             _ => {
-                error!(LOGGER, "Could not build header locator: {:?}", e);
+                error!("Could not build header locator: {:?}", e);
                 None
             }
         },
@@ -456,14 +433,11 @@ impl NetToChainAdapter {
     // pushing the new block through the chain pipeline
     // remembering to reset the head if we have a bad block
     fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
-        if !self.archive_mode {
+        // We cannot process blocks earlier than the horizon so check for this here.
+        {
             let head = self.chain().head().unwrap();
-            // we have a fast sync'd node and are sent a block older than our horizon,
-            // only sync can do something with that
-            if b.header.height < head
-                .height
-                .saturating_sub(global::cut_through_horizon() as u64)
-            {
+            let horizon = head.height.saturating_sub(global::cut_through_horizon() as u64);
+            if b.header.height < horizon {
                 return true;
             }
         }

@@ -478,8 +452,8 @@ impl NetToChainAdapter {
             }
             Err(ref e) if e.is_bad_data() => {
                 debug!(
-                    LOGGER,
-                    "adapter: process_block: {} is a bad block, resetting head", bhash
+                    "adapter: process_block: {} is a bad block, resetting head",
+                    bhash
                 );
                 let _ = self.chain().reset_head();

@@ -494,14 +468,13 @@ impl NetToChainAdapter {
                 chain::ErrorKind::Orphan => {
                     // make sure we did not miss the parent block
                     if !self.chain().is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
-                        debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
+                        debug!("adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
                         self.request_block_by_hash(prev_hash, &addr)
                     }
                     true
                 }
                 _ => {
                     debug!(
-                        LOGGER,
                         "adapter: process_block: block {} refused by chain: {}",
                         bhash,
                         e.kind()

@@ -526,8 +499,8 @@ impl NetToChainAdapter {
             let now = Instant::now();

             debug!(
-                LOGGER,
-                "adapter: process_block: ***** validating full chain state at {}", bhash,
+                "adapter: process_block: ***** validating full chain state at {}",
+                bhash,
             );

             self.chain()

@@ -535,7 +508,6 @@ impl NetToChainAdapter {
                 .expect("chain validation failed, hard stop");

             debug!(
-                LOGGER,
                 "adapter: process_block: ***** done validating full chain state, took {}s",
                 now.elapsed().as_secs(),
             );

@@ -557,7 +529,7 @@ impl NetToChainAdapter {
             .name("compactor".to_string())
             .spawn(move || {
                 if let Err(e) = chain.compact() {
-                    error!(LOGGER, "Could not compact chain: {:?}", e);
+                    error!("Could not compact chain: {:?}", e);
                 }
             });
     }
@@ -591,23 +563,19 @@ impl NetToChainAdapter {
         match self.chain().block_exists(h) {
             Ok(false) => match self.peers().get_connected_peer(addr) {
                 None => debug!(
-                    LOGGER,
                     "send_block_request_to_peer: can't send request to peer {:?}, not connected",
                     addr
                 ),
                 Some(peer) => {
                     if let Err(e) = f(&peer, h) {
-                        error!(LOGGER, "send_block_request_to_peer: failed: {:?}", e)
+                        error!("send_block_request_to_peer: failed: {:?}", e)
                     }
                 }
             },
-            Ok(true) => debug!(
-                LOGGER,
-                "send_block_request_to_peer: block {} already known", h
-            ),
+            Ok(true) => debug!("send_block_request_to_peer: block {} already known", h),
             Err(e) => error!(
-                LOGGER,
-                "send_block_request_to_peer: failed to check block exists: {:?}", e
+                "send_block_request_to_peer: failed to check block exists: {:?}",
+                e
             ),
         }
     }

@@ -638,11 +606,10 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
             return;
         }

-        debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());
+        debug!("adapter: block_accepted: {:?}", b.hash());

-        if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
+        if let Err(e) = self.tx_pool.write().reconcile_block(b) {
             error!(
-                LOGGER,
                 "Pool could not update itself at block {}: {:?}",
                 b.hash(),
                 e,
@@ -15,11 +15,11 @@
 //! Server stat collection types, to be used by tests, logging or GUI/TUI
 //! to collect information about server status

-use std::sync::atomic::AtomicBool;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::time::SystemTime;
+use util::RwLock;

-use core::pow::Difficulty;
+use core::consensus::graph_weight;

 use chrono::prelude::*;

@@ -31,8 +31,6 @@ use p2p;
 /// and populated when required
 #[derive(Clone)]
 pub struct ServerStateInfo {
-    /// whether we're in a state of waiting for peers at startup
-    pub awaiting_peers: Arc<AtomicBool>,
     /// Stratum stats
     pub stratum_stats: Arc<RwLock<StratumStats>>,
 }

@@ -40,7 +38,6 @@ pub struct ServerStateInfo {
 impl Default for ServerStateInfo {
     fn default() -> ServerStateInfo {
         ServerStateInfo {
-            awaiting_peers: Arc::new(AtomicBool::new(false)),
             stratum_stats: Arc::new(RwLock::new(StratumStats::default())),
         }
     }

@@ -57,8 +54,6 @@ pub struct ServerStats {
     pub header_head: chain::Tip,
     /// Whether we're currently syncing
     pub sync_status: SyncStatus,
-    /// Whether we're awaiting peers
-    pub awaiting_peers: bool,
     /// Handle to current stratum server stats
     pub stratum_stats: StratumStats,
     /// Peer stats

@@ -163,8 +158,7 @@ pub struct PeerStats {
 impl StratumStats {
     /// Calculate network hashrate
     pub fn network_hashrate(&self) -> f64 {
-        42.0 * (self.network_difficulty as f64 / Difficulty::scale(self.edge_bits as u8) as f64)
-            / 60.0
+        42.0 * (self.network_difficulty as f64 / graph_weight(self.edge_bits as u8) as f64) / 60.0
     }
 }
@@ -14,7 +14,8 @@
 
 //! Server types
 use std::convert::From;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 
 use api;
 use chain;
@@ -24,7 +25,6 @@ use core::{core, pow};
 use p2p;
 use pool;
 use store;
-use util::LOGGER;
 use wallet;
 
 /// Error type wrapping underlying module errors.
@@ -44,6 +44,8 @@ pub enum Error {
     Wallet(wallet::Error),
     /// Error originating from the cuckoo miner
     Cuckoo(pow::Error),
+    /// Error originating from the transaction pool.
+    Pool(pool::PoolError),
 }
 
 impl From<core::block::Error> for Error {
@@ -87,6 +89,12 @@ impl From<wallet::Error> for Error {
     }
 }
 
+impl From<pool::PoolError> for Error {
+    fn from(e: pool::PoolError) -> Error {
+        Error::Pool(e)
+    }
+}
+
 /// Type of seeding the server will use to find other peers on the network.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 pub enum ChainValidationMode {
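Adding the `Pool` variant together with a `From<pool::PoolError>` impl is what lets pool errors bubble up through `?` in server code. A self-contained sketch of the pattern with stand-in types (not the real grin error types):

```rust
#[derive(Debug)]
struct PoolError;

#[derive(Debug)]
enum Error {
    Pool(PoolError),
}

impl From<PoolError> for Error {
    fn from(e: PoolError) -> Error {
        Error::Pool(e)
    }
}

fn add_to_pool() -> Result<(), PoolError> {
    Err(PoolError)
}

// `?` converts PoolError into Error automatically via the From impl.
fn server_call() -> Result<(), Error> {
    add_to_pool()?;
    Ok(())
}

fn main() {
    println!("{:?}", server_call());
}
```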
@@ -160,28 +168,6 @@ pub struct ServerConfig {
     pub stratum_mining_config: Option<StratumServerConfig>,
 }
 
-impl ServerConfig {
-    /// Configuration items validation check
-    pub fn validation_check(&mut self) {
-        // check [server.p2p_config.capabilities] with 'archive_mode' in [server]
-        if let Some(archive) = self.archive_mode {
-            // note: slog not available before config loaded, only print here.
-            if archive != self
-                .p2p_config
-                .capabilities
-                .contains(p2p::Capabilities::FULL_HIST)
-            {
-                // if conflict, 'archive_mode' win
-                self.p2p_config
-                    .capabilities
-                    .toggle(p2p::Capabilities::FULL_HIST);
-            }
-        }
-
-        // todo: other checks if needed
-    }
-}
-
 impl Default for ServerConfig {
     fn default() -> ServerConfig {
         ServerConfig {
@@ -250,6 +236,9 @@ pub enum SyncStatus {
     Initial,
     /// Not syncing
     NoSync,
+    /// Not enough peers to do anything yet, boolean indicates whether
+    /// we should wait at all or ignore and start ASAP
+    AwaitingPeers(bool),
     /// Downloading block headers
     HeaderSync {
         current_height: u64,
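The new `AwaitingPeers(bool)` variant replaces the separate `awaiting_peers` flag removed from `ServerStateInfo` and `ServerStats` earlier in this diff, so the waiting state travels inside `SyncStatus` itself. A hedged sketch of how a status consumer might branch on it (a simplified enum, not the full one):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum SyncStatus {
    Initial,
    NoSync,
    /// Not enough peers yet; the bool says whether we should actually wait.
    AwaitingPeers(bool),
    HeaderSync { current_height: u64, highest_height: u64 },
}

fn describe(status: SyncStatus) -> String {
    match status {
        SyncStatus::Initial => "initializing".to_string(),
        SyncStatus::NoSync => "running".to_string(),
        SyncStatus::AwaitingPeers(true) => "waiting for peers".to_string(),
        SyncStatus::AwaitingPeers(false) => "starting without waiting for peers".to_string(),
        SyncStatus::HeaderSync { current_height, highest_height } => {
            format!("header sync {}/{}", current_height, highest_height)
        }
    }
}

fn main() {
    println!("{}", describe(SyncStatus::AwaitingPeers(true)));
}
```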
@@ -297,12 +286,12 @@ impl SyncState {
     /// Whether the current state matches any active syncing operation.
     /// Note: This includes our "initial" state.
     pub fn is_syncing(&self) -> bool {
-        *self.current.read().unwrap() != SyncStatus::NoSync
+        *self.current.read() != SyncStatus::NoSync
     }
 
     /// Current syncing status
     pub fn status(&self) -> SyncStatus {
-        *self.current.read().unwrap()
+        *self.current.read()
     }
 
     /// Update the syncing status
@@ -311,12 +300,9 @@ impl SyncState {
             return;
         }
 
-        let mut status = self.current.write().unwrap();
+        let mut status = self.current.write();
 
-        debug!(
-            LOGGER,
-            "sync_state: sync_status: {:?} -> {:?}", *status, new_status,
-        );
+        debug!("sync_state: sync_status: {:?} -> {:?}", *status, new_status,);
 
         *status = new_status;
     }
@@ -324,7 +310,7 @@ impl SyncState {
     /// Update txhashset downloading progress
     pub fn update_txhashset_download(&self, new_status: SyncStatus) -> bool {
         if let SyncStatus::TxHashsetDownload { .. } = new_status {
-            let mut status = self.current.write().unwrap();
+            let mut status = self.current.write();
             *status = new_status;
             true
         } else {
@@ -334,7 +320,7 @@ impl SyncState {
 
     /// Communicate sync error
     pub fn set_sync_error(&self, error: Error) {
-        *self.sync_error.write().unwrap() = Some(error);
+        *self.sync_error.write() = Some(error);
     }
 
     /// Get sync error
@@ -344,7 +330,7 @@ impl SyncState {
 
     /// Clear sync error
     pub fn clear_sync_error(&self) {
-        *self.sync_error.write().unwrap() = None;
+        *self.sync_error.write() = None;
     }
 }
 
@@ -354,7 +340,7 @@ impl chain::TxHashsetWriteStatus for SyncState {
     }
 
     fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
-        let mut status = self.current.write().unwrap();
+        let mut status = self.current.write();
         match *status {
             SyncStatus::TxHashsetValidation {
                 kernels,
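The recurring change from `.read().unwrap()` / `.write().unwrap()` to bare `.read()` / `.write()` follows from swapping `std::sync::RwLock` for the re-exported `util::RwLock`: a lock whose guards come back directly instead of wrapped in a poison-checking `Result` (grin's util wrapper is, to my understanding, backed by parking_lot). A minimal sketch of the API difference, using only the standard library as a stand-in:

```rust
use std::sync::{RwLock as StdRwLock, RwLockReadGuard, RwLockWriteGuard};

/// Tiny stand-in for a non-poisoning RwLock wrapper: read()/write() hand back
/// guards directly, so call sites drop their `.unwrap()`.
struct RwLock<T>(StdRwLock<T>);

impl<T> RwLock<T> {
    fn new(value: T) -> Self {
        RwLock(StdRwLock::new(value))
    }
    fn read(&self) -> RwLockReadGuard<'_, T> {
        // Poisoning is ignored here purely to illustrate the API shape.
        self.0.read().unwrap_or_else(|e| e.into_inner())
    }
    fn write(&self) -> RwLockWriteGuard<'_, T> {
        self.0.write().unwrap_or_else(|e| e.into_inner())
    }
}

fn main() {
    let current = RwLock::new(0u64);
    *current.write() = 42; // no .unwrap() at the call site
    println!("{}", *current.read());
}
```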
@@ -15,15 +15,15 @@
 use chrono::prelude::Utc;
 use rand::{thread_rng, Rng};
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
+use util::RwLock;
 
 use core::core::hash::Hashed;
 use core::core::transaction;
 use core::core::verifier_cache::VerifierCache;
 use pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, TxSource};
-use util::LOGGER;
 
 /// A process to monitor transactions in the stempool.
 /// With Dandelion, transaction can be broadcasted in stem or fluff phase.
@@ -39,7 +39,7 @@ pub fn monitor_transactions(
     verifier_cache: Arc<RwLock<VerifierCache>>,
     stop: Arc<AtomicBool>,
 ) {
-    debug!(LOGGER, "Started Dandelion transaction monitor.");
+    debug!("Started Dandelion transaction monitor.");
 
     let _ = thread::Builder::new()
         .name("dandelion".to_string())
@@ -57,26 +57,26 @@ pub fn monitor_transactions(
             // Aggregate them up to give a single (valid) aggregated tx and propagate it
             // to the next Dandelion relay along the stem.
             if process_stem_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
-                error!(LOGGER, "dand_mon: Problem with stem phase.");
+                error!("dand_mon: Problem with stem phase.");
             }
 
             // Step 2: find all "ToFluff" entries in stempool from last run.
             // Aggregate them up to give a single (valid) aggregated tx and (re)add it
             // to our pool with stem=false (which will then broadcast it).
             if process_fluff_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
-                error!(LOGGER, "dand_mon: Problem with fluff phase.");
+                error!("dand_mon: Problem with fluff phase.");
             }
 
             // Step 3: now find all "Fresh" entries in stempool since last run.
             // Coin flip for each (90/10) and label them as either "ToStem" or "ToFluff".
             // We will process these in the next run (waiting patience secs).
             if process_fresh_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
-                error!(LOGGER, "dand_mon: Problem processing fresh pool entries.");
+                error!("dand_mon: Problem processing fresh pool entries.");
             }
 
             // Step 4: now find all expired entries based on embargo timer.
             if process_expired_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
-                error!(LOGGER, "dand_mon: Problem processing fresh pool entries.");
+                error!("dand_mon: Problem processing fresh pool entries.");
             }
         }
     });
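Step 3 above labels each fresh stempool entry by a 90/10 coin flip. A small sketch of that selection using the `rand` crate already imported by this file (the function and the 0.9 cutoff mirror the comment; they are illustrative, not the pool's API):

```rust
use rand::{thread_rng, Rng};

#[derive(Debug, PartialEq)]
enum PoolEntryState {
    ToStem,
    ToFluff,
}

/// Label a fresh entry: roughly 90% keep stemming, 10% fluff immediately.
fn label_fresh_entry<R: Rng>(rng: &mut R) -> PoolEntryState {
    if rng.gen::<f64>() < 0.9 {
        PoolEntryState::ToStem
    } else {
        PoolEntryState::ToFluff
    }
}

fn main() {
    let mut rng = thread_rng();
    for _ in 0..5 {
        println!("{:?}", label_fresh_entry(&mut rng));
    }
}
```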
@@ -86,14 +86,20 @@ fn process_stem_phase(
     tx_pool: Arc<RwLock<TransactionPool>>,
     verifier_cache: Arc<RwLock<VerifierCache>>,
 ) -> Result<(), PoolError> {
-    let mut tx_pool = tx_pool.write().unwrap();
+    let mut tx_pool = tx_pool.write();
 
     let header = tx_pool.chain_head()?;
 
-    let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
     let stem_txs = tx_pool
         .stempool
         .get_transactions_in_state(PoolEntryState::ToStem);
 
+    if stem_txs.is_empty() {
+        return Ok(());
+    }
+
+    let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
+
     let stem_txs = tx_pool
         .stempool
         .select_valid_transactions(stem_txs, txpool_tx, &header)?;
@@ -102,21 +108,14 @@ fn process_stem_phase(
         .transition_to_state(&stem_txs, PoolEntryState::Stemmed);
 
     if stem_txs.len() > 0 {
-        debug!(
-            LOGGER,
-            "dand_mon: Found {} txs for stemming.",
-            stem_txs.len()
-        );
+        debug!("dand_mon: Found {} txs for stemming.", stem_txs.len());
 
         let agg_tx = transaction::aggregate(stem_txs)?;
         agg_tx.validate(verifier_cache.clone())?;
 
         let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
         if res.is_err() {
-            debug!(
-                LOGGER,
-                "dand_mon: Unable to propagate stem tx. No relay, fluffing instead."
-            );
+            debug!("dand_mon: Unable to propagate stem tx. No relay, fluffing instead.");
 
             let src = TxSource {
                 debug_name: "no_relay".to_string(),
@@ -133,14 +132,20 @@ fn process_fluff_phase(
     tx_pool: Arc<RwLock<TransactionPool>>,
     verifier_cache: Arc<RwLock<VerifierCache>>,
 ) -> Result<(), PoolError> {
-    let mut tx_pool = tx_pool.write().unwrap();
+    let mut tx_pool = tx_pool.write();
 
     let header = tx_pool.chain_head()?;
 
-    let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
     let stem_txs = tx_pool
         .stempool
         .get_transactions_in_state(PoolEntryState::ToFluff);
 
+    if stem_txs.is_empty() {
+        return Ok(());
+    }
+
+    let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
+
     let stem_txs = tx_pool
         .stempool
         .select_valid_transactions(stem_txs, txpool_tx, &header)?;
@@ -149,11 +154,7 @@ fn process_fluff_phase(
         .transition_to_state(&stem_txs, PoolEntryState::Fluffed);
 
     if stem_txs.len() > 0 {
-        debug!(
-            LOGGER,
-            "dand_mon: Found {} txs for fluffing.",
-            stem_txs.len()
-        );
+        debug!("dand_mon: Found {} txs for fluffing.", stem_txs.len());
 
         let agg_tx = transaction::aggregate(stem_txs)?;
         agg_tx.validate(verifier_cache.clone())?;
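Both the stem and fluff phases now check for an empty selection before aggregating the txpool, where previously `aggregate_transaction()` ran unconditionally. A trimmed sketch of the reordering with stand-in types (not the real pool API):

```rust
// Guard first, pay later: skip the comparatively expensive aggregation step
// entirely when there is nothing to process this round.
fn process_phase(stem_txs: Vec<u32>) -> Result<(), ()> {
    if stem_txs.is_empty() {
        return Ok(());
    }
    // Only now do the aggregation / validation work.
    let _aggregated: u32 = stem_txs.iter().sum();
    Ok(())
}

fn main() {
    assert!(process_phase(vec![]).is_ok());
    assert!(process_phase(vec![1, 2, 3]).is_ok());
}
```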
@@ -172,7 +173,7 @@ fn process_fresh_entries(
     dandelion_config: DandelionConfig,
     tx_pool: Arc<RwLock<TransactionPool>>,
 ) -> Result<(), PoolError> {
-    let mut tx_pool = tx_pool.write().unwrap();
+    let mut tx_pool = tx_pool.write();
 
     let mut rng = thread_rng();
 
@@ -185,7 +186,6 @@ fn process_fresh_entries(
 
     if fresh_entries.len() > 0 {
         debug!(
-            LOGGER,
             "dand_mon: Found {} fresh entries in stempool.",
             fresh_entries.len()
         );
@@ -212,31 +212,23 @@ fn process_expired_entries(
 
     let mut expired_entries = vec![];
     {
-        let tx_pool = tx_pool.read().unwrap();
+        let tx_pool = tx_pool.read();
         for entry in tx_pool
             .stempool
             .entries
             .iter()
             .filter(|x| x.tx_at.timestamp() < cutoff)
         {
-            debug!(
-                LOGGER,
-                "dand_mon: Embargo timer expired for {:?}",
-                entry.tx.hash()
-            );
+            debug!("dand_mon: Embargo timer expired for {:?}", entry.tx.hash());
             expired_entries.push(entry.clone());
         }
     }
 
     if expired_entries.len() > 0 {
-        debug!(
-            LOGGER,
-            "dand_mon: Found {} expired txs.",
-            expired_entries.len()
-        );
+        debug!("dand_mon: Found {} expired txs.", expired_entries.len());
 
         {
-            let mut tx_pool = tx_pool.write().unwrap();
+            let mut tx_pool = tx_pool.write();
             let header = tx_pool.chain_head()?;
 
             for entry in expired_entries {
@@ -245,11 +237,8 @@ fn process_expired_entries(
                 identifier: "?.?.?.?".to_string(),
             };
             match tx_pool.add_to_pool(src, entry.tx, false, &header) {
-                Ok(_) => debug!(
-                    LOGGER,
-                    "dand_mon: embargo expired, fluffed tx successfully."
-                ),
-                Err(e) => debug!(LOGGER, "dand_mon: Failed to fluff expired tx - {:?}", e),
+                Ok(_) => debug!("dand_mon: embargo expired, fluffed tx successfully."),
+                Err(e) => debug!("dand_mon: Failed to fluff expired tx - {:?}", e),
             };
         }
     }
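The expiry scan above compares each entry's `tx_at` timestamp against a cutoff derived from the configured embargo period. A sketch of that cutoff arithmetic using chrono, as elsewhere in this file (field and parameter names here are illustrative):

```rust
use chrono::prelude::Utc;
use chrono::Duration;

/// Entries older than `embargo_secs` are considered expired and get fluffed.
fn embargo_cutoff(embargo_secs: i64) -> i64 {
    (Utc::now() - Duration::seconds(embargo_secs)).timestamp()
}

fn main() {
    let cutoff = embargo_cutoff(180);
    let tx_at = Utc::now() - Duration::seconds(600);
    if tx_at.timestamp() < cutoff {
        println!("embargo expired, fluff this transaction");
    }
}
```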
@@ -12,9 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Mining plugin manager, using the cuckoo-miner crate to provide
-//! a mining worker implementation
-//!
+//! Seeds a server with initial peers on first start and keep monitoring
+//! peer counts to connect to more if needed. Seeding strategy is
+//! configurable with either no peers, a user-defined list or a preset
+//! list of DNS records (the default).
 
 use chrono::prelude::Utc;
 use chrono::{Duration, MIN_DATE};
@@ -27,7 +28,6 @@ use std::{cmp, io, str, thread, time};
 use p2p;
 use p2p::ChainAdapter;
 use pool::DandelionConfig;
-use util::LOGGER;
 
 // DNS Seeds with contact email associated
 const DNS_SEEDS: &'static [&'static str] = &[
@@ -60,10 +60,18 @@ pub fn connect_and_monitor(
     );
 
     let mut prev = MIN_DATE.and_hms(0, 0, 0);
+    let mut prev_expire_check = MIN_DATE.and_hms(0, 0, 0);
     let mut prev_ping = Utc::now();
     let mut start_attempt = 0;
 
     while !stop.load(Ordering::Relaxed) {
+        // Check for and remove expired peers from the storage
+        if Utc::now() - prev_expire_check > Duration::hours(1) {
+            peers.remove_expired();
+
+            prev_expire_check = Utc::now();
+        }
+
         // make several attempts to get peers as quick as possible
         // with exponential backoff
         if Utc::now() - prev > Duration::seconds(cmp::min(20, 1 << start_attempt)) {
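The connection loop retries with an exponential backoff capped at 20 seconds (`cmp::min(20, 1 << start_attempt)`), alongside the new hourly expired-peer sweep. A standalone sketch of that backoff schedule:

```rust
use std::cmp;

/// Delay, in seconds, before the next connection attempt:
/// 1, 2, 4, 8, 16, then capped at 20 thereafter.
fn backoff_secs(start_attempt: u32) -> i64 {
    cmp::min(20, 1i64 << start_attempt)
}

fn main() {
    let schedule: Vec<i64> = (0u32..8).map(backoff_secs).collect();
    println!("{:?}", schedule); // [1, 2, 4, 8, 16, 20, 20, 20]
}
```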
@@ -111,6 +119,7 @@ fn monitor_peers(
     let mut healthy_count = 0;
     let mut banned_count = 0;
     let mut defuncts = vec![];
 
     for x in peers.all_peers() {
         match x.flags {
             p2p::State::Banned => {
@@ -119,8 +128,8 @@ fn monitor_peers(
                 if interval >= config.ban_window() {
                     peers.unban_peer(&x.addr);
                     debug!(
-                        LOGGER,
-                        "monitor_peers: unbanned {} after {} seconds", x.addr, interval
+                        "monitor_peers: unbanned {} after {} seconds",
+                        x.addr, interval
                     );
                 } else {
                     banned_count += 1;
@@ -132,7 +141,6 @@ fn monitor_peers(
     }
 
     debug!(
-        LOGGER,
         "monitor_peers: on {}:{}, {} connected ({} most_work). \
         all {} = {} healthy + {} banned + {} defunct",
         config.host,
@@ -158,8 +166,8 @@ fn monitor_peers(
     let mut connected_peers: Vec<SocketAddr> = vec![];
     for p in peers.connected_peers() {
         debug!(
-            LOGGER,
-            "monitor_peers: {}:{} ask {} for more peers", config.host, config.port, p.info.addr,
+            "monitor_peers: {}:{} ask {} for more peers",
+            config.host, config.port, p.info.addr,
         );
         let _ = p.send_peer_request(capabilities);
         connected_peers.push(p.info.addr)
@@ -178,7 +186,7 @@ fn monitor_peers(
             }
         }
     }
-        None => debug!(LOGGER, "monitor_peers: no preferred peers"),
+        None => debug!("monitor_peers: no preferred peers"),
     }
 
     // take a random defunct peer and mark it healthy: over a long period any
@@ -197,8 +205,8 @@ fn monitor_peers(
     );
     for p in new_peers.iter().filter(|p| !peers.is_known(&p.addr)) {
         debug!(
-            LOGGER,
-            "monitor_peers: on {}:{}, queue to soon try {}", config.host, config.port, p.addr,
+            "monitor_peers: on {}:{}, queue to soon try {}",
+            config.host, config.port, p.addr,
        );
        tx.send(p.addr).unwrap();
     }
@@ -208,13 +216,13 @@ fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionCon
     // Dandelion Relay Updater
     let dandelion_relay = peers.get_dandelion_relay();
     if dandelion_relay.is_empty() {
-        debug!(LOGGER, "monitor_peers: no dandelion relay updating");
+        debug!("monitor_peers: no dandelion relay updating");
         peers.update_dandelion_relay();
     } else {
         for last_added in dandelion_relay.keys() {
             let dandelion_interval = Utc::now().timestamp() - last_added;
             if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
-                debug!(LOGGER, "monitor_peers: updating expired dandelion relay");
+                debug!("monitor_peers: updating expired dandelion relay");
                 peers.update_dandelion_relay();
             }
         }
@@ -230,7 +238,7 @@ fn connect_to_seeds_and_preferred_peers(
     peers_preferred_list: Option<Vec<SocketAddr>>,
 ) {
     // check if we have some peers in db
-    let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_HIST, 100);
+    let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_NODE, 100);
 
     // if so, get their addresses, otherwise use our seeds
     let mut peer_addrs = if peers.len() > 3 {
@@ -242,11 +250,11 @@ fn connect_to_seeds_and_preferred_peers(
     // If we have preferred peers add them to the connection
     match peers_preferred_list {
         Some(mut peers_preferred) => peer_addrs.append(&mut peers_preferred),
-        None => debug!(LOGGER, "No preferred peers"),
+        None => debug!("No preferred peers"),
     };
 
     if peer_addrs.len() == 0 {
-        warn!(LOGGER, "No seeds were retrieved.");
+        warn!("No seeds were retrieved.");
     }
 
     // connect to this first set of addresses
@@ -311,7 +319,7 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
         let mut addresses: Vec<SocketAddr> = vec![];
         for dns_seed in DNS_SEEDS {
             let temp_addresses = addresses.clone();
-            debug!(LOGGER, "Retrieving seed nodes from dns {}", dns_seed);
+            debug!("Retrieving seed nodes from dns {}", dns_seed);
             match (dns_seed.to_owned(), 0).to_socket_addrs() {
                 Ok(addrs) => addresses.append(
                     &mut (addrs
@@ -321,13 +329,10 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
                     }).filter(|addr| !temp_addresses.contains(addr))
                     .collect()),
                 ),
-                Err(e) => debug!(
-                    LOGGER,
-                    "Failed to resolve seed {:?} got error {:?}", dns_seed, e
-                ),
+                Err(e) => debug!("Failed to resolve seed {:?} got error {:?}", dns_seed, e),
             }
         }
-        debug!(LOGGER, "Retrieved seed addresses: {:?}", addresses);
+        debug!("Retrieved seed addresses: {:?}", addresses);
         addresses
     })
 }
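`dns_seeds` resolves each seed hostname with `to_socket_addrs` and skips duplicates already collected from earlier seeds. A minimal sketch of that resolution step using only the standard library (the host name here is a placeholder, not one of the real DNS seeds):

```rust
use std::net::{SocketAddr, ToSocketAddrs};

fn resolve_seed(host: &str, addresses: &mut Vec<SocketAddr>) {
    match (host, 0).to_socket_addrs() {
        Ok(addrs) => {
            for addr in addrs {
                // Skip anything another seed already gave us.
                if !addresses.contains(&addr) {
                    addresses.push(addr);
                }
            }
        }
        Err(e) => println!("Failed to resolve seed {:?} got error {:?}", host, e),
    }
}

fn main() {
    let mut addresses = vec![];
    resolve_seed("example.com", &mut addresses);
    println!("Retrieved seed addresses: {:?}", addresses);
}
```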
@@ -18,8 +18,9 @@
 
 use std::net::SocketAddr;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::{thread, time};
+use util::RwLock;
 
 use api;
 use chain;
@@ -27,8 +28,7 @@ use common::adapters::{
     ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
 };
 use common::stats::{DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats};
-use common::types::{Error, ServerConfig, StratumServerConfig, SyncState};
-use core::core::hash::Hashed;
+use common::types::{Error, ServerConfig, StratumServerConfig, SyncState, SyncStatus};
 use core::core::verifier_cache::{LruVerifierCache, VerifierCache};
 use core::{consensus, genesis, global, pow};
 use grin::{dandelion_monitor, seed, sync};
@@ -38,7 +38,6 @@ use p2p;
 use pool;
 use store;
 use util::file::get_first_line;
-use util::LOGGER;
 
 /// Grin server holding internal structures.
 pub struct Server {
@@ -79,7 +78,7 @@ impl Server {
         if let Some(s) = enable_stratum_server {
             if s {
                 {
-                    let mut stratum_stats = serv.state_info.stratum_stats.write().unwrap();
+                    let mut stratum_stats = serv.state_info.stratum_stats.write();
                     stratum_stats.is_enabled = true;
                 }
                 serv.start_stratum_server(c.clone());
@@ -111,18 +110,6 @@ impl Server {
             Some(b) => b,
         };
 
-        // If archive mode is enabled then the flags should contains the FULL_HIST flag
-        if archive_mode && !config
-            .p2p_config
-            .capabilities
-            .contains(p2p::Capabilities::FULL_HIST)
-        {
-            config
-                .p2p_config
-                .capabilities
-                .insert(p2p::Capabilities::FULL_HIST);
-        }
-
         let stop = Arc::new(AtomicBool::new(false));
 
         // Shared cache for verification results.
@@ -155,7 +142,7 @@ impl Server {
             global::ChainTypes::Mainnet => genesis::genesis_testnet2(), //TODO: Fix, obviously
         };
 
-        info!(LOGGER, "Starting server, genesis block: {}", genesis.hash());
+        info!("Starting server, genesis block: {}", genesis.hash());
 
         let db_env = Arc::new(store::new_env(config.db_root.clone()));
         let shared_chain = Arc::new(chain::Chain::init(
@@ -170,8 +157,6 @@ impl Server {
 
         pool_adapter.set_chain(shared_chain.clone());
 
-        let awaiting_peers = Arc::new(AtomicBool::new(false));
-
         let net_adapter = Arc::new(NetToChainAdapter::new(
             sync_state.clone(),
             archive_mode,
@@ -181,11 +166,6 @@ impl Server {
             config.clone(),
         ));
 
-        let block_1_hash = match shared_chain.get_header_by_height(1) {
-            Ok(header) => Some(header.hash()),
-            Err(_) => None,
-        };
-
         let peer_db_env = Arc::new(store::new_named_env(config.db_root.clone(), "peer".into()));
         let p2p_server = Arc::new(p2p::Server::new(
             peer_db_env,
@@ -194,8 +174,6 @@ impl Server {
             net_adapter.clone(),
             genesis.hash(),
             stop.clone(),
-            archive_mode,
-            block_1_hash,
         )?);
         chain_adapter.init(p2p_server.peers.clone());
         pool_net_adapter.init(p2p_server.peers.clone());
@@ -204,10 +182,7 @@ impl Server {
         if config.p2p_config.seeding_type.clone() != p2p::Seeding::Programmatic {
             let seeder = match config.p2p_config.seeding_type.clone() {
                 p2p::Seeding::None => {
-                    warn!(
-                        LOGGER,
-                        "No seed configured, will stay solo until connected to"
-                    );
+                    warn!("No seed configured, will stay solo until connected to");
                     seed::predefined_seeds(vec![])
                 }
                 p2p::Seeding::List => {
@@ -234,18 +209,13 @@ impl Server {
 
         // Defaults to None (optional) in config file.
         // This translates to false here so we do not skip by default.
-        let skip_sync_wait = match config.skip_sync_wait {
-            None => false,
-            Some(b) => b,
-        };
+        let skip_sync_wait = config.skip_sync_wait.unwrap_or(false);
+        sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait));
 
         sync::run_sync(
             sync_state.clone(),
-            awaiting_peers.clone(),
             p2p_server.peers.clone(),
             shared_chain.clone(),
-            skip_sync_wait,
-            archive_mode,
             stop.clone(),
         );
 
@@ -254,7 +224,7 @@ impl Server {
             .name("p2p-server".to_string())
             .spawn(move || p2p_inner.listen());
 
-        info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr);
+        info!("Starting rest apis at: {}", &config.api_http_addr);
         let api_secret = get_first_line(config.api_secret_path.clone());
         api::start_rest_apis(
             config.api_http_addr.clone(),
@@ -265,10 +235,7 @@ impl Server {
             None,
         );
 
-        info!(
-            LOGGER,
-            "Starting dandelion monitor: {}", &config.api_http_addr
-        );
+        info!("Starting dandelion monitor: {}", &config.api_http_addr);
         dandelion_monitor::monitor_transactions(
             config.dandelion_config.clone(),
             tx_pool.clone(),
@@ -276,7 +243,7 @@ impl Server {
             stop.clone(),
         );
 
-        warn!(LOGGER, "Grin server started.");
+        warn!("Grin server started.");
         Ok(Server {
             config,
             p2p: p2p_server,
@@ -285,7 +252,6 @@ impl Server {
             verifier_cache,
             sync_state,
             state_info: ServerStateInfo {
-                awaiting_peers: awaiting_peers,
                 ..Default::default()
             },
             stop,
@@ -335,7 +301,7 @@ impl Server {
     /// internal miner, and should only be used for automated testing. Burns
     /// reward if wallet_listener_url is 'None'
     pub fn start_test_miner(&self, wallet_listener_url: Option<String>, stop: Arc<AtomicBool>) {
-        info!(LOGGER, "start_test_miner - start",);
+        info!("start_test_miner - start",);
         let sync_state = self.sync_state.clone();
         let config_wallet_url = match wallet_listener_url.clone() {
             Some(u) => u,
@@ -388,8 +354,7 @@ impl Server {
     /// other
     /// consumers
     pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
-        let stratum_stats = self.state_info.stratum_stats.read().unwrap().clone();
-        let awaiting_peers = self.state_info.awaiting_peers.load(Ordering::Relaxed);
+        let stratum_stats = self.state_info.stratum_stats.read().clone();
 
         // Fill out stats on our current difficulty calculation
         // TODO: check the overhead of calculating this again isn't too much
@@ -406,7 +371,6 @@ impl Server {
         let mut last_time = last_blocks[0].timestamp;
         let tip_height = self.chain.head().unwrap().height as i64;
         let earliest_block_height = tip_height as i64 - last_blocks.len() as i64;
 
         let mut i = 1;
 
         let diff_entries: Vec<DiffBlock> = last_blocks
@@ -414,7 +378,7 @@ impl Server {
             .skip(1)
             .map(|n| {
                 let dur = n.timestamp - last_time;
-                let height = earliest_block_height + i + 1;
+                let height = earliest_block_height + i;
                 i += 1;
                 last_time = n.timestamp;
                 DiffBlock {
@@ -450,7 +414,6 @@ impl Server {
             head: self.head(),
             header_head: self.header_head(),
             sync_status: self.sync_state.status(),
-            awaiting_peers: awaiting_peers,
             stratum_stats: stratum_stats,
             peer_stats: peer_stats,
             diff_stats: diff_stats,
@@ -466,6 +429,6 @@ impl Server {
     /// Stops the test miner without stopping the p2p layer
     pub fn stop_test_miner(&self, stop: Arc<AtomicBool>) {
         stop.store(true, Ordering::Relaxed);
-        info!(LOGGER, "stop_test_miner - stop",);
+        info!("stop_test_miner - stop",);
     }
 }
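The `DiffBlock` mapping drops the extra `+ 1`, so heights in the difficulty-window stats now line up with `earliest_block_height`. A tiny sketch of the corrected indexing with made-up numbers (simplified data, not the real block structs):

```rust
fn main() {
    // Suppose the tip is at height 100 and we look back over 10 blocks.
    let tip_height: i64 = 100;
    let last_blocks_len: i64 = 10;
    let earliest_block_height = tip_height - last_blocks_len;

    // Skipping the first entry, window item i now sits at
    // earliest_block_height + i (no trailing `+ 1`).
    let mut i = 1;
    let heights: Vec<i64> = (1..last_blocks_len)
        .map(|_| {
            let height = earliest_block_height + i;
            i += 1;
            height
        })
        .collect();
    println!("{:?}", heights); // 91 through 99
}
```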
@ -22,7 +22,6 @@ use common::types::{SyncState, SyncStatus};
|
||||||
use core::core::hash::{Hash, Hashed, ZERO_HASH};
|
use core::core::hash::{Hash, Hashed, ZERO_HASH};
|
||||||
use core::global;
|
use core::global;
|
||||||
use p2p;
|
use p2p;
|
||||||
use util::LOGGER;
|
|
||||||
|
|
||||||
pub struct BodySync {
|
pub struct BodySync {
|
||||||
chain: Arc<chain::Chain>,
|
chain: Arc<chain::Chain>,
|
||||||
|
@ -87,14 +86,13 @@ impl BodySync {
|
||||||
|
|
||||||
fn body_sync(&mut self) {
|
fn body_sync(&mut self) {
|
||||||
let horizon = global::cut_through_horizon() as u64;
|
let horizon = global::cut_through_horizon() as u64;
|
||||||
let body_head: chain::Tip = self.chain.head().unwrap();
|
let body_head = self.chain.head().unwrap();
|
||||||
let header_head: chain::Tip = self.chain.header_head().unwrap();
|
let header_head = self.chain.header_head().unwrap();
|
||||||
let sync_head: chain::Tip = self.chain.get_sync_head().unwrap();
|
let sync_head = self.chain.get_sync_head().unwrap();
|
||||||
|
|
||||||
self.reset();
|
self.reset();
|
||||||
|
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"body_sync: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}",
|
"body_sync: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}",
|
||||||
body_head.last_block_h,
|
body_head.last_block_h,
|
||||||
body_head.height,
|
body_head.height,
|
||||||
|
@ -123,15 +121,16 @@ impl BodySync {
|
||||||
}
|
}
|
||||||
hashes.reverse();
|
hashes.reverse();
|
||||||
|
|
||||||
|
if oldest_height < header_head.height.saturating_sub(horizon) {
|
||||||
|
debug!("body_sync: cannot sync full blocks earlier than horizon.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let peers = self.peers.more_work_peers();
|
||||||
|
|
||||||
// if we have 5 peers to sync from then ask for 50 blocks total (peer_count *
|
// if we have 5 peers to sync from then ask for 50 blocks total (peer_count *
|
||||||
// 10) max will be 80 if all 8 peers are advertising more work
|
// 10) max will be 80 if all 8 peers are advertising more work
|
||||||
// also if the chain is already saturated with orphans, throttle
|
// also if the chain is already saturated with orphans, throttle
|
||||||
let peers = if oldest_height < header_head.height.saturating_sub(horizon) {
|
|
||||||
self.peers.more_work_archival_peers()
|
|
||||||
} else {
|
|
||||||
self.peers.more_work_peers()
|
|
||||||
};
|
|
||||||
|
|
||||||
let block_count = cmp::min(
|
let block_count = cmp::min(
|
||||||
cmp::min(100, peers.len() * p2p::SEND_CHANNEL_CAP),
|
cmp::min(100, peers.len() * p2p::SEND_CHANNEL_CAP),
|
||||||
chain::MAX_ORPHAN_SIZE.saturating_sub(self.chain.orphans_len()) + 1,
|
chain::MAX_ORPHAN_SIZE.saturating_sub(self.chain.orphans_len()) + 1,
|
||||||
|
@ -148,7 +147,6 @@ impl BodySync {
|
||||||
|
|
||||||
if hashes_to_get.len() > 0 {
|
if hashes_to_get.len() > 0 {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"block_sync: {}/{} requesting blocks {:?} from {} peers",
|
"block_sync: {}/{} requesting blocks {:?} from {} peers",
|
||||||
body_head.height,
|
body_head.height,
|
||||||
header_head.height,
|
header_head.height,
|
||||||
|
@ -161,7 +159,7 @@ impl BodySync {
|
||||||
for hash in hashes_to_get.clone() {
|
for hash in hashes_to_get.clone() {
|
||||||
if let Some(peer) = peers_iter.next() {
|
if let Some(peer) = peers_iter.next() {
|
||||||
if let Err(e) = peer.send_block_request(*hash) {
|
if let Err(e) = peer.send_block_request(*hash) {
|
||||||
debug!(LOGGER, "Skipped request to {}: {:?}", peer.info.addr, e);
|
debug!("Skipped request to {}: {:?}", peer.info.addr, e);
|
||||||
} else {
|
} else {
|
||||||
self.body_sync_hashes.push(hash.clone());
|
self.body_sync_hashes.push(hash.clone());
|
||||||
}
|
}
|
||||||
|
@ -199,7 +197,6 @@ impl BodySync {
|
||||||
.filter(|x| !self.chain.get_block(*x).is_ok() && !self.chain.is_orphan(*x))
|
.filter(|x| !self.chain.get_block(*x).is_ok() && !self.chain.is_orphan(*x))
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"body_sync: {}/{} blocks received, and no more in 200ms",
|
"body_sync: {}/{} blocks received, and no more in 200ms",
|
||||||
self.body_sync_hashes.len() - hashes_not_get.len(),
|
self.body_sync_hashes.len() - hashes_not_get.len(),
|
||||||
self.body_sync_hashes.len(),
|
self.body_sync_hashes.len(),
|
||||||
|
@ -210,7 +207,6 @@ impl BodySync {
|
||||||
None => {
|
None => {
|
||||||
if Utc::now() - self.sync_start_ts > Duration::seconds(5) {
|
if Utc::now() - self.sync_start_ts > Duration::seconds(5) {
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
|
||||||
"body_sync: 0/{} blocks received in 5s",
|
"body_sync: 0/{} blocks received in 5s",
|
||||||
self.body_sync_hashes.len(),
|
self.body_sync_hashes.len(),
|
||||||
);
|
);
|
||||||
|
|
|
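body_sync asks for roughly `SEND_CHANNEL_CAP` blocks per available peer, capped at 100 and throttled by how much room the orphan pool has left. A sketch of that sizing with placeholder constants standing in for `p2p::SEND_CHANNEL_CAP` and `chain::MAX_ORPHAN_SIZE`:

```rust
use std::cmp;

fn block_download_count(peer_count: usize, orphans_len: usize) -> usize {
    // Placeholder values; the real constants live in the p2p and chain crates.
    let send_channel_cap = 10;
    let max_orphan_size = 200;

    cmp::min(
        cmp::min(100, peer_count * send_channel_cap),
        max_orphan_size.saturating_sub(orphans_len) + 1,
    )
}

fn main() {
    println!("{}", block_download_count(5, 0));    // 50
    println!("{}", block_download_count(12, 0));   // 100
    println!("{}", block_download_count(12, 195)); // 6
}
```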
@@ -20,7 +20,6 @@ use chain;
 use common::types::{Error, SyncState, SyncStatus};
 use core::core::hash::{Hash, Hashed};
 use p2p::{self, Peer};
-use util::LOGGER;
 
 pub struct HeaderSync {
     sync_state: Arc<SyncState>,
@@ -55,12 +54,11 @@ impl HeaderSync {
 
         let enable_header_sync = match status {
             SyncStatus::BodySync { .. } | SyncStatus::HeaderSync { .. } => true,
-            SyncStatus::NoSync | SyncStatus::Initial => {
+            SyncStatus::NoSync | SyncStatus::Initial | SyncStatus::AwaitingPeers(_) => {
                 // Reset sync_head to header_head on transition to HeaderSync,
                 // but ONLY on initial transition to HeaderSync state.
                 let sync_head = self.chain.get_sync_head().unwrap();
                 debug!(
-                    LOGGER,
                     "sync: initial transition to HeaderSync. sync_head: {} at {}, reset to: {} at {}",
                     sync_head.hash(),
                     sync_head.height,
@@ -104,7 +102,7 @@ impl HeaderSync {
 
         // always enable header sync on initial state transition from NoSync / Initial
         let force_sync = match self.sync_state.status() {
-            SyncStatus::NoSync | SyncStatus::Initial => true,
+            SyncStatus::NoSync | SyncStatus::Initial | SyncStatus::AwaitingPeers(_) => true,
             _ => false,
         };
 
@@ -141,8 +139,8 @@ impl HeaderSync {
     fn request_headers(&mut self, peer: &Peer) {
         if let Ok(locator) = self.get_locator() {
             debug!(
-                LOGGER,
-                "sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator,
+                "sync: request_headers: asking {} for headers, {:?}",
+                peer.info.addr, locator,
             );
 
             let _ = peer.send_header_request(locator);
@@ -165,7 +163,7 @@ impl HeaderSync {
             self.history_locators.clear();
         }
 
-        debug!(LOGGER, "sync: locator heights : {:?}", heights);
+        debug!("sync: locator heights : {:?}", heights);
 
         let mut locator: Vec<Hash> = vec![];
         let mut current = self.chain.get_block_header(&tip.last_block_h);
@@ -237,7 +235,7 @@ impl HeaderSync {
             }
         }
 
-        debug!(LOGGER, "sync: locator heights': {:?}", new_heights);
+        debug!("sync: locator heights': {:?}", new_heights);
 
         // shrink history_locators properly
         if heights.len() > 1 {
@@ -258,14 +256,13 @@ impl HeaderSync {
                 }
             }
             debug!(
-                LOGGER,
                 "sync: history locators: len={}, shrunk={}",
                 self.history_locators.len(),
                 shrunk_size
             );
         }
 
-        debug!(LOGGER, "sync: locator: {:?}", locator);
+        debug!("sync: locator: {:?}", locator);
 
         Ok(locator)
     }
@@ -21,7 +21,6 @@ use common::types::{Error, SyncState, SyncStatus};
 use core::core::hash::Hashed;
 use core::global;
 use p2p::{self, Peer};
-use util::LOGGER;
 
 /// Fast sync has 3 "states":
 /// * syncing headers
@@ -33,7 +32,6 @@ pub struct StateSync {
     sync_state: Arc<SyncState>,
     peers: Arc<p2p::Peers>,
     chain: Arc<chain::Chain>,
-    archive_mode: bool,
 
     prev_fast_sync: Option<DateTime<Utc>>,
     fast_sync_peer: Option<Arc<Peer>>,
@@ -44,13 +42,11 @@ impl StateSync {
         sync_state: Arc<SyncState>,
         peers: Arc<p2p::Peers>,
         chain: Arc<chain::Chain>,
-        archive_mode: bool,
     ) -> StateSync {
         StateSync {
             sync_state,
             peers,
             chain,
-            archive_mode,
             prev_fast_sync: None,
             fast_sync_peer: None,
         }
@@ -65,8 +61,8 @@ impl StateSync {
         head: &chain::Tip,
         highest_height: u64,
     ) -> bool {
-        let need_state_sync = !self.archive_mode
-            && highest_height.saturating_sub(head.height) > global::cut_through_horizon() as u64;
+        let need_state_sync =
+            highest_height.saturating_sub(head.height) > global::cut_through_horizon() as u64;
         if !need_state_sync {
             return false;
         }
@@ -76,11 +72,8 @@ impl StateSync {
         // check sync error
         {
             let clone = self.sync_state.sync_error();
-            if let Some(ref sync_error) = *clone.read().unwrap() {
-                error!(
-                    LOGGER,
-                    "fast_sync: error = {:?}. restart fast sync", sync_error
-                );
+            if let Some(ref sync_error) = *clone.read() {
+                error!("fast_sync: error = {:?}. restart fast sync", sync_error);
                 sync_need_restart = true;
             }
             drop(clone);
@@ -92,8 +85,8 @@ impl StateSync {
             if !peer.is_connected() {
                 sync_need_restart = true;
                 info!(
-                    LOGGER,
-                    "fast_sync: peer connection lost: {:?}. restart", peer.info.addr,
+                    "fast_sync: peer connection lost: {:?}. restart",
+                    peer.info.addr,
                 );
             }
         }
@@ -110,10 +103,7 @@ impl StateSync {
 
         if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() {
             if download_timeout {
-                error!(
-                    LOGGER,
-                    "fast_sync: TxHashsetDownload status timeout in 10 minutes!"
-                );
+                error!("fast_sync: TxHashsetDownload status timeout in 10 minutes!");
                 self.sync_state
                     .set_sync_error(Error::P2P(p2p::Error::Timeout));
             }
@@ -168,7 +158,6 @@ impl StateSync {
         }
         let bhash = txhashset_head.hash();
         debug!(
-            LOGGER,
             "fast_sync: before txhashset request, header head: {} / {}, txhashset_head: {} / {}",
             header_head.height,
             header_head.last_block_h,
@@ -176,7 +165,7 @@ impl StateSync {
             bhash
         );
         if let Err(e) = peer.send_txhashset_request(txhashset_head.height, bhash) {
-            error!(LOGGER, "fast_sync: send_txhashset_request err! {:?}", e);
+            error!("fast_sync: send_txhashset_request err! {:?}", e);
             return Err(e);
         }
         return Ok(peer.clone());
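With `archive_mode` gone from `StateSync`, the remaining trigger is distance from the chain tip: fast sync kicks in when the gap to the most-work height exceeds the cut-through horizon. A sketch of that check with a placeholder horizon value (the real one comes from the consensus parameters in core):

```rust
fn need_state_sync(head_height: u64, highest_height: u64, cut_through_horizon: u64) -> bool {
    highest_height.saturating_sub(head_height) > cut_through_horizon
}

fn main() {
    let horizon = 10_080; // placeholder, not the consensus constant
    assert!(need_state_sync(1_000, 50_000, horizon));
    assert!(!need_state_sync(49_000, 50_000, horizon));
    println!("ok");
}
```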
@@ -23,184 +23,188 @@ use core::pow::Difficulty;
 use grin::sync::body_sync::BodySync;
 use grin::sync::header_sync::HeaderSync;
 use grin::sync::state_sync::StateSync;
-use p2p::{self, Peers};
-use util::LOGGER;
+use p2p;
 
 pub fn run_sync(
     sync_state: Arc<SyncState>,
-    awaiting_peers: Arc<AtomicBool>,
     peers: Arc<p2p::Peers>,
     chain: Arc<chain::Chain>,
-    skip_sync_wait: bool,
-    archive_mode: bool,
     stop: Arc<AtomicBool>,
 ) {
     let _ = thread::Builder::new()
         .name("sync".to_string())
         .spawn(move || {
-            sync_loop(
-                sync_state,
-                awaiting_peers,
-                peers,
-                chain,
-                skip_sync_wait,
-                archive_mode,
-                stop,
-            )
+            let runner = SyncRunner::new(sync_state, peers, chain, stop);
+            runner.sync_loop();
         });
 }
 
-fn wait_for_min_peers(
-    awaiting_peers: Arc<AtomicBool>,
-    peers: Arc<p2p::Peers>,
-    chain: Arc<chain::Chain>,
-    skip_sync_wait: bool,
-) {
-    // Initial sleep to give us time to peer with some nodes.
-    // Note: Even if we have "skip_sync_wait" we need to wait a
-    // short period of time for tests to do the right thing.
-    let wait_secs = if skip_sync_wait { 3 } else { 30 };
-
-    let head = chain.head().unwrap();
-
-    awaiting_peers.store(true, Ordering::Relaxed);
-    let mut n = 0;
-    const MIN_PEERS: usize = 3;
-    loop {
-        let wp = peers.more_work_peers();
-        // exit loop when:
-        // * we have more than MIN_PEERS more_work peers
-        // * we are synced already, e.g. grin was quickly restarted
-        // * timeout
-        if wp.len() > MIN_PEERS
-            || (wp.len() == 0 && peers.enough_peers() && head.total_difficulty > Difficulty::zero())
-            || n > wait_secs
-        {
-            break;
-        }
-        thread::sleep(time::Duration::from_secs(1));
-        n += 1;
-    }
-    awaiting_peers.store(false, Ordering::Relaxed);
-}
-
-/// Starts the syncing loop, just spawns two threads that loop forever
-fn sync_loop(
+pub struct SyncRunner {
     sync_state: Arc<SyncState>,
-    awaiting_peers: Arc<AtomicBool>,
     peers: Arc<p2p::Peers>,
     chain: Arc<chain::Chain>,
-    skip_sync_wait: bool,
-    archive_mode: bool,
     stop: Arc<AtomicBool>,
-) {
-    // Wait for connections reach at least MIN_PEERS
-    wait_for_min_peers(awaiting_peers, peers.clone(), chain.clone(), skip_sync_wait);
-
-    // Our 3 main sync stages
-    let mut header_sync = HeaderSync::new(sync_state.clone(), peers.clone(), chain.clone());
-    let mut body_sync = BodySync::new(sync_state.clone(), peers.clone(), chain.clone());
-    let mut state_sync = StateSync::new(
-        sync_state.clone(),
-        peers.clone(),
-        chain.clone(),
-        archive_mode,
-    );
-
-    // Highest height seen on the network, generally useful for a fast test on
-    // whether some sync is needed
-    let mut highest_height = 0;
-
-    // Main syncing loop
-    while !stop.load(Ordering::Relaxed) {
-        thread::sleep(time::Duration::from_millis(10));
-
-        // check whether syncing is generally needed, when we compare our state with others
-        let (syncing, most_work_height) =
-            needs_syncing(sync_state.as_ref(), peers.clone(), chain.clone());
-
-        if most_work_height > 0 {
-            // we can occasionally get a most work height of 0 if read locks fail
-            highest_height = most_work_height;
-        }
-
-        // quick short-circuit (and a decent sleep) if no syncing is needed
-        if !syncing {
-            sync_state.update(SyncStatus::NoSync);
-            thread::sleep(time::Duration::from_secs(10));
-            continue;
-        }
-
-        // if syncing is needed
-        let head = chain.head().unwrap();
-        let header_head = chain.header_head().unwrap();
-
-        // run each sync stage, each of them deciding whether they're needed
-        // except for body sync that only runs if state sync is off or done
-        header_sync.check_run(&header_head, highest_height);
-        if !state_sync.check_run(&header_head, &head, highest_height) {
-            body_sync.check_run(&head, highest_height);
-        }
-    }
 }
 
-/// Whether we're currently syncing the chain or we're fully caught up and
-/// just receiving blocks through gossip.
-fn needs_syncing(
-    sync_state: &SyncState,
-    peers: Arc<Peers>,
-    chain: Arc<chain::Chain>,
-) -> (bool, u64) {
-    let local_diff = chain.head().unwrap().total_difficulty;
-    let peer = peers.most_work_peer();
-    let is_syncing = sync_state.is_syncing();
-    let mut most_work_height = 0;
-
-    // if we're already syncing, we're caught up if no peer has a higher
-    // difficulty than us
-    if is_syncing {
-        if let Some(peer) = peer {
-            most_work_height = peer.info.height();
-            if peer.info.total_difficulty() <= local_diff {
-                let ch = chain.head().unwrap();
-                info!(
-                    LOGGER,
-                    "synchronized at {} @ {} [{}]",
-                    local_diff.to_num(),
-                    ch.height,
-                    ch.last_block_h
-                );
-
-                let _ = chain.reset_head();
-                return (false, most_work_height);
+impl SyncRunner {
+    fn new(
+        sync_state: Arc<SyncState>,
+        peers: Arc<p2p::Peers>,
+        chain: Arc<chain::Chain>,
+        stop: Arc<AtomicBool>,
+    ) -> SyncRunner {
+        SyncRunner {
+            sync_state,
+            peers,
+            chain,
+            stop,
+        }
+    }
+
+    fn wait_for_min_peers(&self) {
+        // Initial sleep to give us time to peer with some nodes.
+        // Note: Even if we have skip peer wait we need to wait a
+        // short period of time for tests to do the right thing.
+        let wait_secs = if let SyncStatus::AwaitingPeers(true) = self.sync_state.status() {
+            30
+        } else {
+            3
+        };
+
+        let head = self.chain.head().unwrap();
+
+        let mut n = 0;
+        const MIN_PEERS: usize = 3;
+        loop {
+            let wp = self.peers.more_work_peers();
+            // exit loop when:
+            // * we have more than MIN_PEERS more_work peers
+            // * we are synced already, e.g. grin was quickly restarted
+            // * timeout
+            if wp.len() > MIN_PEERS
+                || (wp.len() == 0
+                    && self.peers.enough_peers()
+                    && head.total_difficulty > Difficulty::zero())
+                || n > wait_secs
+            {
+                break;
+            }
+            thread::sleep(time::Duration::from_secs(1));
+            n += 1;
+        }
+    }
+
+    /// Starts the syncing loop, just spawns two threads that loop forever
+    fn sync_loop(&self) {
+        // Wait for connections reach at least MIN_PEERS
+        self.wait_for_min_peers();
+
+        // Our 3 main sync stages
+        let mut header_sync = HeaderSync::new(
+            self.sync_state.clone(),
+            self.peers.clone(),
+            self.chain.clone(),
+        );
+        let mut body_sync = BodySync::new(
+            self.sync_state.clone(),
+            self.peers.clone(),
+            self.chain.clone(),
+        );
+        let mut state_sync = StateSync::new(
+            self.sync_state.clone(),
+            self.peers.clone(),
+            self.chain.clone(),
+        );
+
+        // Highest height seen on the network, generally useful for a fast test on
+        // whether some sync is needed
+        let mut highest_height = 0;
+
+        // Main syncing loop
+        while !self.stop.load(Ordering::Relaxed) {
+            thread::sleep(time::Duration::from_millis(10));
+
+            // check whether syncing is generally needed, when we compare our state with others
+            let (syncing, most_work_height) = self.needs_syncing();
|
||||||
|
|
||||||
|
if most_work_height > 0 {
|
||||||
|
// we can occasionally get a most work height of 0 if read locks fail
|
||||||
|
highest_height = most_work_height;
|
||||||
|
}
|
||||||
|
|
||||||
|
// quick short-circuit (and a decent sleep) if no syncing is needed
|
||||||
|
if !syncing {
|
||||||
|
self.sync_state.update(SyncStatus::NoSync);
|
||||||
|
thread::sleep(time::Duration::from_secs(10));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// if syncing is needed
|
||||||
|
let head = self.chain.head().unwrap();
|
||||||
|
let header_head = self.chain.header_head().unwrap();
|
||||||
|
|
||||||
|
// run each sync stage, each of them deciding whether they're needed
|
||||||
|
// except for body sync that only runs if state sync is off or done
|
||||||
|
header_sync.check_run(&header_head, highest_height);
|
||||||
|
if !state_sync.check_run(&header_head, &head, highest_height) {
|
||||||
|
body_sync.check_run(&head, highest_height);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Whether we're currently syncing the chain or we're fully caught up and
|
||||||
|
/// just receiving blocks through gossip.
|
||||||
|
fn needs_syncing(&self) -> (bool, u64) {
|
||||||
|
let local_diff = self.chain.head().unwrap().total_difficulty;
|
||||||
|
let peer = self.peers.most_work_peer();
|
||||||
|
let is_syncing = self.sync_state.is_syncing();
|
||||||
|
let mut most_work_height = 0;
|
||||||
|
|
||||||
|
// if we're already syncing, we're caught up if no peer has a higher
|
||||||
|
// difficulty than us
|
||||||
|
if is_syncing {
|
||||||
|
if let Some(peer) = peer {
|
||||||
|
most_work_height = peer.info.height();
|
||||||
|
if peer.info.total_difficulty() <= local_diff {
|
||||||
|
let ch = self.chain.head().unwrap();
|
||||||
|
info!(
|
||||||
|
"synchronized at {} @ {} [{}]",
|
||||||
|
local_diff.to_num(),
|
||||||
|
ch.height,
|
||||||
|
ch.last_block_h
|
||||||
|
);
|
||||||
|
|
||||||
|
let _ = self.chain.reset_head();
|
||||||
|
return (false, most_work_height);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
warn!("sync: no peers available, disabling sync");
|
||||||
|
return (false, 0);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
warn!(LOGGER, "sync: no peers available, disabling sync");
|
if let Some(peer) = peer {
|
||||||
return (false, 0);
|
most_work_height = peer.info.height();
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if let Some(peer) = peer {
|
|
||||||
most_work_height = peer.info.height();
|
|
||||||
|
|
||||||
// sum the last 5 difficulties to give us the threshold
|
// sum the last 5 difficulties to give us the threshold
|
||||||
let threshold = chain
|
let threshold = self
|
||||||
.difficulty_iter()
|
.chain
|
||||||
.map(|x| x.difficulty)
|
.difficulty_iter()
|
||||||
.take(5)
|
.map(|x| x.difficulty)
|
||||||
.fold(Difficulty::zero(), |sum, val| sum + val);
|
.take(5)
|
||||||
|
.fold(Difficulty::zero(), |sum, val| sum + val);
|
||||||
|
|
||||||
let peer_diff = peer.info.total_difficulty();
|
let peer_diff = peer.info.total_difficulty();
|
||||||
if peer_diff > local_diff.clone() + threshold.clone() {
|
if peer_diff > local_diff.clone() + threshold.clone() {
|
||||||
info!(
|
info!(
|
||||||
LOGGER,
|
"sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync",
|
||||||
"sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync",
|
local_diff,
|
||||||
local_diff,
|
peer_diff,
|
||||||
peer_diff,
|
threshold,
|
||||||
threshold,
|
);
|
||||||
);
|
return (true, most_work_height);
|
||||||
return (true, most_work_height);
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
(is_syncing, most_work_height)
|
||||||
}
|
}
|
||||||
(is_syncing, most_work_height)
|
|
||||||
}
|
}
|
||||||
|
|
|
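Note: the hunk above folds the former free-standing sync functions into methods on a SyncRunner type. As a rough usage sketch only (the runner construction and the thread spawn below are illustrative assumptions, not lines from this diff), a caller holding the same shared handles could drive it like this:

	// hypothetical caller, assuming SyncRunner::new and sync_loop as in the hunk above
	let runner = SyncRunner::new(sync_state.clone(), peers.clone(), chain.clone(), stop.clone());
	let _ = std::thread::Builder::new()
		.name("sync".to_string())
		.spawn(move || runner.sync_loop());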
@@ -35,7 +35,7 @@ extern crate serde;
 extern crate serde_derive;
 extern crate serde_json;
 #[macro_use]
-extern crate slog;
+extern crate log;
 extern crate chrono;

 extern crate grin_api as api;
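Note: swapping the #[macro_use] extern from slog to the log crate is what drives most of the hunks below: the log facade macros take the format string as their first argument, so the explicit LOGGER drain disappears from every call site. A minimal before/after illustration (assuming a log backend is initialised elsewhere):

	// before (slog): info!(LOGGER, "synchronized at {}", height);
	// after (log facade):
	info!("synchronized at {}", height);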
@@ -17,9 +17,10 @@
 use chrono::prelude::{DateTime, NaiveDateTime, Utc};
 use rand::{thread_rng, Rng};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
+use util::RwLock;

 use chain;
 use common::types::Error;
@@ -27,7 +28,7 @@ use core::core::verifier_cache::VerifierCache;
 use core::{consensus, core, ser};
 use keychain::{ExtKeychain, Identifier, Keychain};
 use pool;
-use util::{self, LOGGER};
+use util;
 use wallet::{self, BlockFees};

 // Ensure a block suitable for mining is built and returned
@@ -54,24 +55,22 @@ pub fn get_block(
 			self::Error::Chain(c) => match c.kind() {
 				chain::ErrorKind::DuplicateCommitment(_) => {
 					debug!(
-						LOGGER,
 						"Duplicate commit for potential coinbase detected. Trying next derivation."
 					);
 				}
 				_ => {
-					error!(LOGGER, "Chain Error: {}", c);
+					error!("Chain Error: {}", c);
 				}
 			},
 			self::Error::Wallet(_) => {
 				error!(
-					LOGGER,
 					"Error building new block: Can't connect to wallet listener at {:?}; will retry",
 					wallet_listener_url.as_ref().unwrap()
 				);
 				thread::sleep(Duration::from_secs(wallet_retry_interval));
 			}
 			ae => {
-				warn!(LOGGER, "Error building new block: {:?}. Retrying.", ae);
+				warn!("Error building new block: {:?}. Retrying.", ae);
 			}
 		}
 		thread::sleep(Duration::from_millis(100));
@@ -106,15 +105,10 @@ fn build_block(

 	// Determine the difficulty our block should be at.
 	// Note: do not keep the difficulty_iter in scope (it has an active batch).
-	let difficulty = consensus::next_difficulty(1, chain.difficulty_iter());
+	let difficulty = consensus::next_difficulty(head.height + 1, chain.difficulty_iter());

 	// extract current transaction from the pool
-	// TODO - we have a lot of unwrap() going on in this fn...
-	let txs = tx_pool
-		.read()
-		.unwrap()
-		.prepare_mineable_transactions()
-		.unwrap();
+	let txs = tx_pool.read().prepare_mineable_transactions()?;

 	// build the coinbase and the block itself
 	let fees = txs.iter().map(|tx| tx.fee()).sum();
@@ -137,7 +131,6 @@ fn build_block(

 	let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num();
 	debug!(
-		LOGGER,
 		"Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
 		b.inputs().len(),
 		b.outputs().len(),
@@ -162,10 +155,7 @@ fn build_block(

 		//Some other issue, possibly duplicate kernel
 		_ => {
-			error!(
-				LOGGER,
-				"Error setting txhashset root to build a block: {:?}", e
-			);
+			error!("Error setting txhashset root to build a block: {:?}", e);
 			Err(Error::Chain(
 				chain::ErrorKind::Other(format!("{:?}", e)).into(),
 			))
@@ -179,7 +169,7 @@ fn build_block(
 /// Probably only want to do this when testing.
 ///
 fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
-	warn!(LOGGER, "Burning block fees: {:?}", block_fees);
+	warn!("Burning block fees: {:?}", block_fees);
 	let keychain = ExtKeychain::from_random_seed().unwrap();
 	let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
 	let (out, kernel) =
@@ -212,7 +202,7 @@ fn get_coinbase(
 			..block_fees
 		};

-		debug!(LOGGER, "get_coinbase: {:?}", block_fees);
+		debug!("get_coinbase: {:?}", block_fees);
 		return Ok((output, kernel, block_fees));
 	}
 }
@@ -21,9 +21,10 @@ use serde_json::Value;
 use std::error::Error;
 use std::io::{BufRead, ErrorKind, Write};
 use std::net::{TcpListener, TcpStream};
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::Arc;
 use std::time::{Duration, SystemTime};
 use std::{cmp, thread};
+use util::{Mutex, RwLock};

 use chain;
 use common::stats::{StratumStats, WorkerStats};
@@ -34,7 +35,7 @@ use core::{pow, ser};
 use keychain;
 use mining::mine_block;
 use pool;
-use util::{self, LOGGER};
+use util;

 // ----------------------------------------
 // http://www.jsonrpc.org/specification
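Note: the import change from std::sync locks to util::Mutex and util::RwLock (presumably thin wrappers over parking_lot-style locks, an assumption not stated in this diff) explains why later hunks drop .unwrap() after lock(), read() and write(): those methods hand back the guard directly rather than a Result that can signal poisoning. Sketch of the resulting calling pattern, with stratum_stats as the running example:

	// before: let mut s = stratum_stats.write().unwrap();
	// after: the guard is returned directly, nothing to unwrap
	let mut s = stratum_stats.write();
	s.num_workers = workers_l.len();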
@@ -113,7 +114,6 @@ fn accept_workers(
 		match stream {
 			Ok(stream) => {
 				warn!(
-					LOGGER,
 					"(Server ID: {}) New connection: {}",
 					id,
 					stream.peer_addr().unwrap()
@@ -122,22 +122,19 @@ fn accept_workers(
 					.set_nonblocking(true)
 					.expect("set_nonblocking call failed");
 				let mut worker = Worker::new(worker_id.to_string(), BufStream::new(stream));
-				workers.lock().unwrap().push(worker);
+				workers.lock().push(worker);
 				// stats for this worker (worker stat objects are added and updated but never
 				// removed)
 				let mut worker_stats = WorkerStats::default();
 				worker_stats.is_connected = true;
 				worker_stats.id = worker_id.to_string();
 				worker_stats.pow_difficulty = 1; // XXX TODO
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				stratum_stats.worker_stats.push(worker_stats);
 				worker_id = worker_id + 1;
 			}
 			Err(e) => {
-				warn!(
-					LOGGER,
-					"(Server ID: {}) Error accepting connection: {:?}", id, e
-				);
+				warn!("(Server ID: {}) Error accepting connection: {:?}", id, e);
 			}
 		}
 	}
@@ -184,8 +181,8 @@ impl Worker {
 			}
 			Err(e) => {
 				warn!(
-					LOGGER,
-					"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
+					"(Server ID: {}) Error in connection with stratum client: {}",
+					self.id, e
 				);
 				self.error = true;
 				return None;
@@ -205,16 +202,16 @@ impl Worker {
 			Ok(_) => {}
 			Err(e) => {
 				warn!(
-					LOGGER,
-					"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
+					"(Server ID: {}) Error in connection with stratum client: {}",
+					self.id, e
 				);
 				self.error = true;
 			}
 		},
 		Err(e) => {
 			warn!(
-				LOGGER,
-				"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
+				"(Server ID: {}) Error in connection with stratum client: {}",
+				self.id, e
 			);
 			self.error = true;
 			return;
@@ -285,7 +282,7 @@ impl StratumServer {

 	// Handle an RPC request message from the worker(s)
 	fn handle_rpc_requests(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) {
-		let mut workers_l = self.workers.lock().unwrap();
+		let mut workers_l = self.workers.lock();
 		for num in 0..workers_l.len() {
 			match workers_l[num].read_message() {
 				Some(the_message) => {
@@ -295,7 +292,6 @@ impl StratumServer {
 					Err(e) => {
 						// not a valid JSON RpcRequest - disconnect the worker
 						warn!(
-							LOGGER,
 							"(Server ID: {}) Failed to parse JSONRpc: {} - {:?}",
 							self.id,
 							e.description(),
@@ -306,7 +302,7 @@ impl StratumServer {
 					}
 				};

-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				let worker_stats_id = stratum_stats
 					.worker_stats
 					.iter()
@@ -408,11 +404,8 @@ impl StratumServer {
 			let job_template = self.build_block_template();
 			let response = serde_json::to_value(&job_template).unwrap();
 			debug!(
-				LOGGER,
 				"(Server ID: {}) sending block {} with id {} to single worker",
-				self.id,
-				job_template.height,
-				job_template.job_id,
+				self.id, job_template.height, job_template.job_id,
 			);
 			return Ok(response);
 		}
@@ -451,8 +444,8 @@ impl StratumServer {
 		if params.height != self.current_block_versions.last().unwrap().header.height {
 			// Return error status
 			error!(
-				LOGGER,
-				"(Server ID: {}) Share at height {} submitted too late", self.id, params.height,
+				"(Server ID: {}) Share at height {} submitted too late",
+				self.id, params.height,
 			);
 			worker_stats.num_stale += 1;
 			let e = RpcError {
@@ -466,11 +459,8 @@ impl StratumServer {
 		if b.is_none() {
 			// Return error status
 			error!(
-				LOGGER,
 				"(Server ID: {}) Failed to validate solution at height {}: invalid job_id {}",
-				self.id,
-				params.height,
-				params.job_id,
+				self.id, params.height, params.job_id,
 			);
 			worker_stats.num_rejected += 1;
 			let e = RpcError {
@@ -490,11 +480,8 @@ impl StratumServer {
 		if share_difficulty < self.minimum_share_difficulty {
 			// Return error status
 			error!(
-				LOGGER,
 				"(Server ID: {}) Share rejected due to low difficulty: {}/{}",
-				self.id,
-				share_difficulty,
-				self.minimum_share_difficulty,
+				self.id, share_difficulty, self.minimum_share_difficulty,
 			);
 			worker_stats.num_rejected += 1;
 			let e = RpcError {
@@ -510,7 +497,6 @@ impl StratumServer {
 		if let Err(e) = res {
 			// Return error status
 			error!(
-				LOGGER,
 				"(Server ID: {}) Failed to validate solution at height {}: {}: {}",
 				self.id,
 				params.height,
@@ -527,15 +513,14 @@ impl StratumServer {
 			share_is_block = true;
 			// Log message to make it obvious we found a block
 			warn!(
-				LOGGER,
-				"(Server ID: {}) Solution Found for block {} - Yay!!!", self.id, params.height
+				"(Server ID: {}) Solution Found for block {} - Yay!!!",
+				self.id, params.height
 			);
 		} else {
 			// Do some validation but dont submit
 			if !pow::verify_size(&b.header, b.header.pow.proof.edge_bits).is_ok() {
 				// Return error status
 				error!(
-					LOGGER,
 					"(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}",
 					self.id,
 					params.height,
@@ -556,7 +541,6 @@ impl StratumServer {
 			Some(login) => login.clone(),
 		};
 		info!(
-			LOGGER,
 			"(Server ID: {}) Got share for block: hash {}, height {}, nonce {}, difficulty {}/{}, submitted by {}",
 			self.id,
 			b.hash(),
@@ -582,18 +566,16 @@ impl StratumServer {
 	// Purge dead/sick workers - remove all workers marked in error state
 	fn clean_workers(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) -> usize {
 		let mut start = 0;
-		let mut workers_l = self.workers.lock().unwrap();
+		let mut workers_l = self.workers.lock();
 		loop {
 			for num in start..workers_l.len() {
 				if workers_l[num].error == true {
 					warn!(
-						LOGGER,
 						"(Server ID: {}) Dropping worker: {}",
-						self.id,
-						workers_l[num].id;
+						self.id, workers_l[num].id
 					);
 					// Update worker stats
-					let mut stratum_stats = stratum_stats.write().unwrap();
+					let mut stratum_stats = stratum_stats.write();
 					let worker_stats_id = stratum_stats
 						.worker_stats
 						.iter()
@@ -607,7 +589,7 @@ impl StratumServer {
 				start = num + 1;
 			}
 			if start >= workers_l.len() {
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				stratum_stats.num_workers = workers_l.len();
 				return stratum_stats.num_workers;
 			}
@@ -630,16 +612,13 @@ impl StratumServer {
 		};
 		let job_request_json = serde_json::to_string(&job_request).unwrap();
 		debug!(
-			LOGGER,
 			"(Server ID: {}) sending block {} with id {} to stratum clients",
-			self.id,
-			job_template.height,
-			job_template.job_id,
+			self.id, job_template.height, job_template.job_id,
 		);
 		// Push the new block to all connected clients
 		// NOTE: We do not give a unique nonce (should we?) so miners need
 		// to choose one for themselves
-		let mut workers_l = self.workers.lock().unwrap();
+		let mut workers_l = self.workers.lock();
 		for num in 0..workers_l.len() {
 			workers_l[num].write_message(job_request_json.clone());
 		}
@@ -658,11 +637,8 @@ impl StratumServer {
 		sync_state: Arc<SyncState>,
 	) {
 		info!(
-			LOGGER,
 			"(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}",
-			self.id,
-			edge_bits,
-			proof_size
+			self.id, edge_bits, proof_size
 		);

 		self.sync_state = sync_state;
@@ -691,13 +667,12 @@ impl StratumServer {

 		// We have started
 		{
-			let mut stratum_stats = stratum_stats.write().unwrap();
+			let mut stratum_stats = stratum_stats.write();
 			stratum_stats.is_running = true;
 			stratum_stats.edge_bits = edge_bits as u16;
 		}

 		warn!(
-			LOGGER,
 			"Stratum server started on {}",
 			self.config.stratum_server_addr.clone().unwrap()
 		);
@@ -753,7 +728,7 @@ impl StratumServer {
 			deadline = Utc::now().timestamp() + attempt_time_per_block as i64;

 			{
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				stratum_stats.block_height = new_block.header.height;
 				stratum_stats.network_difficulty = self.current_difficulty;
 			}
@@ -19,7 +19,8 @@

 use chrono::prelude::Utc;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain;
 use common::types::StratumServerConfig;
@@ -30,7 +31,6 @@ use core::global;
 use core::pow::PoWContext;
 use mining::mine_block;
 use pool;
-use util::LOGGER;

 pub struct Miner {
 	config: StratumServerConfig,
@@ -84,7 +84,6 @@ impl Miner {
 		let deadline = Utc::now().timestamp() + attempt_time_per_block as i64;

 		debug!(
-			LOGGER,
 			"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
 			self.debug_output_id,
 			global::min_edge_bits(),
@@ -115,10 +114,8 @@ impl Miner {
 		}

 		debug!(
-			LOGGER,
 			"(Server ID: {}) No solution found after {} iterations, continuing...",
-			self.debug_output_id,
-			iter_count
+			self.debug_output_id, iter_count
 		);
 		false
 	}
@@ -127,8 +124,8 @@ impl Miner {
 	/// chain anytime required and looking for PoW solution.
 	pub fn run_loop(&self, wallet_listener_url: Option<String>) {
 		info!(
-			LOGGER,
-			"(Server ID: {}) Starting test miner loop.", self.debug_output_id
+			"(Server ID: {}) Starting test miner loop.",
+			self.debug_output_id
 		);

 		// iteration, we keep the returned derivation to provide it back when
@@ -136,7 +133,7 @@ impl Miner {
 		let mut key_id = None;

 		while !self.stop.load(Ordering::Relaxed) {
-			trace!(LOGGER, "in miner loop. key_id: {:?}", key_id);
+			trace!("in miner loop. key_id: {:?}", key_id);

 			// get the latest chain state and build a block on top of it
 			let head = self.chain.head_header().unwrap();
@@ -160,7 +157,6 @@ impl Miner {
 			// we found a solution, push our block through the chain processing pipeline
 			if sol {
 				info!(
-					LOGGER,
 					"(Server ID: {}) Found valid proof of work, adding block {}.",
 					self.debug_output_id,
 					b.hash()
@@ -168,26 +164,21 @@ impl Miner {
 				let res = self.chain.process_block(b, chain::Options::MINE);
 				if let Err(e) = res {
 					error!(
-						LOGGER,
 						"(Server ID: {}) Error validating mined block: {:?}",
-						self.debug_output_id,
-						e
+						self.debug_output_id, e
 					);
 				}
-				trace!(LOGGER, "resetting key_id in miner to None");
+				trace!("resetting key_id in miner to None");
 				key_id = None;
 			} else {
 				debug!(
-					LOGGER,
-					"setting pubkey in miner to pubkey from block_fees - {:?}",
-					block_fees
+					"setting pubkey in miner to pubkey from block_fees - {:?}", block_fees
 				);
 				key_id = block_fees.key_id();
 			}
 		}

-		info!(
-			LOGGER,
-			"(Server ID: {}) test miner exit.", self.debug_output_id
-		);
+		info!("(Server ID: {}) test miner exit.", self.debug_output_id);
 	}
 }
@@ -25,8 +25,6 @@ use std::env;
 use std::io::Error;
 use std::thread;

-use util::LOGGER;
-
 /// Future returned from `MainService`.
 enum MainFuture {
 	Root,
@@ -94,10 +92,7 @@ pub fn start_webwallet_server() {
 		let server = Server::bind(&addr)
 			.serve(|| future::ok::<_, Error>(MainService::new()))
 			.map_err(|e| eprintln!("server error: {}", e));
-		warn!(
-			LOGGER,
-			"Grin Web-Wallet Application is running at http://{}/", addr
-		);
+		warn!("Grin Web-Wallet Application is running at http://{}/", addr);
 		rt::run(server);
 	});
 }
@@ -13,7 +13,7 @@
 // limitations under the License.

 #[macro_use]
-extern crate slog;
+extern crate log;

 extern crate grin_api as api;
 extern crate grin_chain as chain;
@@ -26,18 +26,20 @@ extern crate grin_wallet as wallet;

 mod framework;

-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
+use util::Mutex;

 use core::global::{self, ChainTypes};

 use framework::{LocalServerContainer, LocalServerContainerConfig};
-use util::{init_test_logger, LOGGER};
+use util::init_test_logger;

 #[test]
 fn simple_server_wallet() {
 	init_test_logger();
-	info!(LOGGER, "starting simple_server_wallet");
+	info!("starting simple_server_wallet");
+	let test_name_dir = "test_servers";
 	core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);

 	// Run a separate coinbase wallet for coinbase transactions
@@ -52,7 +54,7 @@ fn simple_server_wallet() {
 	));

 	let _ = thread::spawn(move || {
-		let mut w = coinbase_wallet.lock().unwrap();
+		let mut w = coinbase_wallet.lock();
 		w.run_wallet(0);
 	});

@@ -81,11 +83,11 @@ fn simple_server_wallet() {
 	let base_addr = server_config.base_addr;
 	let api_server_port = server_config.api_server_port;

-	warn!(LOGGER, "Testing chain handler");
+	warn!("Testing chain handler");
 	let tip = get_tip(&base_addr, api_server_port);
 	assert!(tip.is_ok());

-	warn!(LOGGER, "Testing status handler");
+	warn!("Testing status handler");
 	let status = get_status(&base_addr, api_server_port);
 	assert!(status.is_ok());

@@ -96,7 +98,7 @@ fn simple_server_wallet() {
 		current_tip = get_tip(&base_addr, api_server_port).unwrap();
 	}

-	warn!(LOGGER, "Testing block handler");
+	warn!("Testing block handler");
 	let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
 	assert!(last_block_by_height.is_ok());
 	let last_block_by_height_compact =
@@ -110,7 +112,7 @@ fn simple_server_wallet() {
 		get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
 	assert!(last_block_by_hash_compact.is_ok());

-	warn!(LOGGER, "Testing chain output handler");
+	warn!("Testing chain output handler");
 	let start_height = 0;
 	let end_height = current_tip.height;
 	let outputs_by_height =
@@ -122,7 +124,7 @@ fn simple_server_wallet() {
 	let outputs_by_ids2 = get_outputs_by_ids2(&base_addr, api_server_port, ids.clone());
 	assert!(outputs_by_ids2.is_ok());

-	warn!(LOGGER, "Testing txhashset handler");
+	warn!("Testing txhashset handler");
 	let roots = get_txhashset_roots(&base_addr, api_server_port);
 	assert!(roots.is_ok());
 	let last_10_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 0);
@@ -146,7 +148,7 @@ fn simple_server_wallet() {
 #[test]
 fn test_p2p() {
 	init_test_logger();
-	info!(LOGGER, "starting test_p2p");
+	info!("starting test_p2p");
 	global::set_mining_mode(ChainTypes::AutomatedTesting);

 	let test_name_dir = "test_servers";
@@ -187,7 +189,7 @@ fn test_p2p() {
 	thread::sleep(time::Duration::from_millis(2000));

 	// Starting tests
-	warn!(LOGGER, "Starting P2P Tests");
+	warn!("Starting P2P Tests");
 	let base_addr = server_config_one.base_addr;
 	let api_server_port = server_config_one.api_server_port;
@@ -13,7 +13,7 @@
 // limitations under the License.

 #[macro_use]
-extern crate slog;
+extern crate log;

 extern crate grin_api as api;
 extern crate grin_chain as chain;
@@ -27,10 +27,9 @@ extern crate grin_wallet as wallet;
 mod framework;

 use framework::{LocalServerContainer, LocalServerContainerConfig};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
-use util::LOGGER;
+use util::Mutex;

 /// Start 1 node mining, 1 non mining node and two wallets.
 /// Then send a transaction from one wallet to another and propagate it a stem
@@ -56,12 +55,12 @@ fn test_dandelion_timeout() {
 	let coinbase_wallet = Arc::new(Mutex::new(
 		LocalServerContainer::new(coinbase_config).unwrap(),
 	));
-	let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
+	let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };

 	let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);

 	let _ = thread::spawn(move || {
-		let mut w = coinbase_wallet.lock().unwrap();
+		let mut w = coinbase_wallet.lock();
 		w.run_wallet(0);
 	});

@@ -71,12 +70,12 @@ fn test_dandelion_timeout() {
 	recp_config.wallet_port = 20002;
 	let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
 	let target_wallet_cloned = target_wallet.clone();
-	let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
+	let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };

 	let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
 	//Start up a second wallet, to receive
 	let _ = thread::spawn(move || {
-		let mut w = target_wallet_cloned.lock().unwrap();
+		let mut w = target_wallet_cloned.lock();
 		w.run_wallet(0);
 	});

@@ -135,7 +134,7 @@ fn test_dandelion_timeout() {
 		LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
 	}

-	warn!(LOGGER, "Sending 50 Grins to recipient wallet");
+	warn!("Sending 50 Grins to recipient wallet");

 	// Sending stem transaction
 	LocalServerContainer::send_amount_to(
@@ -25,8 +25,9 @@ extern crate blake2_rfc as blake2;

 use std::default::Default;
 use std::ops::Deref;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{fs, thread, time};
+use util::Mutex;

 use framework::keychain::Keychain;
 use wallet::{HTTPWalletClient, LMDBBackend, WalletConfig};
@@ -532,7 +533,7 @@ impl LocalServerContainerPool {
 				thread::sleep(time::Duration::from_millis(2000));
 			}
 			let server_ref = s.run_server(run_length);
-			return_container_ref.lock().unwrap().push(server_ref);
+			return_container_ref.lock().push(server_ref);
 		});
 		// Not a big fan of sleeping hack here, but there appears to be a
 		// concurrency issue when creating files in rocksdb that causes
@@ -575,7 +576,7 @@ impl LocalServerContainerPool {
 	}

 pub fn stop_all_servers(servers: Arc<Mutex<Vec<servers::Server>>>) {
-	let locked_servs = servers.lock().unwrap();
+	let locked_servs = servers.lock();
 	for s in locked_servs.deref() {
 		s.stop();
 	}
@@ -21,19 +21,19 @@ extern crate grin_servers as servers;
 extern crate grin_util as util;
 extern crate grin_wallet as wallet;
 #[macro_use]
-extern crate slog;
+extern crate log;

 mod framework;

 use std::default::Default;
 use std::sync::atomic::AtomicBool;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
+use util::Mutex;

 use core::core::hash::Hashed;
 use core::global::{self, ChainTypes};

-use util::LOGGER;
 use wallet::controller;
 use wallet::libtx::slate::Slate;
 use wallet::libwallet::types::{WalletBackend, WalletInst};
@@ -242,7 +242,7 @@ fn simulate_block_propagation() {
 		thread::sleep(time::Duration::from_millis(1_000));
 		time_spent += 1;
 		if time_spent >= 30 {
-			info!(LOGGER, "simulate_block_propagation - fail on timeout",);
+			info!("simulate_block_propagation - fail on timeout",);
 			break;
 		}

@@ -284,7 +284,6 @@ fn simulate_full_sync() {
 	// Get the current header from s1.
 	let s1_header = s1.chain.head_header().unwrap();
 	info!(
-		LOGGER,
 		"simulate_full_sync - s1 header head: {} at {}",
 		s1_header.hash(),
 		s1_header.height
@@ -297,7 +296,6 @@ fn simulate_full_sync() {
 		time_spent += 1;
 		if time_spent >= 30 {
 			info!(
-				LOGGER,
 				"sync fail. s2.head().height: {}, s1_header.height: {}",
 				s2.head().height,
 				s1_header.height
@@ -355,7 +353,6 @@ fn simulate_fast_sync() {
 		total_wait += 1;
 		if total_wait >= 30 {
 			error!(
-				LOGGER,
 				"simulate_fast_sync test fail on timeout! s2 height: {}, s1 height: {}",
 				s2.head().height,
 				s1_header.height,
@@ -23,7 +23,7 @@ extern crate grin_wallet as wallet;
 extern crate bufstream;
 extern crate serde_json;
 #[macro_use]
-extern crate slog;
+extern crate log;

 mod framework;

@@ -38,7 +38,6 @@ use std::sync::Arc;
 use std::{thread, time};

 use core::global::{self, ChainTypes};
-use util::LOGGER;

 use framework::{config, stratum_config};

@@ -77,7 +76,7 @@ fn basic_stratum_server() {
 		}
 		// As this stream falls out of scope it will be disconnected
 	}
-	info!(LOGGER, "stratum server connected");
+	info!("stratum server connected");

 	// Create a few new worker connections
 	let mut workers = vec![];
@@ -89,7 +88,7 @@ fn basic_stratum_server() {
 		workers.push(stream);
 	}
 	assert!(workers.len() == 5);
-	info!(LOGGER, "workers length verification ok");
+	info!("workers length verification ok");

 	// Simulate a worker lost connection
 	workers.remove(4);
@@ -118,7 +117,7 @@ fn basic_stratum_server() {
 			assert!(false);
 		}
 	}
-	info!(LOGGER, "a few stratum JSONRpc commands verification ok");
+	info!("a few stratum JSONRpc commands verification ok");

 	// keepalive - expected "ok" result
 	let mut response = String::new();
@@ -129,7 +128,7 @@ fn basic_stratum_server() {
 	thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
 	let _st = workers[2].read_line(&mut response);
 	assert_eq!(response.as_str(), ok_resp);
-	info!(LOGGER, "keepalive test ok");
+	info!("keepalive test ok");

 	// "doesnotexist" - error expected
 	let mut response = String::new();
@@ -140,7 +139,7 @@ fn basic_stratum_server() {
 	thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
 	let _st = workers[3].read_line(&mut response);
 	assert_eq!(response.as_str(), ok_resp);
-	info!(LOGGER, "worker doesnotexist test ok");
+	info!("worker doesnotexist test ok");

 	// Verify stratum server and worker stats
 	let stats = s.get_server_stats().unwrap();
@@ -148,18 +147,18 @@ fn basic_stratum_server() {
 	assert_eq!(stats.stratum_stats.num_workers, 4); // 5 - 1 = 4
 	assert_eq!(stats.stratum_stats.worker_stats[5].is_connected, false); // worker was removed
 	assert_eq!(stats.stratum_stats.worker_stats[1].is_connected, true);
-	info!(LOGGER, "stratum server and worker stats verification ok");
+	info!("stratum server and worker stats verification ok");

 	// Start mining blocks
 	let stop = Arc::new(AtomicBool::new(false));
 	s.start_test_miner(None, stop.clone());
-	info!(LOGGER, "test miner started");
+	info!("test miner started");

 	// This test is supposed to complete in 3 seconds,
 	// so let's set a timeout on 10s to avoid infinite waiting happened in Travis-CI.
 	let _handler = thread::spawn(|| {
 		thread::sleep(time::Duration::from_secs(10));
-		error!(LOGGER, "basic_stratum_server test fail on timeout!");
+		error!("basic_stratum_server test fail on timeout!");
 		thread::sleep(time::Duration::from_millis(100));
 		process::exit(1);
 	});
@@ -177,12 +176,12 @@ fn basic_stratum_server() {
 	let _st = workers[2].read_line(&mut jobtemplate);
 	let job_template: Value = serde_json::from_str(&jobtemplate).unwrap();
 	assert_eq!(job_template["method"], expected);
-	info!(LOGGER, "blocks broadcasting to workers test ok");
+	info!("blocks broadcasting to workers test ok");

 	// Verify stratum server and worker stats
 	let stats = s.get_server_stats().unwrap();
 	assert_eq!(stats.stratum_stats.num_workers, 3); // 5 - 2 = 3
 	assert_eq!(stats.stratum_stats.worker_stats[2].is_connected, false); // worker was removed
 	assert_ne!(stats.stratum_stats.block_height, 1);
-	info!(LOGGER, "basic_stratum_server test done and ok.");
+	info!("basic_stratum_server test done and ok.");
 }
@@ -13,7 +13,7 @@
 // limitations under the License.

 #[macro_use]
-extern crate slog;
+extern crate log;

 extern crate grin_api as api;
 extern crate grin_chain as chain;
@@ -27,10 +27,9 @@ extern crate grin_wallet as wallet;
 mod framework;

 use framework::{LocalServerContainer, LocalServerContainerConfig};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
-use util::LOGGER;
+use util::Mutex;

 /// Start 1 node mining and two wallets, then send a few
 /// transactions from one to the other
@@ -55,11 +54,11 @@ fn basic_wallet_transactions() {
 	let coinbase_wallet = Arc::new(Mutex::new(
 		LocalServerContainer::new(coinbase_config).unwrap(),
 	));
-	let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
+	let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };

 	let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
 	let _ = thread::spawn(move || {
-		let mut w = coinbase_wallet.lock().unwrap();
+		let mut w = coinbase_wallet.lock();
 		w.run_wallet(0);
 	});

@@ -69,11 +68,11 @@ fn basic_wallet_transactions() {
 	recp_config.wallet_port = 20002;
 	let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
 	let target_wallet_cloned = target_wallet.clone();
-	let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
+	let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };
 	let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
 	//Start up a second wallet, to receive
 	let _ = thread::spawn(move || {
-		let mut w = target_wallet_cloned.lock().unwrap();
+		let mut w = target_wallet_cloned.lock();
 		w.run_wallet(0);
 	});

@@ -104,7 +103,7 @@ fn basic_wallet_transactions() {
 		coinbase_info =
 			LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
 	}
-	warn!(LOGGER, "Sending 50 Grins to recipient wallet");
+	warn!("Sending 50 Grins to recipient wallet");
 	LocalServerContainer::send_amount_to(
 		&coinbase_wallet_config,
 		"50.00",
@@ -124,10 +123,7 @@ fn basic_wallet_transactions() {
 	println!("Recipient wallet info: {:?}", recipient_info);
 	assert!(recipient_info.amount_currently_spendable == 50000000000);

-	warn!(
-		LOGGER,
-		"Sending many small transactions to recipient wallet"
-	);
+	warn!("Sending many small transactions to recipient wallet");
 	for _i in 0..10 {
 		LocalServerContainer::send_amount_to(
 			&coinbase_wallet_config,
@@ -29,7 +29,6 @@ use core::global;
 use p2p::Seeding;
 use servers;
 use tui::ui;
-use util::LOGGER;

 /// wrap below to allow UI to clean up on stop
 fn start_server(config: servers::ServerConfig) {
@@ -37,9 +36,9 @@ fn start_server(config: servers::ServerConfig) {
 // Just kill process for now, otherwise the process
 // hangs around until sigint because the API server
 // currently has no shutdown facility
-warn!(LOGGER, "Shutting down...");
+warn!("Shutting down...");
 thread::sleep(Duration::from_millis(1000));
-warn!(LOGGER, "Shutdown complete.");
+warn!("Shutdown complete.");
 exit(0);
 }

@@ -47,7 +46,7 @@ fn start_server_tui(config: servers::ServerConfig) {
 // Run the UI controller.. here for now for simplicity to access
 // everything it might need
 if config.run_tui.is_some() && config.run_tui.unwrap() {
-warn!(LOGGER, "Starting GRIN in UI mode...");
+warn!("Starting GRIN in UI mode...");
 servers::Server::start(config, |serv: Arc<servers::Server>| {
 let running = Arc::new(AtomicBool::new(true));
 let _ = thread::Builder::new()
@@ -60,7 +59,7 @@ fn start_server_tui(config: servers::ServerConfig) {
 });
 }).unwrap();
 } else {
-warn!(LOGGER, "Starting GRIN w/o UI...");
+warn!("Starting GRIN w/o UI...");
 servers::Server::start(config, |serv: Arc<servers::Server>| {
 let running = Arc::new(AtomicBool::new(true));
 let r = running.clone();
@@ -70,7 +69,7 @@ fn start_server_tui(config: servers::ServerConfig) {
 while running.load(Ordering::SeqCst) {
 thread::sleep(Duration::from_secs(1));
 }
-warn!(LOGGER, "Received SIGINT (Ctrl+C) or SIGTERM (kill).");
+warn!("Received SIGINT (Ctrl+C) or SIGTERM (kill).");
 serv.stop();
 }).unwrap();
 }
@@ -170,8 +169,8 @@ pub fn server_command(server_args: Option<&ArgMatches>, mut global_config: Globa
 }
 });
 match daemonize.start() {
-Ok(_) => info!(LOGGER, "Grin server successfully started."),
-Err(e) => error!(LOGGER, "Error starting: {}", e),
+Ok(_) => info!("Grin server successfully started."),
+Err(e) => error!("Error starting: {}", e),
 }
 }
 ("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"),
@@ -18,9 +18,10 @@ use std::io::Read;
 use std::path::PathBuf;
 /// Wallet commands processing
 use std::process::exit;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::time::Duration;
 use std::{process, thread};
+use util::Mutex;

 use clap::ArgMatches;

@@ -34,7 +35,6 @@ use grin_wallet::{
 use keychain;
 use servers::start_webwallet_server;
 use util::file::get_first_line;
-use util::LOGGER;

 pub fn _init_wallet_seed(wallet_config: WalletConfig) {
 if let Err(_) = WalletSeed::from_file(&wallet_config) {
@@ -72,7 +72,7 @@ pub fn instantiate_wallet(
 println!("Error starting wallet: {}", e);
 process::exit(0);
 });
-info!(LOGGER, "Using LMDB Backend for wallet");
+info!("Using LMDB Backend for wallet");
 Box::new(db_wallet)
 }

@@ -106,7 +106,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 // Generate the initial wallet seed if we are running "wallet init".
 if let ("init", Some(_)) = wallet_args.subcommand() {
 WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");
-info!(LOGGER, "Wallet seed file created");
+info!("Wallet seed file created");
 let client =
 HTTPWalletClient::new(&wallet_config.check_node_api_http_addr, node_api_secret);
 let _: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> =
@@ -116,7 +116,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 e, wallet_config
 );
 });
-info!(LOGGER, "Wallet database backend created");
+info!("Wallet database backend created");
 // give logging thread a moment to catch up
 thread::sleep(Duration::from_millis(200));
 // we are done here with creating the wallet, so just return
@@ -267,7 +267,6 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let slate = match result {
 Ok(s) => {
 info!(
-LOGGER,
 "Tx created: {} grin to {} (strategy '{}')",
 core::amount_to_hr_string(amount, false),
 dest,
@@ -276,7 +275,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 s
 }
 Err(e) => {
-error!(LOGGER, "Tx not created: {:?}", e);
+error!("Tx not created: {:?}", e);
 match e.kind() {
 // user errors, don't backtrace
 libwallet::ErrorKind::NotEnoughFunds { .. } => {}
@@ -284,7 +283,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 libwallet::ErrorKind::FeeExceedsAmount { .. } => {}
 _ => {
 // otherwise give full dump
-error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
+error!("Backtrace: {}", e.backtrace().unwrap());
 }
 };
 panic!();
@@ -293,18 +292,18 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let result = api.post_tx(&slate, fluff);
 match result {
 Ok(_) => {
-info!(LOGGER, "Tx sent",);
+info!("Tx sent",);
 Ok(())
 }
 Err(e) => {
-error!(LOGGER, "Tx not sent: {:?}", e);
+error!("Tx not sent: {:?}", e);
 Err(e)
 }
 }
 } else {
 error!(
-LOGGER,
-"HTTP Destination should start with http://: or https://: {}", dest
+"HTTP Destination should start with http://: or https://: {}",
+dest
 );
 panic!();
 }
@@ -320,7 +319,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 ).expect("Send failed");
 Ok(())
 } else {
-error!(LOGGER, "unsupported payment method: {}", method);
+error!("unsupported payment method: {}", method);
 panic!();
 }
 }
@@ -353,11 +352,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let result = api.post_tx(&slate, fluff);
 match result {
 Ok(_) => {
-info!(LOGGER, "Tx sent");
+info!("Tx sent");
 Ok(())
 }
 Err(e) => {
-error!(LOGGER, "Tx not sent: {:?}", e);
+error!("Tx not sent: {:?}", e);
 Err(e)
 }
 }
@@ -438,7 +437,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 ("repost", Some(repost_args)) => {
 let tx_id: u32 = match repost_args.value_of("id") {
 None => {
-error!(LOGGER, "Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
+error!("Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
 panic!();
 }
 Some(tx) => match tx.parse() {
@@ -455,11 +454,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let result = api.post_stored_tx(tx_id, fluff);
 match result {
 Ok(_) => {
-info!(LOGGER, "Reposted transaction at {}", tx_id);
+info!("Reposted transaction at {}", tx_id);
 Ok(())
 }
 Err(e) => {
-error!(LOGGER, "Transaction reposting failed: {}", e);
+error!("Transaction reposting failed: {}", e);
 Err(e)
 }
 }
@@ -468,11 +467,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let result = api.dump_stored_tx(tx_id, true, f);
 match result {
 Ok(_) => {
-warn!(LOGGER, "Dumped transaction data for tx {} to {}", tx_id, f);
+warn!("Dumped transaction data for tx {} to {}", tx_id, f);
 Ok(())
 }
 Err(e) => {
-error!(LOGGER, "Transaction reposting failed: {}", e);
+error!("Transaction reposting failed: {}", e);
 Err(e)
 }
 }
@@ -487,11 +486,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let result = api.cancel_tx(tx_id);
 match result {
 Ok(_) => {
-info!(LOGGER, "Transaction {} Cancelled", tx_id);
+info!("Transaction {} Cancelled", tx_id);
 Ok(())
 }
 Err(e) => {
-error!(LOGGER, "TX Cancellation failed: {}", e);
+error!("TX Cancellation failed: {}", e);
 Err(e)
 }
 }
@@ -500,12 +499,12 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
 let result = api.restore();
 match result {
 Ok(_) => {
-info!(LOGGER, "Wallet restore complete",);
+info!("Wallet restore complete",);
 Ok(())
 }
 Err(e) => {
-error!(LOGGER, "Wallet restore failed: {:?}", e);
-error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
+error!("Wallet restore failed: {:?}", e);
+error!("Backtrace: {}", e.backtrace().unwrap());
 Err(e)
 }
 }
@@ -24,7 +24,7 @@ extern crate daemonize;
 extern crate serde;
 extern crate serde_json;
 #[macro_use]
-extern crate slog;
+extern crate log;
 extern crate term;

 extern crate grin_api as api;
@@ -45,7 +45,7 @@ use clap::{App, Arg, SubCommand};

 use config::config::{SERVER_CONFIG_FILE_NAME, WALLET_CONFIG_FILE_NAME};
 use core::global;
-use util::{init_logger, LOGGER};
+use util::init_logger;

 // include build information
 pub mod built_info {
@@ -73,9 +73,9 @@ pub fn info_strings() -> (String, String, String) {

 fn log_build_info() {
 let (basic_info, detailed_info, deps) = info_strings();
-info!(LOGGER, "{}", basic_info);
-debug!(LOGGER, "{}", detailed_info);
-trace!(LOGGER, "{}", deps);
+info!("{}", basic_info);
+debug!("{}", detailed_info);
+trace!("{}", deps);
 }

 fn main() {
@@ -378,7 +378,6 @@ fn main() {
 l.tui_running = Some(false);
 init_logger(Some(l));
 warn!(
-LOGGER,
 "Using wallet configuration file at {}",
 w.config_file_path.as_ref().unwrap().to_str().unwrap()
 );
@@ -399,12 +398,11 @@ fn main() {
 global::set_mining_mode(s.members.as_mut().unwrap().server.clone().chain_type);
 if let Some(file_path) = &s.config_file_path {
 info!(
-LOGGER,
 "Using configuration file at {}",
 file_path.to_str().unwrap()
 );
 } else {
-info!(LOGGER, "Node configuration file not found, using default");
+info!("Node configuration file not found, using default");
 }
 node_config = Some(s);
 }
@@ -87,91 +87,88 @@ impl TUIStatusListener for TUIStatusView {
 fn update(c: &mut Cursive, stats: &ServerStats) {
 //find and update here as needed
 let basic_status = {
-if stats.awaiting_peers {
-"Waiting for peers".to_string()
-} else {
-match stats.sync_status {
-SyncStatus::Initial => "Initializing".to_string(),
-SyncStatus::NoSync => "Running".to_string(),
-SyncStatus::HeaderSync {
-current_height,
-highest_height,
-} => {
-let percent = if highest_height == 0 {
-0
+match stats.sync_status {
+SyncStatus::Initial => "Initializing".to_string(),
+SyncStatus::NoSync => "Running".to_string(),
+SyncStatus::AwaitingPeers(_) => "Waiting for peers".to_string(),
+SyncStatus::HeaderSync {
+current_height,
+highest_height,
+} => {
+let percent = if highest_height == 0 {
+0
+} else {
+current_height * 100 / highest_height
+};
+format!("Downloading headers: {}%, step 1/4", percent)
+}
+SyncStatus::TxHashsetDownload {
+start_time,
+downloaded_size,
+total_size,
+} => {
+if total_size > 0 {
+let percent = if total_size > 0 {
+downloaded_size * 100 / total_size
 } else {
-current_height * 100 / highest_height
+0
 };
-format!("Downloading headers: {}%, step 1/4", percent)
-}
-SyncStatus::TxHashsetDownload {
-start_time,
-downloaded_size,
-total_size,
-} => {
-if total_size > 0 {
-let percent = if total_size > 0 {
-downloaded_size * 100 / total_size
-} else {
-0
-};
-let start = start_time.timestamp_nanos();
-let fin = Utc::now().timestamp_nanos();
-let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;
+let start = start_time.timestamp_nanos();
+let fin = Utc::now().timestamp_nanos();
+let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;

 format!("Downloading {}(MB) chain state for fast sync: {}% at {:.1?}(kB/s), step 2/4",
 total_size / 1_000_000,
 percent,
 if dur_ms > 1.0f64 { downloaded_size as f64 / dur_ms as f64 } else { 0f64 },
 )
 } else {
 let start = start_time.timestamp_millis();
 let fin = Utc::now().timestamp_millis();
 let dur_secs = (fin - start) / 1000;

 format!("Downloading chain state for fast sync. Waiting remote peer to start: {}s, step 2/4",
 dur_secs,
 )
-}
-}
-SyncStatus::TxHashsetSetup => {
-"Preparing chain state for validation, step 3/4".to_string()
-}
-SyncStatus::TxHashsetValidation {
-kernels,
-kernel_total,
-rproofs,
-rproof_total,
-} => {
-// 10% of overall progress is attributed to kernel validation
-// 90% to range proofs (which are much longer)
-let mut percent = if kernel_total > 0 {
-kernels * 10 / kernel_total
-} else {
-0
-};
-percent += if rproof_total > 0 {
-rproofs * 90 / rproof_total
-} else {
-0
-};
-format!("Validating chain state: {}%, step 3/4", percent)
-}
-SyncStatus::TxHashsetSave => {
-"Finalizing chain state for fast sync, step 3/4".to_string()
-}
-SyncStatus::BodySync {
-current_height,
-highest_height,
-} => {
-let percent = if highest_height == 0 {
-0
-} else {
-current_height * 100 / highest_height
-};
-format!("Downloading blocks: {}%, step 4/4", percent)
 }
 }
+SyncStatus::TxHashsetSetup => {
+"Preparing chain state for validation, step 3/4".to_string()
+}
+SyncStatus::TxHashsetValidation {
+kernels,
+kernel_total,
+rproofs,
+rproof_total,
+} => {
+// 10% of overall progress is attributed to kernel validation
+// 90% to range proofs (which are much longer)
+let mut percent = if kernel_total > 0 {
+kernels * 10 / kernel_total
+} else {
+0
+};
+percent += if rproof_total > 0 {
+rproofs * 90 / rproof_total
+} else {
+0
+};
+format!("Validating chain state: {}%, step 3/4", percent)
+}
+SyncStatus::TxHashsetSave => {
+"Finalizing chain state for fast sync, step 3/4".to_string()
+}
+SyncStatus::BodySync {
+current_height,
+highest_height,
+} => {
+let percent = if highest_height == 0 {
+0
+} else {
+current_height * 100 / highest_height
+};
+format!("Downloading blocks: {}%, step 4/4", percent)
+}
 }
 };
 /*let basic_mining_config_status = {
@@ -37,7 +37,6 @@ use servers::Server;
 use tui::constants::ROOT_STACK;
 use tui::types::{TUIStatusListener, UIMessage};
 use tui::{menu, mining, peers, status, version};
-use util::LOGGER;

 use built_info;

@@ -172,7 +171,7 @@ impl Controller {
 let mut next_stat_update = Utc::now().timestamp() + stat_update_interval;
 while self.ui.step() {
 if !running.load(Ordering::SeqCst) {
-warn!(LOGGER, "Received SIGINT (Ctrl+C).");
+warn!("Received SIGINT (Ctrl+C).");
 server.stop();
 self.ui.stop();
 }