Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-20 19:11:08 +03:00)

Commit ae9a9c8938: Merge branch 'master' of github.com:ignopeverell/grin

14 changed files with 392 additions and 144 deletions
@@ -184,7 +184,7 @@ impl DifficultyIter {
 }
 
 impl Iterator for DifficultyIter {
-	type Item = Result<(i64, Difficulty), TargetError>;
+	type Item = Result<(u64, Difficulty), TargetError>;
 
 	fn next(&mut self) -> Option<Self::Item> {
 		let bhe = self.store.get_block_header(&self.next);
@@ -195,7 +195,7 @@ impl Iterator for DifficultyIter {
 			return None;
 		}
 		self.next = bh.previous;
-		Some(Ok((bh.timestamp.to_timespec().sec, bh.difficulty)))
+		Some(Ok((bh.timestamp.to_timespec().sec as u64, bh.difficulty)))
	}
 }
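Aside: the two hunks above switch the difficulty iterator to unsigned 64-bit timestamps. A minimal, self-contained sketch of an iterator with the same shape is shown below; the in-memory header list, the field names and the plain u64 standing in for Difficulty are illustrative assumptions, not the chain's actual storage API.

// Walk headers backwards, yielding (timestamp, difficulty) as (u64, u64) pairs,
// mirroring the i64 -> u64 change in the hunks above.
#[derive(Clone)]
struct HeaderInfo {
    timestamp: i64,  // seconds, as produced by time::Timespec::sec
    difficulty: u64, // stand-in for core::core::target::Difficulty
    previous: usize, // index of the previous header in the toy store below
}

struct DifficultyIterSketch {
    headers: Vec<HeaderInfo>,
    next: Option<usize>,
}

impl Iterator for DifficultyIterSketch {
    type Item = (u64, u64);

    fn next(&mut self) -> Option<Self::Item> {
        let idx = self.next?;
        let h = self.headers.get(idx)?.clone();
        self.next = if idx == 0 { None } else { Some(h.previous) };
        // The cast is the substantive change: timestamps are stored signed
        // but consumed unsigned by the difficulty calculation.
        Some((h.timestamp as u64, h.difficulty))
    }
}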
@@ -30,7 +30,7 @@ pub const REWARD: u64 = 1_000_000_000;
 /// that we may reduce this value in the future as we get more data on mining
 /// with Cuckoo Cycle, networks improve and block propagation is optimized
 /// (adjusting the reward accordingly).
-pub const BLOCK_TIME_SEC: i64 = 60;
+pub const BLOCK_TIME_SEC: u64 = 60;
 
 /// Cuckoo-cycle proof size (cycle length)
 pub const PROOFSIZE: usize = 42;
@@ -61,22 +61,22 @@ pub const CUT_THROUGH_HORIZON: u32 = 48 * 3600 / (BLOCK_TIME_SEC as u32);
 pub const MAX_MSG_LEN: u64 = 20_000_000;
 
 /// The minimum mining difficulty we'll allow
-pub const MINIMUM_DIFFICULTY: u32 = 10;
+pub const MINIMUM_DIFFICULTY: u64 = 10;
 
 /// Time window in blocks to calculate block time median
-pub const MEDIAN_TIME_WINDOW: u32 = 11;
+pub const MEDIAN_TIME_WINDOW: u64 = 11;
 
 /// Number of blocks used to calculate difficulty adjustments
-pub const DIFFICULTY_ADJUST_WINDOW: u32 = 23;
+pub const DIFFICULTY_ADJUST_WINDOW: u64 = 23;
 
 /// Average time span of the difficulty adjustment window
-pub const BLOCK_TIME_WINDOW: i64 = (DIFFICULTY_ADJUST_WINDOW as i64) * BLOCK_TIME_SEC;
+pub const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;
 
 /// Maximum size time window used for difficulty adjustments
-pub const UPPER_TIME_BOUND: i64 = BLOCK_TIME_WINDOW * 4 / 3;
+pub const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 4 / 3;
 
 /// Minimum size time window used for difficulty adjustments
-pub const LOWER_TIME_BOUND: i64 = BLOCK_TIME_WINDOW * 5 / 6;
+pub const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 5 / 6;
 
 /// Error when computing the next difficulty adjustment.
 #[derive(Debug, Clone)]
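Aside: with every window constant now u64, the derived bounds compose without casts. A standalone sanity check, with the constants copied from the hunk above and the expected values worked out by hand:

const BLOCK_TIME_SEC: u64 = 60;
const DIFFICULTY_ADJUST_WINDOW: u64 = 23;
const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;
const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 4 / 3;
const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 5 / 6;

fn main() {
    // 23 blocks * 60s = 1380s window, clamped to [1150, 1840] seconds.
    assert_eq!(BLOCK_TIME_WINDOW, 1380);
    assert_eq!(UPPER_TIME_BOUND, 1840);
    assert_eq!(LOWER_TIME_BOUND, 1150);
}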
@@ -100,7 +100,7 @@ impl fmt::Display for TargetError {
 /// difference between the median timestamps at the beginning and the end
 /// of the window.
 pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
-	where T: IntoIterator<Item = Result<(i64, Difficulty), TargetError>>
+	where T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>
 {
 
 	// Block times at the beginning and end of the adjustment window, used to
@@ -113,7 +113,7 @@ pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
 
 	// Enumerating backward over blocks
 	for (n, head_info) in cursor.into_iter().enumerate() {
-		let m = n as u32;
+		let m = n as u64;
 		let (ts, diff) = head_info?;
 
 		// Sum each element in the adjustment window. In addition, retain
@@ -156,10 +156,13 @@ pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
 		ts_damp
 	};
 
-	Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW as u32) /
-	   Difficulty::from_num(adj_ts as u32))
+	Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) /
+	   Difficulty::from_num(adj_ts))
 }
 
+#[cfg(test)]
+use std;
+
 #[cfg(test)]
 mod test {
 	use core::target::Difficulty;
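Aside: the final expression of next_difficulty now scales the averaged difficulty in plain u64 arithmetic instead of casting through u32. A rough standalone sketch of that last step, using a toy u64 wrapper and made-up sample values rather than the real Difficulty type:

// Toy stand-in for core::core::target::Difficulty after the change: a u64 wrapper.
#[derive(Debug, PartialEq)]
struct Diff(u64);

impl Diff {
    fn from_num(n: u64) -> Diff { Diff(n) }
}

const BLOCK_TIME_WINDOW: u64 = 23 * 60;

// Scale the average difficulty by target window time over observed window time,
// mirroring `diff_avg * from_num(BLOCK_TIME_WINDOW) / from_num(adj_ts)`.
fn scale(diff_avg: u64, adj_ts: u64) -> Diff {
    Diff::from_num(diff_avg * BLOCK_TIME_WINDOW / adj_ts)
}

fn main() {
    // Blocks came in twice as fast as targeted, so difficulty doubles.
    assert_eq!(scale(1000, BLOCK_TIME_WINDOW / 2), Diff::from_num(2000));
}

Halving the observed window time doubles the difficulty, which is the behaviour the scaling expression in the hunk encodes.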
@@ -168,18 +171,20 @@ mod test {
 
 	// Builds an iterator for next difficulty calculation with the provided
 	// constant time interval, difficulty and total length.
-	fn repeat(interval: i64, diff: u32, len: u32) -> Vec<Result<(i64, Difficulty), TargetError>> {
+	fn repeat(interval: u64, diff: u64, len: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
+		// watch overflow here, length shouldn't be ridiculous anyhow
+		assert!(len < std::usize::MAX as u64);
 		let diffs = vec![Difficulty::from_num(diff); len as usize];
-		let times = (0..(len as usize)).map(|n| (n as i64) * interval).rev();
+		let times = (0..(len as usize)).map(|n| n * interval as usize).rev();
 		let pairs = times.zip(diffs.iter());
-		pairs.map(|(t, d)| Ok((t, d.clone()))).collect::<Vec<_>>()
+		pairs.map(|(t, d)| Ok((t as u64, d.clone()))).collect::<Vec<_>>()
 	}
 
-	fn repeat_offs(from: i64,
-	               interval: i64,
-	               diff: u32,
-	               len: u32)
-	               -> Vec<Result<(i64, Difficulty), TargetError>> {
+	fn repeat_offs(from: u64,
+	               interval: u64,
+	               diff: u64,
+	               len: u64)
+	               -> Vec<Result<(u64, Difficulty), TargetError>> {
 		map_vec!(repeat(interval, diff, len), |e| {
 			match e.clone() {
 				Err(e) => Err(e),
@@ -209,7 +214,7 @@ mod test {
 		// checking averaging works, window length is odd so need to compensate a little
 		let sec = DIFFICULTY_ADJUST_WINDOW / 2 + 1 + MEDIAN_TIME_WINDOW;
 		let mut s1 = repeat(60, 500, sec);
-		let mut s2 = repeat_offs((sec * 60) as i64, 60, 1545, DIFFICULTY_ADJUST_WINDOW / 2);
+		let mut s2 = repeat_offs((sec * 60) as u64, 60, 1545, DIFFICULTY_ADJUST_WINDOW / 2);
 		s2.append(&mut s1);
 		assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(999));
 
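Aside: the test helper now builds its (timestamp, difficulty) vector entirely in u64. A self-contained approximation of that helper, with plain u64 pairs standing in for the Difficulty and TargetError types:

// Build `len` entries at a constant `interval`, newest first, as the tests do.
fn repeat_sketch(interval: u64, diff: u64, len: u64) -> Vec<(u64, u64)> {
    // Watch overflow: keep the length within usize range, as the real helper asserts.
    assert!(len < usize::MAX as u64);
    (0..len)
        .map(|n| (n * interval, diff))
        .rev()
        .collect()
}

fn main() {
    let v = repeat_sketch(60, 500, 3);
    assert_eq!(v, vec![(120, 500), (60, 500), (0, 500)]);
}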
@@ -15,60 +15,61 @@
 //! Definition of the maximum target value a proof-of-work block hash can have
 //! and
 //! the related difficulty, defined as the maximum target divided by the hash.
+//!
+//! Note this is now wrapping a simple U64, but it's desirable to keep the
+//! wrapper in case the internal representation needs to change again
 
 use std::fmt;
 use std::ops::{Add, Mul, Div, Sub};
+use std::io::Cursor;
+use std::u64::MAX;
 
-use bigint::BigUint;
 use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
+use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
 
 use core::hash::Hash;
 use ser::{self, Reader, Writer, Writeable, Readable};
 
 /// The target is the 32-bytes hash block hashes must be lower than.
-pub const MAX_TARGET: [u8; 32] = [0xf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-                                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-                                  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
+pub const MAX_TARGET: [u8; 8] = [0xf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
 
 /// The difficulty is defined as the maximum target divided by the block hash.
 #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
 pub struct Difficulty {
-	num: BigUint,
+	num: u64,
 }
 
 impl Difficulty {
 	/// Difficulty of zero, which is practically invalid (no target can be
 	/// calculated from it) but very useful as a start for additions.
 	pub fn zero() -> Difficulty {
-		Difficulty { num: BigUint::new(vec![0]) }
+		Difficulty { num: 0 }
 	}
 
 	/// Difficulty of one, which is the minimum difficulty (when the hash
 	/// equals the max target)
 	pub fn one() -> Difficulty {
-		Difficulty { num: BigUint::new(vec![1]) }
+		Difficulty { num: 1 }
 	}
 
 	/// Convert a `u32` into a `Difficulty`
-	pub fn from_num(num: u32) -> Difficulty {
-		Difficulty { num: BigUint::new(vec![num]) }
-	}
-
-	/// Convert a `BigUint` into a `Difficulty`
-	pub fn from_biguint(num: BigUint) -> Difficulty {
+	pub fn from_num(num: u64) -> Difficulty {
 		Difficulty { num: num }
 	}
 
 	/// Computes the difficulty from a hash. Divides the maximum target by the
 	/// provided hash.
 	pub fn from_hash(h: &Hash) -> Difficulty {
-		let max_target = BigUint::from_bytes_be(&MAX_TARGET);
-		let h_num = BigUint::from_bytes_be(&h[..]);
-		Difficulty { num: max_target / h_num }
+		let max_target = BigEndian::read_u64(&MAX_TARGET);
+		// Use the first 64 bits of the given hash
+		let mut in_vec = h.to_vec();
+		in_vec.truncate(8);
+		let num = BigEndian::read_u64(&in_vec);
+		Difficulty { num: max_target / num }
 	}
 
-	/// Converts the difficulty into a bignum
-	pub fn into_biguint(self) -> BigUint {
+	/// Converts the difficulty into a u64
+	pub fn into_num(&self) -> u64 {
 		self.num
 	}
 }
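Aside: the new from_hash keeps only the leading eight bytes of the hash and divides the 64-bit maximum target by them. The same computation can be sketched with the standard library alone; u64::from_be_bytes stands in for the byteorder call, and the sample hash is invented:

const MAX_TARGET: [u8; 8] = [0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];

// Difficulty = max target / first 64 bits of the hash, both read big-endian.
fn difficulty_from_hash(hash: &[u8; 32]) -> u64 {
    let max_target = u64::from_be_bytes(MAX_TARGET);
    let mut first8 = [0u8; 8];
    first8.copy_from_slice(&hash[..8]);
    // Note: a hash whose first 8 bytes are all zero would divide by zero;
    // that edge case is ignored here, as it is in the hunk above.
    max_target / u64::from_be_bytes(first8)
}

fn main() {
    // A hash whose first byte is zero and the rest 0xff is 1/16th of the
    // maximum target, so it maps to difficulty 16.
    let mut h = [0xffu8; 32];
    h[0] = 0x00;
    assert_eq!(difficulty_from_hash(&h), 16);
}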
@@ -109,17 +110,14 @@ impl Div<Difficulty> for Difficulty {
 
 impl Writeable for Difficulty {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		let data = self.num.to_bytes_be();
-		try!(writer.write_u8(data.len() as u8));
-		writer.write_fixed_bytes(&data)
+		writer.write_u64(self.num)
 	}
 }
 
 impl Readable for Difficulty {
 	fn read(reader: &mut Reader) -> Result<Difficulty, ser::Error> {
-		let dlen = try!(reader.read_u8());
-		let data = try!(reader.read_fixed_bytes(dlen as usize));
-		Ok(Difficulty { num: BigUint::from_bytes_be(&data[..]) })
+		let data = try!(reader.read_u64());
+		Ok(Difficulty { num: data })
 	}
 }
 
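Aside: Difficulty is now written as a fixed eight-byte integer instead of a length-prefixed big-endian byte string. A minimal round-trip sketch using only std; the Writer and Reader traits from core::ser are not reproduced here:

use std::convert::TryInto;

// Write a difficulty as 8 big-endian bytes and read it back, mirroring the
// write_u64 / read_u64 pair the hunk above switches to.
fn write_difficulty(buf: &mut Vec<u8>, num: u64) {
    buf.extend_from_slice(&num.to_be_bytes());
}

fn read_difficulty(buf: &[u8]) -> Option<u64> {
    let bytes: [u8; 8] = buf.get(..8)?.try_into().ok()?;
    Some(u64::from_be_bytes(bytes))
}

fn main() {
    let mut buf = Vec::new();
    write_difficulty(&mut buf, 1545);
    assert_eq!(buf.len(), 8); // fixed width, no length prefix any more
    assert_eq!(read_difficulty(&buf), Some(1545));
}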
@@ -127,7 +125,7 @@ impl Serialize for Difficulty {
 	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
 		where S: Serializer
 	{
-		serializer.serialize_str(self.num.to_str_radix(10).as_str())
+		serializer.serialize_u64(self.num)
 	}
 }
 
@@ -135,7 +133,7 @@ impl<'de> Deserialize<'de> for Difficulty {
 	fn deserialize<D>(deserializer: D) -> Result<Difficulty, D::Error>
 		where D: Deserializer<'de>
 	{
-		deserializer.deserialize_i32(DiffVisitor)
+		deserializer.deserialize_u64(DiffVisitor)
 	}
 }
 
@@ -151,9 +149,10 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
 	fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
 		where E: de::Error
 	{
-		let bigui = BigUint::parse_bytes(s.as_bytes(), 10).ok_or_else(|| {
-			de::Error::invalid_value(de::Unexpected::Str(s), &"a value number")
-		})?;
-		Ok(Difficulty { num: bigui })
+		let num_in = s.parse::<u64>();
+		if let Err(e) = num_in {
+			return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number"));
+		};
+		Ok(Difficulty { num: num_in.unwrap() })
 	}
 }
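Aside: with the wrapped value a u64, serialization emits the integer directly and the visitor only needs a plain u64 parse. A compact sketch of that serde pattern for a toy newtype; it assumes the serde crate, and the visit_u64 case is included for completeness rather than taken from the diff:

use std::fmt;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[derive(Debug, PartialEq)]
struct Diff {
    num: u64,
}

impl Serialize for Diff {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Emit the plain integer rather than a decimal string.
        serializer.serialize_u64(self.num)
    }
}

struct DiffVisitor;

impl<'de> Visitor<'de> for DiffVisitor {
    type Value = Diff;

    fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("a u64 difficulty, possibly given as a decimal string")
    }

    fn visit_u64<E: de::Error>(self, v: u64) -> Result<Self::Value, E> {
        Ok(Diff { num: v })
    }

    fn visit_str<E: de::Error>(self, s: &str) -> Result<Self::Value, E> {
        // The plain u64 parse replaces the old BigUint::parse_bytes call.
        let num = s.parse::<u64>()
            .map_err(|_| de::Error::invalid_value(de::Unexpected::Str(s), &"a value number"))?;
        Ok(Diff { num })
    }
}

impl<'de> Deserialize<'de> for Diff {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        deserializer.deserialize_u64(DiffVisitor)
    }
}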
@@ -387,18 +387,72 @@ impl Writeable for [u8; 4] {
 }
 
 /// Useful marker trait on types that can be sized byte slices
-pub trait AsFixedBytes: Sized + AsRef<[u8]> {}
+pub trait AsFixedBytes: Sized + AsRef<[u8]> {
+	fn len(&self) -> usize;
+}
 
-impl<'a> AsFixedBytes for &'a [u8] {}
-impl AsFixedBytes for Vec<u8> {}
-impl AsFixedBytes for [u8; 1] {}
-impl AsFixedBytes for [u8; 2] {}
-impl AsFixedBytes for [u8; 4] {}
-impl AsFixedBytes for [u8; 8] {}
-impl AsFixedBytes for [u8; 32] {}
-impl AsFixedBytes for String {}
-impl AsFixedBytes for ::core::hash::Hash {}
-impl AsFixedBytes for ::secp::pedersen::RangeProof {}
-impl AsFixedBytes for ::secp::key::SecretKey {}
-impl AsFixedBytes for ::secp::Signature {}
-impl AsFixedBytes for ::secp::pedersen::Commitment {}
+impl<'a> AsFixedBytes for &'a [u8] {
+	fn len(&self) -> usize {
+		return 1;
+	}
+}
+impl AsFixedBytes for Vec<u8> {
+	fn len(&self) -> usize {
+		return self.len();
+	}
+}
+impl AsFixedBytes for [u8; 1] {
+	fn len(&self) -> usize {
+		return 1;
+	}
+}
+impl AsFixedBytes for [u8; 2] {
+	fn len(&self) -> usize {
+		return 2;
+	}
+}
+impl AsFixedBytes for [u8; 4] {
+	fn len(&self) -> usize {
+		return 4;
+	}
+}
+impl AsFixedBytes for [u8; 8] {
+	fn len(&self) -> usize {
+		return 8;
+	}
+}
+impl AsFixedBytes for [u8; 32] {
+	fn len(&self) -> usize {
+		return 32;
+	}
+}
+impl AsFixedBytes for String {
+	fn len(&self) -> usize {
+		return self.len();
+	}
+}
+impl AsFixedBytes for ::core::hash::Hash {
+	fn len(&self) -> usize {
+		return 32;
+	}
+}
+impl AsFixedBytes for ::secp::pedersen::RangeProof {
+	fn len(&self) -> usize {
+		return self.plen;
+	}
+}
+impl AsFixedBytes for ::secp::key::SecretKey {
+	fn len(&self) -> usize {
+		return 1;
+	}
+}
+impl AsFixedBytes for ::secp::Signature {
+	fn len(&self) -> usize {
+		return 64;
+	}
+}
+impl AsFixedBytes for ::secp::pedersen::Commitment {
+	fn len(&self) -> usize {
+		return PEDERSEN_COMMITMENT_SIZE;
+	}
+}
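Aside: AsFixedBytes gains a len method so a writer can tell how many bytes each fixed-size field contributes (the header splitter in the miner changes below relies on it). A stripped-down sketch of the same idea with just two impls and none of the secp types:

// Marker trait for types that can be written as a sized byte slice, now also
// reporting how many bytes they occupy.
trait AsFixedBytes: Sized + AsRef<[u8]> {
    fn len(&self) -> usize;
}

impl AsFixedBytes for [u8; 32] {
    fn len(&self) -> usize {
        32
    }
}

impl AsFixedBytes for Vec<u8> {
    fn len(&self) -> usize {
        // Call the inherent Vec::len explicitly so there is no ambiguity with
        // the trait method of the same name.
        Vec::len(self)
    }
}

// A consumer in the spirit of the serializer: sum the sizes of fixed fields.
fn total_len<T: AsFixedBytes>(fields: &[T]) -> usize {
    fields.iter().map(|f| f.len()).sum()
}

fn main() {
    let a = [0u8; 32];
    let b = [1u8; 32];
    assert_eq!(total_len(&[a, b]), 64);
}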
@@ -59,6 +59,14 @@ enable_mining = true
 
 use_cuckoo_miner = true
 
+#Whether to use async mode for cuckoo miner, if the plugin supports it.
+#this allows for many searches to be run in parallel, e.g. if the system
+#has multiple GPUs. This creates overhead, especially on faster test miners,
+#so in a post-release world this should only be used if you really want
+#to run cards in parallel
+
+cuckoo_miner_async_mode = false
+
 #If using cuckoo_miner, the directory in which plugins are installed
 #if not specified, grin will look in the directory /deps relative
 #to the executable
@@ -15,7 +15,8 @@ grin_util = { path = "../util" }
 grin_wallet = { path = "../wallet" }
 secp256k1zkp = { path = "../secp256k1zkp" }
 
-cuckoo_miner = { git = "https://github.com/mimblewimble/cuckoo-miner", tag="grin_integration_2"}
+cuckoo_miner = { git = "https://github.com/mimblewimble/cuckoo-miner", tag="grin_integration_4"}
+#cuckoo_miner = { path = "../../cuckoo-miner"}
 
 blake2-rfc = "~0.2.17"
 env_logger="^0.3.5"
@@ -29,4 +30,5 @@ serde_derive = "~1.0.8"
 tokio-core="^0.1.1"
 tokio-timer="^0.1.0"
 rand = "^0.3"
-lazy_static = "0.2.8"
+lazy_static = "~0.2.8"
+itertools = "~0.6.0"
@@ -36,6 +36,7 @@ extern crate tokio_core;
 extern crate tokio_timer;
 #[macro_use]
 extern crate lazy_static;
+extern crate itertools;
 
 extern crate grin_api as api;
 extern crate grin_chain as chain;
@@ -19,7 +19,7 @@ use rand::{self, Rng};
 use std::sync::{Arc, Mutex, RwLock};
 use std::thread;
 use std;
-use std::env;
+use std::{env, str};
 use time;
 
 use adapters::{ChainToPoolAndNetAdapter, PoolToChainAdapter};
@@ -30,19 +30,85 @@ use core::core;
 use core::core::Proof;
 use core::pow::cuckoo;
+use core::core::target::Difficulty;
 use core::core::{Block, BlockHeader};
 use core::core::hash::{Hash, Hashed};
 use core::pow::MiningWorker;
+use core::ser;
+use core::ser::{Writer, Writeable, AsFixedBytes};
 
 use chain;
 use secp;
 use pool;
-use types::{MinerConfig, Error};
+use types::{MinerConfig, ServerConfig, Error};
 use util;
 use wallet::{CbAmount, WalletReceiveRequest, CbData};
 
+use plugin::PluginMiner;
+use itertools::Itertools;
+
 // Max number of transactions this miner will assemble in a block
 const MAX_TX: u32 = 5000;
 
+const PRE_NONCE_SIZE: usize = 113;
+const POST_NONCE_SIZE: usize = 5;
+
+/// Serializer that outputs pre and post nonce portions of a block header
+/// which can then be sent off to miner to mutate at will
+pub struct HeaderPartWriter {
+	//
+	pub pre_nonce: Vec<u8>,
+	// Post nonce is currently variable length
+	// because of difficulty
+	pub post_nonce: Vec<u8>,
+	// which difficulty field we're on
+	bytes_written: usize,
+	writing_pre: bool,
+}
+
+impl Default for HeaderPartWriter {
+	fn default() -> HeaderPartWriter {
+		HeaderPartWriter {
+			bytes_written: 0,
+			writing_pre: true,
+			pre_nonce: Vec::new(),
+			post_nonce: Vec::new(),
+		}
+	}
+}
+
+impl HeaderPartWriter {
+	pub fn parts_as_hex_strings(&self) -> (String, String) {
+		(
+			String::from(format!("{:02x}", self.pre_nonce.iter().format(""))),
+			String::from(format!("{:02x}", self.post_nonce.iter().format(""))),
+		)
+	}
+}
+
+impl ser::Writer for HeaderPartWriter {
+	fn serialization_mode(&self) -> ser::SerializationMode {
+		ser::SerializationMode::Hash
+	}
+
+	fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> {
+		if self.writing_pre {
+			for i in 0..bytes_in.len() { self.pre_nonce.push(bytes_in.as_ref()[i]) };
+		} else if self.bytes_written != 0 {
+			for i in 0..bytes_in.len() { self.post_nonce.push(bytes_in.as_ref()[i]) };
+		}
+
+		self.bytes_written += bytes_in.len();
+
+		if self.bytes_written == PRE_NONCE_SIZE && self.writing_pre {
+			self.writing_pre = false;
+			self.bytes_written = 0;
+		}
+
+		Ok(())
+	}
+}
+
 pub struct Miner {
 	config: MinerConfig,
 	chain: Arc<chain::Chain>,
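Aside: HeaderPartWriter serializes a block header but splits the byte stream around the nonce, collecting a pre-nonce and a post-nonce buffer that are then handed to the plugin as hex strings. The sketch below shows just the split-and-hex idea with std only; the real writer works field by field through core::ser and skips the nonce field itself, and the input bytes here are invented.

// Collect serialized header bytes into a pre-nonce and a post-nonce buffer,
// switching once PRE_NONCE_SIZE bytes have been seen, then render both parts
// as lowercase hex for an external miner.
const PRE_NONCE_SIZE: usize = 113;

#[derive(Default)]
struct HeaderParts {
    pre_nonce: Vec<u8>,
    post_nonce: Vec<u8>,
    written: usize,
}

impl HeaderParts {
    fn write_bytes(&mut self, bytes: &[u8]) {
        for &b in bytes {
            if self.written < PRE_NONCE_SIZE {
                self.pre_nonce.push(b);
            } else {
                self.post_nonce.push(b);
            }
            self.written += 1;
        }
    }

    fn parts_as_hex(&self) -> (String, String) {
        fn hex(v: &[u8]) -> String {
            v.iter().map(|b| format!("{:02x}", b)).collect()
        }
        (hex(&self.pre_nonce), hex(&self.post_nonce))
    }
}

fn main() {
    let mut parts = HeaderParts::default();
    // Pretend this is a serialized header: 113 pre-nonce bytes, then 5 more.
    parts.write_bytes(&[0xab; PRE_NONCE_SIZE]);
    parts.write_bytes(&[0u8; 5]);
    let (pre, post) = parts.parts_as_hex();
    assert_eq!(pre.len(), PRE_NONCE_SIZE * 2);
    assert_eq!(post, "0000000000");
}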
@@ -75,12 +141,133 @@ impl Miner {
 		self.debug_output_id = debug_output_id;
 	}
 
+	/// Inner part of the mining loop for cuckoo-miner async mode
+	pub fn inner_loop_async(&self, plugin_miner: &mut PluginMiner,
+	                        difficulty: Difficulty,
+	                        b: &mut Block,
+	                        cuckoo_size: u32,
+	                        head: &BlockHeader,
+	                        latest_hash: &Hash)
+	                        -> Option<Proof> {
+
+		debug!("(Server ID: {}) Mining at Cuckoo{} for at most 2 secs at height {} and difficulty {}.",
+		       self.debug_output_id,
+		       cuckoo_size,
+		       b.header.height,
+		       b.header.difficulty);
+
+		// look for a pow for at most 2 sec on the same block (to give a chance to new
+		// transactions) and as long as the head hasn't changed
+		// Will change this to something else at some point
+		let deadline = time::get_time().sec + 2;
+
+		// Get parts of the header
+		let mut header_parts = HeaderPartWriter::default();
+		ser::Writeable::write(&b.header, &mut header_parts).unwrap();
+		let (pre, post) = header_parts.parts_as_hex_strings();
+
+		// Start the miner working
+		let miner = plugin_miner.get_consumable();
+		let job_handle = miner.notify(1, &pre, &post, difficulty.into_num()).unwrap();
+
+		let mut sol = None;
+
+		while head.hash() == *latest_hash && time::get_time().sec < deadline {
+			if let Some(s) = job_handle.get_solution() {
+				sol = Some(Proof(s.solution_nonces));
+				b.header.nonce = s.get_nonce_as_u64();
+				break;
+			}
+		}
+		if sol == None {
+			debug!("(Server ID: {}) No solution found after {} iterations, continuing...",
+			       self.debug_output_id,
+			       job_handle.get_hashes_since_last_call().unwrap())
+		}
+
+		job_handle.stop_jobs();
+		sol
+	}
+
+	/// The inner part of mining loop for synchronous mode
+	pub fn inner_loop_sync<T: MiningWorker>(&self,
+	                                        miner: &mut T,
+	                                        difficulty: Difficulty,
+	                                        b: &mut Block,
+	                                        cuckoo_size: u32,
+	                                        head: &BlockHeader,
+	                                        latest_hash: &mut Hash)
+	                                        -> Option<Proof> {
+		// look for a pow for at most 2 sec on the same block (to give a chance to new
+		// transactions) and as long as the head hasn't changed
+		let deadline = time::get_time().sec + 2;
+
+		debug!("(Server ID: {}) Mining at Cuckoo{} for at most 2 secs on block {} at difficulty {}.",
+		       self.debug_output_id,
+		       cuckoo_size,
+		       latest_hash,
+		       b.header.difficulty);
+		let mut iter_count = 0;
+
+		if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
+			debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
+			       self.debug_output_id,
+			       self.config.slow_down_in_millis.unwrap());
+		}
+
+		let mut sol = None;
+		while head.hash() == *latest_hash && time::get_time().sec < deadline {
+
+			let pow_hash = b.hash();
+			if let Ok(proof) = miner.mine(&pow_hash[..]) {
+				let proof_diff = proof.to_difficulty();
+				/*debug!("(Server ID: {}) Header difficulty is: {}, Proof difficulty is: {}",
+				       self.debug_output_id,
+				       b.header.difficulty,
+				       proof_diff);*/
+
+				if proof_diff >= b.header.difficulty {
+					sol = Some(proof);
+					break;
+				}
+			}
+			b.header.nonce += 1;
+			*latest_hash = self.chain.head().unwrap().last_block_h;
+			iter_count += 1;
+
+			// Artificial slow down
+			if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
+				thread::sleep(std::time::Duration::from_millis(self.config.slow_down_in_millis.unwrap()));
+			}
+		}
+
+		if sol == None {
+			debug!("(Server ID: {}) No solution found after {} iterations, continuing...",
+			       self.debug_output_id,
+			       iter_count)
+		}
+
+		sol
+	}
+
 	/// Starts the mining loop, building a new block on top of the existing
 	/// chain anytime required and looking for PoW solution.
-	pub fn run_loop<T: MiningWorker>(&self, mut miner: T, cuckoo_size: u32) {
+	pub fn run_loop(&self,
+	                miner_config: MinerConfig,
+	                server_config: ServerConfig,
+	                cuckoo_size: u32) {
 
 		info!("(Server ID: {}) Starting miner loop.", self.debug_output_id);
+		let mut plugin_miner = None;
+		let mut miner = None;
+		if miner_config.use_cuckoo_miner {
+			plugin_miner = Some(PluginMiner::new(consensus::EASINESS, cuckoo_size));
+			plugin_miner.as_mut().unwrap().init(miner_config.clone(), server_config);
+		} else {
+			miner = Some(cuckoo::Miner::new(consensus::EASINESS, cuckoo_size));
+		}
 
 		let mut coinbase = self.get_coinbase();
 
 		loop {
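Aside: both inner loops share the same contract: search for at most two seconds on the current block, bail out early if the chain head moves, and return Some(proof) only when a solution meets the header difficulty. A standalone sketch of that deadline-and-head-check structure, with std::time in place of the time crate and the miner and chain stubbed out as closures:

use std::time::{Duration, Instant};

// Attempt to find a solution for up to `budget`, giving up early if the chain
// head changes under us; mirrors the 2-second deadline in the mining loops.
fn mine_for<M, H>(mut try_once: M, mut current_head: H, budget: Duration) -> Option<u64>
where
    M: FnMut() -> Option<u64>, // stub miner: returns Some(solution) when it finds one
    H: FnMut() -> u64,         // stub chain: returns the current head hash
{
    let head_at_start = current_head();
    let deadline = Instant::now() + budget;

    while current_head() == head_at_start && Instant::now() < deadline {
        if let Some(sol) = try_once() {
            return Some(sol);
        }
    }
    // No solution within budget (or the head moved): the caller rebuilds the block.
    None
}

fn main() {
    // A miner that "solves" on its third attempt, against a static head.
    let mut attempts = 0;
    let sol = mine_for(
        || {
            attempts += 1;
            if attempts == 3 { Some(42) } else { None }
        },
        || 7,
        Duration::from_secs(2),
    );
    assert_eq!(sol, Some(42));
}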
@@ -89,46 +276,39 @@ impl Miner {
 			let mut latest_hash = self.chain.head().unwrap().last_block_h;
 			let mut b = self.build_block(&head, coinbase.clone());
 
-			// look for a pow for at most 2 sec on the same block (to give a chance to new
-			// transactions) and as long as the head hasn't changed
-			let deadline = time::get_time().sec + 2;
-			let mut sol = None;
-			debug!("(Server ID: {}) Mining at Cuckoo{} for at most 2 secs on block {} at difficulty {}.",
-			       self.debug_output_id,
-			       cuckoo_size,
-			       latest_hash,
-			       b.header.difficulty);
-			let mut iter_count = 0;
+			let mut sol = None;
+			let mut use_async = false;
+			if let Some(c) = self.config.cuckoo_miner_async_mode {
+				if c {
+					use_async = true;
+				}
+			}
+			if let Some(mut p) = plugin_miner.as_mut() {
+				if use_async {
+					sol = self.inner_loop_async(&mut p,
+					                            b.header.difficulty.clone(),
+					                            &mut b,
+					                            cuckoo_size,
+					                            &head,
+					                            &latest_hash);
+				} else {
+					sol = self.inner_loop_sync(p,
+					                           b.header.difficulty.clone(),
+					                           &mut b,
+					                           cuckoo_size,
+					                           &head,
+					                           &mut latest_hash);
+				}
+			}
+			if let Some(mut m) = miner.as_mut() {
+				sol = self.inner_loop_sync(m,
+				                           b.header.difficulty.clone(),
+				                           &mut b,
+				                           cuckoo_size,
+				                           &head,
+				                           &mut latest_hash);
+			}
 
-			if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
-				debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
-				       self.debug_output_id,
-				       self.config.slow_down_in_millis.unwrap());
-			}
-			while head.hash() == latest_hash && time::get_time().sec < deadline {
-				let pow_hash = b.hash();
-				if let Ok(proof) = miner.mine(&pow_hash[..]) {
-					let proof_diff = proof.to_difficulty();
-					/*debug!("(Server ID: {}) Header difficulty is: {}, Proof difficulty is: {}",
-					       self.debug_output_id,
-					       b.header.difficulty,
-					       proof_diff);*/
-
-					if proof_diff >= b.header.difficulty {
-						sol = Some(proof);
-						break;
-					}
-				}
-				b.header.nonce += 1;
-				latest_hash = self.chain.head().unwrap().last_block_h;
-				iter_count += 1;
-
-				//Artificial slow down
-				if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
-					thread::sleep(std::time::Duration::from_millis(self.config.slow_down_in_millis.unwrap()));
-				}
-			}
 
 			// if we found a solution, push our block out
 			if let Some(proof) = sol {
 				info!("(Server ID: {}) Found valid proof of work, adding block {}.",
@@ -146,11 +326,7 @@ impl Miner {
 				} else {
 					coinbase = self.get_coinbase();
 				}
-			} else {
-				debug!("(Server ID: {}) No solution found after {} iterations, continuing...",
-				       self.debug_output_id,
-				       iter_count)
-			}
 			}
 		}
 	}
 
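Aside: the dispatch in the loop above unwraps the Option<bool> async flag with a nested if before choosing between the plugin and the built-in miner. The same selection can be written more compactly; a sketch with a stub config type rather than the real MinerConfig:

struct MinerConfigSketch {
    use_cuckoo_miner: bool,
    cuckoo_miner_async_mode: Option<bool>,
}

// Decide which mining path to take from the config flags.
fn mining_mode(cfg: &MinerConfigSketch) -> &'static str {
    let use_async = cfg.cuckoo_miner_async_mode.unwrap_or(false);
    match (cfg.use_cuckoo_miner, use_async) {
        (true, true) => "plugin (async)",
        (true, false) => "plugin (sync)",
        (false, _) => "built-in cuckoo miner",
    }
}

fn main() {
    let cfg = MinerConfigSketch { use_cuckoo_miner: true, cuckoo_miner_async_mode: None };
    assert_eq!(mining_mode(&cfg), "plugin (sync)");
}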
@@ -49,14 +49,16 @@ lazy_static!{
 }
 
 pub struct PluginMiner {
-	miner: Option<CuckooMiner>,
+	pub miner: Option<CuckooMiner>,
 	last_solution: CuckooMinerSolution,
+	config: CuckooMinerConfig,
 }
 
 impl Default for PluginMiner {
 	fn default() -> PluginMiner {
 		PluginMiner {
 			miner: None,
+			config: CuckooMinerConfig::new(),
 			last_solution: CuckooMinerSolution::new(),
 		}
 	}
@@ -131,16 +133,28 @@ impl PluginMiner {
 		*loaded_config_ref = Some(config.clone());
 
 		// this will load the associated plugin
-		let result = CuckooMiner::new(config);
+		let result = CuckooMiner::new(config.clone());
 		if let Err(e) = result {
 			error!("Error initializing mining plugin: {:?}", e);
 			error!("Accepted values are: {:?}", caps[0].parameters);
 			panic!("Unable to init mining plugin.");
 		}
 
+		self.config = config.clone();
 		self.miner = Some(result.unwrap());
 	}
+
+	pub fn get_consumable(&mut self) -> CuckooMiner {
+		// this will load the associated plugin
+		let result = CuckooMiner::new(self.config.clone());
+		if let Err(e) = result {
+			error!("Error initializing mining plugin: {:?}", e);
+			panic!("Unable to init mining plugin.");
+		}
+		result.unwrap()
+	}
 }
 
 impl MiningWorker for PluginMiner {
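Aside: get_consumable rebuilds a fresh CuckooMiner from the stored configuration so the notify-based async job can own its own instance. The general pattern, keep the config and construct consumable instances on demand, can be sketched without the cuckoo-miner API (all names below are hypothetical):

// Keep a cheap-to-clone config around and build a fresh worker from it each
// time one is handed off, so the caller can consume (own) it freely.
#[derive(Clone)]
struct WorkerConfig {
    plugin_name: String, // hypothetical field for illustration
}

struct Worker {
    config: WorkerConfig,
}

struct WorkerFactory {
    config: WorkerConfig,
}

impl WorkerFactory {
    fn get_consumable(&self) -> Worker {
        // Rough equivalent of CuckooMiner::new(self.config.clone()) in the diff.
        Worker { config: self.config.clone() }
    }
}

fn main() {
    let factory = WorkerFactory {
        config: WorkerConfig { plugin_name: "simple".to_string() },
    };
    let w1 = factory.get_consumable();
    let w2 = factory.get_consumable();
    // Each worker owns its own copy of the config.
    assert_eq!(w1.config.plugin_name, w2.config.plugin_name);
}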
@@ -153,16 +153,7 @@ impl Server {
 		miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.unwrap().port));
 		let server_config = self.config.clone();
 		thread::spawn(move || {
-			if config.use_cuckoo_miner {
-				let mut cuckoo_miner = PluginMiner::new(consensus::EASINESS,
-				                                        cuckoo_size);
-				cuckoo_miner.init(config.clone(), server_config);
-				miner.run_loop(cuckoo_miner, cuckoo_size);
-			} else {
-				let test_internal_miner = cuckoo::Miner::new(consensus::EASINESS, cuckoo_size);
-				miner.run_loop(test_internal_miner, cuckoo_size);
-			}
-
+			miner.run_loop(config.clone(), server_config, cuckoo_size);
 		});
 	}
 
@@ -107,6 +107,9 @@ pub struct MinerConfig {
 	/// Whether to use the cuckoo-miner crate and plugin for mining
 	pub use_cuckoo_miner: bool,
 
+	/// Whether to use the async version of mining
+	pub cuckoo_miner_async_mode: Option<bool>,
+
 	/// The location in which cuckoo miner plugins are stored
 	pub cuckoo_miner_plugin_dir: Option<String>,
 
@@ -150,6 +153,7 @@ impl Default for MinerConfig {
 		MinerConfig {
 			enable_mining: false,
 			use_cuckoo_miner: false,
+			cuckoo_miner_async_mode: None,
 			cuckoo_miner_plugin_dir: None,
 			cuckoo_miner_plugin_type: None,
 			cuckoo_miner_parameter_list: None,
@@ -228,6 +228,7 @@ impl LocalServerContainer {
 			enable_mining: self.config.start_miner,
 			burn_reward: self.config.burn_mining_rewards,
 			use_cuckoo_miner: true,
+			cuckoo_miner_async_mode: Some(false),
 			cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
 			cuckoo_miner_plugin_type: Some(String::from("simple")),
 			wallet_receiver_url: self.config.coinbase_wallet_address.clone(),
@@ -138,8 +138,7 @@ fn simulate_parallel_mining(){
 	let mut pool_config = LocalServerContainerPoolConfig::default();
 	pool_config.base_name = String::from(test_name_dir);
 	pool_config.run_length_in_seconds = 60;
-	//have to select different ports because of tests being run in parallel
 	//have to select different ports because of tests being run in parallel
 	pool_config.base_api_port = 30040;
 	pool_config.base_p2p_port = 31040;
 	pool_config.base_wallet_port = 32040;
@@ -178,12 +177,11 @@ fn simulate_parallel_mining(){
 }
 
 //TODO: Convert these tests to newer framework format
-
 /// Create a network of 5 servers and mine a block, verifying that the block
 /// gets propagated to all.
 
 #[test]
-fn simulate_block_propagation() {
+fn a_simulate_block_propagation() {
 	env_logger::init();
 
 	let test_name_dir = "test_servers/grin-prop";
@@ -196,6 +194,7 @@ fn simulate_block_propagation() {
 			enable_mining: true,
 			burn_reward: true,
 			use_cuckoo_miner: true,
+			cuckoo_miner_async_mode: None,
 			cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
 			cuckoo_miner_plugin_type: Some(String::from("simple")),
 			..Default::default()
@@ -206,9 +205,9 @@ fn simulate_block_propagation() {
 	for n in 0..5 {
 		let s = grin::Server::future(
 			grin::ServerConfig{
-				api_http_addr: format!("127.0.0.1:{}", 20000+n),
+				api_http_addr: format!("127.0.0.1:{}", 19000+n),
 				db_root: format!("target/{}/grin-prop-{}", test_name_dir, n),
-				p2p_config: Some(p2p::P2PConfig{port: 10000+n, ..p2p::P2PConfig::default()}),
+				p2p_config: Some(p2p::P2PConfig{port: 18000+n, ..p2p::P2PConfig::default()}),
 				..Default::default()
 			}, &handle).unwrap();
 		servers.push(s);
@@ -218,7 +217,7 @@ fn simulate_block_propagation() {
 	for n in 0..5 {
 		for m in 0..5 {
 			if m == n { continue }
-			let addr = format!("{}:{}", "127.0.0.1", 10000+m);
+			let addr = format!("{}:{}", "127.0.0.1", 18000+m);
 			servers[n].connect_peer(addr.parse().unwrap()).unwrap();
 		}
 	}
@@ -255,6 +254,7 @@ fn simulate_full_sync() {
 			enable_mining: true,
 			burn_reward: true,
 			use_cuckoo_miner: true,
+			cuckoo_miner_async_mode: Some(false),
 			cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
 			cuckoo_miner_plugin_type: Some(String::from("simple")),
 			..Default::default()
@@ -6,13 +6,6 @@ workspace = ".."
 
 [dependencies]
 byteorder = "^0.5"
-rocksdb = "^0.6.0"
-## When using GCC 7, the rust-rocksdb dependency doesn't compile
-## To get around this (temporarily) clone (beside the 'grin' directory)
-## https://github.com/spacejam/rust-rocksdb.git
-## Manually apply the changes in:
-## https://github.com/facebook/rocksdb/commit/816c1e30ca73615c75fc208ddcc4b05012b30951
-## And swap the dependency for the one below
-#rocksdb = { path = "../../rust-rocksdb" }
+rocksdb = "^0.7.0"
 
 grin_core = { path = "../core" }