Cuckoo-miner update - Multiple Plugin mining (#139)

* Add the ability to load multiple mining plugins in parallel via cuckoo-miner (a config sketch follows below)
* Update to the newest cuckoo-miner changes
* Revert default config value
* Update the PoW document with a link to the podcast
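The main user-facing change is that grin.toml can now declare several cuckoo-miner plugins at once. A minimal sketch, assuming the keys sit under the [mining] table as the [[mining.cuckoo_miner_plugin_config]] entries in the sample config below suggest; plugin names and parameters are illustrative, copied from that sample:

```
#illustrative sketch only - see the sample grin.toml changes below
[mining]
use_cuckoo_miner = true
#async mode lets all configured plugins mine in parallel
cuckoo_miner_async_mode = true

#fastest CPU plugin, largest memory footprint
[[mining.cuckoo_miner_plugin_config]]
type_filter = "mean_cpu"
parameter_list = {NUM_THREADS=4, NUM_TRIMS=64}

#CUDA plugin, mining alongside the CPU plugin
[[mining.cuckoo_miner_plugin_config]]
type_filter = "lean_cuda"
parameter_list = {}
```

With cuckoo_miner_async_mode left at false, only the first entry is used, as the sample config notes.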
Yeastplume 2017-09-26 18:58:56 +01:00 committed by Ignotus Peverell
parent 7d48e1da49
commit 53d9ca630c
14 changed files with 1028 additions and 789 deletions

View file

@ -46,7 +46,7 @@ impl ApiEndpoint for ChainApi {
fn get(&self, _: String) -> ApiResult<Tip> { fn get(&self, _: String) -> ApiResult<Tip> {
match self.chain.head() { match self.chain.head() {
Ok(tip) => Ok(Tip::from_tip(tip)), Ok(tip) => Ok(Tip::from_tip(tip)),
Err(e) => Err(Error::Internal(format!("{:?}", e))) Err(e) => Err(Error::Internal(format!("{:?}", e))),
} }
} }
} }
@ -70,12 +70,14 @@ impl ApiEndpoint for OutputApi {
fn get(&self, id: String) -> ApiResult<Output> { fn get(&self, id: String) -> ApiResult<Output> {
debug!("GET output {}", id); debug!("GET output {}", id);
let c = util::from_hex(id.clone()).map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?; let c = util::from_hex(id.clone()).map_err(|_| {
Error::Argument(format!("Not a valid commitment: {}", id))
})?;
let commit = Commitment::from_vec(c); let commit = Commitment::from_vec(c);
let out = self.chain.get_unspent(&commit) let out = self.chain.get_unspent(&commit).map_err(|_| Error::NotFound)?;
.map_err(|_| Error::NotFound)?; let header = self.chain
let header = self.chain.get_block_header_by_output_commit(&commit) .get_block_header_by_output_commit(&commit)
.map_err(|_| Error::NotFound)?; .map_err(|_| Error::NotFound)?;
Ok(Output::from_output(&out, &header)) Ok(Output::from_output(&out, &header))
@ -90,7 +92,8 @@ pub struct PoolApi<T> {
} }
impl<T> ApiEndpoint for PoolApi<T> impl<T> ApiEndpoint for PoolApi<T>
where T: pool::BlockChain + Clone + Send + Sync + 'static where
T: pool::BlockChain + Clone + Send + Sync + 'static,
{ {
type ID = String; type ID = String;
type T = PoolInfo; type T = PoolInfo;
@ -116,7 +119,9 @@ impl<T> ApiEndpoint for PoolApi<T>
})?; })?;
let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| { let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
Error::Argument("Could not deserialize transaction, invalid format.".to_string()) Error::Argument(
"Could not deserialize transaction, invalid format.".to_string(),
)
})?; })?;
let source = pool::TxSource { let source = pool::TxSource {
@ -148,20 +153,21 @@ pub struct TxWrapper {
/// Start all server REST APIs. Just register all of them on an ApiServer /// Start all server REST APIs. Just register all of them on an ApiServer
/// instance and run the corresponding HTTP server. /// instance and run the corresponding HTTP server.
pub fn start_rest_apis<T>(addr: String, pub fn start_rest_apis<T>(
addr: String,
chain: Arc<chain::Chain>, chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<T>>>) tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
where T: pool::BlockChain + Clone + Send + Sync + 'static ) where
T: pool::BlockChain + Clone + Send + Sync + 'static,
{ {
thread::spawn(move || { thread::spawn(move || {
let mut apis = ApiServer::new("/v1".to_string()); let mut apis = ApiServer::new("/v1".to_string());
apis.register_endpoint("/chain".to_string(), apis.register_endpoint("/chain".to_string(), ChainApi { chain: chain.clone() });
ChainApi { chain: chain.clone() }); apis.register_endpoint(
apis.register_endpoint("/chain/utxo".to_string(), "/chain/utxo".to_string(),
OutputApi { OutputApi { chain: chain.clone() },
chain: chain.clone(), );
});
apis.register_endpoint("/pool".to_string(), PoolApi { tx_pool: tx_pool }); apis.register_endpoint("/pool".to_string(), PoolApi { tx_pool: tx_pool });
apis.start(&addr[..]).unwrap_or_else(|e| { apis.start(&addr[..]).unwrap_or_else(|e| {

View file

@ -22,9 +22,7 @@ use std::fs::File;
use toml; use toml;
use grin::ServerConfig; use grin::ServerConfig;
use pow::types::MinerConfig; use pow::types::MinerConfig;
use types::{ConfigMembers, use types::{ConfigMembers, GlobalConfig, ConfigError};
GlobalConfig,
ConfigError};
/// The default file name to use when trying to derive /// The default file name to use when trying to derive
/// the config file location /// the config file location
@ -39,7 +37,6 @@ impl Default for ConfigMembers {
ConfigMembers { ConfigMembers {
server: ServerConfig::default(), server: ServerConfig::default(),
mining: Some(MinerConfig::default()), mining: Some(MinerConfig::default()),
//wallet: Some(WalletConfig::default()),
} }
} }
} }
@ -49,13 +46,12 @@ impl Default for GlobalConfig {
GlobalConfig { GlobalConfig {
config_file_path: None, config_file_path: None,
using_config_file: false, using_config_file: false,
members: Some(ConfigMembers::default()) members: Some(ConfigMembers::default()),
} }
} }
} }
impl GlobalConfig { impl GlobalConfig {
/// Need to decide on rules where to read the config file from, /// Need to decide on rules where to read the config file from,
/// but will take a stab at logic for now /// but will take a stab at logic for now
@ -65,7 +61,7 @@ impl GlobalConfig {
config_path.push(CONFIG_FILE_NAME); config_path.push(CONFIG_FILE_NAME);
if config_path.exists() { if config_path.exists() {
self.config_file_path = Some(config_path); self.config_file_path = Some(config_path);
return Ok(()) return Ok(());
} }
// Next, look in directory of executable // Next, look in directory of executable
let mut config_path = env::current_exe().unwrap(); let mut config_path = env::current_exe().unwrap();
@ -73,7 +69,7 @@ impl GlobalConfig {
config_path.push(CONFIG_FILE_NAME); config_path.push(CONFIG_FILE_NAME);
if config_path.exists() { if config_path.exists() {
self.config_file_path = Some(config_path); self.config_file_path = Some(config_path);
return Ok(()) return Ok(());
} }
// Then look in {user_home}/.grin // Then look in {user_home}/.grin
let config_path = env::home_dir(); let config_path = env::home_dir();
@ -82,7 +78,7 @@ impl GlobalConfig {
p.push(CONFIG_FILE_NAME); p.push(CONFIG_FILE_NAME);
if p.exists() { if p.exists() {
self.config_file_path = Some(p); self.config_file_path = Some(p);
return Ok(()) return Ok(());
} }
} }
@ -110,10 +106,15 @@ impl GlobalConfig {
// Config file path is given but not valid // Config file path is given but not valid
if !return_value.config_file_path.as_mut().unwrap().exists() { if !return_value.config_file_path.as_mut().unwrap().exists() {
return Err( return Err(ConfigError::FileNotFoundError(String::from(
ConfigError::FileNotFoundError(String::from(return_value.config_file_path.as_mut() return_value
.unwrap().to_str().unwrap().clone())) .config_file_path
); .as_mut()
.unwrap()
.to_str()
.unwrap()
.clone(),
)));
} }
// Try to parse the config file if it exists // Try to parse the config file if it exists
@ -135,32 +136,34 @@ impl GlobalConfig {
gc.server.mining_config = gc.mining.clone(); gc.server.mining_config = gc.mining.clone();
self.using_config_file = true; self.using_config_file = true;
self.members = Some(gc); self.members = Some(gc);
return Ok(self) return Ok(self);
}, }
Err(e) => { Err(e) => {
return Err( return Err(ConfigError::ParseError(
ConfigError::ParseError(String::from(self.config_file_path.as_mut() String::from(
.unwrap().to_str().unwrap().clone()), self.config_file_path
String::from(format!("{}", e)) .as_mut()
) .unwrap()
); .to_str()
.unwrap()
.clone(),
),
String::from(format!("{}", e)),
));
} }
} }
} }
/// Serialize config /// Serialize config
pub fn ser_config(&mut self) -> Result<String, ConfigError> { pub fn ser_config(&mut self) -> Result<String, ConfigError> {
let encoded:Result<String, toml::ser::Error> = toml::to_string(self.members.as_mut().unwrap()); let encoded: Result<String, toml::ser::Error> =
toml::to_string(self.members.as_mut().unwrap());
match encoded { match encoded {
Ok(enc) => { Ok(enc) => return Ok(enc),
return Ok(enc)
},
Err(e) => { Err(e) => {
return Err( return Err(ConfigError::SerializationError(
ConfigError::SerializationError( String::from(format!("{}", e)),
String::from(format!("{}", e)) ));
)
);
} }
} }
} }
@ -171,7 +174,13 @@ impl GlobalConfig {
/// Enable mining /// Enable mining
pub fn mining_enabled(&mut self) -> bool { pub fn mining_enabled(&mut self) -> bool {
return self.members.as_mut().unwrap().mining.as_mut().unwrap().enable_mining; return self.members
.as_mut()
.unwrap()
.mining
.as_mut()
.unwrap()
.enable_mining;
} }
} }

View file

@ -41,7 +41,12 @@ impl fmt::Display for ConfigError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
ConfigError::ParseError(ref file_name, ref message) => { ConfigError::ParseError(ref file_name, ref message) => {
write!(f, "Error parsing configuration file at {} - {}",file_name, message) write!(
f,
"Error parsing configuration file at {} - {}",
file_name,
message
)
} }
ConfigError::FileIOError(ref file_name, ref message) => { ConfigError::FileIOError(ref file_name, ref message) => {
write!(f, "{} {}", message, file_name) write!(f, "{} {}", message, file_name)
@ -72,7 +77,6 @@ impl From<io::Error> for ConfigError {
/// as they tend to be quite nested in the code /// as they tend to be quite nested in the code
/// Most structs optional, as they may or may not /// Most structs optional, as they may or may not
/// be needed depending on what's being run /// be needed depending on what's being run
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct GlobalConfig { pub struct GlobalConfig {
/// Keep track of the file we've read /// Keep track of the file we've read
@ -89,7 +93,6 @@ pub struct GlobalConfig {
/// level GlobalConfigContainer options might want to keep /// level GlobalConfigContainer options might want to keep
/// internal state that we don't necessarily /// internal state that we don't necessarily
/// want serialised or deserialised /// want serialised or deserialised
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct ConfigMembers { pub struct ConfigMembers {
/// Server config /// Server config

View file

@ -56,7 +56,7 @@ from the build directory will run grin using the defaults in the grin.toml file,
For the time being, it's recommended just to put the built version of grin on your path, e.g. via: For the time being, it's recommended just to put the built version of grin on your path, e.g. via:
``` ```
export $PATH /path/to/grin/dir/target/grin:$PATH export PATH=/path/to/grin/dir/target/grin:$PATH
``` ```
# Configuration # Configuration

View file

@ -1,8 +1,6 @@
Grin's Proof-of-Work Grin's Proof-of-Work
==================== ====================
[WIP and subject to review, may still contain errors]
This document is meant to outline, at a level suitable for someone without prior knowledge, This document is meant to outline, at a level suitable for someone without prior knowledge,
the algorithms and processes currently involved in Grin's Proof-of-Work system. We'll start the algorithms and processes currently involved in Grin's Proof-of-Work system. We'll start
with a general overview of cycles in a graph and the Cuckoo Cycle algorithm which forms the with a general overview of cycles in a graph and the Cuckoo Cycle algorithm which forms the
@ -16,8 +14,8 @@ Please note that Grin is currently under active development, and any and all of
Grin's basic Proof-of-Work algorithm is called Cuckoo Cycle, which is specifically designed Grin's basic Proof-of-Work algorithm is called Cuckoo Cycle, which is specifically designed
to be resistant to Bitcoin style hardware arms-races. It is primarily a memory bound algorithm, to be resistant to Bitcoin style hardware arms-races. It is primarily a memory bound algorithm,
which, (at least in theory,) means that solution time is limited to the speed of a system's RAM which, (at least in theory,) means that solution time is bound by memory bandwidth
rather than processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on rather than raw processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on
most commodity hardware, and require far less energy than most other GPU, CPU or ASIC-bound most commodity hardware, and require far less energy than most other GPU, CPU or ASIC-bound
proof of work algorithms. proof of work algorithms.
@ -26,11 +24,16 @@ can be found in [his github repository](https://github.com/tromp/cuckoo). The
[white paper](https://github.com/tromp/cuckoo/blob/master/doc/cuckoo.pdf) is the best source of [white paper](https://github.com/tromp/cuckoo/blob/master/doc/cuckoo.pdf) is the best source of
further technical details. further technical details.
There is also a [podcast with Mike from Monero Monitor](https://moneromonitor.com/episodes/2017-09-26-Episode-014.html)
in which John Tromp talks at length about Cuckoo Cycle; recommended listening for anyone wanting
more background on Cuckoo Cycle, including more technical detail, the history of the algorithm's development
and some of the motivations behind it.
## Cycles in a Graph ## Cycles in a Graph
Cuckoo Cycle is an algorithm meant to detect cycles in a bipartite graph of N nodes Cuckoo Cycle is an algorithm meant to detect cycles in a bipartite graph of N nodes
and M edges. In plainer terms, a bipartite graph is one in which edges (i.e. lines connecting nodes) and M edges. In plainer terms, a bipartite graph is one in which edges (i.e. lines connecting nodes)
go only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle, travel only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle,
one side of the graph is an array numbered with odd indices (up to the size of the graph), and the other is numbered with even one side of the graph is an array numbered with odd indices (up to the size of the graph), and the other is numbered with even
indices. A node is simply a numbered 'space' on either side of the Cuckoo Table, and an Edge is a indices. A node is simply a numbered 'space' on either side of the Cuckoo Table, and an Edge is a
line connecting two nodes on opposite sides. The simple graph below denotes just such a graph, line connecting two nodes on opposite sides. The simple graph below denotes just such a graph,
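For readers who prefer code to a picture, the following is a toy Rust sketch of that structure: each of M edges joins one node from the even-indexed side to one node from the odd-indexed side. The hash function here is a hypothetical placeholder purely for illustration; the real Cuckoo Cycle derives edge endpoints with siphash keyed by the block header.

```
// Toy illustration of the bipartite structure described above. This is NOT the
// real Cuckoo Cycle edge generation (which uses siphash keyed on the header);
// the mixer below is a stand-in so the structure can be seen end to end.
fn toy_hash(seed: u64, x: u64) -> u64 {
    let mut v = x.wrapping_mul(0x9E37_79B9_7F4A_7C15).wrapping_add(seed);
    v ^= v >> 31;
    v.wrapping_mul(0xBF58_476D_1CE4_E5B9)
}

/// Build `m` edges over a graph with `n` nodes on each side; each edge joins
/// an even-numbered node to an odd-numbered node.
fn toy_edges(seed: u64, n: u64, m: u64) -> Vec<(u64, u64)> {
    (0..m)
        .map(|i| {
            let even = (toy_hash(seed, 2 * i) % n) * 2;
            let odd = (toy_hash(seed, 2 * i + 1) % n) * 2 + 1;
            (even, odd)
        })
        .collect()
}

fn main() {
    // Finding a cycle of the required length among edges like these is the
    // proof-of-work itself.
    for (u, v) in toy_edges(42, 8, 6) {
        println!("edge {} -- {}", u, v);
    }
}
```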

View file

@ -34,7 +34,7 @@ seeding_type = "None"
#UserTesting - For regular user testing, much lighter than production mode #UserTesting - For regular user testing, much lighter than production mode
#Production - Full production cuckoo parameters #Production - Full production cuckoo parameters
mining_parameter_mode = "UserTesting" mining_parameter_mode = "Production"
#7 = Bit flags for FULL_NODE, this structure needs to be changed #7 = Bit flags for FULL_NODE, this structure needs to be changed
#internally to make it more configurable #internally to make it more configurable
@ -74,27 +74,10 @@ cuckoo_miner_async_mode = false
#cuckoo_miner_plugin_dir = "target/debug/plugins" #cuckoo_miner_plugin_dir = "target/debug/plugins"
#if using cuckoo_miner, the implementation to use.. currently
#just filters for this word in the filenames in the plugin
#directory
#Plugins currently included are:
#"simple" : the basic cuckoo algorithm
#"edgetrim" : an algorithm trading speed for a much lower memory footprint
#"matrix" : fastest available CPU miner, with largest memory footprint
#"tomato" : Time memory-tradeoff... low memory but very slow
#Not included but verified working:
#"cuda" a gpu miner - which currently needs to bebuilt and installed
#separately from#the cuckoo-miner repository. Instructions found there
cuckoo_miner_plugin_type = "simple"
#the list of parameters if you're using "edgetrim or matrix"
#cuckoo_miner_parameter_list = {NUM_THREADS=4, NUM_TRIMS=7}
#The amount of time, in seconds, to attempt to mine on a particular #The amount of time, in seconds, to attempt to mine on a particular
#header before stopping and re-collecting transactions from the pool #header before stopping and re-collecting transactions from the pool
attempt_time_per_block = 30 attempt_time_per_block = 90
#the wallet receiver to which coinbase rewards will be sent #the wallet receiver to which coinbase rewards will be sent
@ -106,3 +89,40 @@ burn_reward = true
#testing value, optional #testing value, optional
#slow_down_in_millis = 30 #slow_down_in_millis = 30
#########################################
### CUCKOO MINER PLUGIN CONFIGURATION ###
#########################################
# These entries configure instances of cuckoo miner
# plugins if the 'use_cuckoo_miner' value above is
# set to 'true'.
#
# Multiple plugins can be specified (e.g. a cpu
# miner and a gpu miner running in parallel). However,
# if 'cuckoo_miner_async_mode' above is set to 'false', only
# the first plugin specified will be used for mining
# in single-threaded mode.
# You'll likely get the best performance using a
# single GPU and a single CPU plugin in parallel
#The fastest cpu algorithm, but consumes the most memory
[[mining.cuckoo_miner_plugin_config]]
type_filter = "mean_cpu"
parameter_list = {NUM_THREADS=4, NUM_TRIMS=64}
#note lean_cpu currently has a bug which prevents it from
#working with threads > 1
#[[mining.cuckoo_miner_plugin_config]]
#type_filter = "lean_cpu"
#parameter_list = {NUM_THREADS=1, NUM_TRIMS=7}
#CUDA version of lean miner
#Can currently be used only in Production (30) Mode
#[[mining.cuckoo_miner_plugin_config]]
#type_filter = "lean_cuda"
#parameter_list = {}

View file

@ -19,10 +19,10 @@ use rand::{self, Rng};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread; use std::thread;
use std; use std;
use std::{str}; use std::str;
use time; use time;
use adapters::{PoolToChainAdapter}; use adapters::PoolToChainAdapter;
use api; use api;
use core::consensus; use core::consensus;
use core::core; use core::core;
@ -34,7 +34,7 @@ use core::core::hash::{Hash, Hashed};
use pow::MiningWorker; use pow::MiningWorker;
use pow::types::MinerConfig; use pow::types::MinerConfig;
use core::ser; use core::ser;
use core::ser::{AsFixedBytes}; use core::ser::AsFixedBytes;
// use core::genesis; // use core::genesis;
@ -94,10 +94,14 @@ impl ser::Writer for HeaderPartWriter {
fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> { fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> {
if self.writing_pre { if self.writing_pre {
for i in 0..bytes_in.len() {self.pre_nonce.push(bytes_in.as_ref()[i])}; for i in 0..bytes_in.len() {
self.pre_nonce.push(bytes_in.as_ref()[i])
}
} else if self.bytes_written != 0 { } else if self.bytes_written != 0 {
for i in 0..bytes_in.len() {self.post_nonce.push(bytes_in.as_ref()[i])}; for i in 0..bytes_in.len() {
self.post_nonce.push(bytes_in.as_ref()[i])
}
} }
self.bytes_written += bytes_in.len(); self.bytes_written += bytes_in.len();
@ -124,10 +128,11 @@ pub struct Miner {
impl Miner { impl Miner {
/// Creates a new Miner. Needs references to the chain state and its /// Creates a new Miner. Needs references to the chain state and its
/// storage. /// storage.
pub fn new(config: MinerConfig, pub fn new(
config: MinerConfig,
chain_ref: Arc<chain::Chain>, chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>) tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
-> Miner { ) -> Miner {
Miner { Miner {
config: config, config: config,
chain: chain_ref, chain: chain_ref,
@ -143,25 +148,29 @@ impl Miner {
self.debug_output_id = debug_output_id; self.debug_output_id = debug_output_id;
} }
/// Inner part of the mining loop for cuckoo-miner asynch mode /// Inner part of the mining loop for cuckoo-miner async mode
pub fn inner_loop_async(&self, pub fn inner_loop_async(
&self,
plugin_miner: &mut PluginMiner, plugin_miner: &mut PluginMiner,
difficulty: Difficulty, difficulty: Difficulty,
b: &mut Block, b: &mut Block,
cuckoo_size: u32, cuckoo_size: u32,
head: &BlockHeader, head: &BlockHeader,
latest_hash: &Hash, latest_hash: &Hash,
attempt_time_per_block: u32) attempt_time_per_block: u32,
-> Option<Proof> { ) -> Option<Proof> {
debug!("(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.", debug!(
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
self.debug_output_id, self.debug_output_id,
cuckoo_size, cuckoo_size,
attempt_time_per_block, attempt_time_per_block,
b.header.height, b.header.height,
b.header.difficulty); b.header.difficulty
);
// look for a pow for at most 10 sec on the same block (to give a chance to new // look for a pow for at most attempt_time_per_block sec on the
// same block (to give a chance to new
// transactions) and as long as the head hasn't changed // transactions) and as long as the head hasn't changed
// Will change this to something else at some point // Will change this to something else at some point
let deadline = time::get_time().sec + attempt_time_per_block as i64; let deadline = time::get_time().sec + attempt_time_per_block as i64;
@ -192,31 +201,44 @@ impl Miner {
if let Some(s) = job_handle.get_solution() { if let Some(s) = job_handle.get_solution() {
sol = Some(Proof::new(s.solution_nonces.to_vec())); sol = Some(Proof::new(s.solution_nonces.to_vec()));
b.header.nonce = s.get_nonce_as_u64(); b.header.nonce = s.get_nonce_as_u64();
println!("Nonce: {}", b.header.nonce); // debug!("Nonce: {}", b.header.nonce);
break; break;
} }
if time::get_time().sec > next_stat_output { if time::get_time().sec > next_stat_output {
let stats = job_handle.get_stats(); let mut sps_total = 0.0;
for i in 0..plugin_miner.loaded_plugin_count() {
let stats = job_handle.get_stats(i);
if let Ok(stat_vec) = stats { if let Ok(stat_vec) = stats {
for s in stat_vec { for s in stat_vec {
if s.last_start_time==0 {
continue;
}
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0; let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs; let last_hashes_per_sec = 1.0 / last_solution_time_secs;
debug!("Mining on Device {} - {}: Last hash time: {} - Hashes per second: {:.*} - Total Attempts: {}", debug!(
s.device_id, s.device_name, "Mining: Plugin {} - Device {} ({}): Last Solution time: {}s; \
last_solution_time_secs, 3, last_hashes_per_sec, Solutions per second: {:.*} - Total Attempts: {}",
s.iterations_completed); i,
s.device_id,
s.device_name,
last_solution_time_secs,
3,
last_hashes_per_sec,
s.iterations_completed
);
if last_hashes_per_sec.is_finite() {
sps_total += last_hashes_per_sec;
} }
} }
}
debug!("Total solutions per second: {}", sps_total);
next_stat_output = time::get_time().sec + stat_output_interval; next_stat_output = time::get_time().sec + stat_output_interval;
} }
} }
}
if sol == None { if sol == None {
debug!("(Server ID: {}) No solution found after {} iterations, continuing...", debug!(
"(Server ID: {}) No solution found after {} seconds, continuing...",
self.debug_output_id, self.debug_output_id,
job_handle.get_hashes_since_last_call().unwrap()) attempt_time_per_block
);
} }
job_handle.stop_jobs(); job_handle.stop_jobs();
@ -224,31 +246,125 @@ impl Miner {
} }
/// The inner part of mining loop for synchronous mode /// The inner part of mining loop for cuckoo miner sync mode
pub fn inner_loop_sync<T: MiningWorker>(&self, pub fn inner_loop_sync_plugin(
&self,
plugin_miner: &mut PluginMiner,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most attempt_time_per_block secs on the same block
// (to give a chance to new transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval;
debug!(
"(Server ID: {}) Mining at Cuckoo{} for {} secs (will wait for last solution) \
on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
latest_hash,
b.header.difficulty
);
let mut iter_count = 0;
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!(
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
);
}
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
if proof_diff >= b.header.difficulty {
sol = Some(proof);
break;
}
}
if time::get_time().sec >= next_stat_check {
let stats_vec = plugin_miner.get_stats(0).unwrap();
for s in stats_vec.into_iter() {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
println!(
"Plugin 0 - Device {} ({}) - Last Solution time: {}; Solutions per second: {:.*}",
s.device_id,
s.device_name,
last_solution_time_secs,
3,
last_hashes_per_sec
);
}
next_stat_check = time::get_time().sec + stat_check_interval;
}
b.header.nonce += 1;
*latest_hash = self.chain.head().unwrap().last_block_h;
iter_count += 1;
// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
}
}
if sol == None {
debug!(
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
)
}
sol
}
/// The inner part of mining loop for the internal miner
pub fn inner_loop_sync_internal<T: MiningWorker>(
&self,
miner: &mut T, miner: &mut T,
b: &mut Block, b: &mut Block,
cuckoo_size: u32, cuckoo_size: u32,
head: &BlockHeader, head: &BlockHeader,
attempt_time_per_block: u32, attempt_time_per_block: u32,
latest_hash:&mut Hash) latest_hash: &mut Hash,
-> Option<Proof> { ) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new // look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed // transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64; let deadline = time::get_time().sec + attempt_time_per_block as i64;
debug!("(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.", debug!(
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.",
self.debug_output_id, self.debug_output_id,
cuckoo_size, cuckoo_size,
attempt_time_per_block, attempt_time_per_block,
latest_hash, latest_hash,
b.header.difficulty); b.header.difficulty
);
let mut iter_count = 0; let mut iter_count = 0;
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 { if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.", debug!(
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id, self.debug_output_id,
self.config.slow_down_in_millis.unwrap()); self.config.slow_down_in_millis.unwrap()
);
} }
let mut sol = None; let mut sol = None;
@ -257,50 +373,56 @@ impl Miner {
let pow_hash = b.hash(); let pow_hash = b.hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) { if let Ok(proof) = miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty(); let proof_diff = proof.clone().to_difficulty();
/*debug!("(Server ID: {}) Header difficulty is: {}, Proof difficulty is: {}",
self.debug_output_id,
b.header.difficulty,
proof_diff);*/
if proof_diff >= b.header.difficulty { if proof_diff >= b.header.difficulty {
sol = Some(proof); sol = Some(proof);
break; break;
} }
} }
b.header.nonce += 1; b.header.nonce += 1;
*latest_hash = self.chain.head().unwrap().last_block_h; *latest_hash = self.chain.head().unwrap().last_block_h;
iter_count += 1; iter_count += 1;
// Artificial slow down // Artificial slow down
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 { if self.config.slow_down_in_millis != None &&
thread::sleep(std::time::Duration::from_millis(self.config.slow_down_in_millis.unwrap())); self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
} }
} }
if sol == None { if sol == None {
debug!("(Server ID: {}) No solution found after {} iterations, continuing...", debug!(
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id, self.debug_output_id,
iter_count) iter_count
)
} }
sol sol
} }
/// Starts the mining loop, building a new block on top of the existing /// Starts the mining loop, building a new block on top of the existing
/// chain anytime required and looking for PoW solution. /// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
miner_config:MinerConfig,
cuckoo_size:u32,
proof_size:usize) {
info!("(Server ID: {}) Starting miner loop.", self.debug_output_id); info!("(Server ID: {}) Starting miner loop.", self.debug_output_id);
let mut plugin_miner = None; let mut plugin_miner = None;
let mut miner = None; let mut miner = None;
if miner_config.use_cuckoo_miner { if miner_config.use_cuckoo_miner {
plugin_miner = Some(PluginMiner::new(consensus::EASINESS, cuckoo_size, proof_size)); plugin_miner = Some(PluginMiner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
plugin_miner.as_mut().unwrap().init(miner_config.clone()); plugin_miner.as_mut().unwrap().init(miner_config.clone());
} else { } else {
miner = Some(cuckoo::Miner::new(consensus::EASINESS, cuckoo_size, proof_size)); miner = Some(cuckoo::Miner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
} }
let mut coinbase = self.get_coinbase(); let mut coinbase = self.get_coinbase();
@ -320,35 +442,44 @@ impl Miner {
} }
if let Some(mut p) = plugin_miner.as_mut() { if let Some(mut p) = plugin_miner.as_mut() {
if use_async { if use_async {
sol = self.inner_loop_async(&mut p, sol = self.inner_loop_async(
&mut p,
b.header.difficulty.clone(), b.header.difficulty.clone(),
&mut b, &mut b,
cuckoo_size, cuckoo_size,
&head, &head,
&latest_hash, &latest_hash,
miner_config.attempt_time_per_block); miner_config.attempt_time_per_block,
);
} else { } else {
sol = self.inner_loop_sync(p, sol = self.inner_loop_sync_plugin(
p,
&mut b, &mut b,
cuckoo_size, cuckoo_size,
&head, &head,
miner_config.attempt_time_per_block, miner_config.attempt_time_per_block,
&mut latest_hash); &mut latest_hash,
);
} }
} }
if let Some(mut m) = miner.as_mut() { if let Some(mut m) = miner.as_mut() {
sol = self.inner_loop_sync(m, sol = self.inner_loop_sync_internal(
m,
&mut b, &mut b,
cuckoo_size, cuckoo_size,
&head, &head,
miner_config.attempt_time_per_block, miner_config.attempt_time_per_block,
&mut latest_hash); &mut latest_hash,
);
} }
// if we found a solution, push our block out // if we found a solution, push our block out
if let Some(proof) = sol { if let Some(proof) = sol {
info!("(Server ID: {}) Found valid proof of work, adding block {}.", info!(
self.debug_output_id, b.hash()); "(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id,
b.hash()
);
b.header.pow = proof; b.header.pow = proof;
let opts = if cuckoo_size < consensus::DEFAULT_SIZESHIFT as u32 { let opts = if cuckoo_size < consensus::DEFAULT_SIZESHIFT as u32 {
chain::EASY_POW chain::EASY_POW
@ -357,8 +488,11 @@ impl Miner {
}; };
let res = self.chain.process_block(b, opts); let res = self.chain.process_block(b, opts);
if let Err(e) = res { if let Err(e) = res {
error!("(Server ID: {}) Error validating mined block: {:?}", error!(
self.debug_output_id, e); "(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id,
e
);
} else { } else {
coinbase = self.get_coinbase(); coinbase = self.get_coinbase();
} }
@ -368,10 +502,11 @@ impl Miner {
/// Builds a new block with the chain head as previous and eligible /// Builds a new block with the chain head as previous and eligible
/// transactions from the pool. /// transactions from the pool.
fn build_block(&self, fn build_block(
&self,
head: &core::BlockHeader, head: &core::BlockHeader,
coinbase: (core::Output, core::TxKernel)) coinbase: (core::Output, core::TxKernel),
-> core::Block { ) -> core::Block {
let mut now_sec = time::get_time().sec; let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec; let head_sec = head.timestamp.to_timespec().sec;
if now_sec == head_sec { if now_sec == head_sec {
@ -381,15 +516,19 @@ impl Miner {
let diff_iter = self.chain.difficulty_iter(); let diff_iter = self.chain.difficulty_iter();
let difficulty = consensus::next_difficulty(diff_iter).unwrap(); let difficulty = consensus::next_difficulty(diff_iter).unwrap();
let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(MAX_TX); let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(
MAX_TX,
);
let txs = txs_box.iter().map(|tx| tx.as_ref()).collect(); let txs = txs_box.iter().map(|tx| tx.as_ref()).collect();
let (output, kernel) = coinbase; let (output, kernel) = coinbase;
let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap(); let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap();
debug!("(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}", debug!(
"(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}",
self.debug_output_id, self.debug_output_id,
b.inputs.len(), b.inputs.len(),
b.outputs.len(), b.outputs.len(),
difficulty); difficulty
);
// making sure we're not spending time mining a useless block // making sure we're not spending time mining a useless block
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
@ -409,13 +548,18 @@ impl Miner {
let skey = secp::key::SecretKey::new(&secp_inst, &mut rng); let skey = secp::key::SecretKey::new(&secp_inst, &mut rng);
core::Block::reward_output(skey, &secp_inst).unwrap() core::Block::reward_output(skey, &secp_inst).unwrap()
} else { } else {
let url = format!("{}/v1/receive/coinbase", let url = format!(
self.config.wallet_receiver_url.as_str()); "{}/v1/receive/coinbase",
self.config.wallet_receiver_url.as_str()
);
let request = WalletReceiveRequest::Coinbase(CbAmount { amount: consensus::REWARD }); let request = WalletReceiveRequest::Coinbase(CbAmount { amount: consensus::REWARD });
let res: CbData = api::client::post(url.as_str(), let res: CbData = api::client::post(url.as_str(), &request).expect(
&request) format!(
.expect(format!("(Server ID: {}) Wallet receiver unreachable, could not claim reward. Is it running?", "(Server ID: {}) Wallet receiver unreachable, could not claim reward. Is it running?",
self.debug_output_id.as_str()).as_str()); self.debug_output_id
.as_str()
).as_str(),
);
let out_bin = util::from_hex(res.output).unwrap(); let out_bin = util::from_hex(res.output).unwrap();
let kern_bin = util::from_hex(res.kernel).unwrap(); let kern_bin = util::from_hex(res.kernel).unwrap();
let output = ser::deserialize(&mut &out_bin[..]).unwrap(); let output = ser::deserialize(&mut &out_bin[..]).unwrap();

View file

@ -63,10 +63,8 @@ pub enum Error {
/// All-in-one server configuration struct, for convenience /// All-in-one server configuration struct, for convenience
/// ///
#[derive(Clone)] #[derive(Clone)]
pub struct LocalServerContainerConfig { pub struct LocalServerContainerConfig {
// user friendly name for the server, also denotes what dir // user friendly name for the server, also denotes what dir
// the data files will appear in // the data files will appear in
pub name: String, pub name: String,
@ -109,8 +107,6 @@ pub struct LocalServerContainerConfig {
// When running a wallet, the address to check inputs and send // When running a wallet, the address to check inputs and send
// finalised transactions to, // finalised transactions to,
pub wallet_validating_node_url: String, pub wallet_validating_node_url: String,
} }
/// Default server config /// Default server config
@ -139,7 +135,6 @@ impl Default for LocalServerContainerConfig {
/// on a server, i.e. server, wallet in send or receive mode /// on a server, i.e. server, wallet in send or receive mode
pub struct LocalServerContainer { pub struct LocalServerContainer {
// Configuration // Configuration
config: LocalServerContainerConfig, config: LocalServerContainerConfig,
@ -165,17 +160,17 @@ pub struct LocalServerContainer {
// base directory for the server instance // base directory for the server instance
working_dir: String, working_dir: String,
} }
impl LocalServerContainer { impl LocalServerContainer {
/// Create a new local server container with defaults, with the given name /// Create a new local server container with defaults, with the given name
/// all related files will be created in the directory target/test_servers/{name} /// all related files will be created in the directory
/// target/test_servers/{name}
pub fn new(config: LocalServerContainerConfig) -> Result<LocalServerContainer, Error> { pub fn new(config: LocalServerContainerConfig) -> Result<LocalServerContainer, Error> {
let working_dir = format!("target/test_servers/{}", config.name); let working_dir = format!("target/test_servers/{}", config.name);
Ok((LocalServerContainer { Ok(
(LocalServerContainer {
config: config, config: config,
p2p_server_stats: None, p2p_server_stats: None,
api_server: None, api_server: None,
@ -184,11 +179,11 @@ impl LocalServerContainer {
wallet_is_running: false, wallet_is_running: false,
working_dir: working_dir, working_dir: working_dir,
peer_list: Vec::new(), peer_list: Vec::new(),
})) }),
)
} }
pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats {
{
let mut event_loop = reactor::Core::new().unwrap(); let mut event_loop = reactor::Core::new().unwrap();
let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port); let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port);
@ -205,11 +200,16 @@ impl LocalServerContainer {
grin::ServerConfig { grin::ServerConfig {
api_http_addr: api_addr, api_http_addr: api_addr,
db_root: format!("{}/.grin", self.working_dir), db_root: format!("{}/.grin", self.working_dir),
p2p_config: Some(p2p::P2PConfig{port: self.config.p2p_server_port, ..p2p::P2PConfig::default()}), p2p_config: Some(p2p::P2PConfig {
port: self.config.p2p_server_port,
..p2p::P2PConfig::default()
}),
seeds: Some(seeds), seeds: Some(seeds),
seeding_type: seeding_type, seeding_type: seeding_type,
..Default::default() ..Default::default()
}, &event_loop.handle()).unwrap(); },
&event_loop.handle(),
).unwrap();
self.p2p_server_stats = Some(s.get_server_stats().unwrap()); self.p2p_server_stats = Some(s.get_server_stats().unwrap());
@ -219,13 +219,18 @@ impl LocalServerContainer {
thread::sleep(time::Duration::from_millis(1000)); thread::sleep(time::Duration::from_millis(1000));
} }
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig { let miner_config = pow::types::MinerConfig {
enable_mining: self.config.start_miner, enable_mining: self.config.start_miner,
burn_reward: self.config.burn_mining_rewards, burn_reward: self.config.burn_mining_rewards,
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false), cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")), cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")), cuckoo_miner_plugin_config: Some(plugin_config_vec),
wallet_receiver_url: self.config.coinbase_wallet_address.clone(), wallet_receiver_url: self.config.coinbase_wallet_address.clone(),
slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()), slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()),
..Default::default() ..Default::default()
@ -267,10 +272,14 @@ impl LocalServerContainer {
let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes()); let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());
let s = Secp256k1::new(); let s = Secp256k1::new();
let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes()) let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes()).expect(
.expect("Error deriving extended key from seed."); "Error deriving extended key from seed.",
);
println!("Starting the Grin wallet receiving daemon on {} ", self.config.wallet_port ); println!(
"Starting the Grin wallet receiving daemon on {} ",
self.config.wallet_port
);
let mut wallet_config = WalletConfig::default(); let mut wallet_config = WalletConfig::default();
@ -280,10 +289,13 @@ impl LocalServerContainer {
let mut api_server = api::ApiServer::new("/v1".to_string()); let mut api_server = api::ApiServer::new("/v1".to_string());
api_server.register_endpoint("/receive".to_string(), wallet::WalletReceiver { api_server.register_endpoint(
"/receive".to_string(),
wallet::WalletReceiver {
key: key, key: key,
config: wallet_config, config: wallet_config,
}); },
);
api_server.start(url).unwrap_or_else(|e| { api_server.start(url).unwrap_or_else(|e| {
println!("Failed to start Grin wallet receiver: {}.", e); println!("Failed to start Grin wallet receiver: {}.", e);
@ -306,7 +318,6 @@ impl LocalServerContainer {
pub fn add_peer(&mut self, addr: String) { pub fn add_peer(&mut self, addr: String) {
self.peer_list.push(addr); self.peer_list.push(addr);
} }
} }
/// Configuration values for container pool /// Configuration values for container pool
@ -332,8 +343,6 @@ pub struct LocalServerContainerPoolConfig {
// How long the servers in the pool are going to run // How long the servers in the pool are going to run
pub run_length_in_seconds: u64, pub run_length_in_seconds: u64,
} }
/// Default server config /// Default server config
@ -370,11 +379,9 @@ pub struct LocalServerContainerPool {
// keep track of whether a seed exists, and pause a bit if so // keep track of whether a seed exists, and pause a bit if so
is_seeding: bool, is_seeding: bool,
} }
impl LocalServerContainerPool { impl LocalServerContainerPool {
pub fn new(config: LocalServerContainerPoolConfig) -> LocalServerContainerPool { pub fn new(config: LocalServerContainerPoolConfig) -> LocalServerContainerPool {
(LocalServerContainerPool { (LocalServerContainerPool {
next_api_port: config.base_api_port, next_api_port: config.base_api_port,
@ -383,33 +390,36 @@ impl LocalServerContainerPool {
config: config, config: config,
server_containers: Vec::new(), server_containers: Vec::new(),
is_seeding: false, is_seeding: false,
}) })
} }
/// adds a single server on the next available port /// adds a single server on the next available port
/// overriding passed-in values as necessary. Config object is an OUT value with /// overriding passed-in values as necessary. Config object is an OUT value
/// with
/// ports/addresses filled in /// ports/addresses filled in
/// ///
pub fn create_server(&mut self, server_config:&mut LocalServerContainerConfig) pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) {
{
// If we're calling it this way, need to override these // If we're calling it this way, need to override these
server_config.p2p_server_port = self.next_p2p_port; server_config.p2p_server_port = self.next_p2p_port;
server_config.api_server_port = self.next_api_port; server_config.api_server_port = self.next_api_port;
server_config.wallet_port = self.next_wallet_port; server_config.wallet_port = self.next_wallet_port;
server_config.name=String::from(format!("{}/{}-{}", server_config.name = String::from(format!(
"{}/{}-{}",
self.config.base_name, self.config.base_name,
self.config.base_name, self.config.base_name,
server_config.p2p_server_port)); server_config.p2p_server_port
));
// Use self as coinbase wallet // Use self as coinbase wallet
server_config.coinbase_wallet_address=String::from(format!("http://{}:{}", server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}",
server_config.base_addr, server_config.base_addr,
server_config.wallet_port)); server_config.wallet_port
));
self.next_p2p_port += 1; self.next_p2p_port += 1;
@ -420,9 +430,11 @@ impl LocalServerContainerPool {
self.is_seeding = true; self.is_seeding = true;
} }
let _server_address = format!("{}:{}", let _server_address = format!(
"{}:{}",
server_config.base_addr, server_config.base_addr,
server_config.p2p_server_port); server_config.p2p_server_port
);
let server_container = LocalServerContainer::new(server_config.clone()).unwrap(); let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
// self.server_containers.push(server_arc); // self.server_containers.push(server_arc);
@ -499,16 +511,13 @@ impl LocalServerContainerPool {
let mut server_addresses: Vec<String> = Vec::new(); let mut server_addresses: Vec<String> = Vec::new();
for s in &self.server_containers { for s in &self.server_containers {
let server_address = format!("{}:{}", let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port);
s.config.base_addr,
s.config.p2p_server_port);
server_addresses.push(server_address); server_addresses.push(server_address);
} }
for a in server_addresses { for a in server_addresses {
for s in &mut self.server_containers { for s in &mut self.server_containers {
if format!("{}:{}", s.config.base_addr, if format!("{}:{}", s.config.base_addr, s.config.p2p_server_port) != a {
s.config.p2p_server_port) != a {
s.add_peer(a.clone()); s.add_peer(a.clone());
} }
} }

View file

@ -128,8 +128,6 @@ fn simulate_seeding() {
/// as a seed. Meant to test the evolution of mining difficulty with miners /// as a seed. Meant to test the evolution of mining difficulty with miners
/// running at /// running at
/// different rates /// different rates
// Just going to comment this out as an automatically run test for the time // Just going to comment this out as an automatically run test for the time
// being, // being,
// As it's more for actively testing and hurts CI a lot // As it's more for actively testing and hurts CI a lot
@ -201,13 +199,18 @@ fn a_simulate_block_propagation() {
let mut evtlp = reactor::Core::new().unwrap(); let mut evtlp = reactor::Core::new().unwrap();
let handle = evtlp.handle(); let handle = evtlp.handle();
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig { let miner_config = pow::types::MinerConfig {
enable_mining: true, enable_mining: true,
burn_reward: true, burn_reward: true,
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: None, cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")), cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")), cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default() ..Default::default()
}; };
@ -268,13 +271,18 @@ fn simulate_full_sync() {
let mut evtlp = reactor::Core::new().unwrap(); let mut evtlp = reactor::Core::new().unwrap();
let handle = evtlp.handle(); let handle = evtlp.handle();
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig { let miner_config = pow::types::MinerConfig {
enable_mining: true, enable_mining: true,
burn_reward: true, burn_reward: true,
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false), cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")), cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")), cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default() ..Default::default()
}; };

View file

@ -14,10 +14,15 @@ lazy_static = "~0.2.8"
serde = "~1.0.8" serde = "~1.0.8"
serde_derive = "~1.0.8" serde_derive = "~1.0.8"
cuckoo_miner = { git = "https://github.com/mimblewimble/cuckoo-miner", tag="grin_integration_8"}
grin_core = { path = "../core" } grin_core = { path = "../core" }
[dependencies.cuckoo_miner]
git = "https://github.com/mimblewimble/cuckoo-miner"
tag="grin_integration_9"
#path = "../../cuckoo-miner"
#uncomment this feature to turn off plugin builds
#features=["no-plugin-build"]
[dev_dependencies] [dev_dependencies]
grin_chain = { path = "../chain"} grin_chain = { path = "../chain"}
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" } secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }
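Relatedly, the commented-out feature in the new dependency block above can be enabled to skip building the plugins themselves; a hypothetical sketch of that variant, using the same git source and tag:

```
[dependencies.cuckoo_miner]
git = "https://github.com/mimblewimble/cuckoo-miner"
tag = "grin_integration_9"
features = ["no-plugin-build"]
```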

View file

@ -27,21 +27,20 @@ use core::global;
use core::core::Proof; use core::core::Proof;
use types::MinerConfig; use types::MinerConfig;
use std::sync::{Mutex}; use std::sync::Mutex;
use cuckoo_miner::{ use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
CuckooMiner, CuckooMinerDeviceStats, CuckooMinerError};
CuckooPluginManager,
CuckooMinerConfig,
CuckooMinerSolution};
//For now, we're just going to keep a static reference around to the loaded config // For now, we're just going to keep a static reference around to the loaded
//And not allow querying the plugin directory twice once a plugin has been selected // config
// And not allow querying the plugin directory twice once a plugin has been
// selected
// This is to keep compatibility with multi-threaded testing, so that spawned // This is to keep compatibility with multi-threaded testing, so that spawned
// testing threads don't try to load/unload the library while another thread is // testing threads don't try to load/unload the library while another thread is
// using it. // using it.
lazy_static!{ lazy_static!{
static ref LOADED_CONFIG: Mutex<Option<CuckooMinerConfig>> = Mutex::new(None); static ref LOADED_CONFIG: Mutex<Option<Vec<CuckooMinerConfig>>> = Mutex::new(None);
} }
/// plugin miner /// plugin miner
@ -49,14 +48,14 @@ pub struct PluginMiner {
/// the miner /// the miner
pub miner: Option<CuckooMiner>, pub miner: Option<CuckooMiner>,
last_solution: CuckooMinerSolution, last_solution: CuckooMinerSolution,
config: CuckooMinerConfig, config: Vec<CuckooMinerConfig>,
} }
impl Default for PluginMiner { impl Default for PluginMiner {
fn default() -> PluginMiner { fn default() -> PluginMiner {
PluginMiner { PluginMiner {
miner: None, miner: None,
config: CuckooMinerConfig::new(), config: Vec::new(),
last_solution: CuckooMinerSolution::new(), last_solution: CuckooMinerSolution::new(),
} }
} }
@ -69,18 +68,19 @@ impl PluginMiner {
let mut exe_path = env::current_exe().unwrap(); let mut exe_path = env::current_exe().unwrap();
exe_path.pop(); exe_path.pop();
let exe_path = exe_path.to_str().unwrap(); let exe_path = exe_path.to_str().unwrap();
let plugin_install_path = match miner_config.cuckoo_miner_plugin_dir.clone() {
//println!("Plugin dir: {}", miner_config.clone().cuckoo_miner_plugin_dir.unwrap());
let plugin_install_path = match miner_config.cuckoo_miner_plugin_dir {
Some(s) => s, Some(s) => s,
None => String::from(format!("{}/plugins", exe_path)) None => String::from(format!("{}/plugins", exe_path)),
}; };
let plugin_impl_filter = match miner_config.cuckoo_miner_plugin_type { let mut plugin_vec_filters = Vec::new();
Some(s) => s, if let None = miner_config.cuckoo_miner_plugin_config {
None => String::from("simple") plugin_vec_filters.push(String::from("simple"));
}; } else {
for p in miner_config.clone().cuckoo_miner_plugin_config.unwrap() {
plugin_vec_filters.push(p.type_filter);
}
}
// First, load and query the plugins in the given directory // First, load and query the plugins in the given directory
// These should all be stored in 'plugins' at the moment relative // These should all be stored in 'plugins' at the moment relative
@ -95,6 +95,7 @@ impl PluginMiner {
// this will load the associated plugin // this will load the associated plugin
let result = CuckooMiner::new(c.clone()); let result = CuckooMiner::new(c.clone());
self.miner = Some(result.unwrap()); self.miner = Some(result.unwrap());
self.config = c.clone();
return; return;
} }
@ -102,40 +103,49 @@ impl PluginMiner {
let result = plugin_manager.load_plugin_dir(plugin_install_path); let result = plugin_manager.load_plugin_dir(plugin_install_path);
if let Err(_) = result { if let Err(_) = result {
error!("Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/plugins."); error!(
"Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/plugins."
);
panic!("Unable to load plugin directory... Please check configuration values"); panic!("Unable to load plugin directory... Please check configuration values");
} }
let sz = global::sizeshift(); let sz = global::sizeshift();
let mut cuckoo_configs = Vec::new();
let mut index=0;
for f in plugin_vec_filters {
// So this is built dynamically based on the plugin implementation // So this is built dynamically based on the plugin implementation
// type and the consensus sizeshift // type and the consensus sizeshift
let filter = format!("{}_{}", plugin_impl_filter, sz); let filter = format!("{}_{}", f, sz);
let caps = plugin_manager.get_available_plugins(&filter).unwrap(); let caps = plugin_manager.get_available_plugins(&filter).unwrap();
// insert it into the miner configuration being created below // insert it into the miner configuration being created below
let mut config = CuckooMinerConfig::new(); let mut config = CuckooMinerConfig::new();
info!("Mining using plugin: {}", caps[0].full_path.clone()); info!("Mining plugin {} - {}", index, caps[0].full_path.clone());
config.plugin_full_path = caps[0].full_path.clone(); config.plugin_full_path = caps[0].full_path.clone();
if let Some(l) = miner_config.cuckoo_miner_parameter_list { if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
config.parameter_list = l.clone(); if let Some(lp) = l[index].parameter_list.clone(){
config.parameter_list = lp.clone();
}
}
cuckoo_configs.push(config);
index+=1;
} }
// Store this config now, because we just want one instance // Store this config now, because we just want one instance
// of the plugin lib per invocation now // of the plugin lib per invocation now
*loaded_config_ref=Some(config.clone()); *loaded_config_ref = Some(cuckoo_configs.clone());
// this will load the associated plugin // this will load the associated plugin
let result=CuckooMiner::new(config.clone()); let result = CuckooMiner::new(cuckoo_configs.clone());
if let Err(e) = result { if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e); error!("Error initializing mining plugin: {:?}", e);
error!("Accepted values are: {:?}", caps[0].parameters); //error!("Accepted values are: {:?}", caps[0].parameters);
panic!("Unable to init mining plugin."); panic!("Unable to init mining plugin.");
} }
self.config=config.clone(); self.config = cuckoo_configs.clone();
self.miner = Some(result.unwrap()); self.miner = Some(result.unwrap());
} }
@ -151,18 +161,24 @@ impl PluginMiner {
result.unwrap() result.unwrap()
} }
/// Returns the number of mining plugins that have been loaded
pub fn loaded_plugin_count(&self) -> usize {
self.config.len()
}
/// Get stats
pub fn get_stats(&self, index:usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
self.miner.as_ref().unwrap().get_stats(index)
}
} }
impl MiningWorker for PluginMiner { impl MiningWorker for PluginMiner {
/// This will initialise a plugin according to what's currently /// This will initialise a plugin according to what's currently
/// included in CONSENSUS::TEST_SIZESHIFT, just using the edgetrim /// included in CONSENSUS::TEST_SIZESHIFT, just using the edgetrim
/// version of the miner for now, though this should become /// version of the miner for now, though this should become
/// configurable somehow /// configurable somehow
fn new(_ease: u32, fn new(_ease: u32, _sizeshift: u32, _proof_size: usize) -> Self {
_sizeshift: u32,
_proof_size: usize) -> Self {
PluginMiner::default() PluginMiner::default()
} }
@ -170,7 +186,11 @@ impl MiningWorker for PluginMiner {
/// returning whether a solution was found and the solution itself /// returning whether a solution was found and the solution itself
fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error>{ fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error>{
let result = self.miner.as_mut().unwrap().mine(&header, &mut self.last_solution).unwrap(); let result = self.miner
.as_mut()
.unwrap()
.mine(&header, &mut self.last_solution, 0)
.unwrap();
if result == true { if result == true {
return Ok(Proof::new(self.last_solution.solution_nonces.to_vec())); return Ok(Proof::new(self.last_solution.solution_nonces.to_vec()));
} }

View file

@ -16,6 +16,25 @@
use std::collections::HashMap; use std::collections::HashMap;
/// CuckooMinerPlugin configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CuckooMinerPluginConfig {
///The type of plugin to load (i.e. filters on filename)
pub type_filter : String,
///Parameters for this plugin
pub parameter_list : Option<HashMap<String, u32>>,
}
impl Default for CuckooMinerPluginConfig {
fn default() -> CuckooMinerPluginConfig {
CuckooMinerPluginConfig {
type_filter : String::new(),
parameter_list : None,
}
}
}
/// Mining configuration /// Mining configuration
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MinerConfig { pub struct MinerConfig {
@ -28,15 +47,11 @@ pub struct MinerConfig {
/// Whether to use the async version of mining /// Whether to use the async version of mining
pub cuckoo_miner_async_mode: Option<bool>, pub cuckoo_miner_async_mode: Option<bool>,
/// The location in which cuckoo miner plugins are stored /// plugin dir
pub cuckoo_miner_plugin_dir: Option<String>, pub cuckoo_miner_plugin_dir: Option<String>,
/// The type of plugin to use (ends up filtering the filename) /// Cuckoo miner plugin configuration, one for each plugin
pub cuckoo_miner_plugin_type: Option<String>, pub cuckoo_miner_plugin_config: Option<Vec<CuckooMinerPluginConfig>>,
/// Cuckoo-miner parameters... these vary according
/// to the plugin being loaded
pub cuckoo_miner_parameter_list: Option<HashMap<String, u32>>,
/// How long to wait before stopping the miner, recollecting transactions /// How long to wait before stopping the miner, recollecting transactions
/// and starting again /// and starting again
@ -52,7 +67,6 @@ pub struct MinerConfig {
/// a testing attribute for the time being that artificially slows down the /// a testing attribute for the time being that artificially slows down the
/// mining loop by adding a sleep to the thread /// mining loop by adding a sleep to the thread
pub slow_down_in_millis: Option<u64>, pub slow_down_in_millis: Option<u64>,
} }
impl Default for MinerConfig { impl Default for MinerConfig {
@ -62,8 +76,7 @@ impl Default for MinerConfig {
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: None, cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: None, cuckoo_miner_plugin_dir: None,
cuckoo_miner_plugin_type: None, cuckoo_miner_plugin_config: None,
cuckoo_miner_parameter_list: None,
wallet_receiver_url: "http://localhost:13416".to_string(), wallet_receiver_url: "http://localhost:13416".to_string(),
burn_reward: false, burn_reward: false,
slow_down_in_millis: Some(0), slow_down_in_millis: Some(0),
@ -71,4 +84,3 @@ impl Default for MinerConfig {
} }
} }
} }
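
Putting the new types together: a minimal sketch, modeled on the updated tests in this commit, of constructing a MinerConfig that loads two plugins. It assumes it sits inside the grin server crate where pow::types is in scope; the example_miner_config name, paths and parameter values are illustrative only.

```
use std::collections::HashMap;

use pow::types::{CuckooMinerPluginConfig, MinerConfig};

fn example_miner_config() -> MinerConfig {
    // Parameters as used by the sample grin.toml for the mean_cpu plugin.
    let mut params = HashMap::new();
    params.insert(String::from("NUM_THREADS"), 4);
    params.insert(String::from("NUM_TRIMS"), 64);

    let cpu_plugin = CuckooMinerPluginConfig {
        type_filter: String::from("mean_cpu"),
        parameter_list: Some(params),
    };
    // Hypothetical second plugin; the CUDA build must be available separately.
    let gpu_plugin = CuckooMinerPluginConfig {
        type_filter: String::from("lean_cuda"),
        parameter_list: None,
    };

    MinerConfig {
        enable_mining: true,
        use_cuckoo_miner: true,
        // async mode is what allows both plugins to mine in parallel
        cuckoo_miner_async_mode: Some(true),
        cuckoo_miner_plugin_dir: Some(String::from("target/debug/plugins")),
        cuckoo_miner_plugin_config: Some(vec![cpu_plugin, gpu_plugin]),
        ..Default::default()
    }
}
```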