// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Mining Stratum Server
use futures::future::Future;
use futures::stream::Stream;
use tokio::io::AsyncRead;
use tokio::io::{lines, write_all};
use tokio::net::TcpListener;

use crate::util::RwLock;
use chrono::prelude::Utc;
use serde;
use serde_json;
use serde_json::Value;
use std::collections::HashMap;
use std::io::BufReader;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use std::{cmp, thread};

use crate::chain;
use crate::common::stats::{StratumStats, WorkerStats};
use crate::common::types::{StratumServerConfig, SyncState};
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::Block;
use crate::core::{pow, ser};
use crate::keychain;
use crate::mining::mine_block;
use crate::pool;
use crate::util;

use futures::sync::mpsc;
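
/// Sender half of a worker's connection channel; JSON responses and job
/// broadcasts pushed here are written out to that worker's TCP socket.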
type Tx = mpsc::UnboundedSender<String>;

// ----------------------------------------
// http://www.jsonrpc.org/specification
// RPC Methods

#[derive(Serialize, Deserialize, Debug)]
struct RpcRequest {
	id: String,
	jsonrpc: String,
	method: String,
	params: Option<Value>,
}

#[derive(Serialize, Deserialize, Debug)]
struct RpcResponse {
	id: String,
	jsonrpc: String,
	method: String,
	result: Option<Value>,
	error: Option<Value>,
}
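
// Illustrative wire format only (values are placeholders, not from a live
// miner); a keepalive exchange looks roughly like:
//   -> {"id":"1","jsonrpc":"2.0","method":"keepalive","params":null}
//   <- {"id":"1","jsonrpc":"2.0","method":"keepalive","result":"ok","error":null}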

#[derive(Serialize, Deserialize, Debug)]
struct RpcError {
	code: i32,
	message: String,
}

impl RpcError {
	pub fn internal_error() -> Self {
		RpcError {
			code: -32603,
			message: "Internal error".to_owned(),
		}
	}
	pub fn node_is_syncing() -> Self {
		RpcError {
			code: -32000,
			message: "Node is syncing - Please wait".to_owned(),
		}
	}
	pub fn method_not_found() -> Self {
		RpcError {
			code: -32601,
			message: "Method not found".to_owned(),
		}
	}
	pub fn too_late() -> Self {
		RpcError {
			code: -32503,
			message: "Solution submitted too late".to_string(),
		}
	}
	pub fn cannot_validate() -> Self {
		RpcError {
			code: -32502,
			message: "Failed to validate solution".to_string(),
		}
	}
	pub fn too_low_difficulty() -> Self {
		RpcError {
			code: -32501,
			message: "Share rejected due to low difficulty".to_string(),
		}
	}
	pub fn invalid_request() -> Self {
		RpcError {
			code: -32600,
			message: "Invalid Request".to_string(),
		}
	}
}

impl From<RpcError> for Value {
	fn from(e: RpcError) -> Self {
		serde_json::to_value(e).unwrap()
	}
}

impl<T> From<T> for RpcError
where
	T: std::error::Error,
{
	fn from(e: T) -> Self {
		error!("Received unhandled error: {}", e);
		RpcError::internal_error()
	}
}

#[derive(Serialize, Deserialize, Debug)]
struct LoginParams {
	login: String,
	pass: String,
	agent: String,
}

#[derive(Serialize, Deserialize, Debug)]
struct SubmitParams {
	height: u64,
	job_id: u64,
	nonce: u64,
	edge_bits: u32,
	pow: Vec<u64>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct JobTemplate {
	height: u64,
	job_id: u64,
	difficulty: u64,
	pre_pow: String,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct WorkerStatus {
	id: String,
	height: u64,
	difficulty: u64,
	accepted: u64,
	rejected: u64,
	stale: u64,
}
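
/// Shared JSON-RPC handler. One instance is wrapped in an `Arc` and serves
/// every worker connection, holding `Arc` references into the stratum
/// server's state (block versions, difficulties, key id, chain).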
struct Handler {
	id: String,
	workers: Arc<WorkersList>,
	current_block_versions: Arc<RwLock<Vec<Block>>>,
	sync_state: Arc<SyncState>,
	minimum_share_difficulty: Arc<RwLock<u64>>,
	current_key_id: Arc<RwLock<Option<keychain::Identifier>>>,
	current_difficulty: Arc<RwLock<u64>>,
	chain: Arc<chain::Chain>,
}

impl Handler {
	pub fn new(
		id: String,
		workers: Arc<WorkersList>,
		current_block_versions: Arc<RwLock<Vec<Block>>>,
		sync_state: Arc<SyncState>,
		minimum_share_difficulty: Arc<RwLock<u64>>,
		current_key_id: Arc<RwLock<Option<keychain::Identifier>>>,
		current_difficulty: Arc<RwLock<u64>>,
		chain: Arc<chain::Chain>,
	) -> Self {
		Handler {
			id,
			workers,
			current_block_versions,
			sync_state,
			minimum_share_difficulty,
			current_key_id,
			current_difficulty,
			chain,
		}
	}

	pub fn from_stratum(stratum: &StratumServer) -> Self {
		Handler::new(
			stratum.id.clone(),
			stratum.workers.clone(),
			stratum.current_block_versions.clone(),
			stratum.sync_state.clone(),
			stratum.minimum_share_difficulty.clone(),
			stratum.current_key_id.clone(),
			stratum.current_difficulty.clone(),
			stratum.chain.clone(),
		)
	}

	fn handle_rpc_requests(&self, request: RpcRequest, worker_id: usize) -> String {
		self.workers.last_seen(worker_id);

		// Call the handler function for requested method
		let response = match request.method.as_str() {
			"login" => self.handle_login(request.params, worker_id),
			"submit" => {
				let res = self.handle_submit(request.params, worker_id);
				// this key_id has been used now, reset
				if let Ok((_, true)) = res {
					let mut current_key_id = self.current_key_id.write();
					*current_key_id = None;
				}
				res.map(|(v, _)| v)
			}
			"keepalive" => self.handle_keepalive(),
			"getjobtemplate" => {
				if self.sync_state.is_syncing() {
					Err(RpcError::node_is_syncing())
				} else {
					self.handle_getjobtemplate()
				}
			}
			"status" => self.handle_status(worker_id),
			_ => {
				// Called undefined method
				Err(RpcError::method_not_found())
			}
		};

		// Package the reply as RpcResponse json
		let resp = match response {
			Err(rpc_error) => RpcResponse {
				id: request.id,
				jsonrpc: String::from("2.0"),
				method: request.method,
				result: None,
				error: Some(rpc_error.into()),
			},
			Ok(response) => RpcResponse {
				id: request.id,
				jsonrpc: String::from("2.0"),
				method: request.method,
				result: Some(response),
				error: None,
			},
		};
		serde_json::to_string(&resp).unwrap()
	}

	fn handle_login(&self, params: Option<Value>, worker_id: usize) -> Result<Value, RpcError> {
		let params: LoginParams = parse_params(params)?;
		let mut workers = self.workers.workers_list.write();
		let worker = workers
			.get_mut(&worker_id)
			.ok_or(RpcError::internal_error())?;
		worker.login = Some(params.login);
		// XXX TODO Future - Validate password?
		worker.agent = params.agent;
		worker.authenticated = true;
		return Ok("ok".into());
	}

	// Handle KEEPALIVE message
	fn handle_keepalive(&self) -> Result<Value, RpcError> {
		return Ok("ok".into());
	}

	fn handle_status(&self, worker_id: usize) -> Result<Value, RpcError> {
		// Return worker status in json for use by a dashboard or healthcheck.
		let status = WorkerStatus {
			id: self.workers.stratum_stats.read().worker_stats[worker_id]
				.id
				.clone(),
			height: self
				.current_block_versions
				.read()
				.last()
				.unwrap()
				.header
				.height,
			difficulty: self.workers.stratum_stats.read().worker_stats[worker_id].pow_difficulty,
			accepted: self.workers.stratum_stats.read().worker_stats[worker_id].num_accepted,
			rejected: self.workers.stratum_stats.read().worker_stats[worker_id].num_rejected,
			stale: self.workers.stratum_stats.read().worker_stats[worker_id].num_stale,
		};
		let response = serde_json::to_value(&status).unwrap();
		return Ok(response);
	}

	// Handle GETJOBTEMPLATE message
	fn handle_getjobtemplate(&self) -> Result<Value, RpcError> {
		// Build a JobTemplate from a BlockHeader and return JSON
		let job_template = self.build_block_template();
		let response = serde_json::to_value(&job_template).unwrap();
		debug!(
			"(Server ID: {}) sending block {} with id {} to single worker",
			self.id, job_template.height, job_template.job_id,
		);
		return Ok(response);
	}

	// Build and return a JobTemplate for mining the current block
	fn build_block_template(&self) -> JobTemplate {
		let bh = self
			.current_block_versions
			.read()
			.last()
			.unwrap()
			.header
			.clone();
		// Serialize the block header into pre and post nonce strings
		let mut header_buf = vec![];
		{
			let mut writer = ser::BinWriter::new(&mut header_buf);
			bh.write_pre_pow(&mut writer).unwrap();
			bh.pow.write_pre_pow(bh.version, &mut writer).unwrap();
		}
		let pre_pow = util::to_hex(header_buf);
		let job_template = JobTemplate {
			height: bh.height,
			job_id: (self.current_block_versions.read().len() - 1) as u64,
			difficulty: *self.minimum_share_difficulty.read(),
			pre_pow,
		};
		return job_template;
	}

	// Handle SUBMIT message
	// params contains a solved block header
	// We accept and log valid shares at any difficulty above the configured minimum
	// Accepted shares that are full solutions will also be submitted to the
	// network
	fn handle_submit(
		&self,
		params: Option<Value>,
		worker_id: usize,
	) -> Result<(Value, bool), RpcError> {
		// Validate parameters
		let params: SubmitParams = parse_params(params)?;

		let current_block_versions = self.current_block_versions.read();
		// Find the correct version of the block to match this header
		let b: Option<&Block> = current_block_versions.get(params.job_id as usize);
		if params.height != current_block_versions.last().unwrap().header.height || b.is_none() {
			// Return error status
			error!(
				"(Server ID: {}) Share at height {}, edge_bits {}, nonce {}, job_id {} submitted too late",
				self.id, params.height, params.edge_bits, params.nonce, params.job_id,
			);
			self.workers.update_stats(worker_id, |ws| ws.num_stale += 1);
			return Err(RpcError::too_late());
		}

		let share_difficulty: u64;
		let mut share_is_block = false;

		let mut b: Block = b.unwrap().clone();
		// Reconstruct the block's header with this nonce and pow added
		b.header.pow.proof.edge_bits = params.edge_bits as u8;
		b.header.pow.nonce = params.nonce;
		b.header.pow.proof.nonces = params.pow;

		if !b.header.pow.is_primary() && !b.header.pow.is_secondary() {
			// Return error status
			error!(
				"(Server ID: {}) Failed to validate solution at height {}, hash {}, edge_bits {}, nonce {}, job_id {}: cuckoo size too small",
				self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id,
			);
			self.workers
				.update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
			return Err(RpcError::cannot_validate());
		}

		// Get share difficulty
		share_difficulty = b.header.pow.to_difficulty(b.header.height).to_num();
		// If the difficulty is too low it's an error
		if share_difficulty < *self.minimum_share_difficulty.read() {
			// Return error status
			error!(
				"(Server ID: {}) Share at height {}, hash {}, edge_bits {}, nonce {}, job_id {} rejected due to low difficulty: {}/{}",
				self.id, params.height, b.hash(), params.edge_bits, params.nonce, params.job_id, share_difficulty, *self.minimum_share_difficulty.read(),
			);
			self.workers
				.update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
			return Err(RpcError::too_low_difficulty());
		}

		// If the difficulty is high enough, submit it (which also validates it)
		if share_difficulty >= *self.current_difficulty.read() {
			// This is a full solution, submit it to the network
			let res = self.chain.process_block(b.clone(), chain::Options::MINE);
			if let Err(e) = res {
				// Return error status
				error!(
					"(Server ID: {}) Failed to validate solution at height {}, hash {}, edge_bits {}, nonce {}, job_id {}, {}: {}",
					self.id,
					params.height,
					b.hash(),
					params.edge_bits,
					params.nonce,
					params.job_id,
					e,
					e.backtrace().unwrap(),
				);
				self.workers
					.update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
				return Err(RpcError::cannot_validate());
			}
			share_is_block = true;
			self.workers
				.update_stats(worker_id, |worker_stats| worker_stats.num_blocks_found += 1);
			// Log message to make it obvious we found a block
			warn!(
				"(Server ID: {}) Solution Found for block {}, hash {} - Yay!!! Worker ID: {}, blocks found: {}, shares: {}",
				self.id, params.height,
				b.hash(),
				self.workers.stratum_stats.read().worker_stats[worker_id].id,
				self.workers.stratum_stats.read().worker_stats[worker_id].num_blocks_found,
				self.workers.stratum_stats.read().worker_stats[worker_id].num_accepted,
			);
		} else {
			// Do some validation but don't submit
			let res = pow::verify_size(&b.header);
			if res.is_err() {
				// Return error status
				error!(
					"(Server ID: {}) Failed to validate share at height {}, hash {}, edge_bits {}, nonce {}, job_id {}. {:?}",
					self.id,
					params.height,
					b.hash(),
					params.edge_bits,
					b.header.pow.nonce,
					params.job_id,
					res,
				);
				self.workers
					.update_stats(worker_id, |worker_stats| worker_stats.num_rejected += 1);
				return Err(RpcError::cannot_validate());
			}
		}
		// Log this as a valid share
		let submitted_by = match self
			.workers
			.workers_list
			.read()
			.get(&worker_id)
			.unwrap()
			.login
			.clone()
		{
			None => self
				.workers
				.workers_list
				.read()
				.get(&worker_id)
				.unwrap()
				.id
				.to_string(),
			Some(login) => login.clone(),
		};
		info!(
			"(Server ID: {}) Got share at height {}, hash {}, edge_bits {}, nonce {}, job_id {}, difficulty {}/{}, submitted by {}",
			self.id,
			b.header.height,
			b.hash(),
			b.header.pow.proof.edge_bits,
			b.header.pow.nonce,
			params.job_id,
			share_difficulty,
			*self.current_difficulty.read(),
			submitted_by,
		);
		self.workers
			.update_stats(worker_id, |worker_stats| worker_stats.num_accepted += 1);
		let submit_response;
		if share_is_block {
			submit_response = format!("blockfound - {}", b.hash().to_hex());
		} else {
			submit_response = "ok".to_string();
		}
		return Ok((
			serde_json::to_value(submit_response).unwrap(),
			share_is_block,
		));
	} // handle submit a solution
}

// ----------------------------------------
// Worker Factory Thread Function
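//
// Each accepted connection gets its own unbounded channel: the read half of the
// socket becomes a line stream whose JSON-RPC requests are dispatched to the
// shared Handler, while the channel receiver is drained into the write half.
// When either side finishes, the worker is removed from the list.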
fn accept_connections(listen_addr: SocketAddr, handler: Handler) {
	info!("Start tokio stratum server");
	let listener = TcpListener::bind(&listen_addr).expect(&format!(
		"Stratum: Failed to bind to listen address {}",
		listen_addr
	));
	let handler = Arc::new(handler);
	let server = listener
		.incoming()
		.for_each(move |socket| {
			// Spawn a task to process the connection
			let (tx, rx) = mpsc::unbounded();

			let worker_id = handler.workers.add_worker(tx);
			info!("Worker {} connected", worker_id);

			let (reader, writer) = socket.split();
			let reader = BufReader::new(reader);
			let h = handler.clone();
			let workers = h.workers.clone();
			let input = lines(reader)
				.for_each(move |line| {
					let request = serde_json::from_str(&line)?;
					let resp = h.handle_rpc_requests(request, worker_id);
					workers.send_to(worker_id, resp);
					Ok(())
				})
				.map_err(|e| error!("error {}", e));

			let output = rx.fold(writer, |writer, s| {
				let s2 = s + "\n";
				write_all(writer, s2.into_bytes())
					.map(|(writer, _)| writer)
					.map_err(|e| error!("cannot send {}", e))
			});

			let workers = handler.workers.clone();
			let both = output.map(|_| ()).select(input);
			tokio::spawn(both.then(move |_| {
				workers.remove_worker(worker_id);
				info!("Worker {} disconnected", worker_id);
				Ok(())
			}));

			Ok(())
		})
		.map_err(|err| {
			error!("accept error = {:?}", err);
		});
	tokio::run(server.map(|_| ()).map_err(|_| ()));
}

// ----------------------------------------
// Worker Object - a connected stratum client - a miner, pool, proxy, etc...

pub struct Worker {
	id: usize,
	agent: String,
	login: Option<String>,
	authenticated: bool,
	tx: Tx,
}

impl Worker {
	/// Creates a new Stratum Worker.
	pub fn new(id: usize, tx: Tx) -> Worker {
		Worker {
			id,
			agent: String::from(""),
			login: None,
			authenticated: false,
			tx,
		}
	}
} // impl Worker
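
/// Registry of connected workers, keyed by worker id, plus the shared stratum
/// statistics that are updated as workers connect, submit shares and disconnect.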
struct WorkersList {
	workers_list: Arc<RwLock<HashMap<usize, Worker>>>,
	stratum_stats: Arc<RwLock<StratumStats>>,
}

impl WorkersList {
	pub fn new(stratum_stats: Arc<RwLock<StratumStats>>) -> Self {
		WorkersList {
			workers_list: Arc::new(RwLock::new(HashMap::new())),
			stratum_stats,
		}
	}

	pub fn add_worker(&self, tx: Tx) -> usize {
		let mut stratum_stats = self.stratum_stats.write();
		let worker_id = stratum_stats.worker_stats.len();
		let worker = Worker::new(worker_id, tx);
		let mut workers_list = self.workers_list.write();
		workers_list.insert(worker_id, worker);

		let mut worker_stats = WorkerStats::default();
		worker_stats.is_connected = true;
		worker_stats.id = worker_id.to_string();
		worker_stats.pow_difficulty = 1; // XXX TODO
		stratum_stats.worker_stats.push(worker_stats);
		stratum_stats.num_workers = workers_list.len();
		worker_id
	}

	pub fn remove_worker(&self, worker_id: usize) {
		self.update_stats(worker_id, |ws| ws.is_connected = false);
		self.workers_list
			.write()
			.remove(&worker_id)
			.expect("Stratum: no such addr in map");
		self.stratum_stats.write().num_workers = self.workers_list.read().len();
	}

	pub fn last_seen(&self, worker_id: usize) {
		//self.stratum_stats.write().worker_stats[worker_id].last_seen = SystemTime::now();
		self.update_stats(worker_id, |ws| ws.last_seen = SystemTime::now());
	}

	pub fn update_stats(&self, worker_id: usize, f: impl FnOnce(&mut WorkerStats) -> ()) {
		let mut stratum_stats = self.stratum_stats.write();
		f(&mut stratum_stats.worker_stats[worker_id]);
	}

	pub fn send_to(&self, worker_id: usize, msg: String) {
		self.workers_list
			.read()
			.get(&worker_id)
			.unwrap()
			.tx
			.unbounded_send(msg);
	}

	pub fn count(&self) -> usize {
		self.workers_list.read().len()
	}
}

// ----------------------------------------
// Grin Stratum Server

pub struct StratumServer {
	id: String,
	config: StratumServerConfig,
	chain: Arc<chain::Chain>,
	tx_pool: Arc<RwLock<pool::TransactionPool>>,
	verifier_cache: Arc<RwLock<dyn VerifierCache>>,
	current_block_versions: Arc<RwLock<Vec<Block>>>,
	current_difficulty: Arc<RwLock<u64>>,
	minimum_share_difficulty: Arc<RwLock<u64>>,
	current_key_id: Arc<RwLock<Option<keychain::Identifier>>>,
	workers: Arc<WorkersList>,
	sync_state: Arc<SyncState>,
	stratum_stats: Arc<RwLock<StratumStats>>,
}

impl StratumServer {
	/// Creates a new Stratum Server.
	pub fn new(
		config: StratumServerConfig,
		chain: Arc<chain::Chain>,
		tx_pool: Arc<RwLock<pool::TransactionPool>>,
		verifier_cache: Arc<RwLock<dyn VerifierCache>>,
		stratum_stats: Arc<RwLock<StratumStats>>,
	) -> StratumServer {
		StratumServer {
			id: String::from("0"),
			minimum_share_difficulty: Arc::new(RwLock::new(config.minimum_share_difficulty)),
			config,
			chain,
			tx_pool,
			verifier_cache,
			current_block_versions: Arc::new(RwLock::new(Vec::new())),
			current_difficulty: Arc::new(RwLock::new(<u64>::max_value())),
			current_key_id: Arc::new(RwLock::new(None)),
			workers: Arc::new(WorkersList::new(stratum_stats.clone())),
			sync_state: Arc::new(SyncState::new()),
			stratum_stats,
		}
	}

	// Build and return a JobTemplate for mining the current block
	fn build_block_template(&self) -> JobTemplate {
		let bh = self
			.current_block_versions
			.read()
			.last()
			.unwrap()
			.header
			.clone();
		// Serialize the block header into pre and post nonce strings
		let mut header_buf = vec![];
		{
			let mut writer = ser::BinWriter::new(&mut header_buf);
			bh.write_pre_pow(&mut writer).unwrap();
			bh.pow.write_pre_pow(bh.version, &mut writer).unwrap();
		}
		let pre_pow = util::to_hex(header_buf);
		let job_template = JobTemplate {
			height: bh.height,
			job_id: (self.current_block_versions.read().len() - 1) as u64,
			difficulty: *self.minimum_share_difficulty.read(),
			pre_pow,
		};
		return job_template;
	}

	// Broadcast a jobtemplate RpcRequest to all connected workers - no response
	// expected
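	// Illustrative shape of the broadcast message (values are placeholders and
	// the pre_pow hex is shortened):
	//   {"id":"Stratum","jsonrpc":"2.0","method":"job",
	//    "params":{"height":12345,"job_id":0,"difficulty":1,"pre_pow":"0001..."}}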
	fn broadcast_job(&mut self) {
		// Package new block into RpcRequest
		let job_template = self.build_block_template();
		let job_template_json = serde_json::to_string(&job_template).unwrap();
		// Issue #1159 - use a serde_json Value type to avoid extra quoting
		let job_template_value: Value = serde_json::from_str(&job_template_json).unwrap();
		let job_request = RpcRequest {
			id: String::from("Stratum"),
			jsonrpc: String::from("2.0"),
			method: String::from("job"),
			params: Some(job_template_value),
		};
		let job_request_json = serde_json::to_string(&job_request).unwrap();
		debug!(
			"(Server ID: {}) sending block {} with id {} to stratum clients",
			self.id, job_template.height, job_template.job_id,
		);
		for worker in self.workers.workers_list.read().values() {
			worker.tx.unbounded_send(job_request_json.clone());
		}
	}

	/// "main()" - Starts the stratum-server. Creates a thread to listen for
	/// connections, then enters a loop, building a new block on top of the
	/// existing chain whenever required and sending it to the connected
	/// stratum miners, proxies, or pools, and accepting full solutions to
	/// be submitted.
	pub fn run_loop(&mut self, edge_bits: u32, proof_size: usize, sync_state: Arc<SyncState>) {
		info!(
			"(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}",
			self.id, edge_bits, proof_size
		);

		self.sync_state = sync_state;

		// "globals" for this function
		let attempt_time_per_block = self.config.attempt_time_per_block;
		let mut deadline: i64 = 0;
		// to prevent the wallet from generating a new HD key derivation for each
		// iteration, we keep the returned derivation to provide it back when
		// nothing has changed. We only want to create a key_id for each new block,
		// and reuse it when we rebuild the current block to add new tx.
		let mut head = self.chain.head().unwrap();
		let mut current_hash = head.prev_block_h;
		let mut latest_hash;
		let listen_addr = self
			.config
			.stratum_server_addr
			.clone()
			.unwrap()
			.parse()
			.expect("Stratum: Incorrect address");
		{
			self.current_block_versions.write().push(Block::default());
		}

		let handler = Handler::from_stratum(&self);

		let _listener_th = thread::spawn(move || {
			accept_connections(listen_addr, handler);
		});

		// We have started
		{
			let mut stratum_stats = self.stratum_stats.write();
			stratum_stats.is_running = true;
			stratum_stats.edge_bits = edge_bits as u16;
		}

		warn!(
			"Stratum server started on {}",
			self.config.stratum_server_addr.clone().unwrap()
		);

		// Initial loop: wait for the node to complete syncing
		while self.sync_state.is_syncing() {
			thread::sleep(Duration::from_millis(50));
		}

		// Main Loop
		loop {
			// get the latest chain state
			head = self.chain.head().unwrap();
			latest_hash = head.last_block_h;

			// Build a new block if:
			//    there is a new block on the chain, or
			//    we are rebuilding the current one to include new transactions,
			// and there is at least one worker connected
			if (current_hash != latest_hash || Utc::now().timestamp() >= deadline)
				&& self.workers.count() > 0
			{
				{
					let mut current_block_versions = self.current_block_versions.write();
					let mut wallet_listener_url: Option<String> = None;
					if !self.config.burn_reward {
						wallet_listener_url = Some(self.config.wallet_listener_url.clone());
					}
					// If this is a new block, clear the current_block version history
					if current_hash != latest_hash {
						current_block_versions.clear();
					}
					// Build the new block (version)
					let (new_block, block_fees) = mine_block::get_block(
						&self.chain,
						&self.tx_pool,
						self.verifier_cache.clone(),
						self.current_key_id.read().clone(),
						wallet_listener_url,
					);
					{
						let mut current_difficulty = self.current_difficulty.write();
						*current_difficulty =
							(new_block.header.total_difficulty() - head.total_difficulty).to_num();
					}
					{
						let mut current_key_id = self.current_key_id.write();
						*current_key_id = block_fees.key_id();
					}
					current_hash = latest_hash;
					{
						// set the minimum acceptable share difficulty for this block
						let mut minimum_share_difficulty = self.minimum_share_difficulty.write();
						*minimum_share_difficulty = cmp::min(
							self.config.minimum_share_difficulty,
							*self.current_difficulty.read(),
						);
					}
					// set a new deadline for rebuilding with fresh transactions
					deadline = Utc::now().timestamp() + attempt_time_per_block as i64;

					let mut stratum_stats = self.stratum_stats.write();
					stratum_stats.block_height = new_block.header.height;
					stratum_stats.network_difficulty = *self.current_difficulty.read();

					// Add this new block version to our current block map
					current_block_versions.push(new_block);
				}
				// Send this job to all connected workers
				self.broadcast_job();
			}

			// sleep before restarting loop
			thread::sleep(Duration::from_millis(5));
		} // Main Loop
	} // fn run_loop()
} // StratumServer

// Utility function to parse a JSON RPC parameter object, returning a proper
// error if things go wrong.
fn parse_params<T>(params: Option<Value>) -> Result<T, RpcError>
where
	for<'de> T: serde::Deserialize<'de>,
{
	params
		.and_then(|v| serde_json::from_value(v).ok())
		.ok_or(RpcError::invalid_request())
}
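
// A minimal sketch of how parse_params behaves, added for illustration only;
// the JSON values below are placeholders, not real shares.
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn parse_params_accepts_valid_submit() {
		let params = serde_json::json!({
			"height": 1,
			"job_id": 0,
			"nonce": 0,
			"edge_bits": 29,
			"pow": []
		});
		let parsed: Result<SubmitParams, RpcError> = parse_params(Some(params));
		assert!(parsed.is_ok());
	}

	#[test]
	fn parse_params_rejects_missing_params() {
		// Missing params should map to an "Invalid Request" error.
		let parsed: Result<SubmitParams, RpcError> = parse_params(None);
		assert!(parsed.is_err());
	}
}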