// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::cmp;
use std::env;
use std::fs::File;
use std::io::{self, BufWriter};
use std::net::{SocketAddr, TcpStream};
use std::sync::Arc;
use std::time;

use chrono::prelude::Utc;
use conn::{Message, MessageHandler, Response};
use core::core::{self, hash::Hash, CompactBlock};
use core::{global, ser};
use util::{RateCounter, RwLock};

use msg::{
	read_exact, BanReason, GetPeerAddrs, Headers, Locator, PeerAddrs, Ping, Pong, SockAddr,
	TxHashSetArchive, TxHashSetRequest, Type,
};
use types::{Error, NetAdapter};
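
/// Grin's p2p protocol implementation: handles messages received from a
/// connected peer and dispatches them to the rest of the node via the
/// `NetAdapter`.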
pub struct Protocol {
	adapter: Arc<NetAdapter>,
	addr: SocketAddr,
}

impl Protocol {
	pub fn new(adapter: Arc<NetAdapter>, addr: SocketAddr) -> Protocol {
		Protocol { adapter, addr }
	}
}
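
// A minimal construction sketch (`ChainAdapter` here is a hypothetical
// `NetAdapter` implementation, not part of this module):
//
//   let adapter = Arc::new(ChainAdapter::new());
//   let proto = Protocol::new(adapter, peer_addr);
//
// The connection layer then drives the handler, calling `consume` for every
// message read off the wire.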

impl MessageHandler for Protocol {
	fn consume<'a>(
		&self,
		mut msg: Message<'a>,
		received_bytes: Arc<RwLock<RateCounter>>,
	) -> Result<Option<Response<'a>>, Error> {
		let adapter = &self.adapter;

		// If we received a msg from a banned peer then log and drop it.
		// If we are getting a lot of these then maybe we are not cleaning
		// banned peers up correctly?
		if adapter.is_banned(self.addr) {
			debug!(
				"handler: consume: peer {:?} banned, received: {:?}, dropping.",
				self.addr, msg.header.msg_type,
			);
			return Ok(None);
		}

		match msg.header.msg_type {
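			// Peers piggyback their total difficulty and height on every
			// ping/pong, keeping peer state fresh for sync decisions.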
			Type::Ping => {
				let ping: Ping = msg.body()?;
				adapter.peer_difficulty(self.addr, ping.total_difficulty, ping.height);

				Ok(Some(msg.respond(
					Type::Pong,
					Pong {
						total_difficulty: adapter.total_difficulty(),
						height: adapter.total_height(),
					},
				)))
			}

			Type::Pong => {
				let pong: Pong = msg.body()?;
				adapter.peer_difficulty(self.addr, pong.total_difficulty, pong.height);
				Ok(None)
			}

			Type::BanReason => {
				let ban_reason: BanReason = msg.body()?;
				error!("handle_payload: BanReason {:?}", ban_reason);
				Ok(None)
			}

			Type::TransactionKernel => {
				let h: Hash = msg.body()?;
				debug!(
					"handle_payload: received tx kernel: {}, msg_len: {}",
					h, msg.header.msg_len
				);
				adapter.tx_kernel_received(h, self.addr);
				Ok(None)
			}

			Type::GetTransaction => {
				let h: Hash = msg.body()?;
				debug!(
					"handle_payload: GetTransaction: {}, msg_len: {}",
					h, msg.header.msg_len,
				);
				if let Some(tx) = adapter.get_transaction(h) {
					Ok(Some(msg.respond(Type::Transaction, tx)))
				} else {
					Ok(None)
				}
			}

			Type::Transaction => {
				debug!(
					"handle_payload: received tx: msg_len: {}",
					msg.header.msg_len
				);
				let tx: core::Transaction = msg.body()?;
				adapter.transaction_received(tx, false);
				Ok(None)
			}
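
			// Dandelion relay: a stem transaction is forwarded on to a single
			// peer rather than broadcast, until it eventually fluffs into a
			// regular broadcast transaction.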
			Type::StemTransaction => {
				debug!(
					"handle_payload: received stem tx: msg_len: {}",
					msg.header.msg_len
				);
				let tx: core::Transaction = msg.body()?;
				adapter.transaction_received(tx, true);
				Ok(None)
			}

			Type::GetBlock => {
				let h: Hash = msg.body()?;
				trace!(
					"handle_payload: GetBlock: {}, msg_len: {}",
					h,
					msg.header.msg_len,
				);

				if let Some(b) = adapter.get_block(h) {
					return Ok(Some(msg.respond(Type::Block, b)));
				}
				Ok(None)
			}

			Type::Block => {
				debug!(
					"handle_payload: received block: msg_len: {}",
					msg.header.msg_len
				);
				let b: core::Block = msg.body()?;

				adapter.block_received(b, self.addr);
				Ok(None)
			}

			Type::GetCompactBlock => {
				let h: Hash = msg.body()?;
				if let Some(b) = adapter.get_block(h) {
					let cb: CompactBlock = b.into();
					Ok(Some(msg.respond(Type::CompactBlock, cb)))
				} else {
					Ok(None)
				}
			}
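
			// A compact block carries the header plus short tx ids; the adapter
			// attempts to hydrate it back into a full block from the local tx
			// pool, requesting the full block if anything is missing.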
			Type::CompactBlock => {
				debug!(
					"handle_payload: received compact block: msg_len: {}",
					msg.header.msg_len
				);
				let b: core::CompactBlock = msg.body()?;

				adapter.compact_block_received(b, self.addr);
				Ok(None)
			}

			Type::GetHeaders => {
				// load headers from the locator
				let loc: Locator = msg.body()?;
				let headers = adapter.locate_headers(loc.hashes);

				// serialize and send all the headers over
				Ok(Some(msg.respond(Type::Headers, Headers { headers })))
			}

			// "header first" block propagation - if we have not yet seen this block
			// we can go request it from some of our peers
			Type::Header => {
				let header: core::BlockHeader = msg.body()?;

				adapter.header_received(header, self.addr);

				// we do not return a hash here as we never request a single header
				// a header will always arrive unsolicited
				Ok(None)
			}

			Type::Headers => {
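				// A Headers message can be very large, so rather than
				// deserializing it in one go we stream the headers off the
				// socket in fixed-size batches, handing each batch to the
				// adapter as it arrives.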
				let conn = &mut msg.get_conn();

				let header_size: u64 = headers_header_size(conn, msg.header.msg_len)?;
				let mut total_read: u64 = 2;
				let mut reserved: Vec<u8> = vec![];

				while total_read < msg.header.msg_len || !reserved.is_empty() {
					let headers: Headers = headers_streaming_body(
						conn,
						msg.header.msg_len,
						32, // read at most 32 headers per batch
						&mut total_read,
						&mut reserved,
						header_size,
					)?;
					adapter.headers_received(headers.headers, self.addr);
				}
				Ok(None)
			}

			Type::GetPeerAddrs => {
				let get_peers: GetPeerAddrs = msg.body()?;
				let peer_addrs = adapter.find_peer_addrs(get_peers.capabilities);
				Ok(Some(msg.respond(
					Type::PeerAddrs,
					PeerAddrs {
						peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
					},
				)))
			}

			Type::PeerAddrs => {
				let peer_addrs: PeerAddrs = msg.body()?;
				adapter.peer_addrs_received(peer_addrs.peers.iter().map(|pa| pa.0).collect());
				Ok(None)
			}
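
			// A peer is asking for the txhashset archive (zipped chain state at
			// a given block) so it can fast-sync. We respond with the archive
			// metadata, attaching the zip file itself to the response.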
			Type::TxHashSetRequest => {
				let sm_req: TxHashSetRequest = msg.body()?;
				debug!(
					"handle_payload: txhashset req for {} at {}",
					sm_req.hash, sm_req.height
				);

				let txhashset = self.adapter.txhashset_read(sm_req.hash);

				if let Some(txhashset) = txhashset {
					let file_sz = txhashset.reader.metadata()?.len();
					let mut resp = msg.respond(
						Type::TxHashSetArchive,
						&TxHashSetArchive {
							height: sm_req.height as u64,
							hash: sm_req.hash,
							bytes: file_sz,
						},
					);
					resp.add_attachment(txhashset.reader);
					Ok(Some(resp))
				} else {
					Ok(None)
				}
			}

			Type::TxHashSetArchive => {
				let sm_arch: TxHashSetArchive = msg.body()?;
				debug!(
					"handle_payload: txhashset archive for {} at {}. size={}",
					sm_arch.hash, sm_arch.height, sm_arch.bytes,
				);
				if !self.adapter.txhashset_receive_ready() {
					error!(
						"handle_payload: txhashset archive received but SyncStatus not on TxHashsetDownload",
					);
					return Err(Error::BadMessage);
				}

				let download_start_time = Utc::now();
				self.adapter
					.txhashset_download_update(download_start_time, 0, sm_arch.bytes);

				let mut tmp = env::temp_dir();
				tmp.push("txhashset.zip");
				let mut save_txhashset_to_file = |file| -> Result<(), Error> {
					let mut tmp_zip = BufWriter::new(File::create(file)?);
					let total_size = sm_arch.bytes as usize;
					let mut downloaded_size: usize = 0;
					let mut request_size = cmp::min(48_000, total_size);
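					// Stream the attachment to disk in chunks of at most 48_000
					// bytes, updating download progress and the received-bytes
					// rate counter after each chunk.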
					while request_size > 0 {
						let size = msg.copy_attachment(request_size, &mut tmp_zip)?;
						downloaded_size += size;
						request_size = cmp::min(48_000, total_size - downloaded_size);
						self.adapter.txhashset_download_update(
							download_start_time,
							downloaded_size as u64,
							total_size as u64,
						);

						// Increase received bytes counter
						{
							let mut received_bytes = received_bytes.write();
							received_bytes.inc(size as u64);
						}
					}
					tmp_zip.into_inner().unwrap().sync_all()?;
					Ok(())
				};

				if let Err(e) = save_txhashset_to_file(tmp.clone()) {
					error!(
						"handle_payload: txhashset archive save to file failed. err={:?}",
						e
					);
					return Err(e);
				}

				trace!(
					"handle_payload: txhashset archive save to file {:?} success",
					tmp,
				);

				let tmp_zip = File::open(tmp)?;
				let res = self
					.adapter
					.txhashset_write(sm_arch.hash, tmp_zip, self.addr);

				debug!(
					"handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}",
					sm_arch.hash, sm_arch.height, res
				);

				Ok(None)
			}

			_ => {
				debug!("unknown message type {:?}", msg.header.msg_type);
				Ok(None)
			}
		}
	}
}

/// Read the `Headers` Vec size from the underlying connection and calculate
/// the maximum possible serialized size of a single header.
fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error> {
	let mut size = vec![0u8; 2];
	// read size of Vec<BlockHeader>
	read_exact(conn, &mut size, time::Duration::from_millis(10), true)?;
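
	// The two-byte prefix is a big-endian u16 count of the headers that
	// follow, e.g. bytes [0x01, 0x2c] mean 256 + 44 = 300 headers.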
	let total_headers = size[0] as u64 * 256 + size[1] as u64;
	if total_headers == 0 || total_headers > 10_000 {
		return Err(Error::Connection(io::Error::new(
			io::ErrorKind::InvalidData,
			"headers_header_size",
		)));
	}
	let average_header_size = (msg_len - 2) / total_headers;

	// support sizes of Cuck(at)oo from Cuck(at)oo 29 to Cuck(at)oo 35, with
	// version 2 having slightly larger headers
	let min_size = core::serialized_size_of_header(1, global::min_edge_bits());
	let max_size = min_size + 6;
	if average_header_size < min_size as u64 || average_header_size > max_size as u64 {
		debug!(
			"headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}",
			total_headers, average_header_size, min_size, max_size,
		);
		return Err(Error::Connection(io::Error::new(
			io::ErrorKind::InvalidData,
			"headers_header_size",
		)));
	}
	Ok(max_size as u64)
}

/// Read the Headers streaming body from the underlying connection
fn headers_streaming_body(
	conn: &mut TcpStream,   // (i) underlying connection
	msg_len: u64,           // (i) length of the whole 'Headers' message
	headers_num: u64,       // (i) how many BlockHeader(s) to read in this batch
	total_read: &mut u64,   // (i/o) how many bytes of this 'Headers' message were already read
	reserved: &mut Vec<u8>, // (i/o) reserved part of the previous read, which is not a whole header
	max_header_size: u64,   // (i) maximum possible size of a single BlockHeader
) -> Result<Headers, Error> {
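	// We read a batch of raw bytes off the wire and re-frame it as a smaller,
	// self-contained 'Headers' message (2-byte count + header bytes) so the
	// standard deserializer can be reused batch by batch.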
	if headers_num == 0 || msg_len < *total_read || *total_read < 2 {
		return Err(Error::Connection(io::Error::new(
			io::ErrorKind::InvalidInput,
			"headers_streaming_body",
		)));
	}

	// Note:
	// As we now allow Cuckoo sizes greater than 30, the proof of work part of
	// the header could be 30*42 bits, 31*42 bits, 32*42 bits, etc.
	// So, for compatibility with the variable block header size, we read the
	// max possible size, for up to Cuckoo 36.
	let mut read_size = headers_num * max_header_size - reserved.len() as u64;
	if *total_read + read_size > msg_len {
		read_size = msg_len - *total_read;
	}

	// 1st part
	let mut body = vec![0u8; 2]; // for Vec<> size
	let mut final_headers_num = (read_size + reserved.len() as u64) / max_header_size;
	let remaining = msg_len - *total_read - read_size;
	if final_headers_num == 0 && remaining == 0 {
		final_headers_num = 1;
	}
	body[0] = (final_headers_num >> 8) as u8;
	body[1] = (final_headers_num & 0x00ff) as u8;

	// 2nd part
	body.append(reserved);

	// 3rd part
	let mut read_body = vec![0u8; read_size as usize];
	if read_size > 0 {
		read_exact(conn, &mut read_body, time::Duration::from_secs(20), true)?;
		*total_read += read_size;
	}
	body.append(&mut read_body);

	// deserialize these assembled 3 parts
	let result: Result<Headers, Error> = ser::deserialize(&mut &body[..]).map_err(From::from);
	let headers = result?;

	// whatever was not consumed by deserialization is carried over into the
	// next batch as 'reserved'
	let mut deserialized_size = 2; // for the Vec<> size prefix
	for header in &headers.headers {
		deserialized_size += header.serialized_size();
	}
	*reserved = body[deserialized_size..].to_vec();

	Ok(headers)
}