Mirror of https://github.com/mimblewimble/grin.git

commit 2d105deea7 (parent fd08c34474)

    rustfmt

3 changed files with 60 additions and 50 deletions
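The hunks below are pure formatting: rustfmt wraps macro and function calls that no longer fit the line-width limit so each argument sits on its own line, and pulls struct literals up onto the call that receives them. The snippet that follows is a minimal, self-contained sketch of that wrapping pattern only; it uses println! and invented variables rather than grin's debug!/LOGGER, and it is not part of the commit.

```rust
// Sketch of the wrapping style applied throughout this commit
// (illustration only; the variable names are invented for the example).
fn main() {
    let peer_total_difficulty = 100u64;
    let our_total_difficulty = 90u64;

    // Before: the whole call sits on one long line.
    println!("Sync: peer {} vs us {}", peer_total_difficulty, our_total_difficulty);

    // After: once the call exceeds the configured width, the formatter
    // breaks it with one argument per line, as in the debug! calls below.
    println!(
        "Sync: peer {} vs us {}",
        peer_total_difficulty,
        our_total_difficulty
    );
}
```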
@@ -70,15 +70,15 @@ impl NetAdapter for NetToChainAdapter {
 		if let &Err(ref e) = &res {
 			debug!(LOGGER, "Block {} refused by chain: {:?}", bhash, e);
 		}

 		if self.syncing() {
 			match res {
 				Ok(_) => self.syncer.borrow().block_received(bhash),
 				Err(chain::Error::Unfit(_)) => self.syncer.borrow().block_received(bhash),
-				Err(_) => {},
+				Err(_) => {}
 			}
 		}
 	}

 	fn headers_received(&self, bhs: Vec<core::BlockHeader>) {
@@ -104,7 +104,7 @@ impl NetAdapter for NetToChainAdapter {
 					LOGGER,
 					"Store error processing block header {}: in {} {:?}",
 					bh.hash(),
 					explanation,
 					e
 				);
 				return;
@@ -132,7 +132,7 @@ impl Server {
 			Seeding::WebStatic => {
 				seed.connect_and_monitor(evt_handle.clone(), seed::web_seeds(evt_handle.clone()));
 			}
-			_ => {},
+			_ => {}
 		}

 		if config.seeding_type != Seeding::None {
@@ -33,9 +33,9 @@ use util::LOGGER;

 #[derive(Debug)]
 struct BlockDownload {
 	hash: Hash,
 	start_time: Instant,
 	retries: u8,
 }

 /// Manages syncing the local chain with other peers. Needs both a head chain
@@ -93,7 +93,12 @@ impl Syncer {
 		let tip = self.chain.get_header_head()?;
 		// TODO do something better (like trying to get more) if we lose peers
 		let peer = self.p2p.most_work_peer().unwrap();
-		debug!(LOGGER, "Sync: peer {} vs us {}", peer.info.total_difficulty, tip.total_difficulty);
+		debug!(
+			LOGGER,
+			"Sync: peer {} vs us {}",
+			peer.info.total_difficulty,
+			tip.total_difficulty
+		);

 		let more_headers = peer.info.total_difficulty > tip.total_difficulty;
 		let more_bodies = {
@ -159,35 +164,39 @@ impl Syncer {
|
||||||
let mut blocks_to_download = self.blocks_to_download.lock().unwrap();
|
let mut blocks_to_download = self.blocks_to_download.lock().unwrap();
|
||||||
let mut blocks_downloading = self.blocks_downloading.lock().unwrap();
|
let mut blocks_downloading = self.blocks_downloading.lock().unwrap();
|
||||||
|
|
||||||
// retry blocks not downloading
|
// retry blocks not downloading
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
for download in blocks_downloading.deref_mut() {
|
for download in blocks_downloading.deref_mut() {
|
||||||
let elapsed = (now - download.start_time).as_secs();
|
let elapsed = (now - download.start_time).as_secs();
|
||||||
if download.retries >= 8 {
|
if download.retries >= 8 {
|
||||||
panic!("Failed to download required block {}", download.hash);
|
panic!("Failed to download required block {}", download.hash);
|
||||||
}
|
}
|
||||||
if download.retries < (elapsed / 5) as u8 {
|
if download.retries < (elapsed / 5) as u8 {
|
||||||
debug!(LOGGER, "Retry {} on block {}", download.retries, download.hash);
|
debug!(
|
||||||
self.request_block(download.hash);
|
LOGGER,
|
||||||
download.retries += 1;
|
"Retry {} on block {}",
|
||||||
}
|
download.retries,
|
||||||
}
|
download.hash
|
||||||
|
);
|
||||||
|
self.request_block(download.hash);
|
||||||
|
download.retries += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// consume hashes from blocks to download, place them in downloading and
|
// consume hashes from blocks to download, place them in downloading and
|
||||||
// request them from the network
|
// request them from the network
|
||||||
let mut count = 0;
|
let mut count = 0;
|
||||||
while blocks_to_download.len() > 0 && blocks_downloading.len() < MAX_BODY_DOWNLOADS {
|
while blocks_to_download.len() > 0 && blocks_downloading.len() < MAX_BODY_DOWNLOADS {
|
||||||
let h = blocks_to_download.pop().unwrap();
|
let h = blocks_to_download.pop().unwrap();
|
||||||
self.request_block(h);
|
self.request_block(h);
|
||||||
count += 1;
|
count += 1;
|
||||||
blocks_downloading.push(
|
blocks_downloading.push(BlockDownload {
|
||||||
BlockDownload {
|
hash: h,
|
||||||
hash: h,
|
start_time: Instant::now(),
|
||||||
start_time: Instant::now(),
|
retries: 0,
|
||||||
retries: 0
|
});
|
||||||
});
|
}
|
||||||
}
|
debug!(
|
||||||
debug!(
|
|
||||||
LOGGER,
|
LOGGER,
|
||||||
"Requested {} full blocks to download, total left: {}. Current list: {:?}.",
|
"Requested {} full blocks to download, total left: {}. Current list: {:?}.",
|
||||||
count,
|
count,
|
||||||
|
@@ -200,7 +209,9 @@ impl Syncer {
 	pub fn block_received(&self, bh: Hash) {
 		// just clean up the downloading list
 		let mut bds = self.blocks_downloading.lock().unwrap();
-		bds.iter().position(|ref h| h.hash == bh).map(|n| bds.remove(n));
+		bds.iter().position(|ref h| h.hash == bh).map(
+			|n| bds.remove(n),
+		);
 	}

 	/// Request some block headers from a peer to advance us
@@ -257,7 +268,7 @@ impl Syncer {
 			})
 			.collect::<Vec<_>>();
 		heights.append(&mut tail);
 		debug!(LOGGER, "Loc heights: {:?}", heights);

 		// Iteratively travel the header chain back from our head and retain the
 		// headers at the wanted heights.
@@ -275,13 +286,12 @@ impl Syncer {
 		Ok(locator)
 	}

 	/// Pick a random peer and ask for a block by hash
 	fn request_block(&self, h: Hash) {
 		let peer = self.p2p.random_peer().unwrap();
 		let send_result = peer.send_block_request(h);
 		if let Err(e) = send_result {
 			debug!(LOGGER, "Error requesting block: {:?}", e);
 		}
 	}

 }
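Besides line wrapping, the diff also drops the comma after match arms whose body is a block, as in the `Err(_) => {},` and `_ => {},` arms above. Below is a minimal, standalone illustration of that rule; the function and values are invented for the example and are not part of the commit.

```rust
// Standalone illustration of the match-arm comma change seen above.
fn describe(n: Option<u32>) {
    match n {
        // An expression-bodied arm keeps its trailing comma.
        Some(v) => println!("got {}", v),
        // A block-bodied arm needs no trailing comma, so the formatter
        // rewrites `None => {},` as `None => {}`.
        None => {}
    }
}

fn main() {
    describe(Some(3));
    describe(None);
}
```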