Mirror of https://github.com/mimblewimble/grin.git, synced 2025-02-01 08:51:08 +03:00
[PIBD_IMPL] BitmapAccumulator Serialization Fix (#3705)
* fix for writing / calculating incorrect length for negative indices
* update capabilities with new version of PIBD hist
* remove incorrect comment
* fix capabilities flag, trace output
* test fix
This commit is contained in:
parent
aa2a2a98df
commit
5efd70a17b
5 changed files with 40 additions and 19 deletions
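The crux of the fix is how `BitmapBlock` serialization counts negative (unset) bits. A block bitmap always has `NBITS` slots, but the last block of an accumulator may only cover `length` bits; deriving `count_neg` from `NBITS` instead of `length` inflates the negative count for a partially filled block, which skews both the choice of serialization mode and the length written for the negative-index encoding. Below is a minimal sketch of the two calculations; the constants are assumptions standing in for `BitmapChunk::LEN_BITS` and `BitmapBlock::NBITS`, whose real values live in `bitmap_accumulator.rs`.

```rust
// Sketch only: constants are illustrative assumptions, not grin's values.
const LEN_BITS: usize = 1_024; // assumed bits per chunk
const NBITS: u32 = 64 * LEN_BITS as u32; // assumed bits per full block

// Old behaviour: negative count derived from the full block size.
fn count_neg_old(count_pos: u32) -> u32 {
	NBITS - count_pos
}

// Fixed behaviour: negative count derived from the actual block length.
fn count_neg_fixed(length: usize, count_pos: u32) -> u32 {
	length as u32 - count_pos
}

fn main() {
	// A block covering only 61 chunks, as in the new *_unfull tests below.
	let length = 61 * LEN_BITS;
	let count_pos = 100;
	assert_eq!(count_neg_old(count_pos), 65_436); // counts bits outside the block
	assert_eq!(count_neg_fixed(length, count_pos), 62_364); // counts only in-range bits
}
```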
```diff
@@ -418,7 +418,11 @@ impl Writeable for BitmapBlock {
 		writer.write_u8((length / BitmapChunk::LEN_BITS) as u8)?;

 		let count_pos = self.inner.iter().filter(|&v| v).count() as u32;
-		let count_neg = Self::NBITS - count_pos;
+
+		// Negative count needs to be adjusted if the block is not full,
+		// which affects the choice of serialization mode and size written
+		let count_neg = length as u32 - count_pos;

 		let threshold = Self::NBITS / 16;
 		if count_pos < threshold {
 			// Write positive indices
```
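For context, the mode selection around this hunk depends directly on these counts, so an inflated `count_neg` changes which branch is taken and how many index entries get written. The following is a rough, hedged reconstruction of that decision: only the `count_pos` branch is visible above, the rest is a guess consistent with the three encodings exercised by the tests further down, and the real `Writeable` impl in `bitmap_accumulator.rs` differs in detail.

```rust
// Assumed three-way mode choice, mirroring the encoding codes in the tests.
enum Mode {
	RawBitmap,       // encoding 0
	PositiveIndices, // encoding 1
	NegativeIndices, // encoding 2
}

fn choose_mode(count_pos: u32, count_neg: u32, nbits: u32) -> Mode {
	let threshold = nbits / 16;
	if count_pos < threshold {
		Mode::PositiveIndices
	} else if count_neg < threshold {
		Mode::NegativeIndices
	} else {
		Mode::RawBitmap
	}
}
```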
```diff
@@ -518,17 +522,19 @@ mod tests {
 	use rand::thread_rng;
 	use std::io::Cursor;

-	fn test_roundtrip(entries: usize, inverse: bool, encoding: u8, length: usize) {
+	fn test_roundtrip(entries: usize, inverse: bool, encoding: u8, length: usize, n_blocks: usize) {
 		let mut rng = thread_rng();
-		let mut block = BitmapBlock::new(64);
+		let mut block = BitmapBlock::new(n_blocks);
 		if inverse {
 			block.inner.negate();
 		}

+		let range_size = n_blocks * BitmapChunk::LEN_BITS as usize;
+
 		// Flip `entries` bits in random spots
 		let mut count = 0;
 		while count < entries {
-			let idx = rng.gen_range(0, BitmapBlock::NBITS as usize);
+			let idx = rng.gen_range(0, range_size);
 			if block.inner.get(idx).unwrap() == inverse {
 				count += 1;
 				block.inner.set(idx, !inverse);
```
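The helper now takes the number of chunks so partially filled blocks can be exercised, and random bit flips are confined to `n_blocks * LEN_BITS`, i.e. the region such a block actually serializes. The `length` argument encodes the expected on-wire size; the small helper below captures those expectations, with the sizes taken straight from the test calls in the next hunk and `nbits` assumed as in the earlier sketch.

```rust
// Expected serialized sizes asserted by the roundtrip tests below.
fn expected_len(encoding: u8, entries: usize, nbits: usize) -> usize {
	match encoding {
		0 => 2 + nbits / 8,       // raw bitmap
		1 | 2 => 4 + 2 * entries, // index encodings: 2 bytes per entry
		_ => panic!("only encodings 0, 1 and 2 are exercised"),
	}
}
```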
```diff
@@ -562,21 +568,35 @@ mod tests {
 	fn block_ser_roundtrip() {
 		let threshold = BitmapBlock::NBITS as usize / 16;
 		let entries = thread_rng().gen_range(threshold, 4 * threshold);
-		test_roundtrip(entries, false, 0, 2 + BitmapBlock::NBITS as usize / 8);
-		test_roundtrip(entries, true, 0, 2 + BitmapBlock::NBITS as usize / 8);
+		test_roundtrip(entries, false, 0, 2 + BitmapBlock::NBITS as usize / 8, 64);
+		test_roundtrip(entries, true, 0, 2 + BitmapBlock::NBITS as usize / 8, 64);
 	}

 	#[test]
 	fn sparse_block_ser_roundtrip() {
 		let entries =
 			thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16);
-		test_roundtrip(entries, false, 1, 4 + 2 * entries);
+		test_roundtrip(entries, false, 1, 4 + 2 * entries, 64);
 	}

+	#[test]
+	fn sparse_unfull_block_ser_roundtrip() {
+		let entries =
+			thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16);
+		test_roundtrip(entries, false, 1, 4 + 2 * entries, 61);
+	}
+
 	#[test]
 	fn abdundant_block_ser_roundtrip() {
 		let entries =
 			thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16);
-		test_roundtrip(entries, true, 2, 4 + 2 * entries);
+		test_roundtrip(entries, true, 2, 4 + 2 * entries, 64);
 	}
+
+	#[test]
+	fn abdundant_unfull_block_ser_roundtrip() {
+		let entries =
+			thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16);
+		test_roundtrip(entries, true, 2, 4 + 2 * entries, 61);
+	}
 }
```
```diff
@@ -391,6 +391,8 @@ bitflags! {
 		const PIBD_HIST = 0b0001_0000;
 		/// Can provide historical blocks for archival sync.
 		const BLOCK_HIST = 0b0010_0000;
+		/// As above, with crucial serialization fix #3705 applied
+		const PIBD_HIST_1 = 0b0100_0000;
 	}
 }
```
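`PIBD_HIST_1` takes the next free capability bit so peers that serialize bitmap segments with the #3705 fix can be distinguished from peers that only advertise the original `PIBD_HIST`. Below is a small self-contained sketch of how such a flag composes and is queried, using a bitflags 1.x-style declaration with the same bit values as the diff; only the flags relevant here are shown, the real `Capabilities` type defines more.

```rust
use bitflags::bitflags;

bitflags! {
	// Subset of the capability bits touched by this change.
	struct Caps: u32 {
		const PIBD_HIST   = 0b0001_0000;
		const BLOCK_HIST  = 0b0010_0000;
		const PIBD_HIST_1 = 0b0100_0000;
	}
}

fn main() {
	let peer = Caps::PIBD_HIST | Caps::PIBD_HIST_1;
	// A syncing node can now insist on the fixed serialization.
	assert!(peer.contains(Caps::PIBD_HIST_1));
	assert!(!peer.contains(Caps::BLOCK_HIST));
}
```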
```diff
@@ -402,6 +404,7 @@ impl Default for Capabilities {
 			| Capabilities::PEER_LIST
 			| Capabilities::TX_KERNEL_HASH
 			| Capabilities::PIBD_HIST
+			| Capabilities::PIBD_HIST_1
 	}
 }
```
```diff
@@ -42,6 +42,7 @@ fn default_capabilities() {
 	assert!(x.contains(Capabilities::PEER_LIST));
 	assert!(x.contains(Capabilities::TX_KERNEL_HASH));
 	assert!(x.contains(Capabilities::PIBD_HIST));
+	assert!(x.contains(Capabilities::PIBD_HIST_1));

 	assert_eq!(
 		x,
```
```diff
@@ -50,5 +51,6 @@ fn default_capabilities() {
 			| Capabilities::PEER_LIST
 			| Capabilities::TX_KERNEL_HASH
 			| Capabilities::PIBD_HIST
+			| Capabilities::PIBD_HIST_1
 	);
 }
```
```diff
@@ -55,11 +55,7 @@ fn test_capabilities() {

-	assert_eq!(
-		expected,
-		p2p::types::Capabilities::from_bits_truncate(0b11111 as u32),
-	);
 	assert_eq!(
 		expected,
-		p2p::types::Capabilities::from_bits_truncate(0b00011111 as u32),
+		p2p::types::Capabilities::from_bits_truncate(0b1011111 as u32),
 	);

 	assert_eq!(
```
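The updated literal in `test_capabilities` is just the previous default mask with the new bit folded in, which is easy to check by hand:

```rust
fn main() {
	let old_default: u32 = 0b11111; // previous default capability bits
	let pibd_hist_1: u32 = 0b0100_0000; // the new flag from the diff
	assert_eq!(old_default | pibd_hist_1, 0b1011111);
}
```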
```diff
@@ -305,10 +305,10 @@ impl StateSync {
 		let max_diff = peers_iter().max_difficulty().unwrap_or(Difficulty::zero());
 		let peers_iter_max = || peers_iter().with_difficulty(|x| x >= max_diff);

-		// Then, further filter by PIBD capabilities
+		// Then, further filter by PIBD capabilities v1
 		let peers_iter_pibd = || {
 			peers_iter_max()
-				.with_capabilities(Capabilities::PIBD_HIST)
+				.with_capabilities(Capabilities::PIBD_HIST_1)
 				.connected()
 		};

```
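On the sync side, only peers advertising `PIBD_HIST_1` are considered for PIBD requests, so a node never asks a pre-fix peer for segments in the broken encoding. Here is a standalone sketch of that filtering idea; the real code uses grin's peer iterator shown above, and the types here are simplified stand-ins.

```rust
#[derive(Clone, Copy)]
struct Caps(u32);

impl Caps {
	const PIBD_HIST_1: u32 = 0b0100_0000;

	fn supports_pibd_v1(self) -> bool {
		self.0 & Self::PIBD_HIST_1 != 0
	}
}

struct Peer {
	caps: Caps,
	connected: bool,
}

// Keep only connected peers that advertise the fixed PIBD serialization.
fn pibd_candidates(peers: &[Peer]) -> Vec<&Peer> {
	peers
		.iter()
		.filter(|p| p.connected && p.caps.supports_pibd_v1())
		.collect()
}
```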
```diff
@@ -335,10 +335,10 @@ impl StateSync {
 		}

 		// Choose a random "most work" peer, preferring outbound if at all possible.
-		let peer = peers_iter_pibd().outbound().choose_random().or_else(|| {
-			warn!("no suitable outbound peer for pibd message, considering inbound");
-			peers_iter_pibd().inbound().choose_random()
-		});
+		let peer = peers_iter_pibd()
+			.outbound()
+			.choose_random()
+			.or_else(|| peers_iter_pibd().inbound().choose_random());
 		trace!("Chosen peer is {:?}", peer);

 		if let Some(p) = peer {
```
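The restructured selection keeps the "prefer outbound, fall back to inbound" behaviour but drops the warn! in favour of tracing the final choice. A generic sketch of the same pattern using rand's IteratorRandom follows; grin's own choose_random lives in its peers API and is not reproduced here.

```rust
use rand::seq::IteratorRandom;
use rand::thread_rng;

// Pick a random element from `outbound`, falling back to `inbound`.
fn choose_peer<T: Copy>(outbound: &[T], inbound: &[T]) -> Option<T> {
	let mut rng = thread_rng();
	outbound
		.iter()
		.copied()
		.choose(&mut rng)
		.or_else(|| inbound.iter().copied().choose(&mut rng))
}

fn main() {
	let outbound = [1, 2, 3];
	let inbound = [10, 11];
	// Outbound candidates win whenever any exist.
	assert!(outbound.contains(&choose_peer(&outbound, &inbound).unwrap()));
}
```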