declone and close the door (#12035)

* declone and close the door

* cargo fmt

* remove brackets
This commit is contained in:
Squirrel
2022-08-15 20:38:36 +01:00
committed by GitHub
parent 9c2a2495fe
commit a68a80fbae
72 changed files with 344 additions and 512 deletions
+27 -27
View File
@@ -290,51 +290,51 @@ mod test {
let peer2 = PeerId::random();
let blocks = generate_blocks(150);
assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41));
assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81));
assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121));
assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(1..41));
assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(41..81));
assert_eq!(bc.needed_blocks(peer2, 40, 150, 0, 1, 200), Some(81..121));
bc.clear_peer_download(&peer1);
bc.insert(41, blocks[41..81].to_vec(), peer1.clone());
bc.insert(41, blocks[41..81].to_vec(), peer1);
assert_eq!(bc.ready_blocks(1), vec![]);
assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151));
assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(121..151));
bc.clear_peer_download(&peer0);
bc.insert(1, blocks[1..11].to_vec(), peer0.clone());
bc.insert(1, blocks[1..11].to_vec(), peer0);
assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41));
assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(11..41));
assert_eq!(
bc.ready_blocks(1),
blocks[1..11]
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer0) })
.collect::<Vec<_>>()
);
bc.clear_peer_download(&peer0);
bc.insert(11, blocks[11..41].to_vec(), peer0.clone());
bc.insert(11, blocks[11..41].to_vec(), peer0);
let ready = bc.ready_blocks(12);
assert_eq!(
ready[..30],
blocks[11..41]
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer0) })
.collect::<Vec<_>>()[..]
);
assert_eq!(
ready[30..],
blocks[41..81]
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer1) })
.collect::<Vec<_>>()[..]
);
bc.clear_peer_download(&peer2);
assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121));
assert_eq!(bc.needed_blocks(peer2, 40, 150, 80, 1, 200), Some(81..121));
bc.clear_peer_download(&peer2);
bc.insert(81, blocks[81..121].to_vec(), peer2.clone());
bc.insert(81, blocks[81..121].to_vec(), peer2);
bc.clear_peer_download(&peer1);
bc.insert(121, blocks[121..150].to_vec(), peer1.clone());
bc.insert(121, blocks[121..150].to_vec(), peer1);
assert_eq!(bc.ready_blocks(80), vec![]);
let ready = bc.ready_blocks(81);
@@ -342,14 +342,14 @@ mod test {
ready[..40],
blocks[81..121]
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer2) })
.collect::<Vec<_>>()[..]
);
assert_eq!(
ready[40..],
blocks[121..150]
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer1) })
.collect::<Vec<_>>()[..]
);
}
@@ -365,10 +365,10 @@ mod test {
bc.blocks.insert(114305, BlockRangeState::Complete(blocks));
let peer0 = PeerId::random();
assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100));
assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead
assert_eq!(bc.needed_blocks(peer0, 128, 10000, 000, 1, 200), Some(1..100));
assert_eq!(bc.needed_blocks(peer0, 128, 10000, 600, 1, 200), None); // too far ahead
assert_eq!(
bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000),
bc.needed_blocks(peer0, 128, 10000, 600, 1, 200000),
Some(100 + 128..100 + 128 + 128)
);
}
@@ -382,11 +382,11 @@ mod test {
let blocks = generate_blocks(10);
// count = 5, peer_best = 50, common = 39, max_parallel = 0, max_ahead = 200
assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45));
assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45));
// got a response on the request for `40..45`
bc.clear_peer_download(&peer);
bc.insert(40, blocks[..5].to_vec(), peer.clone());
bc.insert(40, blocks[..5].to_vec(), peer);
// our "node" started on a fork, with its current best = 47, which is > common
let ready = bc.ready_blocks(48);
@@ -394,11 +394,11 @@ mod test {
ready,
blocks[..5]
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer) })
.collect::<Vec<_>>()
);
assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50));
assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50));
}
#[test]
@@ -410,12 +410,12 @@ mod test {
let blocks = generate_blocks(10);
// Request 2 ranges
assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45));
assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50));
assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45));
assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50));
// got a response on the request for `40..50`
bc.clear_peer_download(&peer);
bc.insert(40, blocks.to_vec(), peer.clone());
bc.insert(40, blocks.to_vec(), peer);
// request any blocks starting from 1000 or lower.
let ready = bc.ready_blocks(1000);
@@ -423,7 +423,7 @@ mod test {
ready,
blocks
.iter()
.map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) })
.map(|b| BlockData { block: b.clone(), origin: Some(peer) })
.collect::<Vec<_>>()
);
@@ -446,16 +446,12 @@ mod tests {
PeerSyncState::DownloadingJustification(r.0);
}
let active = requests
.active_requests
.iter()
.map(|(p, &r)| (p.clone(), r))
.collect::<Vec<_>>();
let active = requests.active_requests.iter().map(|(&p, &r)| (p, r)).collect::<Vec<_>>();
for (peer, req) in &active {
assert!(requests.failed_requests.get(req).is_none());
assert!(!requests.pending_requests.contains(req));
assert!(requests.on_response::<()>(peer.clone(), None).is_none());
assert!(requests.on_response::<()>(*peer, None).is_none());
assert!(requests.pending_requests.contains(req));
assert_eq!(
1,
+20 -20
View File
@@ -2614,15 +2614,15 @@ mod test {
let (b1_hash, b1_number) = new_blocks(50);
// add 2 peers at blocks that we don't have locally
sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap();
sync.new_peer(peer_id2.clone(), Hash::random(), 10).unwrap();
sync.new_peer(peer_id1, Hash::random(), 42).unwrap();
sync.new_peer(peer_id2, Hash::random(), 10).unwrap();
	// we will send block requests to these peers
// for these blocks we don't know about
assert!(sync.block_requests().all(|(p, _)| { *p == peer_id1 || *p == peer_id2 }));
// add a new peer at a known block
sync.new_peer(peer_id3.clone(), b1_hash, b1_number).unwrap();
sync.new_peer(peer_id3, b1_hash, b1_number).unwrap();
// we request a justification for a block we have locally
sync.request_justification(&b1_hash, b1_number);
@@ -2673,7 +2673,7 @@ mod test {
data: Some(Vec::new()),
};
sync.push_block_announce_validation(peer_id.clone(), header.hash(), block_annnounce, true);
sync.push_block_announce_validation(*peer_id, header.hash(), block_annnounce, true);
	// Poll until we have processed the block announcement
block_on(poll_fn(|cx| loop {
@@ -2790,8 +2790,8 @@ mod test {
let block3_fork = build_block_at(block2.hash(), false);
// Add two peers which are on block 1.
sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap();
sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap();
sync.new_peer(peer_id1, block1.hash(), 1).unwrap();
sync.new_peer(peer_id2, block1.hash(), 1).unwrap();
// Tell sync that our best block is 3.
sync.update_chain_info(&block3.hash(), 3);
@@ -2885,9 +2885,9 @@ mod test {
let best_block = blocks.last().unwrap().clone();
// Connect the node we will sync from
sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number())
sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number())
.unwrap();
sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap();
sync.new_peer(peer_id2, info.best_hash, 0).unwrap();
let mut best_block_num = 0;
while best_block_num < MAX_DOWNLOAD_AHEAD {
@@ -2922,9 +2922,9 @@ mod test {
.map(|b| {
(
Ok(BlockImportStatus::ImportedUnknown(
b.header().number().clone(),
*b.header().number(),
Default::default(),
Some(peer_id1.clone()),
Some(peer_id1),
)),
b.hash(),
)
@@ -3034,7 +3034,7 @@ mod test {
let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
// Connect the node we will sync from
sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number())
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number())
.unwrap();
send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync);
@@ -3059,7 +3059,7 @@ mod test {
}
// Now request and import the fork.
let mut best_block_num = finalized_block.header().number().clone() as u32;
let mut best_block_num = *finalized_block.header().number() as u32;
while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 {
let request = get_block_request(
&mut sync,
@@ -3092,9 +3092,9 @@ mod test {
.map(|b| {
(
Ok(BlockImportStatus::ImportedUnknown(
b.header().number().clone(),
*b.header().number(),
Default::default(),
Some(peer_id1.clone()),
Some(peer_id1),
)),
b.hash(),
)
@@ -3165,7 +3165,7 @@ mod test {
let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
// Connect the node we will sync from
sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number())
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number())
.unwrap();
send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync);
@@ -3190,7 +3190,7 @@ mod test {
}
// Now request and import the fork.
let mut best_block_num = finalized_block.header().number().clone() as u32;
let mut best_block_num = *finalized_block.header().number() as u32;
let mut request = get_block_request(
&mut sync,
FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64),
@@ -3231,9 +3231,9 @@ mod test {
.map(|b| {
(
Ok(BlockImportStatus::ImportedUnknown(
b.header().number().clone(),
*b.header().number(),
Default::default(),
Some(peer_id1.clone()),
Some(peer_id1),
)),
b.hash(),
)
@@ -3288,7 +3288,7 @@ mod test {
let peer_id1 = PeerId::random();
let common_block = blocks[1].clone();
// Connect the node we will sync from
sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number())
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number())
.unwrap();
// Create a "new" header and announce it
@@ -3320,7 +3320,7 @@ mod test {
let peer_id1 = PeerId::random();
let best_block = blocks[3].clone();
sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number())
sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number())
.unwrap();
sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available;