Fast/warp sync fixes (#10562)

* Fast sync fixes

* Fix gap blocks validation

* Updated test

* Formatting

* Networking test
This commit is contained in:
Arkadiy Paronyan
2022-01-13 16:26:06 +01:00
committed by GitHub
parent 3a88215fca
commit 4d477ea9b4
7 changed files with 126 additions and 32 deletions
+5 -1
View File
@@ -347,7 +347,11 @@ impl Config {
{
trace!(target: "babe", "Getting slot duration");
let best_block_id = BlockId::Hash(client.usage_info().chain.best_hash);
let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash);
if client.usage_info().chain.finalized_state.is_none() {
debug!(target: "babe", "No finalized state is available. Reading config from genesis");
best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash);
}
let runtime_api = client.runtime_api();
let version = runtime_api.api_version::<dyn BabeApi<B>>(&best_block_id)?;
+39 -20
View File
@@ -477,7 +477,6 @@ impl<Block: BlockT> BlockchainDb<Block> {
let mut meta = self.meta.write();
if number.is_zero() {
meta.genesis_hash = hash;
meta.finalized_hash = hash;
}
if is_best {
@@ -1347,11 +1346,6 @@ impl<Block: BlockT> Backend<Block> {
}
if number.is_zero() {
transaction.set_from_vec(
columns::META,
meta_keys::FINALIZED_BLOCK,
lookup_key.clone(),
);
transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
if operation.commit_state {
@@ -1447,14 +1441,15 @@ impl<Block: BlockT> Backend<Block> {
let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
finalized
} else {
number.is_zero() || pending_block.leaf_state.is_final()
(number.is_zero() && last_finalized_num.is_zero()) ||
pending_block.leaf_state.is_final()
};
let header = &pending_block.header;
let is_best = pending_block.leaf_state.is_best();
debug!(target: "db",
"DB Commit {:?} ({}), best={}, state={}, existing={}",
hash, number, is_best, operation.commit_state, existing_header,
"DB Commit {:?} ({}), best={}, state={}, existing={}, finalized={}",
hash, number, is_best, operation.commit_state, existing_header, finalized,
);
self.state_usage.merge_sm(operation.old_state.usage_info());
@@ -2295,6 +2290,7 @@ pub(crate) mod tests {
extrinsics_root: H256,
) -> H256 {
insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None)
.unwrap()
}
pub fn insert_block(
@@ -2305,7 +2301,7 @@ pub(crate) mod tests {
extrinsics_root: H256,
body: Vec<ExtrinsicWrapper<u64>>,
transaction_index: Option<Vec<IndexOperation>>,
) -> H256 {
) -> Result<H256, sp_blockchain::Error> {
use sp_runtime::testing::Digest;
let digest = Digest::default();
@@ -2329,9 +2325,9 @@ pub(crate) mod tests {
if let Some(index) = transaction_index {
op.update_transaction_index(index).unwrap();
}
backend.commit_operation(op).unwrap();
backend.commit_operation(op)?;
header_hash
Ok(header_hash)
}
#[test]
@@ -3019,7 +3015,6 @@ pub(crate) mod tests {
{
let header = backend.blockchain().header(BlockId::Hash(hash1)).unwrap().unwrap();
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Hash(hash0)).unwrap();
op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap();
backend.commit_operation(op).unwrap();
}
@@ -3063,7 +3058,8 @@ pub(crate) mod tests {
Default::default(),
vec![i.into()],
None,
);
)
.unwrap();
blocks.push(hash);
prev_hash = hash;
}
@@ -3100,7 +3096,8 @@ pub(crate) mod tests {
Default::default(),
vec![i.into()],
None,
);
)
.unwrap();
blocks.push(hash);
prev_hash = hash;
}
@@ -3114,7 +3111,8 @@ pub(crate) mod tests {
sp_core::H256::random(),
vec![2.into()],
None,
);
)
.unwrap();
insert_block(
&backend,
3,
@@ -3123,7 +3121,8 @@ pub(crate) mod tests {
H256::random(),
vec![3.into(), 11.into()],
None,
);
)
.unwrap();
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap();
op.mark_head(BlockId::Hash(blocks[4])).unwrap();
@@ -3172,7 +3171,8 @@ pub(crate) mod tests {
Default::default(),
vec![i.into()],
Some(index),
);
)
.unwrap();
blocks.push(hash);
prev_hash = hash;
}
@@ -3206,7 +3206,8 @@ pub(crate) mod tests {
Default::default(),
vec![i.into()],
None,
);
)
.unwrap();
blocks.push(hash);
prev_hash = hash;
}
@@ -3220,7 +3221,8 @@ pub(crate) mod tests {
sp_core::H256::random(),
vec![42.into()],
None,
);
)
.unwrap();
assert!(backend.remove_leaf_block(&best_hash).is_err());
assert!(backend.have_state_at(&prev_hash, 1));
backend.remove_leaf_block(&prev_hash).unwrap();
@@ -3290,4 +3292,21 @@ pub(crate) mod tests {
assert_eq!(backend.blockchain().info().finalized_hash, block1);
}
#[test]
// Regression test for fast/warp sync: re-importing a block at the same
// number/parent (i.e. a state that already exists in the state DB) must be
// rejected with `Error::StateDatabase("Block already exists")` instead of
// silently overwriting the existing entry. Relies on the matching
// `BlockAlreadyExists` check added to `NonCanonicalOverlay::insert`.
fn test_import_existing_state_fails() {
let backend: Backend<Block> = Backend::new_test(10, 10);
// Genesis + one child block import succeed normally.
let genesis =
insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
.unwrap();
insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap();
// Second import of the same (number, parent) pair must fail.
let err = insert_block(&backend, 1, genesis, None, Default::default(), vec![], None)
.err()
.unwrap();
match err {
// The exact message comes from `Display` of `Error::BlockAlreadyExists`
// wrapped into `sp_blockchain::Error::StateDatabase`.
sp_blockchain::Error::StateDatabase(m) if m == "Block already exists" => (),
e @ _ => panic!("Unexpected error {:?}", e),
}
}
}
+50 -6
View File
@@ -1095,15 +1095,17 @@ impl<B: BlockT> ChainSync<B> {
}
self.drain_blocks()
},
PeerSyncState::DownloadingGap(start_block) => {
let start_block = *start_block;
PeerSyncState::DownloadingGap(_) => {
peer.state = PeerSyncState::Available;
if let Some(gap_sync) = &mut self.gap_sync {
gap_sync.blocks.clear_peer_download(who);
validate_blocks::<B>(&blocks, who, Some(request))?;
gap_sync.blocks.insert(start_block, blocks, who.clone());
if let Some(start_block) =
validate_blocks::<B>(&blocks, who, Some(request))?
{
gap_sync.blocks.insert(start_block, blocks, who.clone());
}
gap = true;
gap_sync
let blocks: Vec<_> = gap_sync
.blocks
.drain(gap_sync.best_queued_number + One::one())
.into_iter()
@@ -1126,7 +1128,9 @@ impl<B: BlockT> ChainSync<B> {
state: None,
}
})
.collect()
.collect();
debug!(target: "sync", "Drained {} gap blocks from {}", blocks.len(), gap_sync.best_queued_number);
blocks
} else {
debug!(target: "sync", "Unexpected gap block response from {}", who);
return Err(BadPeer(who.clone(), rep::NO_BLOCK))
@@ -3169,4 +3173,44 @@ mod test {
sync.peer_disconnected(&peer_id1);
assert!(sync.fork_targets.len() == 0);
}
#[test]
// Sync regression test: a peer may answer a block request with only a subset
// of the requested range. The sync state machine must accept the partial
// response, keep `best_queued_number` unchanged until the gap is filled, and
// issue a follow-up request covering only the still-missing block(s).
fn can_import_response_with_missing_blocks() {
sp_tracing::try_init_simple();
// Build a 4-block chain on one client; sync it into an empty client.
let mut client2 = Arc::new(TestClientBuilder::new().build());
let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::<Vec<_>>();
let empty_client = Arc::new(TestClientBuilder::new().build());
let mut sync = ChainSync::new(
SyncMode::Full,
empty_client.clone(),
Box::new(DefaultBlockAnnounceValidator),
1,
None,
)
.unwrap();
let peer_id1 = PeerId::random();
let best_block = blocks[3].clone();
sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number())
.unwrap();
// Force the peer into a plain Available state with no common blocks so the
// full range 1..=4 is requested.
sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available;
sync.peers.get_mut(&peer_id1).unwrap().common_number = 0;
// Request all missing blocks and respond only with some.
let request =
get_block_request(&mut sync, FromBlock::Hash(best_block.hash()), 4, &peer_id1);
let response =
create_block_response(vec![blocks[3].clone(), blocks[2].clone(), blocks[1].clone()]);
sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap();
// Block 1 is still missing, so nothing can be queued yet.
assert_eq!(sync.best_queued_number, 0);
// Request should only contain the missing block.
let request = get_block_request(&mut sync, FromBlock::Number(1), 1, &peer_id1);
let response = create_block_response(vec![blocks[0].clone()]);
sync.on_block_data(&peer_id1, Some(request), response).unwrap();
// With the gap filled, the whole chain (blocks 1..=4) is queued.
assert_eq!(sync.best_queued_number, 4);
}
}
@@ -1726,9 +1726,8 @@ where
.block_status(&BlockId::Hash(hash))
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
{
BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing =>
BlockStatus::InChainWithState | BlockStatus::Queued =>
return Ok(ImportResult::AlreadyInChain),
BlockStatus::InChainWithState | BlockStatus::Queued => {},
BlockStatus::InChainPruned if !import_existing =>
return Ok(ImportResult::AlreadyInChain),
BlockStatus::InChainPruned => {},
+3
View File
@@ -130,6 +130,8 @@ pub enum Error<E: fmt::Debug> {
InvalidPruningMode(String),
/// Too many unfinalized sibling blocks inserted.
TooManySiblingBlocks,
/// Trying to insert existing block.
BlockAlreadyExists,
}
/// Pinning error type.
@@ -154,6 +156,7 @@ impl<E: fmt::Debug> fmt::Debug for Error<E> {
Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"),
Error::InvalidPruningMode(e) => write!(f, "Expected pruning mode: {}", e),
Error::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"),
Error::BlockAlreadyExists => write!(f, "Block already exists"),
}
}
}
+20 -2
View File
@@ -210,9 +210,10 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
insert_values(&mut values, record.inserted);
trace!(
target: "state-db",
"Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)",
"Uncanonicalized journal entry {}.{} ({:?}) ({} inserted, {} deleted)",
block,
index,
record.hash,
overlay.inserted.len(),
overlay.deleted.len()
);
@@ -296,6 +297,9 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize {
return Err(Error::TooManySiblingBlocks)
}
if level.blocks.iter().any(|b| b.hash == *hash) {
return Err(Error::BlockAlreadyExists)
}
let index = level.available_index();
let journal_key = to_journal_key(number, index);
@@ -641,7 +645,7 @@ mod tests {
use super::{to_journal_key, NonCanonicalOverlay};
use crate::{
test::{make_changeset, make_db},
ChangeSet, CommitSet, MetaDb,
ChangeSet, CommitSet, Error, MetaDb,
};
use sp_core::H256;
use std::io;
@@ -710,6 +714,20 @@ mod tests {
.unwrap();
}
#[test]
// Inserting the same block hash twice at the same level of the non-canonical
// overlay must return `Error::BlockAlreadyExists` (the duplicate check added
// to `NonCanonicalOverlay::insert`) rather than creating a second sibling
// entry for an identical hash.
fn insert_existing_fails() {
let db = make_db(&[]);
let h1 = H256::random();
let mut overlay = NonCanonicalOverlay::<H256, H256>::new(&db).unwrap();
// First insert at number 2 succeeds.
overlay
.insert::<io::Error>(&h1, 2, &H256::default(), ChangeSet::default())
.unwrap();
// Re-inserting the identical hash at the same number must be rejected.
assert!(matches!(
overlay.insert::<io::Error>(&h1, 2, &H256::default(), ChangeSet::default()),
Err(Error::BlockAlreadyExists)
));
}
#[test]
#[should_panic]
fn canonicalize_unknown_panics() {
@@ -20,7 +20,7 @@
use lru::LruCache;
use parking_lot::RwLock;
use sp_runtime::traits::{Block as BlockT, Header, NumberFor};
use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One};
/// Set to the expected max difference between `best` and `finalized` blocks at sync.
const LRU_CACHE_SIZE: usize = 5_000;
@@ -37,7 +37,14 @@ pub fn lowest_common_ancestor<Block: BlockT, T: HeaderMetadata<Block> + ?Sized>(
id_two: Block::Hash,
) -> Result<HashAndNumber<Block>, T::Error> {
let mut header_one = backend.header_metadata(id_one)?;
if header_one.parent == id_two {
return Ok(HashAndNumber { hash: id_two, number: header_one.number - One::one() })
}
let mut header_two = backend.header_metadata(id_two)?;
if header_two.parent == id_one {
return Ok(HashAndNumber { hash: id_one, number: header_one.number })
}
let mut orig_header_one = header_one.clone();
let mut orig_header_two = header_two.clone();