Limit number of blocks per level (2nd attempt) (#1559)

Prevents the StateDbError::TooManySiblingBlocks error from being triggered by eagerly removing
stale blocks from the backend on block import, before the error condition is met.
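
A minimal sketch of how the limit is wired in, based on the test code in this diff (the constants and bindings are illustrative, not a prescription for node setup):

    // Cap the number of sibling blocks kept per level of the non-finalized tree.
    // Stale siblings beyond the limit are pruned eagerly on import, instead of
    // letting the backend hit StateDbError::TooManySiblingBlocks.
    const LEVEL_LIMIT: usize = 3;
    let backend = Arc::new(Backend::new_test(1000, 3));
    let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build());
    let mut para_import = ParachainBlockImport::new_with_limit(
        client.clone(),
        backend.clone(),
        LevelLimit::Some(LEVEL_LIMIT),
    );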

Introduces a just-in-time block recovery mechanism that restores wrongly removed blocks
via an explicit pov-recovery request.
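
A rough sketch of the recovery hand-off, following the new follow_new_best_with_dummy_recovery_works test below (the channel capacity and the handler are illustrative; the request type is assumed to expose the hash and kind fields used in the test):

    // run_parachain_consensus now optionally takes a sender for recovery requests.
    let (recovery_chan_tx, mut recovery_chan_rx) = futures::channel::mpsc::channel(3);
    let consensus = run_parachain_consensus(
        100.into(),
        client.clone(),
        relay_chain,
        Arc::new(|_, _| {}),
        Some(recovery_chan_tx),
    );
    // On the pov-recovery side, requests are drained from the channel and the
    // missing block is re-fetched and imported.
    while let Some(req) = recovery_chan_rx.next().await {
        // req.hash identifies the missing block; req.kind is e.g. RecoveryKind::Full.
        recover_and_import(req).await; // hypothetical handler, not part of this PR
    }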

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Author: Davide Galassi
Committed: 2022-12-20 12:13:49 +01:00 (by GitHub)
Parent: f621351332
Commit: 73837c384e
17 changed files with 1096 additions and 184 deletions
@@ -18,6 +18,7 @@ use crate::*;
use async_trait::async_trait;
use codec::Encode;
use cumulus_client_pov_recovery::RecoveryKind;
use cumulus_relay_chain_interface::RelayChainResult;
use cumulus_test_client::{
runtime::{Block, Header},
@@ -26,10 +27,10 @@ use cumulus_test_client::{
use futures::{channel::mpsc, executor::block_on, select, FutureExt, Stream, StreamExt};
use futures_timer::Delay;
use polkadot_primitives::v2::Id as ParaId;
use sc_client_api::UsageProvider;
use sc_client_api::{blockchain::Backend as _, Backend as _, UsageProvider};
use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy};
use sp_blockchain::Error as ClientError;
use sp_consensus::BlockOrigin;
use sp_consensus::{BlockOrigin, BlockStatus};
use sp_runtime::generic::BlockId;
use std::{
sync::{Arc, Mutex},
@@ -103,21 +104,82 @@ impl crate::parachain_consensus::RelaychainClient for Relaychain {
}
}
fn build_and_import_block(mut client: Arc<Client>, import_as_best: bool) -> Block {
let builder = client.init_block_builder(None, Default::default());
fn build_block<B: InitBlockBuilder>(
builder: &B,
at: Option<BlockId<Block>>,
timestamp: Option<u64>,
) -> Block {
let builder = match at {
Some(at) => match timestamp {
Some(ts) =>
builder.init_block_builder_with_timestamp(&at, None, Default::default(), ts),
None => builder.init_block_builder_at(&at, None, Default::default()),
},
None => builder.init_block_builder(None, Default::default()),
};
let block = builder.build().unwrap().block;
let (header, body) = block.clone().deconstruct();
let mut block = builder.build().unwrap().block;
let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header);
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(import_as_best));
block_import_params.body = Some(body);
block_on(client.import_block(block_import_params, Default::default())).unwrap();
// Simulate some form of post activity (like a Seal or other generic things).
// This is mostly used to exercise the correct behavior of the `LevelMonitor`.
// (in practice we want the header post-hash != pre-hash)
block.header.digest.push(sp_runtime::DigestItem::Other(vec![1, 2, 3]));
block
}
async fn import_block<I: BlockImport<Block>>(
importer: &mut I,
block: Block,
origin: BlockOrigin,
import_as_best: bool,
) {
let (mut header, body) = block.deconstruct();
let post_digest =
header.digest.pop().expect("post digest is present in manually crafted block");
let mut block_import_params = BlockImportParams::new(origin, header);
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(import_as_best));
block_import_params.body = Some(body);
block_import_params.post_digests.push(post_digest);
importer.import_block(block_import_params, Default::default()).await.unwrap();
}
fn import_block_sync<I: BlockImport<Block>>(
importer: &mut I,
block: Block,
origin: BlockOrigin,
import_as_best: bool,
) {
block_on(import_block(importer, block, origin, import_as_best));
}
fn build_and_import_block_ext<B: InitBlockBuilder, I: BlockImport<Block>>(
builder: &B,
origin: BlockOrigin,
import_as_best: bool,
importer: &mut I,
at: Option<BlockId<Block>>,
timestamp: Option<u64>,
) -> Block {
let block = build_block(builder, at, timestamp);
import_block_sync(importer, block.clone(), origin, import_as_best);
block
}
fn build_and_import_block(mut client: Arc<Client>, import_as_best: bool) -> Block {
build_and_import_block_ext(
&*client.clone(),
BlockOrigin::Own,
import_as_best,
&mut client,
None,
None,
)
}
#[test]
fn follow_new_best_works() {
sp_tracing::try_init_simple();
@@ -129,7 +191,7 @@ fn follow_new_best_works() {
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None);
let work = async move {
new_best_heads_sender.unbounded_send(block.header().clone()).unwrap();
@@ -152,6 +214,68 @@ fn follow_new_best_works() {
});
}
#[test]
fn follow_new_best_with_dummy_recovery_works() {
sp_tracing::try_init_simple();
let client = Arc::new(TestClientBuilder::default().build());
let relay_chain = Relaychain::new();
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let (recovery_chan_tx, mut recovery_chan_rx) = futures::channel::mpsc::channel(3);
let consensus = run_parachain_consensus(
100.into(),
client.clone(),
relay_chain,
Arc::new(|_, _| {}),
Some(recovery_chan_tx),
);
let block = build_block(&*client.clone(), None, None);
let block_clone = block.clone();
let client_clone = client.clone();
let work = async move {
new_best_heads_sender.unbounded_send(block.header().clone()).unwrap();
loop {
Delay::new(Duration::from_millis(100)).await;
match client.block_status(&BlockId::Hash(block.hash())).unwrap() {
BlockStatus::Unknown => {},
status => {
assert_eq!(block.hash(), client.usage_info().chain.best_hash);
assert_eq!(status, BlockStatus::InChainWithState);
break
},
}
}
};
let dummy_block_recovery = async move {
loop {
if let Some(req) = recovery_chan_rx.next().await {
assert_eq!(req.hash, block_clone.hash());
assert_eq!(req.kind, RecoveryKind::Full);
Delay::new(Duration::from_millis(500)).await;
import_block(&mut &*client_clone, block_clone.clone(), BlockOrigin::Own, true)
.await;
}
}
};
block_on(async move {
futures::pin_mut!(consensus);
futures::pin_mut!(work);
select! {
r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
_ = dummy_block_recovery.fuse() => {},
_ = work.fuse() => {},
}
});
}
#[test]
fn follow_finalized_works() {
sp_tracing::try_init_simple();
@@ -163,7 +287,7 @@ fn follow_finalized_works() {
let finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None);
let work = async move {
finalized_sender.unbounded_send(block.header().clone()).unwrap();
@@ -204,7 +328,7 @@ fn follow_finalized_does_not_stop_on_unknown_block() {
let finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None);
let work = async move {
for _ in 0..3usize {
@@ -254,7 +378,7 @@ fn follow_new_best_sets_best_after_it_is_imported() {
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None);
let work = async move {
new_best_heads_sender.unbounded_send(block.header().clone()).unwrap();
@@ -331,7 +455,7 @@ fn do_not_set_best_block_to_older_block() {
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None);
let client2 = client.clone();
let work = async move {
@@ -355,3 +479,216 @@ fn do_not_set_best_block_to_older_block() {
// Build and import a new best block.
build_and_import_block(client2.clone(), true);
}
#[test]
fn prune_blocks_on_level_overflow() {
// Here we are using the timestamp value to generate blocks with different hashes.
const LEVEL_LIMIT: usize = 3;
const TIMESTAMP_MULTIPLIER: u64 = 60000;
let backend = Arc::new(Backend::new_test(1000, 3));
let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build());
let mut para_import = ParachainBlockImport::new_with_limit(
client.clone(),
backend.clone(),
LevelLimit::Some(LEVEL_LIMIT),
);
let block0 = build_and_import_block_ext(
&*client,
BlockOrigin::NetworkInitialSync,
true,
&mut para_import,
None,
None,
);
let id0 = BlockId::Hash(block0.header.hash());
let blocks1 = (0..LEVEL_LIMIT)
.into_iter()
.map(|i| {
build_and_import_block_ext(
&*client,
if i == 1 { BlockOrigin::NetworkInitialSync } else { BlockOrigin::Own },
i == 1,
&mut para_import,
Some(id0),
Some(i as u64 * TIMESTAMP_MULTIPLIER),
)
})
.collect::<Vec<_>>();
let id10 = BlockId::Hash(blocks1[0].header.hash());
let blocks2 = (0..2)
.into_iter()
.map(|i| {
build_and_import_block_ext(
&*client,
BlockOrigin::Own,
false,
&mut para_import,
Some(id10),
Some(i as u64 * TIMESTAMP_MULTIPLIER),
)
})
.collect::<Vec<_>>();
// Initial scenario (with B11 imported as best)
//
// B0 --+-- B10 --+-- B20
// +-- B11 +-- B21
// +-- B12
let leaves = backend.blockchain().leaves().unwrap();
let mut expected = vec![
blocks2[0].header.hash(),
blocks2[1].header.hash(),
blocks1[1].header.hash(),
blocks1[2].header.hash(),
];
assert_eq!(leaves, expected);
let best = client.usage_info().chain.best_hash;
assert_eq!(best, blocks1[1].header.hash());
let block13 = build_and_import_block_ext(
&*client,
BlockOrigin::Own,
false,
&mut para_import,
Some(id0),
Some(LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER),
);
// Expected scenario
//
// B0 --+-- B10 --+-- B20
// +-- B11 +-- B21
// +--(B13) <-- B12 has been replaced
let leaves = backend.blockchain().leaves().unwrap();
expected[3] = block13.header.hash();
assert_eq!(leaves, expected);
let block14 = build_and_import_block_ext(
&*client,
BlockOrigin::Own,
false,
&mut para_import,
Some(id0),
Some(2 * LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER),
);
// Expected scenario
//
// B0 --+--(B14) <-- B10 has been replaced
// +-- B11
// +--(B13)
let leaves = backend.blockchain().leaves().unwrap();
expected.remove(0);
expected.remove(0);
expected.push(block14.header.hash());
assert_eq!(leaves, expected);
}
#[test]
fn restore_limit_monitor() {
// Here we are using the timestamp value to generate blocks with different hashes.
const LEVEL_LIMIT: usize = 2;
const TIMESTAMP_MULTIPLIER: u64 = 60000;
let backend = Arc::new(Backend::new_test(1000, 3));
let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build());
// Start with a block import not enforcing any limit...
let mut para_import = ParachainBlockImport::new_with_limit(
client.clone(),
backend.clone(),
LevelLimit::Some(usize::MAX),
);
let block00 = build_and_import_block_ext(
&*client,
BlockOrigin::NetworkInitialSync,
true,
&mut para_import,
None,
None,
);
let id00 = BlockId::Hash(block00.header.hash());
let blocks1 = (0..LEVEL_LIMIT + 1)
.into_iter()
.map(|i| {
build_and_import_block_ext(
&*client,
if i == 1 { BlockOrigin::NetworkInitialSync } else { BlockOrigin::Own },
i == 1,
&mut para_import,
Some(id00),
Some(i as u64 * TIMESTAMP_MULTIPLIER),
)
})
.collect::<Vec<_>>();
let id10 = BlockId::Hash(blocks1[0].header.hash());
let _ = (0..LEVEL_LIMIT)
.into_iter()
.map(|i| {
build_and_import_block_ext(
&*client,
BlockOrigin::Own,
false,
&mut para_import,
Some(id10),
Some(i as u64 * TIMESTAMP_MULTIPLIER),
)
})
.collect::<Vec<_>>();
// Scenario before limit application (with B11 imported as best)
// Import order (freshness): B00, B10, B11, B12, B20, B21
//
// B00 --+-- B10 --+-- B20
// | +-- B21
// +-- B11
// |
// +-- B12
// Simulate a restart by forcing a new monitor structure instance
let mut para_import = ParachainBlockImport::new_with_limit(
client.clone(),
backend.clone(),
LevelLimit::Some(LEVEL_LIMIT),
);
let block13 = build_and_import_block_ext(
&*client,
BlockOrigin::Own,
false,
&mut para_import,
Some(id00),
Some(LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER),
);
// Expected scenario
//
// B00 --+-- B11
// +--(B13)
let leaves = backend.blockchain().leaves().unwrap();
let expected = vec![blocks1[1].header.hash(), block13.header.hash()];
assert_eq!(leaves, expected);
let monitor = para_import.monitor.unwrap();
let monitor = monitor.shared_data();
assert_eq!(monitor.import_counter, 5);
assert!(monitor.levels.iter().all(|(number, hashes)| {
hashes
.iter()
.filter(|hash| **hash != block13.header.hash())
.all(|hash| *number == *monitor.freshness.get(hash).unwrap())
}));
assert_eq!(*monitor.freshness.get(&block13.header.hash()).unwrap(), monitor.import_counter - 1);
}