mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-27 05:47:58 +00:00
Clean obsolete BABE's weight data (#10748)
* Clean obsolete BABE weight data
* Take out test assertion from check closure
* Optimize metadata access using `HeaderMetadata` trait
* Apply suggestions from code review
* Introduce finalize and import pre-commit synchronous actions
* Do not hold locks between internal methods calls
* Remove unused generic bound
* Apply suggestions from code review
* Register BABE's pre-commit actions on `block_import` instead of `start_babe`
* PreCommit actions should be `Fn` instead of `FnMut`
* More robust safenet in case of malformed finality notifications

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com>
This commit is contained in:
@@ -28,7 +28,7 @@ use log::debug;
|
||||
use rand::RngCore;
|
||||
use rand_chacha::{rand_core::SeedableRng, ChaChaRng};
|
||||
use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
|
||||
use sc_client_api::{backend::TransactionFor, BlockchainEvents};
|
||||
use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer};
|
||||
use sc_consensus::{BoxBlockImport, BoxJustificationImport};
|
||||
use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
|
||||
use sc_keystore::LocalKeystore;
|
||||
@@ -608,8 +608,8 @@ fn propose_and_import_block<Transaction: Send + 'static>(
|
||||
slot: Option<Slot>,
|
||||
proposer_factory: &mut DummyFactory,
|
||||
block_import: &mut BoxBlockImport<TestBlock, Transaction>,
|
||||
) -> sp_core::H256 {
|
||||
let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap();
|
||||
) -> Hash {
|
||||
let mut proposer = block_on(proposer_factory.init(parent)).unwrap();
|
||||
|
||||
let slot = slot.unwrap_or_else(|| {
|
||||
let parent_pre_digest = find_pre_digest::<TestBlock>(parent).unwrap();
|
||||
@@ -625,7 +625,7 @@ fn propose_and_import_block<Transaction: Send + 'static>(
|
||||
|
||||
let parent_hash = parent.hash();
|
||||
|
||||
let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block;
|
||||
let mut block = block_on(proposer.propose_with(pre_digest)).unwrap().block;
|
||||
|
||||
let epoch_descriptor = proposer_factory
|
||||
.epoch_changes
|
||||
@@ -673,6 +673,29 @@ fn propose_and_import_block<Transaction: Send + 'static>(
|
||||
post_hash
|
||||
}
|
||||
|
||||
// Propose and import n valid BABE blocks that are built on top of the given parent.
|
||||
// The proposer takes care of producing epoch change digests according to the epoch
|
||||
// duration (which is set to 6 slots in the test runtime).
|
||||
fn propose_and_import_blocks<Transaction: Send + 'static>(
|
||||
client: &PeersFullClient,
|
||||
proposer_factory: &mut DummyFactory,
|
||||
block_import: &mut BoxBlockImport<TestBlock, Transaction>,
|
||||
parent_id: BlockId<TestBlock>,
|
||||
n: usize,
|
||||
) -> Vec<Hash> {
|
||||
let mut hashes = Vec::with_capacity(n);
|
||||
let mut parent_header = client.header(&parent_id).unwrap().unwrap();
|
||||
|
||||
for _ in 0..n {
|
||||
let block_hash =
|
||||
propose_and_import_block(&parent_header, None, proposer_factory, block_import);
|
||||
hashes.push(block_hash);
|
||||
parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap();
|
||||
}
|
||||
|
||||
hashes
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn importing_block_one_sets_genesis_epoch() {
|
||||
let mut net = BabeTestNet::new(1);
|
||||
@@ -714,8 +737,6 @@ fn importing_block_one_sets_genesis_epoch() {
|
||||
|
||||
#[test]
|
||||
fn importing_epoch_change_block_prunes_tree() {
|
||||
use sc_client_api::Finalizer;
|
||||
|
||||
let mut net = BabeTestNet::new(1);
|
||||
|
||||
let peer = net.peer(0);
|
||||
@@ -732,26 +753,8 @@ fn importing_epoch_change_block_prunes_tree() {
|
||||
mutator: Arc::new(|_, _| ()),
|
||||
};
|
||||
|
||||
// This is just boilerplate code for proposing and importing n valid BABE
|
||||
// blocks that are built on top of the given parent. The proposer takes care
|
||||
// of producing epoch change digests according to the epoch duration (which
|
||||
// is set to 6 slots in the test runtime).
|
||||
let mut propose_and_import_blocks = |parent_id, n| {
|
||||
let mut hashes = Vec::new();
|
||||
let mut parent_header = client.header(&parent_id).unwrap().unwrap();
|
||||
|
||||
for _ in 0..n {
|
||||
let block_hash = propose_and_import_block(
|
||||
&parent_header,
|
||||
None,
|
||||
&mut proposer_factory,
|
||||
&mut block_import,
|
||||
);
|
||||
hashes.push(block_hash);
|
||||
parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap();
|
||||
}
|
||||
|
||||
hashes
|
||||
let mut propose_and_import_blocks_wrap = |parent_id, n| {
|
||||
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
|
||||
};
|
||||
|
||||
// This is the block tree that we're going to use in this test. Each node
|
||||
@@ -766,12 +769,12 @@ fn importing_epoch_change_block_prunes_tree() {
|
||||
|
||||
// Create and import the canon chain and keep track of fork blocks (A, C, D)
|
||||
// from the diagram above.
|
||||
let canon_hashes = propose_and_import_blocks(BlockId::Number(0), 30);
|
||||
let canon_hashes = propose_and_import_blocks_wrap(BlockId::Number(0), 30);
|
||||
|
||||
// Create the forks
|
||||
let fork_1 = propose_and_import_blocks(BlockId::Hash(canon_hashes[0]), 10);
|
||||
let fork_2 = propose_and_import_blocks(BlockId::Hash(canon_hashes[12]), 15);
|
||||
let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10);
|
||||
let fork_1 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[0]), 10);
|
||||
let fork_2 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[12]), 15);
|
||||
let fork_3 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[18]), 10);
|
||||
|
||||
// We should be tracking a total of 9 epochs in the fork tree
|
||||
assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9);
|
||||
@@ -782,7 +785,7 @@ fn importing_epoch_change_block_prunes_tree() {
|
||||
// We finalize block #13 from the canon chain, so on the next epoch
|
||||
// change the tree should be pruned, to not contain F (#7).
|
||||
client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap();
|
||||
propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7);
|
||||
propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7);
|
||||
|
||||
// at this point no hashes from the first fork must exist on the tree
|
||||
assert!(!epoch_changes
|
||||
@@ -809,7 +812,7 @@ fn importing_epoch_change_block_prunes_tree() {
|
||||
|
||||
// finalizing block #25 from the canon chain should prune out the second fork
|
||||
client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap();
|
||||
propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8);
|
||||
propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8);
|
||||
|
||||
// at this point no hashes from the second fork must exist on the tree
|
||||
assert!(!epoch_changes
|
||||
@@ -894,3 +897,68 @@ fn babe_transcript_generation_match() {
|
||||
};
|
||||
debug_assert!(test(orig_transcript) == test(transcript_from_data(new_transcript)));
|
||||
}
|
||||
|
||||
#[test]
fn obsolete_blocks_aux_data_cleanup() {
	let mut net = BabeTestNet::new(1);

	let peer = net.peer(0);
	let data = peer.data.as_ref().expect("babe link set up during initialization");
	let client = peer.client().as_client();

	// Register the aux-data pruning handler, mirroring what `babe_start` does
	// in production code.
	let client_clone = client.clone();
	let on_finality = move |summary: &FinalityNotification<TestBlock>| {
		aux_storage_cleanup(client_clone.as_ref(), summary)
	};
	client.register_finality_action(Box::new(on_finality));

	let mut proposer_factory = DummyFactory {
		client: client.clone(),
		config: data.link.config.clone(),
		epoch_changes: data.link.epoch_changes.clone(),
		mutator: Arc::new(|_, _| ()),
	};

	let mut block_import = data.block_import.lock().take().expect("import set up during init");

	// Thin wrapper so each call site doesn't have to repeat the captured state.
	let mut propose_and_import_blocks_wrap = |parent_id, n| {
		propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
	};

	// Returns true iff, for every given hash, the presence of its block-weight
	// aux entry matches `expected` (true = present, false = wiped).
	let aux_data_check = |hashes: &[Hash], expected: bool| {
		hashes.iter().all(|hash| {
			aux_schema::load_block_weight(&*peer.client().as_backend(), hash)
				.unwrap()
				.is_some() == expected
		})
	};

	// Create the following test scenario:
	//
	//            /-----B3 --- B4        ( < fork2 )
	// G --- A1 --- A2 --- A3 --- A4     ( < fork1 )
	//                 \-----C4 --- C5   ( < fork3 )

	let fork1_hashes = propose_and_import_blocks_wrap(BlockId::Number(0), 4);
	let fork2_hashes = propose_and_import_blocks_wrap(BlockId::Number(2), 2);
	let fork3_hashes = propose_and_import_blocks_wrap(BlockId::Number(3), 2);

	// Before finalization: aux data exists for every imported block, but not
	// for the genesis block (it was never produced through BABE import).
	assert!(aux_data_check(&[client.chain_info().genesis_hash], false));
	assert!(aux_data_check(&fork1_hashes, true));
	assert!(aux_data_check(&fork2_hashes, true));
	assert!(aux_data_check(&fork3_hashes, true));

	// Finalize A3; the registered finality action should prune weight data for
	// blocks that can no longer be part of the canonical chain.
	client.finalize_block(BlockId::Number(3), None, true).unwrap();

	// Wiped: A1, A2
	assert!(aux_data_check(&fork1_hashes[..2], false));
	// Present: A3, A4
	assert!(aux_data_check(&fork1_hashes[2..], true));
	// Wiped: B3, B4 (fork2 branched below the finalized block)
	assert!(aux_data_check(&fork2_hashes, false));
	// Present: C4, C5 (fork3 branches at the finalized block, still viable)
	assert!(aux_data_check(&fork3_hashes, true));
}
|
||||
|
||||
Reference in New Issue
Block a user