BABE's revert procedure (#11022)

* First rough draft for BABE revert

* Proper babe revert test

* Cleanup

* Test trivial cleanup

* Fix to make clippy happy

* Check polkadot companion

* Check cumulus companion

* Remove babe's blocks weight on revert

* Handle "empty" blockchain edge case

* Run companions

* Simplify the filter predicate

* Saturating sub is not required

* Run pipeline

* Run pipeline again...
This commit is contained in:
Davide Galassi
2022-03-24 09:51:55 +01:00
committed by GitHub
parent 208be86934
commit c534e00ffc
7 changed files with 314 additions and 22 deletions
@@ -735,6 +735,88 @@ fn importing_block_one_sets_genesis_epoch() {
assert_eq!(epoch_for_second_block, genesis_epoch);
}
#[test]
fn revert_prunes_epoch_changes_and_removes_weights() {
	// End-to-end check that `revert` (1) prunes epoch-change entries anchored on
	// reverted blocks from the fork tree and (2) deletes the aux-db block-weight
	// entries for the discarded blocks, while leaving untouched forks intact.
	let mut net = BabeTestNet::new(1);

	let peer = net.peer(0);
	let data = peer.data.as_ref().expect("babe link set up during initialization");

	let client = peer.client().as_client();
	let backend = peer.client().as_backend();
	let mut block_import = data.block_import.lock().take().expect("import set up during init");
	let epoch_changes = data.link.epoch_changes.clone();

	let mut proposer_factory = DummyFactory {
		client: client.clone(),
		config: data.link.config.clone(),
		epoch_changes: data.link.epoch_changes.clone(),
		// No block mutation needed: blocks are imported exactly as proposed.
		mutator: Arc::new(|_, _| ()),
	};

	// Convenience wrapper so each fork below is a one-liner.
	let mut propose_and_import_blocks_wrap = |parent_id, n| {
		propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
	};

	// Test scenario.
	// Information for epoch 19 is produced on three different forks at block #13.
	// One branch starts before the revert point (epoch data should be maintained).
	// One branch starts after the revert point (epoch data should be removed).
	//
	//                        *----------------- F(#13) --#18                  < fork #2
	//                       /
	// A(#1) ---- B(#7) ----#8----+-----#12----- C(#13) ---- D(#19) ------#21  < canon
	//   \                        ^       \
	//    \                    revert      *---- G(#13) ---- H(#19) ---#20     < fork #3
	//     \                   to #10
	//      *-----E(#7)---#11                                                  < fork #1
	let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 21);
	let fork1 = propose_and_import_blocks_wrap(BlockId::Hash(canon[0]), 10);
	let fork2 = propose_and_import_blocks_wrap(BlockId::Hash(canon[7]), 10);
	let fork3 = propose_and_import_blocks_wrap(BlockId::Hash(canon[11]), 8);

	// We should be tracking a total of 8 epochs in the fork tree
	// (A, B, C, D on canon; E on fork #1; F on fork #2; G, H on fork #3).
	assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8);
	// And only one root
	assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1);

	// Revert canon chain to block #10 (best(21) - 11)
	revert(client.clone(), backend, 11).expect("revert should work for baked test scenario");

	// Load and check epoch changes.
	// Re-read the tree from aux storage to verify what `revert` persisted,
	// rather than inspecting the possibly-stale in-memory handle.
	let actual_nodes = aux_schema::load_epoch_changes::<Block, TestClient>(
		&*client,
		data.link.config.genesis_config(),
	)
	.expect("load epoch changes")
	.shared_data()
	.tree()
	.iter()
	.map(|(h, _, _)| *h)
	.collect::<Vec<_>>();

	// Only epochs anchored at or below the revert point — or on forks branching
	// below it — survive: C, D (reverted canon) and G, H (fork #3, which branches
	// from reverted block #12) must be gone.
	let expected_nodes = vec![
		canon[0], // A
		canon[6], // B
		fork2[4], // F
		fork1[5], // E
	];

	assert_eq!(actual_nodes, expected_nodes);

	// `expected == true` asserts every hash still has weight data;
	// `expected == false` asserts every hash had its weight data removed.
	let weight_data_check = |hashes: &[Hash], expected: bool| {
		hashes.iter().all(|hash| {
			aux_schema::load_block_weight(&*client, hash).unwrap().is_some() == expected
		})
	};
	// Canon blocks above the revert point lose their weights; blocks up to #10 keep them.
	assert!(weight_data_check(&canon[..10], true));
	assert!(weight_data_check(&canon[10..], false));
	// Forks branching below the revert point are untouched; fork #3 hangs off a
	// reverted block, so its weights are removed too.
	assert!(weight_data_check(&fork1, true));
	assert!(weight_data_check(&fork2, true));
	assert!(weight_data_check(&fork3, false));
}
#[test]
fn importing_epoch_change_block_prunes_tree() {
let mut net = BabeTestNet::new(1);