grandpa: allow noting that the set has stalled (#6725)

* grandpa: remove unused methods to convert digest

* grandpa: add root extrinsic for scheduling forced change

* grandpa: add benchmark for schedule_forced_change

* grandpa: don't take authority weight in schedule_forced_change

* grandpa: add const for default forced change delay

* grandpa: adjust weights after benchmark on ref hardware

* grandpa: fix cleanup of forced changes on standard change application

* grandpa: replace schedule_forced_change with note_stalled

* grandpa: always trigger a session change when the set is stalled

* grandpa: fix bug on set id mutation after failed scheduled change

* grandpa: take delay as parameter in note_stalled

* grandpa: fix tests

* grandpa: fix cleanup of forced changes

* grandpa: add test for forced changes cleanup

* grandpa: add test for session rotation set id

* grandpa: add test for scheduling of forced changes on new session
This commit is contained in:
André Silva
2020-07-24 22:02:12 +01:00
committed by GitHub
parent 15679360fd
commit 2ec131142b
5 changed files with 304 additions and 87 deletions
@@ -423,9 +423,21 @@ where
fork_tree::FinalizationResult::Changed(change) => {
status.changed = true;
// if we are able to finalize any standard change then we can
// discard all pending forced changes (on different forks)
self.pending_forced_changes.clear();
let pending_forced_changes = std::mem::replace(
&mut self.pending_forced_changes,
Vec::new(),
);
// we will keep all forced changes for any later blocks that are a
// descendent of the finalized block (i.e. they are from this fork).
for change in pending_forced_changes {
if change.effective_number() > finalized_number &&
is_descendent_of(&finalized_hash, &change.canon_hash)
.map_err(fork_tree::Error::Client)?
{
self.pending_forced_changes.push(change)
}
}
if let Some(change) = change {
afg_log!(initial_sync,
@@ -1163,4 +1175,124 @@ mod tests {
Err(Error::InvalidAuthoritySet)
));
}
#[test]
fn cleans_up_stale_forced_changes_when_applying_standard_change() {
	// A single dummy authority is enough: this test only exercises the
	// pruning of pending changes, not authority-set semantics.
	let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]), 1)];

	let mut authorities = AuthoritySet {
		current_authorities: current_authorities.clone(),
		set_id: 0,
		pending_standard_changes: ForkTree::new(),
		pending_forced_changes: Vec::new(),
	};

	let new_set = current_authorities.clone();

	// Create the following pending changes tree:
	//
	//               [#C3]
	//              /
	//             /- (#C2)
	//            /
	// (#A) - (#B) - [#C1]
	//            \
	//             (#C0) - [#D]
	//
	// () - Standard change
	// [] - Forced change
	let is_descendent_of = {
		let hashes = vec!["B", "C0", "C1", "C2", "C3", "D"];
		is_descendent_of(move |base, hash| match (*base, *hash) {
			("B", "B") => false, // required to have the simpler case below
			("A", b) | ("B", b) => hashes.iter().any(|h| *h == b),
			("C0", "D") => true,
			_ => false,
		})
	};

	// Helper: queue a pending change at the given block; `forced` selects
	// between a forced (`DelayKind::Best`) and a standard
	// (`DelayKind::Finalized`) change.
	let mut add_pending_change = |canon_height, canon_hash, forced| {
		let change = PendingChange {
			next_authorities: new_set.clone(),
			delay: 0,
			canon_height,
			canon_hash,
			delay_kind: if forced {
				DelayKind::Best { median_last_finalized: 0 }
			} else {
				DelayKind::Finalized
			},
		};

		authorities
			.add_pending_change(change, &is_descendent_of)
			.unwrap();
	};

	add_pending_change(5, "A", false);
	add_pending_change(10, "B", false);
	add_pending_change(15, "C0", false);
	add_pending_change(15, "C1", true);
	add_pending_change(15, "C2", false);
	add_pending_change(15, "C3", true);
	add_pending_change(20, "D", true);

	// applying the standard change at A should not prune anything
	// other than the change that was applied
	authorities
		.apply_standard_changes("A", 5, &is_descendent_of, false)
		.unwrap();

	assert_eq!(authorities.pending_changes().count(), 6);

	// same for B
	authorities
		.apply_standard_changes("B", 10, &is_descendent_of, false)
		.unwrap();

	assert_eq!(authorities.pending_changes().count(), 5);

	let authorities2 = authorities.clone();

	// finalizing C2 should clear all forced changes
	authorities
		.apply_standard_changes("C2", 15, &is_descendent_of, false)
		.unwrap();

	assert_eq!(authorities.pending_forced_changes.len(), 0);

	// finalizing C0 should clear all forced changes but D
	let mut authorities = authorities2;
	authorities
		.apply_standard_changes("C0", 15, &is_descendent_of, false)
		.unwrap();

	assert_eq!(authorities.pending_forced_changes.len(), 1);
	assert_eq!(
		authorities
			.pending_forced_changes
			.first()
			.unwrap()
			.canon_hash,
		"D"
	);
}
}
+12 -1
View File
@@ -19,8 +19,9 @@
#![cfg_attr(not(feature = "std"), no_std)]
use super::*;
use super::{*, Module as Grandpa};
use frame_benchmarking::benchmarks;
use frame_system::RawOrigin;
use sp_core::H256;
benchmarks! {
@@ -62,6 +63,15 @@ benchmarks! {
} verify {
assert!(sp_finality_grandpa::check_equivocation_proof(equivocation_proof2));
}
note_stalled {
let delay = 1000.into();
let best_finalized_block_number = 1.into();
}: _(RawOrigin::Root, delay, best_finalized_block_number)
verify {
assert!(Grandpa::<T>::stalled().is_some());
}
}
#[cfg(test)]
@@ -74,6 +84,7 @@ mod tests {
fn test_benchmarks() {
new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
assert_ok!(test_benchmark_check_equivocation_proof::<Test>());
assert_ok!(test_benchmark_note_stalled::<Test>());
})
}
+46 -46
View File
@@ -43,9 +43,10 @@ use frame_support::{
decl_error, decl_event, decl_module, decl_storage, storage, traits::KeyOwnerProofSystem,
Parameter,
};
use frame_system::{ensure_none, ensure_signed, DigestOf};
use frame_system::{ensure_none, ensure_root, ensure_signed};
use pallet_finality_tracker::OnFinalizationStalled;
use sp_runtime::{
generic::{DigestItem, OpaqueDigestItemId},
generic::DigestItem,
traits::Zero,
DispatchResult, KeyTypeId,
};
@@ -205,7 +206,7 @@ decl_storage! {
State get(fn state): StoredState<T::BlockNumber> = StoredState::Live;
/// Pending change: (signaled at, scheduled change).
PendingChange: Option<StoredPendingChange<T::BlockNumber>>;
PendingChange get(fn pending_change): Option<StoredPendingChange<T::BlockNumber>>;
/// next block number where we can force a change.
NextForced get(fn next_forced): Option<T::BlockNumber>;
@@ -280,6 +281,24 @@ decl_module! {
)?;
}
/// Note that the current authority set of the GRANDPA finality gadget has
/// stalled. This will trigger a forced authority set change at the beginning
/// of the next session, to be enacted `delay` blocks after that. The delay
/// should be high enough to safely assume that the block signalling the
/// forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters
/// will start the new authority set using the given finalized block as base.
/// Only callable by root.
#[weight = weight_for::note_stalled::<T>()]
fn note_stalled(
origin,
delay: T::BlockNumber,
best_finalized_block_number: T::BlockNumber,
) {
ensure_root(origin)?;
// Delegate to the `OnFinalizationStalled` handler; presumably this
// records `(delay, best_finalized_block_number)` in `Stalled`, which is
// taken and acted upon at the next session change — TODO confirm
// against the `on_stalled` implementation.
Self::on_stalled(delay, best_finalized_block_number)
}
fn on_finalize(block_number: T::BlockNumber) {
// check for scheduled pending authority set changes
if let Some(pending_change) = <PendingChange<T>>::get() {
@@ -295,7 +314,7 @@ decl_module! {
))
} else {
Self::deposit_log(ConsensusLog::ScheduledChange(
ScheduledChange{
ScheduledChange {
delay: pending_change.delay,
next_authorities: pending_change.next_authorities.clone(),
}
@@ -377,6 +396,11 @@ mod weight_for {
// fetching set id -> session index mappings
.saturating_add(T::DbWeight::get().reads(2))
}
/// Weight of the `note_stalled` dispatchable: a small constant base cost
/// (3 µs of ref-hardware execution) plus one storage write.
pub fn note_stalled<T: super::Trait>() -> Weight {
	T::DbWeight::get()
		.writes(1)
		.saturating_add(3 * WEIGHT_PER_MICROS)
}
}
impl<T: Trait> Module<T> {
@@ -580,42 +604,6 @@ impl<T: Trait> Module<T> {
}
}
impl<T: Trait> Module<T> {
/// Attempt to extract a GRANDPA log from a generic digest.
pub fn grandpa_log(digest: &DigestOf<T>) -> Option<ConsensusLog<T::BlockNumber>> {
let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
digest.convert_first(|l| l.try_to::<ConsensusLog<T::BlockNumber>>(id))
}
/// Attempt to extract a pending set-change signal from a digest.
pub fn pending_change(digest: &DigestOf<T>)
-> Option<ScheduledChange<T::BlockNumber>>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_change())
}
/// Attempt to extract a forced set-change signal from a digest.
pub fn forced_change(digest: &DigestOf<T>)
-> Option<(T::BlockNumber, ScheduledChange<T::BlockNumber>)>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_forced_change())
}
/// Attempt to extract a pause signal from a digest.
pub fn pending_pause(digest: &DigestOf<T>)
-> Option<T::BlockNumber>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_pause())
}
/// Attempt to extract a resume signal from a digest.
pub fn pending_resume(digest: &DigestOf<T>)
-> Option<T::BlockNumber>
{
Self::grandpa_log(digest).and_then(|signal| signal.try_into_resume())
}
}
impl<T: Trait> sp_runtime::BoundToRuntimeAppPublic for Module<T> {
type Public = AuthorityId;
}
@@ -638,14 +626,26 @@ impl<T: Trait> pallet_session::OneSessionHandler<T::AccountId> for Module<T>
// Always issue a change if `session` says that the validators have changed.
// Even if their session keys are the same as before, the underlying economic
// identities have changed.
let current_set_id = if changed {
let current_set_id = if changed || <Stalled<T>>::exists() {
let next_authorities = validators.map(|(_, k)| (k, 1)).collect::<Vec<_>>();
if let Some((further_wait, median)) = <Stalled<T>>::take() {
let _ = Self::schedule_change(next_authorities, further_wait, Some(median));
let res = if let Some((further_wait, median)) = <Stalled<T>>::take() {
Self::schedule_change(next_authorities, further_wait, Some(median))
} else {
let _ = Self::schedule_change(next_authorities, Zero::zero(), None);
Self::schedule_change(next_authorities, Zero::zero(), None)
};
if res.is_ok() {
CurrentSetId::mutate(|s| {
*s += 1;
*s
})
} else {
// either the session module signalled that the validators have changed
// or the set was stalled. but since we didn't successfully schedule
// an authority set change we do not increment the set id.
Self::current_set_id()
}
CurrentSetId::mutate(|s| { *s += 1; *s })
} else {
// nothing's changed, neither economic conditions nor session keys. update the pointer
// of the current set.
@@ -663,7 +663,7 @@ impl<T: Trait> pallet_session::OneSessionHandler<T::AccountId> for Module<T>
}
}
impl<T: Trait> pallet_finality_tracker::OnFinalizationStalled<T::BlockNumber> for Module<T> {
impl<T: Trait> OnFinalizationStalled<T::BlockNumber> for Module<T> {
fn on_stalled(further_wait: T::BlockNumber, median: T::BlockNumber) {
// when we record old authority sets, we can use `pallet_finality_tracker::median`
// to figure out _who_ failed. until then, we can't meaningfully guard
+13 -14
View File
@@ -365,23 +365,18 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx
}
pub fn start_session(session_index: SessionIndex) {
let mut parent_hash = System::parent_hash();
for i in Session::current_index()..session_index {
System::on_finalize(System::block_number());
Session::on_finalize(System::block_number());
Staking::on_finalize(System::block_number());
System::set_block_number((i + 1).into());
Timestamp::set_timestamp(System::block_number() * 6000);
Grandpa::on_finalize(System::block_number());
// In order to be able to use `System::parent_hash()` in the tests
// we need to first get it via `System::finalize` and then set it
// the `System::initialize`. However, it is needed to be taken into
// consideration that finalizing will prune some data in `System`
// storage including old values `BlockHash` if that reaches above
// `BlockHashCount` capacity.
if System::block_number() > 1 {
let parent_hash = if System::block_number() > 1 {
let hdr = System::finalize();
parent_hash = hdr.hash();
}
hdr.hash()
} else {
System::parent_hash()
};
System::initialize(
&(i as u64 + 1),
@@ -390,9 +385,13 @@ pub fn start_session(session_index: SessionIndex) {
&Default::default(),
Default::default(),
);
System::set_block_number((i + 1).into());
Timestamp::set_timestamp(System::block_number() * 6000);
Session::on_initialize(System::block_number());
System::on_initialize(System::block_number());
Session::on_initialize(System::block_number());
Staking::on_initialize(System::block_number());
Grandpa::on_initialize(System::block_number());
}
assert_eq!(Session::current_index(), session_index);
+98 -23
View File
@@ -28,6 +28,7 @@ use frame_support::{
traits::{Currency, OnFinalize},
};
use frame_system::{EventRecord, Phase};
use pallet_session::OneSessionHandler;
use sp_core::H256;
use sp_keyring::Ed25519Keyring;
use sp_runtime::testing::Digest;
@@ -342,14 +343,15 @@ fn report_equivocation_current_set_works() {
start_era(1);
let authorities = Grandpa::grandpa_authorities();
let validators = Session::validators();
// make sure that all authorities have the same balance
for i in 0..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
// make sure that all validators have the same balance
for validator in &validators {
assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);
assert_eq!(
Staking::eras_stakers(1, i as u64),
Staking::eras_stakers(1, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
@@ -388,11 +390,12 @@ fn report_equivocation_current_set_works() {
start_era(2);
// check that the balance of 0-th validator is slashed 100%.
assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&0), 0);
let equivocation_validator_id = validators[equivocation_authority_index];
assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0);
assert_eq!(
Staking::eras_stakers(2, 0),
Staking::eras_stakers(2, equivocation_validator_id),
pallet_staking::Exposure {
total: 0,
own: 0,
@@ -401,12 +404,16 @@ fn report_equivocation_current_set_works() {
);
// check that the balances of all other validators are left intact.
for i in 1..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
for validator in &validators {
if *validator == equivocation_validator_id {
continue;
}
assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);
assert_eq!(
Staking::eras_stakers(2, i as u64),
Staking::eras_stakers(2, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
@@ -425,6 +432,7 @@ fn report_equivocation_old_set_works() {
start_era(1);
let authorities = Grandpa::grandpa_authorities();
let validators = Session::validators();
let equivocation_authority_index = 0;
let equivocation_key = &authorities[equivocation_authority_index].0;
@@ -436,12 +444,12 @@ fn report_equivocation_old_set_works() {
start_era(2);
// make sure that all authorities have the same balance
for i in 0..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
for validator in &validators {
assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);
assert_eq!(
Staking::eras_stakers(2, i as u64),
Staking::eras_stakers(2, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
@@ -474,11 +482,13 @@ fn report_equivocation_old_set_works() {
start_era(3);
// check that the balance of 0-th validator is slashed 100%.
assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&0), 0);
let equivocation_validator_id = validators[equivocation_authority_index];
assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000);
assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0);
assert_eq!(
Staking::eras_stakers(3, 0),
Staking::eras_stakers(3, equivocation_validator_id),
pallet_staking::Exposure {
total: 0,
own: 0,
@@ -487,12 +497,16 @@ fn report_equivocation_old_set_works() {
);
// check that the balances of all other validators are left intact.
for i in 1..authorities.len() {
assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000);
assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000);
for validator in &validators {
if *validator == equivocation_validator_id {
continue;
}
assert_eq!(Balances::total_balance(validator), 10_000_000);
assert_eq!(Staking::slashable_balance_of(validator), 10_000);
assert_eq!(
Staking::eras_stakers(3, i as u64),
Staking::eras_stakers(3, validator),
pallet_staking::Exposure {
total: 10_000,
own: 10_000,
@@ -767,3 +781,64 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
);
});
}
#[test]
// Verifies that `CurrentSetId` is only incremented when a session rotation
// actually manages to schedule an authority set change.
fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() {
new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
assert_eq!(Grandpa::current_set_id(), 0);
// starting a new era should lead to a change in the session
// validators and trigger a new set
start_era(1);
assert_eq!(Grandpa::current_set_id(), 1);
// we schedule a change delayed by 2 blocks, this should make it so that
// when we try to rotate the session at the beginning of the era we will
// fail to schedule a change (there's already one pending), so we should
// not increment the set id.
Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap();
start_era(2);
assert_eq!(Grandpa::current_set_id(), 1);
// everything should go back to normal after.
start_era(3);
assert_eq!(Grandpa::current_set_id(), 2);
// session rotation might also fail to schedule a change if it's for a
// forced change (i.e. grandpa is stalled) and it is too soon.
// `NextForced` far in the future makes the forced change "too soon".
<NextForced<Test>>::put(1000);
<Stalled<Test>>::put((30, 1));
// NOTE: we cannot go through normal era rotation since having `Stalled`
// defined will also trigger a new set (regardless of whether the
// session validators changed)
Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty());
// the forced change could not be scheduled, so the set id is unchanged.
assert_eq!(Grandpa::current_set_id(), 2);
});
}
#[test]
// Verifies that a stalled GRANDPA set forces a (forced) authority set change
// on session rotation even when the session reports no validator change.
fn always_schedules_a_change_on_new_session_when_stalled() {
new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
start_era(1);
assert!(Grandpa::pending_change().is_none());
assert_eq!(Grandpa::current_set_id(), 1);
// if the session handler reports no change then we should not schedule
// any pending change
Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());
assert!(Grandpa::pending_change().is_none());
assert_eq!(Grandpa::current_set_id(), 1);
// if grandpa is stalled then we should **always** schedule a forced
// change on a new session
// `Stalled` holds the (further_wait, median) pair used for the change.
<Stalled<Test>>::put((10, 1));
Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());
assert!(Grandpa::pending_change().is_some());
// the scheduled change must be forced, and the set id bumped.
assert!(Grandpa::pending_change().unwrap().forced.is_some());
assert_eq!(Grandpa::current_set_id(), 2);
});
}