polkadot: remove grandpa pause support (#2511)

This was never used and we probably don't need it anyway.
This commit is contained in:
André Silva
2023-11-28 14:00:18 +00:00
committed by GitHub
parent c5f211d0de
commit b0b4451f31
8 changed files with 3 additions and 222 deletions
@@ -283,7 +283,6 @@ fn build_polkadot_full_node(
config,
polkadot_service::NewFullParams {
is_parachain_node,
grandpa_pause: None,
// Disable BEEFY. It should not be required by the internal relay chain node.
enable_beefy: false,
force_authoring_backoff: false,
-10
View File
@@ -82,16 +82,6 @@ pub struct RunCmd {
#[arg(long = "force-rococo")]
pub force_rococo: bool,
/// Setup a GRANDPA scheduled voting pause.
///
/// This parameter takes two values, namely a block number and a delay (in blocks).
///
/// After the given block number is finalized the GRANDPA voter will temporarily
/// stop voting for new blocks until the given delay has elapsed (i.e. until a
/// block at height `pause_block + delay` is imported).
#[arg(long = "grandpa-pause", num_args = 2)]
pub grandpa_pause: Vec<u32>,
/// Disable the BEEFY gadget.
///
/// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'.
-7
View File
@@ -215,12 +215,6 @@ where
set_default_ss58_version(chain_spec);
let grandpa_pause = if cli.run.grandpa_pause.is_empty() {
None
} else {
Some((cli.run.grandpa_pause[0], cli.run.grandpa_pause[1]))
};
if chain_spec.is_kusama() {
info!("----------------------------");
info!("This chain is not in any way");
@@ -257,7 +251,6 @@ where
config,
service::NewFullParams {
is_parachain_node: service::IsParachainNode::No,
grandpa_pause,
enable_beefy,
force_authoring_backoff: cli.run.force_authoring_backoff,
jaeger_agent,
@@ -16,8 +16,6 @@
//! Polkadot-specific GRANDPA integration utilities.
use std::sync::Arc;
use sp_runtime::traits::{Block as BlockT, Header as _, NumberFor};
use crate::HeaderProvider;
@@ -59,55 +57,6 @@ where
}
}
/// A custom GRANDPA voting rule that "pauses" voting (i.e. keeps voting for the
/// same last finalized block) after a given block at height `N` has been
/// finalized and for a delay of `M` blocks, i.e. until the best block reaches
/// `N` + `M`, the voter will keep voting for block `N`.
#[derive(Clone)]
pub(crate) struct PauseAfterBlockFor<N>(pub(crate) N, pub(crate) N);

impl<Block, B> grandpa::VotingRule<Block, B> for PauseAfterBlockFor<NumberFor<Block>>
where
    Block: BlockT,
    B: sp_blockchain::HeaderBackend<Block> + 'static,
{
    fn restrict_vote(
        &self,
        backend: Arc<B>,
        base: &Block::Header,
        best_target: &Block::Header,
        current_target: &Block::Header,
    ) -> grandpa::VotingRuleResult<Block> {
        let pause_at = self.0;
        let delay_for = self.1;

        // The restricted target (if any) is decided synchronously; the
        // returned future just yields this precomputed value.
        let restricted = if *current_target.number() <= pause_at {
            // The proposed vote does not go past the pause block, so there is
            // nothing to restrict.
            None
        } else if *best_target.number() > pause_at + delay_for {
            // The pause period (`pause_at + delay_for`) has elapsed: stop
            // restricting votes altogether.
            None
        } else if *base.number() >= pause_at {
            // The pause block (or a descendant) is already finalized; keep
            // returning the last finalized block until the best number grows
            // enough to satisfy the branch above.
            Some((base.hash(), *base.number()))
        } else {
            // Inside the pause period but the pause block is not finalized
            // yet: walk back from the proposed target to the header at the
            // pause height and vote for that instead.
            walk_backwards_to_target_block(&*backend, pause_at, current_target).ok()
        };

        Box::pin(async move { restricted })
    }
}
/// GRANDPA hard forks due to borked migration of session keys after a runtime
/// upgrade (at #1491596), the signaled authority set changes were invalid
/// (blank keys) and were impossible to finalize. The authorities for these
@@ -214,130 +163,3 @@ pub(crate) fn kusama_hard_forks() -> Vec<grandpa::AuthoritySetHardFork<Block>> {
})
.collect()
}
// Tests for the `PauseAfterBlockFor` voting rule: author a chain with the
// Polkadot test client and check which votes the rule restricts at various
// best/target heights around the configured pause window.
#[cfg(test)]
mod tests {
    use consensus_common::BlockOrigin;
    use grandpa::VotingRule;
    use polkadot_test_client::{
        ClientBlockImportExt, DefaultTestClientBuilderExt, InitPolkadotBlockBuilder,
        TestClientBuilder, TestClientBuilderExt,
    };
    use sp_blockchain::HeaderBackend;
    use sp_runtime::traits::Header;
    use std::sync::Arc;

    #[test]
    fn grandpa_pause_voting_rule_works() {
        let _ = env_logger::try_init();

        let client = Arc::new(TestClientBuilder::new().build());

        // `hashes[i]` holds the hash of the block at height `i`
        // (index 0 is the genesis hash).
        let mut hashes = vec![];
        hashes.push(client.info().genesis_hash);

        // Helper: author and import `n` new blocks on top of the current
        // best block, recording each new hash.
        let mut push_blocks = {
            let mut client = client.clone();
            move |hashes: &mut Vec<_>, n| {
                for _ in 0..n {
                    let block = client.init_polkadot_block_builder().build().unwrap().block;
                    hashes.push(block.header.hash());
                    futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
                }
            }
        };

        // Helper: look up a header by hash (panics if unknown).
        let get_header = {
            let client = client.clone();
            move |n| client.expect_header(n).unwrap()
        };

        // the rule should filter all votes after block #20
        // is finalized until block #50 is imported.
        let voting_rule = super::PauseAfterBlockFor(20, 30);

        // add 10 blocks
        push_blocks(&mut hashes, 10);
        assert_eq!(client.info().best_number, 10);

        // we have not reached the pause block
        // therefore nothing should be restricted
        // (restrict_vote arguments: backend, base, best_target, current_target)
        assert_eq!(
            futures::executor::block_on(voting_rule.restrict_vote(
                client.clone(),
                &get_header(hashes[0]),
                &get_header(hashes[10]),
                &get_header(hashes[10])
            )),
            None,
        );

        // add 15 more blocks
        // best block: #25
        push_blocks(&mut hashes, 15);

        // we are targeting the pause block,
        // the vote should not be restricted
        assert_eq!(
            futures::executor::block_on(voting_rule.restrict_vote(
                client.clone(),
                &get_header(hashes[10]),
                &get_header(hashes[20]),
                &get_header(hashes[20])
            )),
            None,
        );

        // we are past the pause block, votes should
        // be limited to the pause block.
        let pause_block = get_header(hashes[20]);
        assert_eq!(
            futures::executor::block_on(voting_rule.restrict_vote(
                client.clone(),
                &get_header(hashes[10]),
                &get_header(hashes[21]),
                &get_header(hashes[21])
            )),
            Some((pause_block.hash(), *pause_block.number())),
        );

        // we've finalized the pause block, so we'll keep
        // restricting our votes to it.
        assert_eq!(
            futures::executor::block_on(voting_rule.restrict_vote(
                client.clone(),
                &pause_block, // #20
                &get_header(hashes[21]),
                &get_header(hashes[21]),
            )),
            Some((pause_block.hash(), *pause_block.number())),
        );

        // add 30 more blocks
        // best block: #55
        push_blocks(&mut hashes, 30);

        // we're at the last block of the pause, this block
        // should still be considered in the pause period
        assert_eq!(
            futures::executor::block_on(voting_rule.restrict_vote(
                client.clone(),
                &pause_block, // #20
                &get_header(hashes[50]),
                &get_header(hashes[50]),
            )),
            Some((pause_block.hash(), *pause_block.number())),
        );

        // we're past the pause period, no votes should be filtered
        assert_eq!(
            futures::executor::block_on(voting_rule.restrict_vote(
                client.clone(),
                &pause_block, // #20
                &get_header(hashes[51]),
                &get_header(hashes[51]),
            )),
            None,
        );
    }
}
+3 -23
View File
@@ -623,7 +623,6 @@ where
#[cfg(feature = "full-node")]
pub struct NewFullParams<OverseerGenerator: OverseerGen> {
pub is_parachain_node: IsParachainNode,
pub grandpa_pause: Option<(u32, u32)>,
pub enable_beefy: bool,
/// Whether to enable the block authoring backoff on production networks
/// where it isn't enabled by default.
@@ -717,7 +716,6 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
mut config: Configuration,
NewFullParams {
is_parachain_node,
grandpa_pause,
enable_beefy,
force_authoring_backoff,
jaeger_agent,
@@ -1248,32 +1246,14 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
// provide better guarantees of block and vote data availability than
// the observer.
// add a custom voting rule to temporarily stop voting for new blocks
// after the given pause block is finalized and restarting after the
// given delay.
let mut builder = grandpa::VotingRulesBuilder::default();
let mut voting_rules_builder = grandpa::VotingRulesBuilder::default();
#[cfg(not(feature = "malus"))]
let _malus_finality_delay = None;
if let Some(delay) = _malus_finality_delay {
info!(?delay, "Enabling malus finality delay",);
builder = builder.add(grandpa::BeforeBestBlockBy(delay));
};
let voting_rule = match grandpa_pause {
Some((block, delay)) => {
info!(
block_number = %block,
delay = %delay,
"GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.",
block,
delay,
);
builder.add(grandpa_support::PauseAfterBlockFor(block, delay)).build()
},
None => builder.build(),
voting_rules_builder = voting_rules_builder.add(grandpa::BeforeBestBlockBy(delay));
};
let grandpa_config = grandpa::GrandpaParams {
@@ -1281,7 +1261,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
link: link_half,
network: network.clone(),
sync: sync_service.clone(),
voting_rule,
voting_rule: voting_rules_builder.build(),
prometheus_registry: prometheus_registry.clone(),
shared_voter_state,
telemetry: telemetry.as_ref().map(|x| x.handle()),
-1
View File
@@ -80,7 +80,6 @@ pub fn new_full(
config,
polkadot_service::NewFullParams {
is_parachain_node,
grandpa_pause: None,
enable_beefy: true,
force_authoring_backoff: false,
jaeger_agent: None,
@@ -62,7 +62,6 @@ fn main() -> Result<()> {
is_parachain_node: polkadot_service::IsParachainNode::Collator(
collator.collator_key(),
),
grandpa_pause: None,
enable_beefy: false,
force_authoring_backoff: false,
jaeger_agent: None,
@@ -82,7 +82,6 @@ fn main() -> Result<()> {
is_parachain_node: polkadot_service::IsParachainNode::Collator(
collator.collator_key(),
),
grandpa_pause: None,
enable_beefy: false,
force_authoring_backoff: false,
jaeger_agent: None,