Chain Selection Subsystem Logic (#3277)

* crate skeleton and type definitions

* add ChainSelectionMessage

* add error type

* run loop

* fix overseer

* simplify determine_new_blocks API

* write an overlay struct and fetch new blocks

* add new function to overlay

* more flow

* add leaves to overlay and add a strong type around leaves-set

* add is_parent_viable

* implement block import, ignoring reversions

* add stagnant-at to overlay

* add stagnant

* add revert consensus log

* flow for reversions

* extract and import block reversions

* recursively update viability

* remove redundant parameter from WriteBlockEntry

* do some removal of viable leaves

* address grumbles

* refactor

* address grumbles

* add comment about non-monotonicity

* extract backend to submodule

* begin the hunt for viable leaves

* viability pivots for updating the active leaves

* remove LeafSearchFrontier

* partially -> explicitly viable and untwist some booleans

* extract tree to submodule

* implement block finality update

* Implement block approval routine

* implement stagnant detection

* ensure blocks pruned on finality are removed from the active leaves set

* write down some planned test cases

* flow

* leaf loading

* implement best_leaf_containing

* write down a few more tests to do

* remove dependence of tree on header

* guide: ChainApiMessage::BlockWeight

* node: BlockWeight ChainAPI

* fix compile issue

* note a few TODOs for the future

* fetch block weight using new BlockWeight ChainAPI

* implement unimplemented

* sort leaves by block number after weight

* remove warnings and add more TODOs

* create test module

* storage for test backend

* wrap inner in mutex

* add write waker query to test backend

* Add OverseerSignal -> FromOverseer conversion

* add test harness

* add no-op test

* add some more test helpers

* the first test

* more progress on tests

* test two subtrees

* determine-new-blocks: cleaner genesis avoidance and tighter ancestry requests

* don't make ancestry requests when asking for one block

* add a couple more tests

* add to AllMessages in guide

* remove bad spaces from bridge

* compact iterator

* test import with gaps

* more reversion tests

* test finalization pruning subtrees

* fixups

* test clobbering and fix bug in overlay

* exhaustive backend state after finalization tested

* more finality tests

* leaf tests

* test approval

* test ChainSelectionMessage::Leaves thoroughly

* remove TODO

* avoid Ordering::is_ne so CI can build

* comment algorithmic complexity

* Update node/core/chain-selection/src/lib.rs

Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>

Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>
This commit is contained in:
Robert Habermeier
2021-06-21 18:39:43 +01:00
committed by GitHub
parent 6b1baba490
commit 74baed8b39
13 changed files with 3387 additions and 6 deletions
@@ -0,0 +1,235 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! An abstraction over storage used by the chain selection subsystem.
//!
//! This provides both a [`Backend`] trait and an [`OverlayedBackend`]
//! struct which allows in-memory changes to be applied on top of a
//! [`Backend`], maintaining consistency between queries and temporary writes,
//! before any commit to the underlying storage is made.
use polkadot_primitives::v1::{BlockNumber, Hash};
use std::collections::HashMap;
use crate::{Error, LeafEntrySet, BlockEntry, Timestamp};
/// A single write operation to be applied atomically to a [`Backend`].
pub(super) enum BackendWriteOp {
	/// Write (or overwrite) the entry for a block.
	WriteBlockEntry(BlockEntry),
	/// Write the list of block hashes at the given height.
	WriteBlocksByNumber(BlockNumber, Vec<Hash>),
	/// Replace the entire set of viable leaves.
	WriteViableLeaves(LeafEntrySet),
	/// Write the list of blocks to be checked for stagnancy at the given timestamp.
	WriteStagnantAt(Timestamp, Vec<Hash>),
	/// Delete the list of block hashes at the given height.
	DeleteBlocksByNumber(BlockNumber),
	/// Delete the entry for a block.
	DeleteBlockEntry(Hash),
	/// Delete the stagnancy list for the given timestamp.
	DeleteStagnantAt(Timestamp),
}
/// An abstraction over backend storage for the logic of this subsystem.
///
/// All reads are fallible; writes are applied atomically via [`Backend::write`].
pub(super) trait Backend {
	/// Load a block entry from the DB.
	fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error>;
	/// Load the active-leaves set.
	fn load_leaves(&self) -> Result<LeafEntrySet, Error>;
	/// Load the stagnant list at the given timestamp.
	fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error>;
	/// Load all stagnant lists up to and including the given unix timestamp
	/// in ascending order.
	fn load_stagnant_at_up_to(&self, up_to: Timestamp)
		-> Result<Vec<(Timestamp, Vec<Hash>)>, Error>;
	/// Load the earliest kept block number.
	fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error>;
	/// Load the hashes of all blocks at the given height.
	fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error>;

	/// Atomically write the list of operations, with later operations taking precedence over prior.
	fn write<I>(&mut self, ops: I) -> Result<(), Error>
		where I: IntoIterator<Item = BackendWriteOp>;
}
/// An in-memory overlay over the backend.
///
/// This maintains read-only access to the underlying backend, but can be
/// converted into a set of write operations which will, when written to
/// the underlying backend, give the same view as the state of the overlay.
pub(super) struct OverlayedBackend<'a, B: 'a> {
	// The underlying backend; only ever read, never written through directly.
	inner: &'a B,

	// `None` means 'deleted', missing means query inner.
	block_entries: HashMap<Hash, Option<BlockEntry>>,
	// `None` means 'deleted', missing means query inner.
	blocks_by_number: HashMap<BlockNumber, Option<Vec<Hash>>>,
	// `None` means 'deleted', missing means query inner.
	stagnant_at: HashMap<Timestamp, Option<Vec<Hash>>>,
	// `None` means query inner; `Some` fully replaces the inner leaf set.
	leaves: Option<LeafEntrySet>,
}
impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
	/// Create a fresh overlay with no pending changes over `backend`.
	pub(super) fn new(backend: &'a B) -> Self {
		Self {
			inner: backend,
			block_entries: HashMap::new(),
			blocks_by_number: HashMap::new(),
			stagnant_at: HashMap::new(),
			leaves: None,
		}
	}

	/// Load a block entry, preferring any overlaid write over the inner backend.
	pub(super) fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error> {
		match self.block_entries.get(hash) {
			Some(overlaid) => Ok(overlaid.clone()),
			None => self.inner.load_block_entry(hash),
		}
	}

	/// Load the hashes at a given height; an overlaid deletion surfaces as an empty list.
	pub(super) fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> {
		match self.blocks_by_number.get(&number) {
			Some(overlaid) => Ok(overlaid.clone().unwrap_or_default()),
			None => self.inner.load_blocks_by_number(number),
		}
	}

	/// Load the leaf set, preferring an overlaid replacement over the inner backend.
	pub(super) fn load_leaves(&self) -> Result<LeafEntrySet, Error> {
		match self.leaves {
			Some(ref overlaid) => Ok(overlaid.clone()),
			None => self.inner.load_leaves(),
		}
	}

	/// Load the stagnant list for a timestamp; an overlaid deletion surfaces as an empty list.
	pub(super) fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error> {
		match self.stagnant_at.get(&timestamp) {
			Some(overlaid) => Ok(overlaid.clone().unwrap_or_default()),
			None => self.inner.load_stagnant_at(timestamp),
		}
	}

	/// Stage a write of a block entry, keyed by its own hash.
	pub(super) fn write_block_entry(&mut self, entry: BlockEntry) {
		let hash = entry.block_hash;
		self.block_entries.insert(hash, Some(entry));
	}

	/// Stage a deletion of a block entry.
	pub(super) fn delete_block_entry(&mut self, hash: &Hash) {
		self.block_entries.insert(*hash, None);
	}

	/// Stage the hash list for a height. Writing an empty list is treated
	/// as a deletion, so empty lists are never persisted.
	pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec<Hash>) {
		let value = if blocks.is_empty() { None } else { Some(blocks) };
		self.blocks_by_number.insert(number, value);
	}

	/// Stage a deletion of the hash list at a height.
	pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) {
		self.blocks_by_number.insert(number, None);
	}

	/// Stage a full replacement of the viable-leaves set.
	pub(super) fn write_leaves(&mut self, leaves: LeafEntrySet) {
		self.leaves = Some(leaves);
	}

	/// Stage the stagnancy list for a timestamp.
	pub(super) fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec<Hash>) {
		self.stagnant_at.insert(timestamp, Some(hashes));
	}

	/// Stage a deletion of the stagnancy list for a timestamp.
	pub(super) fn delete_stagnant_at(&mut self, timestamp: Timestamp) {
		self.stagnant_at.insert(timestamp, None);
	}

	/// Transform this overlay into the set of write-ops to be applied to the
	/// inner backend.
	pub(super) fn into_write_ops(self) -> impl Iterator<Item = BackendWriteOp> {
		let OverlayedBackend { block_entries, blocks_by_number, stagnant_at, leaves, .. } = self;

		let block_entry_ops = block_entries.into_iter().map(|(hash, entry)| match entry {
			Some(entry) => BackendWriteOp::WriteBlockEntry(entry),
			None => BackendWriteOp::DeleteBlockEntry(hash),
		});

		let blocks_by_number_ops = blocks_by_number.into_iter().map(|(number, hashes)| match hashes {
			Some(hashes) => BackendWriteOp::WriteBlocksByNumber(number, hashes),
			None => BackendWriteOp::DeleteBlocksByNumber(number),
		});

		let leaf_ops = leaves.into_iter().map(BackendWriteOp::WriteViableLeaves);

		let stagnant_at_ops = stagnant_at.into_iter().map(|(timestamp, hashes)| match hashes {
			Some(hashes) => BackendWriteOp::WriteStagnantAt(timestamp, hashes),
			None => BackendWriteOp::DeleteStagnantAt(timestamp),
		});

		block_entry_ops
			.chain(blocks_by_number_ops)
			.chain(leaf_ops)
			.chain(stagnant_at_ops)
	}
}
/// Attempt to find the given ancestor in the chain with given head.
///
/// If the ancestor is the most recently finalized block, and the `head` is
/// a known unfinalized block, this will return `true`.
///
/// If the ancestor is an unfinalized block and `head` is known, this will
/// return true if `ancestor` is in `head`'s chain.
///
/// If the ancestor is an older finalized block, this will return `false`.
fn contains_ancestor(
	backend: &impl Backend,
	head: Hash,
	ancestor: Hash,
) -> Result<bool, Error> {
	let mut cursor = head;

	// Walk parent links until we either hit the target or fall off the
	// stored chain (i.e. reach a finalized/unknown block).
	while cursor != ancestor {
		match backend.load_block_entry(&cursor)? {
			Some(entry) => cursor = entry.parent_hash,
			None => return Ok(false),
		}
	}

	Ok(true)
}
/// This returns the best unfinalized leaf containing the required block.
///
/// If the required block is finalized but not the most recent finalized block,
/// this will return `None`.
///
/// If the required block is unfinalized but not an ancestor of any viable leaf,
/// this will return `None`.
//
// Note: this is O(N^2) in the depth of `required` and the number of leaves.
// We expect the number of unfinalized blocks to be small, as in, to not exceed
// single digits in practice, and exceedingly unlikely to surpass 1000.
//
// However, if we need to, we could implement some type of skip-list for
// fast ancestry checks.
pub(super) fn find_best_leaf_containing(
	backend: &impl Backend,
	required: Hash,
) -> Result<Option<Hash>, Error> {
	let viable_leaves = backend.load_leaves()?;

	// Leaves are iterated best-first; the first one whose ancestry contains
	// `required` wins.
	for leaf_hash in viable_leaves.into_hashes_descending() {
		if contains_ancestor(backend, leaf_hash, required)? {
			return Ok(Some(leaf_hash))
		}
	}

	// No viable leaf has the required block in its ancestry.
	Ok(None)
}
@@ -0,0 +1,574 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Implements the Chain Selection Subsystem.
use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog};
use polkadot_node_primitives::BlockWeight;
use polkadot_subsystem::{
Subsystem, SubsystemContext, SubsystemError, SpawnedSubsystem,
OverseerSignal, FromOverseer,
messages::{ChainSelectionMessage, ChainApiMessage},
errors::ChainApiError,
};
use parity_scale_codec::Error as CodecError;
use futures::channel::oneshot;
use futures::prelude::*;
use std::time::{UNIX_EPOCH, SystemTime};
use crate::backend::{Backend, OverlayedBackend, BackendWriteOp};
mod backend;
mod tree;
#[cfg(test)]
mod tests;
// Log target shared by all tracing output of this subsystem.
const LOG_TARGET: &str = "parachain::chain-selection";

/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots.
///
/// Expressed in whole seconds; see `timestamp_now`.
type Timestamp = u64;
// The approval state of a single block.
#[derive(Debug, Clone)]
enum Approval {
	// The block has been approved.
	Approved,
	// The block is not yet approved, but is not stagnant either.
	Unapproved,
	// The block is unapproved and stagnant: it went unapproved for too long.
	Stagnant,
}
impl Approval {
	// Whether this approval state is `Stagnant`.
	fn is_stagnant(&self) -> bool {
		matches!(self, Approval::Stagnant)
	}
}
// The criteria which determine whether a block is a viable candidate
// for the best chain.
#[derive(Debug, Clone)]
struct ViabilityCriteria {
	// Whether this block has been explicitly reverted by one of its descendants.
	explicitly_reverted: bool,
	// The approval state of this block specifically.
	approval: Approval,
	// The earliest unviable ancestor - the hash of the earliest unfinalized
	// block in the ancestry which is explicitly reverted or stagnant.
	earliest_unviable_ancestor: Option<Hash>,
}
impl ViabilityCriteria {
	// A block is viable exactly when it is explicitly viable itself and
	// its entire unfinalized ancestry is viable too.
	fn is_viable(&self) -> bool {
		self.is_explicitly_viable() && self.is_parent_viable()
	}

	// Whether the current block is explicitly viable.
	// That is, whether the current block is neither reverted nor stagnant.
	fn is_explicitly_viable(&self) -> bool {
		!(self.explicitly_reverted || self.approval.is_stagnant())
	}

	// Whether the parent is viable. This assumes that the parent
	// descends from the finalized chain.
	fn is_parent_viable(&self) -> bool {
		self.earliest_unviable_ancestor.is_none()
	}
}
// Light entries describing leaves of the chain.
//
// These are ordered first by weight and then by block number.
#[derive(Debug, Clone, PartialEq)]
struct LeafEntry {
	// The cumulative weight of the chain ending at this leaf.
	weight: BlockWeight,
	// The height of the leaf block.
	block_number: BlockNumber,
	// The hash of the leaf block.
	block_hash: Hash,
}
impl PartialOrd for LeafEntry {
	// Order leaf entries by weight, then block number, then block hash.
	//
	// The previous implementation returned `None` whenever weight and block
	// number tied, which violated the `PartialOrd` contract: two entries equal
	// under the derived `PartialEq` must compare as `Some(Ordering::Equal)`.
	// Using the block hash as a final, arbitrary-but-deterministic tie-breaker
	// yields a total order consistent with `PartialEq` (both compare exactly
	// the three fields), while leaving the weight-then-number priority intact.
	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
		let ord = self.weight.cmp(&other.weight)
			.then(self.block_number.cmp(&other.block_number))
			.then_with(|| self.block_hash.cmp(&other.block_hash));

		Some(ord)
	}
}
// The set of viable leaves, kept sorted in descending order
// (best leaf first) by `LeafEntry`'s ordering.
#[derive(Debug, Default, Clone)]
struct LeafEntrySet {
	inner: Vec<LeafEntry>
}
impl LeafEntrySet {
	// Remove the entry with the given hash, if present.
	// Returns whether anything was removed.
	fn remove(&mut self, hash: &Hash) -> bool {
		let found = self.inner.iter().position(|entry| &entry.block_hash == hash);
		if let Some(index) = found {
			self.inner.remove(index);
			true
		} else {
			false
		}
	}

	// Insert a new entry, keeping the set sorted in descending order.
	// Exact duplicates are ignored.
	fn insert(&mut self, new: LeafEntry) {
		let mut insert_at = None;
		for (index, existing) in self.inner.iter().enumerate() {
			if existing == &new {
				// Already present; nothing to do.
				return
			}
			if existing < &new {
				// First strictly-smaller entry: the new one goes before it.
				insert_at = Some(index);
				break
			}
		}

		if let Some(index) = insert_at {
			self.inner.insert(index, new);
		} else {
			self.inner.push(new);
		}
	}

	// Consume the set, yielding leaf hashes best-first.
	fn into_hashes_descending(self) -> impl Iterator<Item = Hash> {
		self.inner.into_iter().map(|entry| entry.block_hash)
	}
}
// All stored metadata about a single unfinalized block.
#[derive(Debug, Clone)]
struct BlockEntry {
	block_hash: Hash,
	block_number: BlockNumber,
	// The hash of this block's parent.
	parent_hash: Hash,
	// Hashes of all known children of this block.
	children: Vec<Hash>,
	// The viability state of this block; see `ViabilityCriteria`.
	viability: ViabilityCriteria,
	// The cumulative weight of the chain ending at this block.
	weight: BlockWeight,
}
impl BlockEntry {
	// Produce the `LeafEntry` corresponding to this block.
	fn leaf_entry(&self) -> LeafEntry {
		LeafEntry {
			weight: self.weight,
			block_number: self.block_number,
			block_hash: self.block_hash,
		}
	}

	// The unviable ancestor a child of this block should record:
	// `None` when this block is viable; otherwise this block's own earliest
	// unviable ancestor, falling back to this block itself.
	fn non_viable_ancestor_for_child(&self) -> Option<Hash> {
		if self.viability.is_viable() {
			return None
		}

		Some(self.viability.earliest_unviable_ancestor.unwrap_or(self.block_hash))
	}
}
/// Errors that can arise in the chain-selection subsystem.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
	// A chain-API request failed.
	#[error(transparent)]
	ChainApi(#[from] ChainApiError),

	#[error(transparent)]
	Io(#[from] std::io::Error),

	// A response channel was dropped before answering.
	#[error(transparent)]
	Oneshot(#[from] oneshot::Canceled),

	#[error(transparent)]
	Subsystem(#[from] SubsystemError),

	// SCALE decoding of stored data failed.
	#[error(transparent)]
	Codec(#[from] CodecError),
}
impl Error {
	// Log this error at a severity appropriate to its kind.
	fn trace(&self) {
		if let Self::Oneshot(_) = self {
			// Cancelled responders are commonplace; don't spam the log
			// with spurious errors.
			tracing::debug!(target: LOG_TARGET, err = ?self)
		} else {
			// Everything else is worth reporting.
			tracing::warn!(target: LOG_TARGET, err = ?self)
		}
	}
}
// The current unix time, in seconds.
fn timestamp_now() -> Timestamp {
	// `SystemTime` is notoriously non-monotonic, so our timers might not work
	// exactly as expected. Regardless, stagnation is detected on the order of minutes,
	// and slippage of a few seconds in either direction won't cause any major harm.
	//
	// The exact time that a block becomes stagnant in the local node is always expected
	// to differ from other nodes due to network asynchrony and delays in block propagation.
	// Non-monotonicity exacerbates that somewhat, but not meaningfully.
	SystemTime::now()
		.duration_since(UNIX_EPOCH)
		.map(|elapsed| elapsed.as_secs())
		.unwrap_or_else(|e| {
			tracing::warn!(
				target: LOG_TARGET,
				err = ?e,
				"Current time is before unix epoch. Validation will not work correctly."
			);

			0
		})
}

// The timestamp at which a block imported right now would be considered stagnant.
fn stagnant_timeout_from_now() -> Timestamp {
	// If a block isn't approved in 120 seconds, nodes will abandon it
	// and begin building on another chain.
	const STAGNANT_TIMEOUT: Timestamp = 120;

	timestamp_now() + STAGNANT_TIMEOUT
}
// TODO https://github.com/paritytech/polkadot/issues/3293:
//
// This is used just so we can have a public function that calls
// `run` and eliminates all the unused errors.
//
// Should be removed when the real implementation is done.
struct VoidBackend;
impl Backend for VoidBackend {
fn load_block_entry(&self, _: &Hash) -> Result<Option<BlockEntry>, Error> {
Ok(None)
}
fn load_leaves(&self) -> Result<LeafEntrySet, Error> {
Ok(LeafEntrySet::default())
}
fn load_stagnant_at(&self, _: Timestamp) -> Result<Vec<Hash>, Error> {
Ok(Vec::new())
}
fn load_stagnant_at_up_to(&self, _: Timestamp)
-> Result<Vec<(Timestamp, Vec<Hash>)>, Error>
{
Ok(Vec::new())
}
fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error> {
Ok(None)
}
fn load_blocks_by_number(&self, _: BlockNumber) -> Result<Vec<Hash>, Error> {
Ok(Vec::new())
}
fn write<I>(&mut self, _: I) -> Result<(), Error>
where I: IntoIterator<Item = BackendWriteOp>
{
Ok(())
}
}
/// The chain selection subsystem.
///
/// Handles `ChainSelectionMessage`s: block approvals, leaf queries, and
/// best-leaf-containing queries, backed by persisted block metadata.
pub struct ChainSelectionSubsystem;
impl<Context> Subsystem<Context> for ChainSelectionSubsystem
	where Context: SubsystemContext<Message = ChainSelectionMessage>
{
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		// NOTE: `VoidBackend` is a placeholder (see TODO above); all writes
		// are discarded and all reads return empty until a real backend lands.
		let backend = VoidBackend;
		SpawnedSubsystem {
			future: run(ctx, backend).map(|()| Ok(())).boxed(),
			name: "chain-selection-subsystem",
		}
	}
}
async fn run<Context, B>(mut ctx: Context, mut backend: B)
where
Context: SubsystemContext<Message = ChainSelectionMessage>,
B: Backend,
{
loop {
let res = run_iteration(&mut ctx, &mut backend).await;
match res {
Err(e) => {
e.trace();
if let Error::Subsystem(SubsystemError::Context(_)) = e {
break;
}
}
Ok(()) => {
tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
break;
}
}
}
}
// Run the subsystem until an error is encountered or a `conclude` signal is received.
// Most errors are non-fatal and should lead to another call to this function.
//
// A return value of `Ok` indicates that an exit should be made, while non-fatal errors
// lead to another call to this function.
// Run the subsystem until an error is encountered or a `conclude` signal is received.
// Most errors are non-fatal and should lead to another call to this function.
//
// A return value of `Ok` indicates that an exit should be made, while non-fatal errors
// lead to another call to this function.
async fn run_iteration<Context, B>(ctx: &mut Context, backend: &mut B)
	-> Result<(), Error>
	where
		Context: SubsystemContext<Message = ChainSelectionMessage>,
		B: Backend,
{
	// TODO https://github.com/paritytech/polkadot/issues/3293: Add stagnant checking timer loop.
	loop {
		match ctx.recv().await? {
			FromOverseer::Signal(OverseerSignal::Conclude) => {
				// Graceful shutdown requested by the overseer.
				return Ok(())
			}
			FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
				// Import each newly-activated head and its unknown ancestry,
				// committing the resulting write-ops per leaf.
				for leaf in update.activated {
					let write_ops = handle_active_leaf(
						ctx,
						&*backend,
						leaf.hash,
					).await?;

					backend.write(write_ops)?;
				}
			}
			FromOverseer::Signal(OverseerSignal::BlockFinalized(h, n)) => {
				// Prune orphaned subtrees and stale entries below the new finalized block.
				handle_finalized_block(backend, h, n)?
			}
			FromOverseer::Communication { msg } => match msg {
				ChainSelectionMessage::Approved(hash) => {
					// Mark a block approved, possibly restoring its viability.
					handle_approved_block(backend, hash)?
				}
				ChainSelectionMessage::Leaves(tx) => {
					// Respond with all viable leaves, best first.
					let leaves = load_leaves(ctx, &*backend).await?;
					let _ = tx.send(leaves);
				}
				ChainSelectionMessage::BestLeafContaining(required, tx) => {
					let best_containing = crate::backend::find_best_leaf_containing(
						&*backend,
						required,
					)?;

					// note - this may be none if the finalized block is
					// a leaf. this is fine according to the expected usage of the
					// function. `None` responses should just `unwrap_or(required)`,
					// so if the required block is the finalized block, then voilà.
					let _ = tx.send(best_containing);
				}
			}
		};
	}
}
// Ask the Chain API for the current finalized block number and its hash.
//
// Returns `None` (with a warning) if the hash for the finalized number
// is unexpectedly unknown.
async fn fetch_finalized(
	ctx: &mut impl SubsystemContext,
) -> Result<Option<(Hash, BlockNumber)>, Error> {
	let (number_tx, number_rx) = oneshot::channel();
	ctx.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx).into()).await;
	let number = number_rx.await??;

	let (hash_tx, hash_rx) = oneshot::channel();
	ctx.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx).into()).await;
	let maybe_hash = hash_rx.await??;

	if maybe_hash.is_none() {
		tracing::warn!(
			target: LOG_TARGET,
			number,
			"Missing hash for finalized block number"
		);
	}

	Ok(maybe_hash.map(|h| (h, number)))
}
// Fetch the header of the given block from the Chain API.
async fn fetch_header(
	ctx: &mut impl SubsystemContext,
	hash: Hash,
) -> Result<Option<Header>, Error> {
	let (header_tx, header_rx) = oneshot::channel();
	ctx.send_message(ChainApiMessage::BlockHeader(hash, header_tx).into()).await;

	Ok(header_rx.await??)
}
// Fetch the weight of the given block from the Chain API.
async fn fetch_block_weight(
	ctx: &mut impl SubsystemContext,
	hash: Hash,
) -> Result<Option<BlockWeight>, Error> {
	let (weight_tx, weight_rx) = oneshot::channel();
	ctx.send_message(ChainApiMessage::BlockWeight(hash, weight_tx).into()).await;

	Ok(weight_rx.await??)
}
// Handle a new active leaf.
//
// Determines all blocks between the new head and the known portion of the
// chain, fetches their weights, and imports them (ancestors first) into an
// overlay. Returns the resulting write-ops for the caller to commit.
async fn handle_active_leaf(
	ctx: &mut impl SubsystemContext,
	backend: &impl Backend,
	hash: Hash,
) -> Result<Vec<BackendWriteOp>, Error> {
	// The lowest block number we need to walk back to.
	let lower_bound = match backend.load_first_block_number()? {
		Some(l) => {
			// We want to iterate back to finalized, and first block number
			// is assumed to be 1 above finalized - the implicit root of the
			// tree.
			l.saturating_sub(1)
		},
		// Empty DB: fall back to the on-chain finalized number
		// (or 1 if that cannot be determined).
		None => fetch_finalized(ctx).await?.map_or(1, |(_, n)| n),
	};

	let header = match fetch_header(ctx, hash).await? {
		None => {
			// Without the header we cannot import anything; treat as a no-op.
			tracing::warn!(
				target: LOG_TARGET,
				?hash,
				"Missing header for new head",
			);
			return Ok(Vec::new())
		}
		Some(h) => h,
	};

	let new_blocks = polkadot_node_subsystem_util::determine_new_blocks(
		ctx.sender(),
		|h| backend.load_block_entry(h).map(|b| b.is_some()),
		hash,
		&header,
		lower_bound,
	).await?;

	let mut overlay = OverlayedBackend::new(backend);

	// determine_new_blocks gives blocks in descending order.
	// for this, we want ascending order.
	for (hash, header) in new_blocks.into_iter().rev() {
		let weight = match fetch_block_weight(ctx, hash).await? {
			None => {
				tracing::warn!(
					target: LOG_TARGET,
					?hash,
					"Missing block weight for new head. Skipping chain.",
				);

				// If we don't know the weight, we can't import the block.
				// And none of its descendents either.
				break;
			}
			Some(w) => w,
		};

		let reversion_logs = extract_reversion_logs(&header);
		crate::tree::import_block(
			&mut overlay,
			hash,
			header.number,
			header.parent_hash,
			reversion_logs,
			weight,
		)?;
	}

	Ok(overlay.into_write_ops().collect())
}
// Extract all reversion logs from a header in ascending order.
//
// Ignores logs with number >= the block header number, as a block
// cannot revert itself or the future.
fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
	let number = header.number;
	let mut logs = Vec::new();

	for (i, d) in header.digest.logs().iter().enumerate() {
		match ConsensusLog::from_digest_item(d) {
			Err(e) => {
				// Undecodable digest items are logged and skipped.
				tracing::warn!(
					target: LOG_TARGET,
					err = ?e,
					index = i,
					block_hash = ?header.hash(),
					"Digest item failed to encode"
				);
			}
			Ok(Some(ConsensusLog::Revert(b))) if b < number => logs.push(b),
			Ok(Some(ConsensusLog::Revert(b))) => {
				// A revert targeting this block or a descendant is invalid.
				tracing::warn!(
					target: LOG_TARGET,
					revert_target = b,
					block_number = number,
					block_hash = ?header.hash(),
					"Block issued invalid revert digest targeting itself or future"
				);
			}
			Ok(_) => {}
		}
	}

	logs.sort();
	logs
}
// Handle a finalized block event: let the tree module compute the pruning
// and viability updates, then commit them to the backend.
fn handle_finalized_block(
	backend: &mut impl Backend,
	finalized_hash: Hash,
	finalized_number: BlockNumber,
) -> Result<(), Error> {
	let overlay = crate::tree::finalize_block(
		&*backend,
		finalized_hash,
		finalized_number,
	)?;

	backend.write(overlay.into_write_ops())
}
// Handle an approved block event: mark the block approved in an overlay
// and commit the resulting write-ops.
fn handle_approved_block(
	backend: &mut impl Backend,
	approved_block: Hash,
) -> Result<(), Error> {
	let ops = {
		let mut overlay = OverlayedBackend::new(&*backend);
		crate::tree::approve_block(&mut overlay, approved_block)?;

		overlay.into_write_ops()
	};

	backend.write(ops)
}
// Load the leaves from the backend. If there are no leaves, then return
// the finalized block.
async fn load_leaves(
	ctx: &mut impl SubsystemContext,
	backend: &impl Backend,
) -> Result<Vec<Hash>, Error> {
	let known_leaves: Vec<Hash> = backend.load_leaves()?
		.into_hashes_descending()
		.collect();

	if !known_leaves.is_empty() {
		return Ok(known_leaves)
	}

	// No viable leaves stored: fall back to the finalized block, if known.
	let fallback = fetch_finalized(ctx).await?;
	Ok(fallback.into_iter().map(|(h, _)| h).collect())
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,584 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Implements the tree-view over the data backend which we use to determine
//! viable leaves.
//!
//! The metadata is structured as a tree, with the root implicitly being the
//! finalized block, which is not stored as part of the tree.
//!
//! Each direct descendant of the finalized block acts as its own sub-tree,
//! and as the finalized block advances, orphaned sub-trees are entirely pruned.
use polkadot_primitives::v1::{BlockNumber, Hash};
use polkadot_node_primitives::BlockWeight;
use std::collections::HashMap;
use super::{
LOG_TARGET,
Approval, BlockEntry, Error, LeafEntry, ViabilityCriteria,
Timestamp,
};
use crate::backend::{Backend, OverlayedBackend};
// A viability update to be applied to a block.
//
// Wraps the `earliest_unviable_ancestor` value the target block should adopt.
struct ViabilityUpdate(Option<Hash>);
impl ViabilityUpdate {
	// Apply the viability update to a single block, yielding the updated
	// block entry along with a vector of children and the updates to apply
	// to them.
	fn apply(self, mut entry: BlockEntry) -> (
		BlockEntry,
		Vec<(Hash, ViabilityUpdate)>
	) {
		// 1. When an ancestor has changed from unviable to viable,
		// we erase the `earliest_unviable_ancestor` of all descendants
		// until encountering an explicitly unviable descendant D.
		//
		// We then update the `earliest_unviable_ancestor` for all
		// descendants of D to be equal to D.
		//
		// 2. When an ancestor A has changed from viable to unviable,
		// we update the `earliest_unviable_ancestor` for all blocks
		// to A.
		//
		// The following algorithm covers both cases.
		//
		// Furthermore, if there has been any change in viability,
		// it is necessary to visit every single descendant of the root
		// block.
		//
		// If a block B was unviable and is now viable, then every descendant
		// has an `earliest_unviable_ancestor` which must be updated either
		// to nothing or to the new earliest unviable ancestor.
		//
		// If a block B was viable and is now unviable, then every descendant
		// has an `earliest_unviable_ancestor` which needs to be set to B.

		let maybe_earliest_unviable = self.0;

		// The value to propagate to children: if no unviable ancestor was
		// inherited but this block itself is explicitly unviable, this block
		// becomes the earliest unviable ancestor for its subtree.
		let next_earliest_unviable = {
			if maybe_earliest_unviable.is_none() && !entry.viability.is_explicitly_viable() {
				Some(entry.block_hash)
			} else {
				maybe_earliest_unviable
			}
		};

		// This block records only the *inherited* value, not itself.
		entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable;

		let recurse = entry.children.iter()
			.cloned()
			.map(move |c| (c, ViabilityUpdate(next_earliest_unviable)))
			.collect();

		(entry, recurse)
	}
}
// Propagate viability update to descendants of the given block. This writes
// the `base` entry as well as all descendants. If the parent of the block
// entry is not viable, this wlil not affect any descendants.
//
// If the block entry provided is self-unviable, then it's assumed that an
// unviability update needs to be propagated to descendants.
//
// If the block entry provided is self-viable, then it's assumed that a
// viability update needs to be propagated to descendants.
fn propagate_viability_update(
	backend: &mut OverlayedBackend<impl Backend>,
	base: BlockEntry,
) -> Result<(), Error> {
	// Either an already-loaded entry (the base) or a hash to load lazily.
	enum BlockEntryRef {
		Explicit(BlockEntry),
		Hash(Hash),
	}

	if !base.viability.is_parent_viable() {
		// If the parent of the block is still unviable,
		// then the `earliest_viable_ancestor` will not change
		// regardless of the change in the block here.
		//
		// Furthermore, in such cases, the set of viable leaves
		// does not change at all.
		backend.write_block_entry(base);
		return Ok(())
	}

	let mut viable_leaves = backend.load_leaves()?;

	// A mapping of Block Hash -> number
	// Where the hash is the hash of a viable block which has
	// at least 1 unviable child.
	//
	// The number is the number of known unviable children which is known
	// as the pivot count.
	let mut viability_pivots = HashMap::new();

	// If the base block is itself explicitly unviable,
	// this will change to a `Some(base_hash)` after the first
	// invocation.
	let viability_update = ViabilityUpdate(None);

	// Recursively apply update to tree.
	//
	// As we go, we remove any blocks from the leaves which are no longer viable
	// leaves. We also add blocks to the leaves-set which are obviously viable leaves.
	// And we build up a frontier of blocks which may either be viable leaves or
	// the ancestors of one.
	let mut tree_frontier = vec![(BlockEntryRef::Explicit(base), viability_update)];
	while let Some((entry_ref, update)) = tree_frontier.pop() {
		let entry = match entry_ref {
			BlockEntryRef::Explicit(entry) => entry,
			BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? {
				None => {
					// A child hash referencing a missing entry indicates an
					// inconsistency; log and skip rather than abort.
					tracing::warn!(
						target: LOG_TARGET,
						block_hash = ?hash,
						"Missing expected block entry"
					);

					continue;
				}
				Some(entry) => entry,
			}
		};

		let (new_entry, children) = update.apply(entry);

		if new_entry.viability.is_viable() {
			// A block which is viable has a parent which is obviously not
			// in the viable leaves set.
			viable_leaves.remove(&new_entry.parent_hash);

			// Furthermore, if the block is viable and has no children,
			// it is viable by definition.
			if new_entry.children.is_empty() {
				viable_leaves.insert(new_entry.leaf_entry());
			}
		} else {
			// A block which is not viable is certainly not a viable leaf.
			viable_leaves.remove(&new_entry.block_hash);

			// When the parent is viable but the entry itself is not, that means
			// that the parent is a viability pivot. As we visit the children
			// of a viability pivot, we build up an exhaustive pivot count.
			if new_entry.viability.is_parent_viable() {
				*viability_pivots.entry(new_entry.parent_hash).or_insert(0) += 1;
			}
		}

		backend.write_block_entry(new_entry);

		tree_frontier.extend(
			children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update))
		);
	}

	// Revisit the viability pivots now that we've traversed the entire subtree.
	// After this point, the viable leaves set is fully updated. A proof follows.
	//
	// If the base has become unviable, then we've iterated into all descendants,
	// made them unviable and removed them from the set. We know that the parent is
	// viable as this function is a no-op otherwise, so we need to see if the parent
	// has other children or not.
	//
	// If the base has become viable, then we've iterated into all descendants,
	// and found all blocks which are viable and have no children. We've already added
	// those blocks to the leaf set, but what we haven't detected
	// is blocks which are viable and have children, but all of the children are
	// unviable.
	//
	// The solution of viability pivots addresses both of these:
	//
	// When the base has become unviable, the parent's viability is unchanged and therefore
	// any leaves descending from parent but not base are still in the viable leaves set.
	// If the parent has only one child which is the base, the parent is now a viable leaf.
	// We've already visited the base in recursive search so the set of pivots should
	// contain only a single entry `(parent, 1)`. qed.
	//
	// When the base has become viable, we've already iterated into every descendant
	// of the base and thus have collected a set of pivots whose corresponding pivot
	// counts have already been exhaustively computed from their children. qed.
	for (pivot, pivot_count) in viability_pivots {
		match backend.load_block_entry(&pivot)? {
			None => {
				// This means the block is finalized. We might reach this
				// code path when the base is a child of the finalized block
				// and has become unviable.
				//
				// Each such child is the root of its own tree
				// which, as an invariant, does not depend on the viability
				// of the finalized block. So no siblings need to be inspected
				// and we can ignore it safely.
				//
				// Furthermore, if the set of viable leaves is empty, the
				// finalized block is implicitly the viable leaf.
				continue
			}
			Some(entry) => {
				// All children are unviable: the pivot itself is now a leaf.
				if entry.children.len() == pivot_count {
					viable_leaves.insert(entry.leaf_entry());
				}
			}
		}
	}

	backend.write_leaves(viable_leaves);

	Ok(())
}
/// Imports a new block and applies any reversions to ancestors.
pub(crate) fn import_block(
	backend: &mut OverlayedBackend<impl Backend>,
	block_hash: Hash,
	block_number: BlockNumber,
	parent_hash: Hash,
	reversion_logs: Vec<BlockNumber>,
	weight: BlockWeight,
) -> Result<(), Error> {
	// First place the block into the tree, then process any reversion
	// targets the block carried with it.
	add_block(backend, block_hash, block_number, parent_hash, weight)?;
	apply_reversions(backend, block_hash, block_number, reversion_logs)
}
// Load the given ancestor's block entry, in descending order from the `block_hash`.
// The ancestor_number must be at least one block less than the `block_number`.
//
// The returned entry will be `None` if the range is invalid or any block in the path had
// no entry present. If any block entry was missing, it can safely be assumed to
// be finalized.
fn load_ancestor(
	backend: &mut OverlayedBackend<impl Backend>,
	block_hash: Hash,
	block_number: BlockNumber,
	ancestor_number: BlockNumber,
) -> Result<Option<BlockEntry>, Error> {
	// Invalid range: the ancestor must be strictly below the block.
	if ancestor_number >= block_number {
		return Ok(None)
	}

	// Walk parent links downwards. Visiting `diff + 1` entries starting at
	// `block_number` leaves the last-visited entry at exactly `ancestor_number`.
	let steps = (block_number - ancestor_number) + 1;
	let mut cursor = block_hash;
	let mut visited = None;
	for _ in 0..steps {
		let entry = match backend.load_block_entry(&cursor)? {
			None => return Ok(None),
			Some(e) => e,
		};

		cursor = entry.parent_hash;
		visited = Some(entry);
	}

	// `steps >= 2` here, so the loop body ran and `visited` is `Some`.
	Ok(visited)
}
// Add a new block to the tree, which is assumed to be unreverted and unapproved,
// but not stagnant. It inherits viability from its parent, if any.
//
// This updates the parent entry, if any, and updates the viable leaves set accordingly.
// This also schedules a stagnation-check update and adds the block to the blocks-by-number
// mapping.
fn add_block(
	backend: &mut OverlayedBackend<impl Backend>,
	block_hash: Hash,
	block_number: BlockNumber,
	parent_hash: Hash,
	weight: BlockWeight,
) -> Result<(), Error> {
	let mut leaves = backend.load_leaves()?;
	let parent_entry = backend.load_block_entry(&parent_hash)?;

	// Inherit the earliest unviable ancestor marker from the parent, if any.
	let inherited_viability = parent_entry
		.as_ref()
		.and_then(|parent| parent.non_viable_ancestor_for_child());

	// 1. Write the block entry itself, assuming it carries no reversion.
	let new_entry = BlockEntry {
		block_hash,
		block_number,
		parent_hash,
		children: Vec::new(),
		viability: ViabilityCriteria {
			earliest_unviable_ancestor: inherited_viability,
			explicitly_reverted: false,
			approval: Approval::Unapproved,
		},
		weight,
	};
	backend.write_block_entry(new_entry);

	// 2. With no unviable ancestor inherited, the block supplants its
	// parent in the leaf-set.
	if inherited_viability.is_none() {
		leaves.remove(&parent_hash);
		leaves.insert(LeafEntry { block_hash, block_number, weight });
		backend.write_leaves(leaves);
	}

	// 3. Register the block as a child of its parent, if the parent is known.
	if let Some(mut parent_entry) = parent_entry {
		parent_entry.children.push(block_hash);
		backend.write_block_entry(parent_entry);
	}

	// 4. Index the block under its height.
	let mut siblings = backend.load_blocks_by_number(block_number)?;
	siblings.push(block_hash);
	backend.write_blocks_by_number(block_number, siblings);

	// 5. Schedule the stagnation check for this block.
	let stagnant_at = crate::stagnant_timeout_from_now();
	let mut pending_stagnant = backend.load_stagnant_at(stagnant_at)?;
	pending_stagnant.push(block_hash);
	backend.write_stagnant_at(stagnant_at, pending_stagnant);

	Ok(())
}
// Assuming that a block is already imported, accepts the number of the block
// as well as a list of reversions triggered by the block in ascending order.
fn apply_reversions(
	backend: &mut OverlayedBackend<impl Backend>,
	block_hash: Hash,
	block_number: BlockNumber,
	reversions: Vec<BlockNumber>,
) -> Result<(), Error> {
	// Note: since revert numbers are in ascending order, the expensive propagation
	// of unviability is only heavy on the first log.
	for revert_number in reversions {
		match load_ancestor(backend, block_hash, block_number, revert_number)? {
			Some(mut ancestor_entry) => {
				tracing::info!(
					target: LOG_TARGET,
					?block_hash,
					block_number,
					revert_target = revert_number,
					revert_hash = ?ancestor_entry.block_hash,
					"A block has signaled that its ancestor be reverted due to a bad parachain block.",
				);

				// Mark the ancestor as reverted and push the resulting
				// unviability down through its sub-tree.
				ancestor_entry.viability.explicitly_reverted = true;
				propagate_viability_update(backend, ancestor_entry)?;
			}
			None => {
				// No entry on the path: per `load_ancestor`, the target can be
				// assumed finalized, and finalized blocks cannot be reverted.
				tracing::warn!(
					target: LOG_TARGET,
					?block_hash,
					block_number,
					revert_target = revert_number,
					"The hammer has dropped. \
					A block has indicated that its finalized ancestor be reverted. \
					Please inform an adult.",
				);
			}
		}
	}

	Ok(())
}
/// Finalize a block with the given number and hash.
///
/// This will prune all sub-trees not descending from the given block,
/// all block entries at or before the given height,
/// and will update the viability of all sub-trees descending from the given
/// block if the finalized block was not viable.
///
/// This is assumed to start with a fresh backend, and will produce
/// an overlay over the backend with all the changes applied.
pub(super) fn finalize_block<'a, B: Backend + 'a>(
	backend: &'a B,
	finalized_hash: Hash,
	finalized_number: BlockNumber,
) -> Result<OverlayedBackend<'a, B>, Error> {
	// Fetch the earliest stored height before wrapping the backend in the overlay.
	let earliest_stored_number = backend.load_first_block_number()?;
	let mut backend = OverlayedBackend::new(backend);
	let earliest_stored_number = match earliest_stored_number {
		None => {
			// This implies that there are no unfinalized blocks and hence nothing
			// to update.
			return Ok(backend);
		}
		Some(e) => e,
	};
	let mut viable_leaves = backend.load_leaves()?;
	// Walk all numbers up to the finalized number and remove those entries.
	// The range excludes `finalized_number` itself: blocks at the finalized
	// height need special treatment and are handled just below.
	for number in earliest_stored_number..finalized_number {
		let blocks_at = backend.load_blocks_by_number(number)?;
		backend.delete_blocks_by_number(number);
		for block in blocks_at {
			viable_leaves.remove(&block);
			backend.delete_block_entry(&block);
		}
	}
	// Remove all blocks at the finalized height, with the exception of the finalized block,
	// and their descendants, recursively.
	{
		let blocks_at_finalized_height = backend.load_blocks_by_number(finalized_number)?;
		backend.delete_blocks_by_number(finalized_number);
		// Seed the frontier with the abandoned siblings of the finalized block.
		let mut frontier: Vec<_> = blocks_at_finalized_height
			.into_iter()
			.filter(|h| h != &finalized_hash)
			.map(|h| (h, finalized_number))
			.collect();
		// Depth-first (`pop` from a `Vec`) removal of the abandoned sub-trees.
		while let Some((dead_hash, dead_number)) = frontier.pop() {
			let entry = backend.load_block_entry(&dead_hash)?;
			backend.delete_block_entry(&dead_hash);
			viable_leaves.remove(&dead_hash);
			// This does a few extra `clone`s but is unlikely to be
			// a bottleneck. Code complexity is very low as a result.
			let mut blocks_at_height = backend.load_blocks_by_number(dead_number)?;
			blocks_at_height.retain(|h| h != &dead_hash);
			backend.write_blocks_by_number(dead_number, blocks_at_height);
			// Add all children to the frontier. `entry` may be `None` in an
			// inconsistent state; `into_iter().flat_map` then yields nothing.
			let next_height = dead_number + 1;
			frontier.extend(
				entry.into_iter().flat_map(|e| e.children).map(|h| (h, next_height))
			);
		}
	}
	// Visit and remove the finalized block, fetching its children.
	// The iterator owns the entry's data, so the backend is free to be
	// mutated while it is still pending.
	let children_of_finalized = {
		let finalized_entry = backend.load_block_entry(&finalized_hash)?;
		backend.delete_block_entry(&finalized_hash);
		viable_leaves.remove(&finalized_hash);
		finalized_entry.into_iter().flat_map(|e| e.children)
	};
	// Persist the pruned leaf-set now; `propagate_viability_update` below
	// writes the leaves itself when viability changes.
	backend.write_leaves(viable_leaves);
	// Update the viability of each child.
	for child in children_of_finalized {
		if let Some(mut child) = backend.load_block_entry(&child)? {
			// Finalized blocks are always viable. Clearing the unviable-ancestor
			// marker may flip the child (and its sub-tree) to viable.
			child.viability.earliest_unviable_ancestor = None;
			propagate_viability_update(&mut backend, child)?;
		} else {
			tracing::debug!(
				target: LOG_TARGET,
				?finalized_hash,
				finalized_number,
				child_hash = ?child,
				"Missing child of finalized block",
			);
			// No need to do anything, but this is an inconsistent state.
		}
	}
	Ok(backend)
}
/// Mark a block as approved and update the viability of itself and its
/// descendants accordingly.
pub(super) fn approve_block(
	backend: &mut OverlayedBackend<impl Backend>,
	approved_hash: Hash,
) -> Result<(), Error> {
	match backend.load_block_entry(&approved_hash)? {
		Some(mut entry) => {
			let viable_before = entry.viability.is_viable();
			entry.viability.approval = Approval::Approved;
			let viable_after = entry.viability.is_viable();

			// Approval can change the viability in only one direction:
			// unviable -> viable. Only in that case do descendants and the
			// viable leaf set need recomputation; otherwise just persist
			// the updated entry.
			if viable_after && !viable_before {
				propagate_viability_update(backend, entry)?;
			} else {
				backend.write_block_entry(entry);
			}
		}
		None => {
			tracing::debug!(
				target: LOG_TARGET,
				block_hash = ?approved_hash,
				"Missing entry for freshly-approved block. Ignoring"
			);
		}
	}

	Ok(())
}
/// Check whether any blocks up to the given timestamp are stagnant and update
/// accordingly.
///
/// This accepts a fresh backend and returns an overlay on top of it representing
/// all changes made.
// TODO https://github.com/paritytech/polkadot/issues/3293: remove allow
#[allow(unused)]
pub(super) fn detect_stagnant<'a, B: 'a + Backend>(
	backend: &'a B,
	up_to: Timestamp,
) -> Result<OverlayedBackend<'a, B>, Error> {
	// Query the underlying store before wrapping it in the overlay.
	let stagnant_up_to = backend.load_stagnant_at_up_to(up_to)?;
	let mut backend = OverlayedBackend::new(backend);

	// As the timestamps are in ascending order, only the earliest stagnant
	// blocks will involve heavy viability propagations.
	for (timestamp, maybe_stagnant) in stagnant_up_to {
		backend.delete_stagnant_at(timestamp);

		for block_hash in maybe_stagnant {
			let mut entry = match backend.load_block_entry(&block_hash)? {
				Some(e) => e,
				None => continue,
			};

			let viable_before = entry.viability.is_viable();

			// Blocks which were approved in the meantime are not stagnant.
			if matches!(entry.viability.approval, Approval::Unapproved) {
				entry.viability.approval = Approval::Stagnant;
			}

			let viable_after = entry.viability.is_viable();

			// Only a viable -> unviable transition requires propagating
			// the change through descendants and the viable-leaves set.
			if viable_before && !viable_after {
				propagate_viability_update(&mut backend, entry)?;
			} else {
				backend.write_block_entry(entry);
			}
		}
	}

	Ok(backend)
}