feat: initialize Kurdistan SDK - independent fork of Polkadot SDK
This commit is contained in:
@@ -0,0 +1,237 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! An abstraction over storage used by the chain selection subsystem.
|
||||
//!
|
||||
//! This provides both a [`Backend`] trait and an [`OverlayedBackend`]
|
||||
//! struct which allows in-memory changes to be applied on top of a
|
||||
//! [`Backend`], maintaining consistency between queries and temporary writes,
|
||||
//! before any commit to the underlying storage is made.
|
||||
|
||||
use pezkuwi_primitives::{BlockNumber, Hash};
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::{BlockEntry, Error, LeafEntrySet, Timestamp};
|
||||
|
||||
pub(super) enum BackendWriteOp {
|
||||
WriteBlockEntry(BlockEntry),
|
||||
WriteBlocksByNumber(BlockNumber, Vec<Hash>),
|
||||
WriteViableLeaves(LeafEntrySet),
|
||||
WriteStagnantAt(Timestamp, Vec<Hash>),
|
||||
DeleteBlocksByNumber(BlockNumber),
|
||||
DeleteBlockEntry(Hash),
|
||||
DeleteStagnantAt(Timestamp),
|
||||
}
|
||||
|
||||
/// An abstraction over backend storage for the logic of this subsystem.
|
||||
pub(super) trait Backend {
|
||||
/// Load a block entry from the DB.
|
||||
fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error>;
|
||||
/// Load the active-leaves set.
|
||||
fn load_leaves(&self) -> Result<LeafEntrySet, Error>;
|
||||
/// Load the stagnant list at the given timestamp.
|
||||
fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error>;
|
||||
/// Load all stagnant lists up to and including the given Unix timestamp
|
||||
/// in ascending order. Stop fetching stagnant entries upon reaching `max_elements`.
|
||||
fn load_stagnant_at_up_to(
|
||||
&self,
|
||||
up_to: Timestamp,
|
||||
max_elements: usize,
|
||||
) -> Result<Vec<(Timestamp, Vec<Hash>)>, Error>;
|
||||
/// Load the earliest kept block number.
|
||||
fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error>;
|
||||
/// Load blocks by number.
|
||||
fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error>;
|
||||
|
||||
/// Atomically write the list of operations, with later operations taking precedence over prior.
|
||||
fn write<I>(&mut self, ops: I) -> Result<(), Error>
|
||||
where
|
||||
I: IntoIterator<Item = BackendWriteOp>;
|
||||
}
|
||||
|
||||
/// An in-memory overlay over the backend.
|
||||
///
|
||||
/// This maintains read-only access to the underlying backend, but can be
|
||||
/// converted into a set of write operations which will, when written to
|
||||
/// the underlying backend, give the same view as the state of the overlay.
|
||||
pub(super) struct OverlayedBackend<'a, B: 'a> {
|
||||
inner: &'a B,
|
||||
|
||||
// `None` means 'deleted', missing means query inner.
|
||||
block_entries: HashMap<Hash, Option<BlockEntry>>,
|
||||
// `None` means 'deleted', missing means query inner.
|
||||
blocks_by_number: HashMap<BlockNumber, Option<Vec<Hash>>>,
|
||||
// 'None' means 'deleted', missing means query inner.
|
||||
stagnant_at: HashMap<Timestamp, Option<Vec<Hash>>>,
|
||||
// 'None' means query inner.
|
||||
leaves: Option<LeafEntrySet>,
|
||||
}
|
||||
|
||||
impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
|
||||
pub(super) fn new(backend: &'a B) -> Self {
|
||||
OverlayedBackend {
|
||||
inner: backend,
|
||||
block_entries: HashMap::new(),
|
||||
blocks_by_number: HashMap::new(),
|
||||
stagnant_at: HashMap::new(),
|
||||
leaves: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error> {
|
||||
if let Some(val) = self.block_entries.get(&hash) {
|
||||
return Ok(val.clone());
|
||||
}
|
||||
|
||||
self.inner.load_block_entry(hash)
|
||||
}
|
||||
|
||||
pub(super) fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> {
|
||||
if let Some(val) = self.blocks_by_number.get(&number) {
|
||||
return Ok(val.as_ref().map_or(Vec::new(), Clone::clone));
|
||||
}
|
||||
|
||||
self.inner.load_blocks_by_number(number)
|
||||
}
|
||||
|
||||
pub(super) fn load_leaves(&self) -> Result<LeafEntrySet, Error> {
|
||||
if let Some(ref set) = self.leaves {
|
||||
return Ok(set.clone());
|
||||
}
|
||||
|
||||
self.inner.load_leaves()
|
||||
}
|
||||
|
||||
pub(super) fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error> {
|
||||
if let Some(val) = self.stagnant_at.get(×tamp) {
|
||||
return Ok(val.as_ref().map_or(Vec::new(), Clone::clone));
|
||||
}
|
||||
|
||||
self.inner.load_stagnant_at(timestamp)
|
||||
}
|
||||
|
||||
pub(super) fn write_block_entry(&mut self, entry: BlockEntry) {
|
||||
self.block_entries.insert(entry.block_hash, Some(entry));
|
||||
}
|
||||
|
||||
pub(super) fn delete_block_entry(&mut self, hash: &Hash) {
|
||||
self.block_entries.insert(*hash, None);
|
||||
}
|
||||
|
||||
pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec<Hash>) {
|
||||
if blocks.is_empty() {
|
||||
self.blocks_by_number.insert(number, None);
|
||||
} else {
|
||||
self.blocks_by_number.insert(number, Some(blocks));
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) {
|
||||
self.blocks_by_number.insert(number, None);
|
||||
}
|
||||
|
||||
pub(super) fn write_leaves(&mut self, leaves: LeafEntrySet) {
|
||||
self.leaves = Some(leaves);
|
||||
}
|
||||
|
||||
pub(super) fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec<Hash>) {
|
||||
self.stagnant_at.insert(timestamp, Some(hashes));
|
||||
}
|
||||
|
||||
pub(super) fn delete_stagnant_at(&mut self, timestamp: Timestamp) {
|
||||
self.stagnant_at.insert(timestamp, None);
|
||||
}
|
||||
|
||||
/// Transform this backend into a set of write-ops to be written to the
|
||||
/// inner backend.
|
||||
pub(super) fn into_write_ops(self) -> impl Iterator<Item = BackendWriteOp> {
|
||||
let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v {
|
||||
Some(v) => BackendWriteOp::WriteBlockEntry(v),
|
||||
None => BackendWriteOp::DeleteBlockEntry(h),
|
||||
});
|
||||
|
||||
let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v {
|
||||
Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v),
|
||||
None => BackendWriteOp::DeleteBlocksByNumber(n),
|
||||
});
|
||||
|
||||
let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves);
|
||||
|
||||
let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v {
|
||||
Some(v) => BackendWriteOp::WriteStagnantAt(n, v),
|
||||
None => BackendWriteOp::DeleteStagnantAt(n),
|
||||
});
|
||||
|
||||
block_entry_ops
|
||||
.chain(blocks_by_number_ops)
|
||||
.chain(leaf_ops)
|
||||
.chain(stagnant_at_ops)
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to find the given ancestor in the chain with given head.
|
||||
///
|
||||
/// If the ancestor is the most recently finalized block, and the `head` is
|
||||
/// a known unfinalized block, this will return `true`.
|
||||
///
|
||||
/// If the ancestor is an unfinalized block and `head` is known, this will
|
||||
/// return true if `ancestor` is in `head`'s chain.
|
||||
///
|
||||
/// If the ancestor is an older finalized block, this will return `false`.
|
||||
fn contains_ancestor(backend: &impl Backend, head: Hash, ancestor: Hash) -> Result<bool, Error> {
	// Walk the parent links from `head` until we either hit `ancestor` or run
	// out of known (unfinalized) block entries.
	let mut current_hash = head;
	loop {
		if current_hash == ancestor {
			return Ok(true)
		}

		// Fix: the argument was garbled by HTML-entity mangling (`&curren;t_hash`);
		// it must be a borrow of the loop cursor, `&current_hash`.
		match backend.load_block_entry(&current_hash)? {
			Some(e) => current_hash = e.parent_hash,
			// Unknown block: either finalized-and-pruned or simply not in the DB.
			None => break,
		}
	}

	Ok(false)
}
|
||||
|
||||
/// This returns the best unfinalized leaf containing the required block.
|
||||
///
|
||||
/// If the required block is finalized but not the most recent finalized block,
|
||||
/// this will return `None`.
|
||||
///
|
||||
/// If the required block is unfinalized but not an ancestor of any viable leaf,
|
||||
/// this will return `None`.
|
||||
//
|
||||
// Note: this is O(N^2) in the depth of `required` and the number of leaves.
|
||||
// We expect the number of unfinalized blocks to be small, as in, to not exceed
|
||||
// single digits in practice, and exceedingly unlikely to surpass 1000.
|
||||
//
|
||||
// However, if we need to, we could implement some type of skip-list for
|
||||
// fast ancestry checks.
|
||||
pub(super) fn find_best_leaf_containing(
|
||||
backend: &impl Backend,
|
||||
required: Hash,
|
||||
) -> Result<Option<Hash>, Error> {
|
||||
let leaves = backend.load_leaves()?;
|
||||
for leaf in leaves.into_hashes_descending() {
|
||||
if contains_ancestor(backend, leaf, required)? {
|
||||
return Ok(Some(leaf));
|
||||
}
|
||||
}
|
||||
|
||||
// If there are no viable leaves containing the ancestor
|
||||
Ok(None)
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! A database [`Backend`][crate::backend::Backend] for the chain selection subsystem.
|
||||
|
||||
pub(super) mod v1;
|
||||
@@ -0,0 +1,631 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! A database [`Backend`][crate::backend::Backend] for the chain selection subsystem.
|
||||
//!
|
||||
//! This stores the following schema:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! ("CS_block_entry", Hash) -> BlockEntry;
|
||||
//! ("CS_block_height", BigEndianBlockNumber) -> Vec<Hash>;
|
||||
//! ("CS_stagnant_at", BigEndianTimestamp) -> Vec<Hash>;
|
||||
//! ("CS_leaves") -> LeafEntrySet;
|
||||
//! ```
|
||||
//!
|
||||
//! The big-endian encoding is used for creating iterators over the key-value DB which are
|
||||
//! accessible by prefix, to find the earliest block number stored as well as the all stagnant
|
||||
//! blocks.
|
||||
//!
|
||||
//! The `Vec`s stored are always non-empty. Empty `Vec`s are not stored on disk so there is no
|
||||
//! semantic difference between `None` and an empty `Vec`.
|
||||
|
||||
use crate::{
|
||||
backend::{Backend, BackendWriteOp},
|
||||
Error,
|
||||
};
|
||||
|
||||
use pezkuwi_node_primitives::BlockWeight;
|
||||
use pezkuwi_primitives::{BlockNumber, Hash};
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
use pezkuwi_node_subsystem_util::database::{DBTransaction, Database};
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
// Key prefixes for the per-kind entries in the column. The numeric suffixes of
// height/timestamp keys are big-endian encoded so lexicographic key order
// matches numeric order (see module docs).
const BLOCK_ENTRY_PREFIX: &[u8; 14] = b"CS_block_entry";
const BLOCK_HEIGHT_PREFIX: &[u8; 15] = b"CS_block_height";
const STAGNANT_AT_PREFIX: &[u8; 14] = b"CS_stagnant_at";
const LEAVES_KEY: &[u8; 9] = b"CS_leaves";

// On-disk timestamp representation (seconds, Unix epoch — see crate docs).
type Timestamp = u64;
|
||||
|
||||
#[derive(Debug, Encode, Decode, Clone, PartialEq)]
|
||||
enum Approval {
|
||||
#[codec(index = 0)]
|
||||
Approved,
|
||||
#[codec(index = 1)]
|
||||
Unapproved,
|
||||
#[codec(index = 2)]
|
||||
Stagnant,
|
||||
}
|
||||
|
||||
impl From<crate::Approval> for Approval {
|
||||
fn from(x: crate::Approval) -> Self {
|
||||
match x {
|
||||
crate::Approval::Approved => Approval::Approved,
|
||||
crate::Approval::Unapproved => Approval::Unapproved,
|
||||
crate::Approval::Stagnant => Approval::Stagnant,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Approval> for crate::Approval {
|
||||
fn from(x: Approval) -> crate::Approval {
|
||||
match x {
|
||||
Approval::Approved => crate::Approval::Approved,
|
||||
Approval::Unapproved => crate::Approval::Unapproved,
|
||||
Approval::Stagnant => crate::Approval::Stagnant,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Encode, Decode, Clone, PartialEq)]
|
||||
struct ViabilityCriteria {
|
||||
explicitly_reverted: bool,
|
||||
approval: Approval,
|
||||
earliest_unviable_ancestor: Option<Hash>,
|
||||
}
|
||||
|
||||
impl From<crate::ViabilityCriteria> for ViabilityCriteria {
|
||||
fn from(x: crate::ViabilityCriteria) -> Self {
|
||||
ViabilityCriteria {
|
||||
explicitly_reverted: x.explicitly_reverted,
|
||||
approval: x.approval.into(),
|
||||
earliest_unviable_ancestor: x.earliest_unviable_ancestor,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ViabilityCriteria> for crate::ViabilityCriteria {
|
||||
fn from(x: ViabilityCriteria) -> crate::ViabilityCriteria {
|
||||
crate::ViabilityCriteria {
|
||||
explicitly_reverted: x.explicitly_reverted,
|
||||
approval: x.approval.into(),
|
||||
earliest_unviable_ancestor: x.earliest_unviable_ancestor,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Encode, Decode)]
|
||||
struct LeafEntry {
|
||||
weight: BlockWeight,
|
||||
block_number: BlockNumber,
|
||||
block_hash: Hash,
|
||||
}
|
||||
|
||||
impl From<crate::LeafEntry> for LeafEntry {
|
||||
fn from(x: crate::LeafEntry) -> Self {
|
||||
LeafEntry { weight: x.weight, block_number: x.block_number, block_hash: x.block_hash }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LeafEntry> for crate::LeafEntry {
|
||||
fn from(x: LeafEntry) -> crate::LeafEntry {
|
||||
crate::LeafEntry {
|
||||
weight: x.weight,
|
||||
block_number: x.block_number,
|
||||
block_hash: x.block_hash,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Encode, Decode)]
|
||||
struct LeafEntrySet {
|
||||
inner: Vec<LeafEntry>,
|
||||
}
|
||||
|
||||
impl From<crate::LeafEntrySet> for LeafEntrySet {
|
||||
fn from(x: crate::LeafEntrySet) -> Self {
|
||||
LeafEntrySet { inner: x.inner.into_iter().map(Into::into).collect() }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LeafEntrySet> for crate::LeafEntrySet {
|
||||
fn from(x: LeafEntrySet) -> crate::LeafEntrySet {
|
||||
crate::LeafEntrySet { inner: x.inner.into_iter().map(Into::into).collect() }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Encode, Decode, Clone, PartialEq)]
|
||||
struct BlockEntry {
|
||||
block_hash: Hash,
|
||||
block_number: BlockNumber,
|
||||
parent_hash: Hash,
|
||||
children: Vec<Hash>,
|
||||
viability: ViabilityCriteria,
|
||||
weight: BlockWeight,
|
||||
}
|
||||
|
||||
impl From<crate::BlockEntry> for BlockEntry {
|
||||
fn from(x: crate::BlockEntry) -> Self {
|
||||
BlockEntry {
|
||||
block_hash: x.block_hash,
|
||||
block_number: x.block_number,
|
||||
parent_hash: x.parent_hash,
|
||||
children: x.children,
|
||||
viability: x.viability.into(),
|
||||
weight: x.weight,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BlockEntry> for crate::BlockEntry {
|
||||
fn from(x: BlockEntry) -> crate::BlockEntry {
|
||||
crate::BlockEntry {
|
||||
block_hash: x.block_hash,
|
||||
block_number: x.block_number,
|
||||
parent_hash: x.parent_hash,
|
||||
children: x.children,
|
||||
viability: x.viability.into(),
|
||||
weight: x.weight,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for the database backend.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Config {
|
||||
/// The column where block metadata is stored.
|
||||
pub col_data: u32,
|
||||
}
|
||||
|
||||
/// The database backend.
|
||||
pub struct DbBackend {
|
||||
inner: Arc<dyn Database>,
|
||||
config: Config,
|
||||
}
|
||||
|
||||
impl DbBackend {
|
||||
/// Create a new [`DbBackend`] with the supplied key-value store and
|
||||
/// config.
|
||||
pub fn new(db: Arc<dyn Database>, config: Config) -> Self {
|
||||
DbBackend { inner: db, config }
|
||||
}
|
||||
}
|
||||
|
||||
impl Backend for DbBackend {
|
||||
fn load_block_entry(&self, hash: &Hash) -> Result<Option<crate::BlockEntry>, Error> {
|
||||
load_decode::<BlockEntry>(&*self.inner, self.config.col_data, &block_entry_key(hash))
|
||||
.map(|o| o.map(Into::into))
|
||||
}
|
||||
|
||||
fn load_leaves(&self) -> Result<crate::LeafEntrySet, Error> {
|
||||
load_decode::<LeafEntrySet>(&*self.inner, self.config.col_data, LEAVES_KEY)
|
||||
.map(|o| o.map(Into::into).unwrap_or_default())
|
||||
}
|
||||
|
||||
fn load_stagnant_at(&self, timestamp: crate::Timestamp) -> Result<Vec<Hash>, Error> {
|
||||
load_decode::<Vec<Hash>>(
|
||||
&*self.inner,
|
||||
self.config.col_data,
|
||||
&stagnant_at_key(timestamp.into()),
|
||||
)
|
||||
.map(|o| o.unwrap_or_default())
|
||||
}
|
||||
|
||||
fn load_stagnant_at_up_to(
|
||||
&self,
|
||||
up_to: crate::Timestamp,
|
||||
max_elements: usize,
|
||||
) -> Result<Vec<(crate::Timestamp, Vec<Hash>)>, Error> {
|
||||
let stagnant_at_iter =
|
||||
self.inner.iter_with_prefix(self.config.col_data, &STAGNANT_AT_PREFIX[..]);
|
||||
|
||||
let val = stagnant_at_iter
|
||||
.filter_map(|r| match r {
|
||||
Ok((k, v)) => {
|
||||
match (decode_stagnant_at_key(&mut &k[..]), <Vec<_>>::decode(&mut &v[..]).ok())
|
||||
{
|
||||
(Some(at), Some(stagnant_at)) => Some(Ok((at, stagnant_at))),
|
||||
_ => None,
|
||||
}
|
||||
},
|
||||
Err(e) => Some(Err(e)),
|
||||
})
|
||||
.enumerate()
|
||||
.take_while(|(idx, r)| {
|
||||
r.as_ref().map_or(true, |(at, _)| *at <= up_to.into() && *idx < max_elements)
|
||||
})
|
||||
.map(|(_, v)| v)
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error> {
|
||||
let blocks_at_height_iter =
|
||||
self.inner.iter_with_prefix(self.config.col_data, &BLOCK_HEIGHT_PREFIX[..]);
|
||||
|
||||
let val = blocks_at_height_iter
|
||||
.filter_map(|r| match r {
|
||||
Ok((k, _)) => decode_block_height_key(&k[..]).map(Ok),
|
||||
Err(e) => Some(Err(e)),
|
||||
})
|
||||
.next();
|
||||
|
||||
val.transpose().map_err(Error::from)
|
||||
}
|
||||
|
||||
fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> {
|
||||
load_decode::<Vec<Hash>>(&*self.inner, self.config.col_data, &block_height_key(number))
|
||||
.map(|o| o.unwrap_or_default())
|
||||
}
|
||||
|
||||
/// Atomically write the list of operations, with later operations taking precedence over prior.
|
||||
fn write<I>(&mut self, ops: I) -> Result<(), Error>
|
||||
where
|
||||
I: IntoIterator<Item = BackendWriteOp>,
|
||||
{
|
||||
let mut tx = DBTransaction::new();
|
||||
for op in ops {
|
||||
match op {
|
||||
BackendWriteOp::WriteBlockEntry(block_entry) => {
|
||||
let block_entry: BlockEntry = block_entry.into();
|
||||
tx.put_vec(
|
||||
self.config.col_data,
|
||||
&block_entry_key(&block_entry.block_hash),
|
||||
block_entry.encode(),
|
||||
);
|
||||
},
|
||||
BackendWriteOp::WriteBlocksByNumber(block_number, v) =>
|
||||
if v.is_empty() {
|
||||
tx.delete(self.config.col_data, &block_height_key(block_number));
|
||||
} else {
|
||||
tx.put_vec(
|
||||
self.config.col_data,
|
||||
&block_height_key(block_number),
|
||||
v.encode(),
|
||||
);
|
||||
},
|
||||
BackendWriteOp::WriteViableLeaves(leaves) => {
|
||||
let leaves: LeafEntrySet = leaves.into();
|
||||
if leaves.inner.is_empty() {
|
||||
tx.delete(self.config.col_data, &LEAVES_KEY[..]);
|
||||
} else {
|
||||
tx.put_vec(self.config.col_data, &LEAVES_KEY[..], leaves.encode());
|
||||
}
|
||||
},
|
||||
BackendWriteOp::WriteStagnantAt(timestamp, stagnant_at) => {
|
||||
let timestamp: Timestamp = timestamp.into();
|
||||
if stagnant_at.is_empty() {
|
||||
tx.delete(self.config.col_data, &stagnant_at_key(timestamp));
|
||||
} else {
|
||||
tx.put_vec(
|
||||
self.config.col_data,
|
||||
&stagnant_at_key(timestamp),
|
||||
stagnant_at.encode(),
|
||||
);
|
||||
}
|
||||
},
|
||||
BackendWriteOp::DeleteBlocksByNumber(block_number) => {
|
||||
tx.delete(self.config.col_data, &block_height_key(block_number));
|
||||
},
|
||||
BackendWriteOp::DeleteBlockEntry(hash) => {
|
||||
tx.delete(self.config.col_data, &block_entry_key(&hash));
|
||||
},
|
||||
BackendWriteOp::DeleteStagnantAt(timestamp) => {
|
||||
let timestamp: Timestamp = timestamp.into();
|
||||
tx.delete(self.config.col_data, &stagnant_at_key(timestamp));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
self.inner.write(tx).map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
fn load_decode<D: Decode>(
|
||||
db: &dyn Database,
|
||||
col_data: u32,
|
||||
key: &[u8],
|
||||
) -> Result<Option<D>, Error> {
|
||||
match db.get(col_data, key)? {
|
||||
None => Ok(None),
|
||||
Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into),
|
||||
}
|
||||
}
|
||||
|
||||
fn block_entry_key(hash: &Hash) -> [u8; 14 + 32] {
|
||||
let mut key = [0; 14 + 32];
|
||||
key[..14].copy_from_slice(BLOCK_ENTRY_PREFIX);
|
||||
hash.using_encoded(|s| key[14..].copy_from_slice(s));
|
||||
key
|
||||
}
|
||||
|
||||
fn block_height_key(number: BlockNumber) -> [u8; 15 + 4] {
|
||||
let mut key = [0; 15 + 4];
|
||||
key[..15].copy_from_slice(BLOCK_HEIGHT_PREFIX);
|
||||
key[15..].copy_from_slice(&number.to_be_bytes());
|
||||
key
|
||||
}
|
||||
|
||||
fn stagnant_at_key(timestamp: Timestamp) -> [u8; 14 + 8] {
|
||||
let mut key = [0; 14 + 8];
|
||||
key[..14].copy_from_slice(STAGNANT_AT_PREFIX);
|
||||
key[14..].copy_from_slice(×tamp.to_be_bytes());
|
||||
key
|
||||
}
|
||||
|
||||
fn decode_block_height_key(key: &[u8]) -> Option<BlockNumber> {
|
||||
if key.len() != 15 + 4 {
|
||||
return None;
|
||||
}
|
||||
if !key.starts_with(BLOCK_HEIGHT_PREFIX) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut bytes = [0; 4];
|
||||
bytes.copy_from_slice(&key[15..]);
|
||||
Some(BlockNumber::from_be_bytes(bytes))
|
||||
}
|
||||
|
||||
fn decode_stagnant_at_key(key: &[u8]) -> Option<Timestamp> {
|
||||
if key.len() != 14 + 8 {
|
||||
return None;
|
||||
}
|
||||
if !key.starts_with(STAGNANT_AT_PREFIX) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut bytes = [0; 8];
|
||||
bytes.copy_from_slice(&key[14..]);
|
||||
Some(Timestamp::from_be_bytes(bytes))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::*;

	// In-memory single-column DB for the tests below. (The module is already
	// `#[cfg(test)]`, so no per-item attribute is needed.)
	fn test_db() -> Arc<dyn Database> {
		let db = kvdb_memorydb::create(1);
		let db = pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]);
		Arc::new(db)
	}

	#[test]
	fn block_height_key_decodes() {
		let key = block_height_key(5);
		assert_eq!(decode_block_height_key(&key), Some(5));
	}

	#[test]
	fn stagnant_at_key_decodes() {
		let key = stagnant_at_key(5);
		assert_eq!(decode_stagnant_at_key(&key), Some(5));
	}

	#[test]
	fn lower_block_height_key_lesser() {
		// Big-endian encoding must preserve numeric order lexicographically.
		for i in 0..256 {
			for j in 1..=256 {
				assert!(block_height_key(i) < block_height_key(i + j));
			}
		}
	}

	#[test]
	fn lower_stagnant_at_key_lesser() {
		// Big-endian encoding must preserve numeric order lexicographically.
		for i in 0..256 {
			for j in 1..=256 {
				assert!(stagnant_at_key(i) < stagnant_at_key(i + j));
			}
		}
	}

	fn sample_block_entry() -> BlockEntry {
		BlockEntry {
			block_hash: Hash::repeat_byte(1),
			block_number: 1,
			parent_hash: Hash::repeat_byte(0),
			children: vec![],
			viability: ViabilityCriteria {
				earliest_unviable_ancestor: None,
				explicitly_reverted: false,
				approval: Approval::Unapproved,
			},
			weight: 100,
		}
	}

	#[test]
	fn write_read_block_entry() {
		let mut backend = DbBackend::new(test_db(), Config { col_data: 0 });

		let block_entry = sample_block_entry();

		backend
			.write(vec![BackendWriteOp::WriteBlockEntry(block_entry.clone().into())])
			.unwrap();

		assert_eq!(
			backend.load_block_entry(&block_entry.block_hash).unwrap().map(BlockEntry::from),
			Some(block_entry),
		);
	}

	#[test]
	fn delete_block_entry() {
		let mut backend = DbBackend::new(test_db(), Config { col_data: 0 });

		let block_entry = sample_block_entry();

		backend
			.write(vec![BackendWriteOp::WriteBlockEntry(block_entry.clone().into())])
			.unwrap();

		backend
			.write(vec![BackendWriteOp::DeleteBlockEntry(block_entry.block_hash)])
			.unwrap();

		assert!(backend.load_block_entry(&block_entry.block_hash).unwrap().is_none());
	}

	#[test]
	fn earliest_block_number() {
		let mut backend = DbBackend::new(test_db(), Config { col_data: 0 });

		assert!(backend.load_first_block_number().unwrap().is_none());

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![Hash::repeat_byte(0)]),
				BackendWriteOp::WriteBlocksByNumber(5, vec![Hash::repeat_byte(0)]),
				BackendWriteOp::WriteBlocksByNumber(10, vec![Hash::repeat_byte(0)]),
			])
			.unwrap();

		assert_eq!(backend.load_first_block_number().unwrap(), Some(2));

		// Writing an empty list and deleting must both remove the entry.
		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![]),
				BackendWriteOp::DeleteBlocksByNumber(5),
			])
			.unwrap();

		assert_eq!(backend.load_first_block_number().unwrap(), Some(10));
	}

	#[test]
	fn stagnant_at_up_to() {
		let mut backend = DbBackend::new(test_db(), Config { col_data: 0 });

		// Prove that it's cheap
		assert!(backend
			.load_stagnant_at_up_to(Timestamp::max_value(), usize::MAX)
			.unwrap()
			.is_empty());

		backend
			.write(vec![
				BackendWriteOp::WriteStagnantAt(2, vec![Hash::repeat_byte(1)]),
				BackendWriteOp::WriteStagnantAt(5, vec![Hash::repeat_byte(2)]),
				BackendWriteOp::WriteStagnantAt(10, vec![Hash::repeat_byte(3)]),
			])
			.unwrap();

		assert_eq!(
			backend.load_stagnant_at_up_to(Timestamp::max_value(), usize::MAX).unwrap(),
			vec![
				(2, vec![Hash::repeat_byte(1)]),
				(5, vec![Hash::repeat_byte(2)]),
				(10, vec![Hash::repeat_byte(3)]),
			]
		);

		assert_eq!(
			backend.load_stagnant_at_up_to(10, usize::MAX).unwrap(),
			vec![
				(2, vec![Hash::repeat_byte(1)]),
				(5, vec![Hash::repeat_byte(2)]),
				(10, vec![Hash::repeat_byte(3)]),
			]
		);

		assert_eq!(
			backend.load_stagnant_at_up_to(9, usize::MAX).unwrap(),
			vec![(2, vec![Hash::repeat_byte(1)]), (5, vec![Hash::repeat_byte(2)]),]
		);

		assert_eq!(
			backend.load_stagnant_at_up_to(9, 1).unwrap(),
			vec![(2, vec![Hash::repeat_byte(1)]),]
		);

		backend.write(vec![BackendWriteOp::DeleteStagnantAt(2)]).unwrap();

		assert_eq!(
			backend.load_stagnant_at_up_to(5, usize::MAX).unwrap(),
			vec![(5, vec![Hash::repeat_byte(2)]),]
		);

		// Writing an empty list is equivalent to deletion.
		backend.write(vec![BackendWriteOp::WriteStagnantAt(5, vec![])]).unwrap();

		assert_eq!(
			backend.load_stagnant_at_up_to(10, usize::MAX).unwrap(),
			vec![(10, vec![Hash::repeat_byte(3)]),]
		);
	}

	#[test]
	fn write_read_blocks_at_height() {
		let mut backend = DbBackend::new(test_db(), Config { col_data: 0 });

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![Hash::repeat_byte(1)]),
				BackendWriteOp::WriteBlocksByNumber(5, vec![Hash::repeat_byte(2)]),
				BackendWriteOp::WriteBlocksByNumber(10, vec![Hash::repeat_byte(3)]),
			])
			.unwrap();

		assert_eq!(backend.load_blocks_by_number(2).unwrap(), vec![Hash::repeat_byte(1)]);

		assert_eq!(backend.load_blocks_by_number(3).unwrap(), vec![]);

		backend
			.write(vec![
				BackendWriteOp::WriteBlocksByNumber(2, vec![]),
				BackendWriteOp::DeleteBlocksByNumber(5),
			])
			.unwrap();

		assert_eq!(backend.load_blocks_by_number(2).unwrap(), vec![]);

		assert_eq!(backend.load_blocks_by_number(5).unwrap(), vec![]);

		assert_eq!(backend.load_blocks_by_number(10).unwrap(), vec![Hash::repeat_byte(3)]);
	}
}
|
||||
@@ -0,0 +1,743 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Implements the Chain Selection Subsystem.
|
||||
|
||||
use pezkuwi_node_primitives::BlockWeight;
|
||||
use pezkuwi_node_subsystem::{
|
||||
errors::ChainApiError,
|
||||
messages::{ChainApiMessage, ChainSelectionMessage},
|
||||
overseer::{self, SubsystemSender},
|
||||
FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
|
||||
};
|
||||
use pezkuwi_node_subsystem_util::database::Database;
|
||||
use pezkuwi_primitives::{BlockNumber, ConsensusLog, Hash, Header};
|
||||
|
||||
use codec::Error as CodecError;
|
||||
use futures::{channel::oneshot, future::Either, prelude::*};
|
||||
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{Duration, SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
use crate::backend::{Backend, BackendWriteOp, OverlayedBackend};
|
||||
|
||||
mod backend;
|
||||
mod db_backend;
|
||||
mod tree;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
const LOG_TARGET: &str = "teyrchain::chain-selection";

/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS
/// reboots. Measured in whole seconds (see `SystemClock::timestamp_now`).
type Timestamp = u64;

// If a block isn't approved in 120 seconds, nodes will abandon it
// and begin building on another chain.
const STAGNANT_TIMEOUT: Timestamp = 120;
// Delay pruning of the stagnant keys in prune-only mode by 25 hours,
// to avoid interfering with finality.
const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60;
// Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration.
const MAX_STAGNANT_ENTRIES: usize = 1000;
|
||||
|
||||
// The approval state of a single block.
#[derive(Debug, Clone)]
enum Approval {
	// Approved by the approval-voting subsystem.
	Approved,
	// Not yet approved, but not stagnant either.
	Unapproved,
	// Unapproved and stagnant: approval did not arrive before the block's
	// stagnation deadline.
	Stagnant,
}
|
||||
|
||||
impl Approval {
|
||||
fn is_stagnant(&self) -> bool {
|
||||
matches!(*self, Approval::Stagnant)
|
||||
}
|
||||
}
|
||||
|
||||
// The criteria which determine whether a block is viable for building on.
#[derive(Debug, Clone)]
struct ViabilityCriteria {
	// Whether this block has been explicitly reverted by one of its descendants.
	explicitly_reverted: bool,
	// The approval state of this block specifically.
	approval: Approval,
	// The earliest unviable ancestor - the hash of the earliest unfinalized
	// block in the ancestry which is explicitly reverted or stagnant.
	// `None` means all unfinalized ancestors are viable.
	earliest_unviable_ancestor: Option<Hash>,
}
|
||||
|
||||
impl ViabilityCriteria {
|
||||
fn is_viable(&self) -> bool {
|
||||
self.is_parent_viable() && self.is_explicitly_viable()
|
||||
}
|
||||
|
||||
// Whether the current block is explicitly viable.
|
||||
// That is, whether the current block is neither reverted nor stagnant.
|
||||
fn is_explicitly_viable(&self) -> bool {
|
||||
!self.explicitly_reverted && !self.approval.is_stagnant()
|
||||
}
|
||||
|
||||
// Whether the parent is viable. This assumes that the parent
|
||||
// descends from the finalized chain.
|
||||
fn is_parent_viable(&self) -> bool {
|
||||
self.earliest_unviable_ancestor.is_none()
|
||||
}
|
||||
}
|
||||
|
||||
// Light entries describing leaves of the chain.
//
// These are ordered first by weight and then by block number
// (see the `PartialOrd` impl below).
#[derive(Debug, Clone, PartialEq)]
struct LeafEntry {
	// Weight of the leaf block.
	weight: BlockWeight,
	// Height of the leaf block.
	block_number: BlockNumber,
	// Hash of the leaf block.
	block_hash: Hash,
}
|
||||
|
||||
impl PartialOrd for LeafEntry {
	// Order first by weight and then by block number. Two distinct entries
	// with equal weight and block number are deliberately left unordered
	// (`None`).
	//
	// NOTE(review): this deviates from the documented `PartialOrd`
	// contract (`a == b` should imply `partial_cmp(a, b) == Some(Equal)`;
	// the derived `PartialEq` also compares hashes, yet equal entries get
	// `None` here). `LeafEntrySet::insert` relies on exactly this
	// behavior — an unordered entry is never `<` the probe — so it must
	// not be "fixed" without auditing that call site.
	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
		let ord = self.weight.cmp(&other.weight).then(self.block_number.cmp(&other.block_number));

		if !matches!(ord, std::cmp::Ordering::Equal) {
			Some(ord)
		} else {
			None
		}
	}
}
|
||||
|
||||
// The ordered set of viable leaves.
#[derive(Debug, Default, Clone)]
struct LeafEntrySet {
	// Kept sorted in descending `LeafEntry` order by `insert`
	// (heaviest / highest first).
	inner: Vec<LeafEntry>,
}
|
||||
|
||||
impl LeafEntrySet {
|
||||
fn remove(&mut self, hash: &Hash) -> bool {
|
||||
match self.inner.iter().position(|e| &e.block_hash == hash) {
|
||||
None => false,
|
||||
Some(i) => {
|
||||
self.inner.remove(i);
|
||||
true
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn insert(&mut self, new: LeafEntry) {
|
||||
let mut pos = None;
|
||||
for (i, e) in self.inner.iter().enumerate() {
|
||||
if e == &new {
|
||||
return;
|
||||
}
|
||||
if e < &new {
|
||||
pos = Some(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
match pos {
|
||||
None => self.inner.push(new),
|
||||
Some(i) => self.inner.insert(i, new),
|
||||
}
|
||||
}
|
||||
|
||||
fn into_hashes_descending(self) -> impl Iterator<Item = Hash> {
|
||||
self.inner.into_iter().map(|e| e.block_hash)
|
||||
}
|
||||
}
|
||||
|
||||
// A single node of the stored block tree.
#[derive(Debug, Clone)]
struct BlockEntry {
	// Hash of this block.
	block_hash: Hash,
	// Height of this block.
	block_number: BlockNumber,
	// Hash of the parent block.
	parent_hash: Hash,
	// Hashes of all known children of this block.
	children: Vec<Hash>,
	// Viability state of this block (see `ViabilityCriteria`).
	viability: ViabilityCriteria,
	// Weight of the block, as fetched from the Chain API
	// (see `fetch_block_weight`).
	weight: BlockWeight,
}
|
||||
|
||||
impl BlockEntry {
|
||||
fn leaf_entry(&self) -> LeafEntry {
|
||||
LeafEntry {
|
||||
block_hash: self.block_hash,
|
||||
block_number: self.block_number,
|
||||
weight: self.weight,
|
||||
}
|
||||
}
|
||||
|
||||
fn non_viable_ancestor_for_child(&self) -> Option<Hash> {
|
||||
if self.viability.is_viable() {
|
||||
None
|
||||
} else {
|
||||
self.viability.earliest_unviable_ancestor.or(Some(self.block_hash))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors surfaced by the chain-selection subsystem.
///
/// All variants wrap the underlying error transparently.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
	#[error(transparent)]
	ChainApi(#[from] ChainApiError),

	#[error(transparent)]
	Io(#[from] std::io::Error),

	#[error(transparent)]
	Oneshot(#[from] oneshot::Canceled),

	#[error(transparent)]
	Subsystem(#[from] SubsystemError),

	#[error(transparent)]
	Codec(#[from] CodecError),
}
|
||||
|
||||
impl Error {
	// Log this error at an appropriate level.
	fn trace(&self) {
		match self {
			// don't spam the log with spurious errors
			Self::Oneshot(_) => gum::debug!(target: LOG_TARGET, err = ?self),
			// it's worth reporting otherwise
			_ => gum::warn!(target: LOG_TARGET, err = ?self),
		}
	}
}
|
||||
|
||||
/// A clock used for fetching the current timestamp.
pub trait Clock {
	/// Get the current timestamp, in seconds since the UNIX epoch
	/// (see [`Timestamp`]).
	fn timestamp_now(&self) -> Timestamp;
}
|
||||
|
||||
/// A [`Clock`] backed by the operating-system time.
struct SystemClock;
|
||||
|
||||
impl Clock for SystemClock {
|
||||
fn timestamp_now(&self) -> Timestamp {
|
||||
// `SystemTime` is notoriously non-monotonic, so our timers might not work
|
||||
// exactly as expected. Regardless, stagnation is detected on the order of minutes,
|
||||
// and slippage of a few seconds in either direction won't cause any major harm.
|
||||
//
|
||||
// The exact time that a block becomes stagnant in the local node is always expected
|
||||
// to differ from other nodes due to network asynchrony and delays in block propagation.
|
||||
// Non-monotonicity exacerbates that somewhat, but not meaningfully.
|
||||
|
||||
match SystemTime::now().duration_since(UNIX_EPOCH) {
|
||||
Ok(d) => d.as_secs(),
|
||||
Err(e) => {
|
||||
gum::warn!(
|
||||
target: LOG_TARGET,
|
||||
err = ?e,
|
||||
"Current time is before unix epoch. Validation will not work correctly."
|
||||
);
|
||||
|
||||
0
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The interval at which to check for stagnant blocks.
/// `None` means the check never runs (see [`StagnantCheckInterval::never`]).
#[derive(Debug, Clone)]
pub struct StagnantCheckInterval(Option<Duration>);
||||
|
||||
impl Default for StagnantCheckInterval {
|
||||
fn default() -> Self {
|
||||
// 5 seconds is a reasonable balance between avoiding DB reads and
|
||||
// ensuring validators are generally in agreement on stagnant blocks.
|
||||
//
|
||||
// Assuming a network delay of D, the longest difference in view possible
|
||||
// between 2 validators is D + 5s.
|
||||
const DEFAULT_STAGNANT_CHECK_INTERVAL: Duration = Duration::from_secs(5);
|
||||
|
||||
StagnantCheckInterval(Some(DEFAULT_STAGNANT_CHECK_INTERVAL))
|
||||
}
|
||||
}
|
||||
|
||||
impl StagnantCheckInterval {
	/// Create a new stagnant-check interval wrapping the given duration.
	pub fn new(interval: Duration) -> Self {
		StagnantCheckInterval(Some(interval))
	}

	/// Create a `StagnantCheckInterval` which never triggers.
	pub fn never() -> Self {
		StagnantCheckInterval(None)
	}

	// Produce a stream yielding `()` once per interval, or a stream that
	// never yields when constructed via `never()`.
	fn timeout_stream(&self) -> impl Stream<Item = ()> {
		match self.0 {
			Some(interval) => Either::Left({
				let mut delay = futures_timer::Delay::new(interval);

				futures::stream::poll_fn(move |cx| {
					let poll = delay.poll_unpin(cx);
					// Re-arm the timer as soon as it fires so the stream
					// keeps producing items every `interval`.
					if poll.is_ready() {
						delay.reset(interval)
					}

					poll.map(Some)
				})
			}),
			None => Either::Right(futures::stream::pending()),
		}
	}
}
|
||||
|
||||
/// Mode of the stagnant check operations: check and prune, or prune only.
#[derive(Debug, Clone)]
pub enum StagnantCheckMode {
	/// Run full stagnance detection on the timer tick
	/// (dispatches to `detect_stagnant`).
	CheckAndPrune,
	/// Only prune stagnant-at entries older than `STAGNANT_PRUNE_DELAY`
	/// (dispatches to `prune_only_stagnant`).
	PruneOnly,
}
|
||||
|
||||
impl Default for StagnantCheckMode {
|
||||
fn default() -> Self {
|
||||
StagnantCheckMode::PruneOnly
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for the chain selection subsystem.
#[derive(Debug, Clone)]
pub struct Config {
	/// The column in the database that the storage should use.
	pub col_data: u32,
	/// How often to check for stagnant blocks.
	pub stagnant_check_interval: StagnantCheckInterval,
	/// Mode of stagnant checks (see [`StagnantCheckMode`]).
	pub stagnant_check_mode: StagnantCheckMode,
}
|
||||
|
||||
/// The chain selection subsystem.
pub struct ChainSelectionSubsystem {
	// Subsystem configuration.
	config: Config,
	// Handle to the underlying key-value database.
	db: Arc<dyn Database>,
}
|
||||
|
||||
impl ChainSelectionSubsystem {
|
||||
/// Create a new instance of the subsystem with the given config
|
||||
/// and key-value store.
|
||||
pub fn new(config: Config, db: Arc<dyn Database>) -> Self {
|
||||
ChainSelectionSubsystem { config, db }
|
||||
}
|
||||
|
||||
/// Revert to the block corresponding to the specified `hash`.
|
||||
/// The operation is not allowed for blocks older than the last finalized one.
|
||||
pub fn revert_to(&self, hash: Hash) -> Result<(), Error> {
|
||||
let config = db_backend::v1::Config { col_data: self.config.col_data };
|
||||
let mut backend = db_backend::v1::DbBackend::new(self.db.clone(), config);
|
||||
|
||||
let ops = tree::revert_to(&backend, hash)?.into_write_ops();
|
||||
|
||||
backend.write(ops)
|
||||
}
|
||||
}
|
||||
|
||||
#[overseer::subsystem(ChainSelection, error = SubsystemError, prefix = self::overseer)]
impl<Context> ChainSelectionSubsystem {
	// Spawn the subsystem's main loop, backed by the v1 database backend
	// and the system clock.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let backend = db_backend::v1::DbBackend::new(
			self.db,
			db_backend::v1::Config { col_data: self.config.col_data },
		);

		SpawnedSubsystem {
			future: run(
				ctx,
				backend,
				self.config.stagnant_check_interval,
				self.config.stagnant_check_mode,
				Box::new(SystemClock),
			)
			.map(Ok)
			.boxed(),
			name: "chain-selection-subsystem",
		}
	}
}
|
||||
|
||||
// The subsystem entry point: drives `run_until_error` and handles its exit.
//
// NOTE: since both match arms `break`, the loop currently runs at most one
// iteration — all errors are treated as fatal. The inner
// `#![allow(clippy::all)]` presumably silences the resulting
// `clippy::never_loop` lint — TODO confirm before removing it.
#[overseer::contextbounds(ChainSelection, prefix = self::overseer)]
async fn run<Context, B>(
	mut ctx: Context,
	mut backend: B,
	stagnant_check_interval: StagnantCheckInterval,
	stagnant_check_mode: StagnantCheckMode,
	clock: Box<dyn Clock + Send + Sync>,
) where
	B: Backend,
{
	#![allow(clippy::all)]
	loop {
		let res = run_until_error(
			&mut ctx,
			&mut backend,
			&stagnant_check_interval,
			&stagnant_check_mode,
			&*clock,
		)
		.await;
		match res {
			Err(e) => {
				e.trace();
				// All errors are considered fatal right now:
				break;
			},
			Ok(()) => {
				gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
				break;
			},
		}
	}
}
|
||||
|
||||
// Run the subsystem until an error is encountered or a `conclude` signal is received.
// Most errors are non-fatal and should lead to another call to this function.
//
// A return value of `Ok` indicates that an exit should be made, while non-fatal errors
// lead to another call to this function.
#[overseer::contextbounds(ChainSelection, prefix = self::overseer)]
async fn run_until_error<Context, B>(
	ctx: &mut Context,
	backend: &mut B,
	stagnant_check_interval: &StagnantCheckInterval,
	stagnant_check_mode: &StagnantCheckMode,
	clock: &(dyn Clock + Sync),
) -> Result<(), Error>
where
	B: Backend,
{
	let mut stagnant_check_stream = stagnant_check_interval.timeout_stream();
	loop {
		// Race overseer messages against the stagnant-check timer.
		futures::select! {
			msg = ctx.recv().fuse() => {
				let msg = msg?;
				match msg {
					FromOrchestra::Signal(OverseerSignal::Conclude) => {
						return Ok(())
					}
					FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
						if let Some(leaf) = update.activated {
							// Import the new leaf (and any newly-discovered
							// ancestors), scheduling stagnation
							// `STAGNANT_TIMEOUT` seconds from now.
							let write_ops = handle_active_leaf(
								ctx.sender(),
								&*backend,
								clock.timestamp_now() + STAGNANT_TIMEOUT,
								leaf.hash,
							).await?;

							backend.write(write_ops)?;
						}
					}
					FromOrchestra::Signal(OverseerSignal::BlockFinalized(h, n)) => {
						handle_finalized_block(backend, h, n)?
					}
					FromOrchestra::Communication { msg } => match msg {
						ChainSelectionMessage::Approved(hash) => {
							handle_approved_block(backend, hash)?
						}
						ChainSelectionMessage::Leaves(tx) => {
							let leaves = load_leaves(ctx.sender(), &*backend).await?;
							let _ = tx.send(leaves);
						}
						ChainSelectionMessage::BestLeafContaining(required, tx) => {
							let best_containing = backend::find_best_leaf_containing(
								&*backend,
								required,
							)?;

							// note - this may be none if the finalized block is
							// a leaf. this is fine according to the expected usage of the
							// function. `None` responses should just `unwrap_or(required)`,
							// so if the required block is the finalized block, then voilá.

							let _ = tx.send(best_containing);
						}
						ChainSelectionMessage::RevertBlocks(blocks_to_revert) => {
							let write_ops = handle_revert_blocks(backend, blocks_to_revert)?;
							backend.write(write_ops)?;
						}
					}
				}
			}
			_ = stagnant_check_stream.next().fuse() => {
				match stagnant_check_mode {
					StagnantCheckMode::CheckAndPrune => detect_stagnant(backend, clock.timestamp_now(), MAX_STAGNANT_ENTRIES),
					StagnantCheckMode::PruneOnly => {
						// In prune-only mode, clean up entries older than
						// `STAGNANT_PRUNE_DELAY` without flagging stagnance.
						let now_timestamp = clock.timestamp_now();
						prune_only_stagnant(backend, now_timestamp - STAGNANT_PRUNE_DELAY, MAX_STAGNANT_ENTRIES)
					},
				}?;
			}
		}
	}
}
|
||||
|
||||
async fn fetch_finalized(
|
||||
sender: &mut impl SubsystemSender<ChainApiMessage>,
|
||||
) -> Result<Option<(Hash, BlockNumber)>, Error> {
|
||||
let (number_tx, number_rx) = oneshot::channel();
|
||||
|
||||
sender.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx)).await;
|
||||
|
||||
let number = match number_rx.await? {
|
||||
Ok(number) => number,
|
||||
Err(err) => {
|
||||
gum::warn!(target: LOG_TARGET, ?err, "Fetching finalized number failed");
|
||||
return Ok(None);
|
||||
},
|
||||
};
|
||||
|
||||
let (hash_tx, hash_rx) = oneshot::channel();
|
||||
|
||||
sender.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx)).await;
|
||||
|
||||
match hash_rx.await? {
|
||||
Err(err) => {
|
||||
gum::warn!(target: LOG_TARGET, number, ?err, "Fetching finalized block number failed");
|
||||
Ok(None)
|
||||
},
|
||||
Ok(None) => {
|
||||
gum::warn!(target: LOG_TARGET, number, "Missing hash for finalized block number");
|
||||
Ok(None)
|
||||
},
|
||||
Ok(Some(h)) => Ok(Some((h, number))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn fetch_header(
|
||||
sender: &mut impl SubsystemSender<ChainApiMessage>,
|
||||
hash: Hash,
|
||||
) -> Result<Option<Header>, Error> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
sender.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;
|
||||
|
||||
Ok(rx.await?.unwrap_or_else(|err| {
|
||||
gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
|
||||
None
|
||||
}))
|
||||
}
|
||||
|
||||
async fn fetch_block_weight(
|
||||
sender: &mut impl overseer::SubsystemSender<ChainApiMessage>,
|
||||
hash: Hash,
|
||||
) -> Result<Option<BlockWeight>, Error> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
sender.send_message(ChainApiMessage::BlockWeight(hash, tx)).await;
|
||||
|
||||
let res = rx.await?;
|
||||
|
||||
Ok(res.unwrap_or_else(|err| {
|
||||
gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
|
||||
None
|
||||
}))
|
||||
}
|
||||
|
||||
// Handle a new active leaf.
//
// Determines all blocks between the new head and the last known block
// (or finalized), fetches their weights, and imports them into an overlay,
// returning the resulting write operations for the caller to commit.
async fn handle_active_leaf(
	sender: &mut impl overseer::ChainSelectionSenderTrait,
	backend: &impl Backend,
	stagnant_at: Timestamp,
	hash: Hash,
) -> Result<Vec<BackendWriteOp>, Error> {
	let lower_bound = match backend.load_first_block_number()? {
		Some(l) => {
			// We want to iterate back to finalized, and first block number
			// is assumed to be 1 above finalized - the implicit root of the
			// tree.
			l.saturating_sub(1)
		},
		None => fetch_finalized(sender).await?.map_or(1, |(_, n)| n),
	};

	let header = match fetch_header(sender, hash).await? {
		None => {
			// Without the header, nothing can be imported.
			gum::warn!(target: LOG_TARGET, ?hash, "Missing header for new head");
			return Ok(Vec::new());
		},
		Some(h) => h,
	};

	let new_blocks = pezkuwi_node_subsystem_util::determine_new_blocks(
		sender,
		|h| backend.load_block_entry(h).map(|b| b.is_some()),
		hash,
		&header,
		lower_bound,
	)
	.await?;

	let mut overlay = OverlayedBackend::new(backend);

	// determine_new_blocks gives blocks in descending order.
	// for this, we want ascending order.
	for (hash, header) in new_blocks.into_iter().rev() {
		let weight = match fetch_block_weight(sender, hash).await? {
			None => {
				gum::warn!(
					target: LOG_TARGET,
					?hash,
					"Missing block weight for new head. Skipping chain.",
				);

				// If we don't know the weight, we can't import the block.
				// And none of its descendants either.
				break;
			},
			Some(w) => w,
		};

		let reversion_logs = extract_reversion_logs(&header);
		tree::import_block(
			&mut overlay,
			hash,
			header.number,
			header.parent_hash,
			reversion_logs,
			weight,
			stagnant_at,
		)?;
	}

	Ok(overlay.into_write_ops().collect())
}
|
||||
|
||||
// Extract all reversion logs from a header in ascending order.
|
||||
//
|
||||
// Ignores logs with number > the block header number.
|
||||
fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
|
||||
let number = header.number;
|
||||
let mut logs = header
|
||||
.digest
|
||||
.logs()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) {
|
||||
Err(e) => {
|
||||
gum::warn!(
|
||||
target: LOG_TARGET,
|
||||
err = ?e,
|
||||
index = i,
|
||||
block_hash = ?header.hash(),
|
||||
"Digest item failed to encode"
|
||||
);
|
||||
|
||||
None
|
||||
},
|
||||
Ok(Some(ConsensusLog::Revert(b))) if b <= number => Some(b),
|
||||
Ok(Some(ConsensusLog::Revert(b))) => {
|
||||
gum::warn!(
|
||||
target: LOG_TARGET,
|
||||
revert_target = b,
|
||||
block_number = number,
|
||||
block_hash = ?header.hash(),
|
||||
"Block issued invalid revert digest targeting future"
|
||||
);
|
||||
|
||||
None
|
||||
},
|
||||
Ok(_) => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
logs.sort();
|
||||
|
||||
logs
|
||||
}
|
||||
|
||||
/// Handle a finalized block event.
|
||||
fn handle_finalized_block(
|
||||
backend: &mut impl Backend,
|
||||
finalized_hash: Hash,
|
||||
finalized_number: BlockNumber,
|
||||
) -> Result<(), Error> {
|
||||
let ops = tree::finalize_block(&*backend, finalized_hash, finalized_number)?.into_write_ops();
|
||||
|
||||
backend.write(ops)
|
||||
}
|
||||
|
||||
// Handle an approved block event.
|
||||
fn handle_approved_block(backend: &mut impl Backend, approved_block: Hash) -> Result<(), Error> {
|
||||
let ops = {
|
||||
let mut overlay = OverlayedBackend::new(&*backend);
|
||||
|
||||
tree::approve_block(&mut overlay, approved_block)?;
|
||||
|
||||
overlay.into_write_ops()
|
||||
};
|
||||
|
||||
backend.write(ops)
|
||||
}
|
||||
|
||||
// Here we revert a provided group of blocks. The most common cause for this is that
|
||||
// the dispute coordinator has notified chain selection of a dispute which concluded
|
||||
// against a candidate.
|
||||
fn handle_revert_blocks(
|
||||
backend: &impl Backend,
|
||||
blocks_to_revert: Vec<(BlockNumber, Hash)>,
|
||||
) -> Result<Vec<BackendWriteOp>, Error> {
|
||||
let mut overlay = OverlayedBackend::new(backend);
|
||||
for (block_number, block_hash) in blocks_to_revert {
|
||||
tree::apply_single_reversion(&mut overlay, block_hash, block_number)?;
|
||||
}
|
||||
|
||||
Ok(overlay.into_write_ops().collect())
|
||||
}
|
||||
|
||||
fn detect_stagnant(
|
||||
backend: &mut impl Backend,
|
||||
now: Timestamp,
|
||||
max_elements: usize,
|
||||
) -> Result<(), Error> {
|
||||
let ops = {
|
||||
let overlay = tree::detect_stagnant(&*backend, now, max_elements)?;
|
||||
|
||||
overlay.into_write_ops()
|
||||
};
|
||||
|
||||
backend.write(ops)
|
||||
}
|
||||
|
||||
fn prune_only_stagnant(
|
||||
backend: &mut impl Backend,
|
||||
up_to: Timestamp,
|
||||
max_elements: usize,
|
||||
) -> Result<(), Error> {
|
||||
let ops = {
|
||||
let overlay = tree::prune_only_stagnant(&*backend, up_to, max_elements)?;
|
||||
|
||||
overlay.into_write_ops()
|
||||
};
|
||||
|
||||
backend.write(ops)
|
||||
}
|
||||
|
||||
// Load the leaves from the backend. If there are no leaves, then return
|
||||
// the finalized block.
|
||||
async fn load_leaves(
|
||||
sender: &mut impl overseer::SubsystemSender<ChainApiMessage>,
|
||||
backend: &impl Backend,
|
||||
) -> Result<Vec<Hash>, Error> {
|
||||
let leaves: Vec<_> = backend.load_leaves()?.into_hashes_descending().collect();
|
||||
|
||||
if leaves.is_empty() {
|
||||
Ok(fetch_finalized(sender).await?.map_or(Vec::new(), |(h, _)| vec![h]))
|
||||
} else {
|
||||
Ok(leaves)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,782 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Implements the tree-view over the data backend which we use to determine
|
||||
//! viable leaves.
|
||||
//!
|
||||
//! The metadata is structured as a tree, with the root implicitly being the
|
||||
//! finalized block, which is not stored as part of the tree.
|
||||
//!
|
||||
//! Each direct descendant of the finalized block acts as its own sub-tree,
|
||||
//! and as the finalized block advances, orphaned sub-trees are entirely pruned.
|
||||
|
||||
use pezkuwi_node_primitives::BlockWeight;
|
||||
use pezkuwi_node_subsystem::ChainApiError;
|
||||
use pezkuwi_primitives::{BlockNumber, Hash};
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use super::{Approval, BlockEntry, Error, LeafEntry, Timestamp, ViabilityCriteria, LOG_TARGET};
|
||||
use crate::backend::{Backend, OverlayedBackend};
|
||||
|
||||
// A viability update to be applied to a block.
//
// Carries the hash of the earliest unviable ancestor the target block
// should record, or `None` when all of its ancestors are viable.
struct ViabilityUpdate(Option<Hash>);
|
||||
|
||||
impl ViabilityUpdate {
	// Apply the viability update to a single block, yielding the updated
	// block entry along with a vector of children and the updates to apply
	// to them.
	fn apply(self, mut entry: BlockEntry) -> (BlockEntry, Vec<(Hash, ViabilityUpdate)>) {
		// 1. When an ancestor has changed from unviable to viable,
		// we erase the `earliest_unviable_ancestor` of all descendants
		// until encountering an explicitly unviable descendant D.
		//
		// We then update the `earliest_unviable_ancestor` for all
		// descendants of D to be equal to D.
		//
		// 2. When an ancestor A has changed from viable to unviable,
		// we update the `earliest_unviable_ancestor` for all blocks
		// to A.
		//
		// The following algorithm covers both cases.
		//
		// Furthermore, if there has been any change in viability,
		// it is necessary to visit every single descendant of the root
		// block.
		//
		// If a block B was unviable and is now viable, then every descendant
		// has an `earliest_unviable_ancestor` which must be updated either
		// to nothing or to the new earliest unviable ancestor.
		//
		// If a block B was viable and is now unviable, then every descendant
		// has an `earliest_unviable_ancestor` which needs to be set to B.

		let maybe_earliest_unviable = self.0;
		// Descendants inherit this block itself as their earliest unviable
		// ancestor when no earlier one exists and this block is not
		// explicitly viable.
		let next_earliest_unviable = {
			if maybe_earliest_unviable.is_none() && !entry.viability.is_explicitly_viable() {
				Some(entry.block_hash)
			} else {
				maybe_earliest_unviable
			}
		};
		// The entry itself records the *incoming* value; only its children
		// receive `next_earliest_unviable`.
		entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable;

		let recurse = entry
			.children
			.iter()
			.cloned()
			.map(move |c| (c, ViabilityUpdate(next_earliest_unviable)))
			.collect();

		(entry, recurse)
	}
}
|
||||
|
||||
// Propagate viability update to descendants of the given block. This writes
// the `base` entry as well as all descendants. If the parent of the block
// entry is not viable, this will not affect any descendants.
//
// If the block entry provided is self-unviable, then it's assumed that an
// unviability update needs to be propagated to descendants.
//
// If the block entry provided is self-viable, then it's assumed that a
// viability update needs to be propagated to descendants.
fn propagate_viability_update(
	backend: &mut OverlayedBackend<impl Backend>,
	base: BlockEntry,
) -> Result<(), Error> {
	// A frontier item: either an already-loaded entry (the base) or a hash
	// still to be loaded from the backend.
	enum BlockEntryRef {
		Explicit(BlockEntry),
		Hash(Hash),
	}

	if !base.viability.is_parent_viable() {
		// If the parent of the block is still unviable,
		// then the `earliest_unviable_ancestor` will not change
		// regardless of the change in the block here.
		//
		// Furthermore, in such cases, the set of viable leaves
		// does not change at all.
		backend.write_block_entry(base);
		return Ok(());
	}

	let mut viable_leaves = backend.load_leaves()?;

	// A mapping of Block Hash -> number
	// Where the hash is the hash of a viable block which has
	// at least 1 unviable child.
	//
	// The number is the number of known unviable children which is known
	// as the pivot count.
	let mut viability_pivots = HashMap::new();

	// If the base block is itself explicitly unviable,
	// this will change to a `Some(base_hash)` after the first
	// invocation.
	let viability_update = ViabilityUpdate(None);

	// Recursively apply update to tree.
	//
	// As we go, we remove any blocks from the leaves which are no longer viable
	// leaves. We also add blocks to the leaves-set which are obviously viable leaves.
	// And we build up a frontier of blocks which may either be viable leaves or
	// the ancestors of one.
	let mut tree_frontier = vec![(BlockEntryRef::Explicit(base), viability_update)];
	while let Some((entry_ref, update)) = tree_frontier.pop() {
		let entry = match entry_ref {
			BlockEntryRef::Explicit(entry) => entry,
			BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? {
				None => {
					gum::warn!(
						target: LOG_TARGET,
						block_hash = ?hash,
						"Missing expected block entry"
					);

					continue;
				},
				Some(entry) => entry,
			},
		};

		let (new_entry, children) = update.apply(entry);

		if new_entry.viability.is_viable() {
			// A block which is viable has a parent which is obviously not
			// in the viable leaves set.
			viable_leaves.remove(&new_entry.parent_hash);

			// Furthermore, if the block is viable and has no children,
			// it is viable by definition.
			if new_entry.children.is_empty() {
				viable_leaves.insert(new_entry.leaf_entry());
			}
		} else {
			// A block which is not viable is certainly not a viable leaf.
			viable_leaves.remove(&new_entry.block_hash);

			// When the parent is viable but the entry itself is not, that means
			// that the parent is a viability pivot. As we visit the children
			// of a viability pivot, we build up an exhaustive pivot count.
			if new_entry.viability.is_parent_viable() {
				*viability_pivots.entry(new_entry.parent_hash).or_insert(0) += 1;
			}
		}

		backend.write_block_entry(new_entry);

		tree_frontier
			.extend(children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)));
	}

	// Revisit the viability pivots now that we've traversed the entire subtree.
	// After this point, the viable leaves set is fully updated. A proof follows.
	//
	// If the base has become unviable, then we've iterated into all descendants,
	// made them unviable and removed them from the set. We know that the parent is
	// viable as this function is a no-op otherwise, so we need to see if the parent
	// has other children or not.
	//
	// If the base has become viable, then we've iterated into all descendants,
	// and found all blocks which are viable and have no children. We've already added
	// those blocks to the leaf set, but what we haven't detected
	// is blocks which are viable and have children, but all of the children are
	// unviable.
	//
	// The solution of viability pivots addresses both of these:
	//
	// When the base has become unviable, the parent's viability is unchanged and therefore
	// any leaves descending from parent but not base are still in the viable leaves set.
	// If the parent has only one child which is the base, the parent is now a viable leaf.
	// We've already visited the base in recursive search so the set of pivots should
	// contain only a single entry `(parent, 1)`. qed.
	//
	// When the base has become viable, we've already iterated into every descendant
	// of the base and thus have collected a set of pivots whose corresponding pivot
	// counts have already been exhaustively computed from their children. qed.
	for (pivot, pivot_count) in viability_pivots {
		match backend.load_block_entry(&pivot)? {
			None => {
				// This means the block is finalized. We might reach this
				// code path when the base is a child of the finalized block
				// and has become unviable.
				//
				// Each such child is the root of its own tree
				// which, as an invariant, does not depend on the viability
				// of the finalized block. So no siblings need to be inspected
				// and we can ignore it safely.
				//
				// Furthermore, if the set of viable leaves is empty, the
				// finalized block is implicitly the viable leaf.
				continue;
			},
			Some(entry) =>
				if entry.children.len() == pivot_count {
					viable_leaves.insert(entry.leaf_entry());
				},
		}
	}

	backend.write_leaves(viable_leaves);

	Ok(())
}
|
||||
|
||||
/// Imports a new block and applies any reversions to ancestors or the block itself.
|
||||
pub(crate) fn import_block(
|
||||
backend: &mut OverlayedBackend<impl Backend>,
|
||||
block_hash: Hash,
|
||||
block_number: BlockNumber,
|
||||
parent_hash: Hash,
|
||||
reversion_logs: Vec<BlockNumber>,
|
||||
weight: BlockWeight,
|
||||
stagnant_at: Timestamp,
|
||||
) -> Result<(), Error> {
|
||||
let block_entry =
|
||||
add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?;
|
||||
apply_reversions(backend, block_entry, reversion_logs)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Load the given ancestor's block entry, in descending order from the `block_hash`.
|
||||
// The ancestor_number must be not higher than the `block_entry`'s.
|
||||
//
|
||||
// The returned entry will be `None` if the range is invalid or any block in the path had
|
||||
// no entry present. If any block entry was missing, it can safely be assumed to
|
||||
// be finalized.
|
||||
fn load_ancestor(
|
||||
backend: &mut OverlayedBackend<impl Backend>,
|
||||
block_entry: &BlockEntry,
|
||||
ancestor_number: BlockNumber,
|
||||
) -> Result<Option<BlockEntry>, Error> {
|
||||
let block_hash = block_entry.block_hash;
|
||||
let block_number = block_entry.block_number;
|
||||
if block_number == ancestor_number {
|
||||
return Ok(Some(block_entry.clone()));
|
||||
} else if block_number < ancestor_number {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let mut current_hash = block_hash;
|
||||
let mut current_entry = None;
|
||||
|
||||
let segment_length = (block_number - ancestor_number) + 1;
|
||||
for _ in 0..segment_length {
|
||||
match backend.load_block_entry(¤t_hash)? {
|
||||
None => return Ok(None),
|
||||
Some(entry) => {
|
||||
let parent_hash = entry.parent_hash;
|
||||
current_entry = Some(entry);
|
||||
current_hash = parent_hash;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Current entry should always be `Some` here.
|
||||
Ok(current_entry)
|
||||
}
|
||||
|
||||
// Add a new block to the tree, which is assumed to be unreverted and unapproved,
// but not stagnant. It inherits viability from its parent, if any.
//
// This updates the parent entry, if any, and updates the viable leaves set accordingly.
// This also schedules a stagnation-check update and adds the block to the blocks-by-number
// mapping.
//
// Returns the freshly-written `BlockEntry`. The write steps below are numbered
// and intentionally ordered; they all go through the overlay, so nothing hits
// the underlying DB until the overlay is committed.
fn add_block(
	backend: &mut OverlayedBackend<impl Backend>,
	block_hash: Hash,
	block_number: BlockNumber,
	parent_hash: Hash,
	weight: BlockWeight,
	stagnant_at: Timestamp,
) -> Result<BlockEntry, Error> {
	let mut leaves = backend.load_leaves()?;
	// `None` if the parent is finalized (or unknown) — see `load_ancestor`'s
	// convention that missing entries are assumed finalized.
	let parent_entry = backend.load_block_entry(&parent_hash)?;

	// The earliest unviable ancestor this child inherits from its parent's
	// chain; `None` means the entire visible ancestry is viable.
	let inherited_viability =
		parent_entry.as_ref().and_then(|parent| parent.non_viable_ancestor_for_child());

	// 1. Add the block to the DB assuming it's not reverted.
	let block_entry = BlockEntry {
		block_hash,
		block_number,
		parent_hash,
		children: Vec::new(),
		viability: ViabilityCriteria {
			earliest_unviable_ancestor: inherited_viability,
			explicitly_reverted: false,
			approval: Approval::Unapproved,
		},
		weight,
	};
	backend.write_block_entry(block_entry.clone());

	// 2. Update leaves if inherited viability is fine.
	// The parent (if it was a viable leaf) stops being a leaf, and the new
	// block — which has no children yet — becomes one.
	if inherited_viability.is_none() {
		leaves.remove(&parent_hash);
		leaves.insert(LeafEntry { block_hash, block_number, weight });
		backend.write_leaves(leaves);
	}

	// 3. Update and write the parent
	if let Some(mut parent_entry) = parent_entry {
		parent_entry.children.push(block_hash);
		backend.write_block_entry(parent_entry);
	}

	// 4. Add to blocks-by-number.
	let mut blocks_by_number = backend.load_blocks_by_number(block_number)?;
	blocks_by_number.push(block_hash);
	backend.write_blocks_by_number(block_number, blocks_by_number);

	// 5. Add stagnation timeout. The block will be checked for stagnancy once
	// the `stagnant_at` timestamp is reached (see `detect_stagnant`).
	let mut stagnant_at_list = backend.load_stagnant_at(stagnant_at)?;
	stagnant_at_list.push(block_hash);
	backend.write_stagnant_at(stagnant_at, stagnant_at_list);

	Ok(block_entry)
}
|
||||
|
||||
/// Assuming that a block is already imported, accepts the number of the block
|
||||
/// as well as a list of reversions triggered by the block in ascending order.
|
||||
fn apply_reversions(
|
||||
backend: &mut OverlayedBackend<impl Backend>,
|
||||
block_entry: BlockEntry,
|
||||
reversions: Vec<BlockNumber>,
|
||||
) -> Result<(), Error> {
|
||||
// Note: since revert numbers are in ascending order, the expensive propagation
|
||||
// of unviability is only heavy on the first log.
|
||||
for revert_number in reversions {
|
||||
let maybe_block_entry = load_ancestor(backend, &block_entry, revert_number)?;
|
||||
if let Some(entry) = &maybe_block_entry {
|
||||
gum::trace!(
|
||||
target: LOG_TARGET,
|
||||
?revert_number,
|
||||
revert_hash = ?entry.block_hash,
|
||||
"Block marked as reverted via scraped on-chain reversions"
|
||||
);
|
||||
}
|
||||
revert_single_block_entry_if_present(
|
||||
backend,
|
||||
maybe_block_entry,
|
||||
None,
|
||||
revert_number,
|
||||
Some(block_entry.block_hash),
|
||||
Some(block_entry.block_number),
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Marks a single block as explicitly reverted, then propagates viability updates
|
||||
/// to all its children. This is triggered when the disputes subsystem signals that
|
||||
/// a dispute has concluded against a candidate.
|
||||
pub(crate) fn apply_single_reversion(
|
||||
backend: &mut OverlayedBackend<impl Backend>,
|
||||
revert_hash: Hash,
|
||||
revert_number: BlockNumber,
|
||||
) -> Result<(), Error> {
|
||||
gum::trace!(
|
||||
target: LOG_TARGET,
|
||||
?revert_number,
|
||||
?revert_hash,
|
||||
"Block marked as reverted via ChainSelectionMessage::RevertBlocks"
|
||||
);
|
||||
let maybe_block_entry = backend.load_block_entry(&revert_hash)?;
|
||||
revert_single_block_entry_if_present(
|
||||
backend,
|
||||
maybe_block_entry,
|
||||
Some(revert_hash),
|
||||
revert_number,
|
||||
None,
|
||||
None,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn revert_single_block_entry_if_present(
|
||||
backend: &mut OverlayedBackend<impl Backend>,
|
||||
maybe_block_entry: Option<BlockEntry>,
|
||||
maybe_revert_hash: Option<Hash>,
|
||||
revert_number: BlockNumber,
|
||||
maybe_reporting_hash: Option<Hash>,
|
||||
maybe_reporting_number: Option<BlockNumber>,
|
||||
) -> Result<(), Error> {
|
||||
match maybe_block_entry {
|
||||
None => {
|
||||
gum::warn!(
|
||||
target: LOG_TARGET,
|
||||
?maybe_revert_hash,
|
||||
revert_target = revert_number,
|
||||
?maybe_reporting_hash,
|
||||
?maybe_reporting_number,
|
||||
"The hammer has dropped. \
|
||||
The protocol has indicated that a finalized block be reverted. \
|
||||
Please inform an adult.",
|
||||
);
|
||||
},
|
||||
Some(mut block_entry) => {
|
||||
gum::info!(
|
||||
target: LOG_TARGET,
|
||||
?maybe_revert_hash,
|
||||
revert_target = revert_number,
|
||||
?maybe_reporting_hash,
|
||||
?maybe_reporting_number,
|
||||
"Unfinalized block reverted due to a bad teyrchain block.",
|
||||
);
|
||||
|
||||
block_entry.viability.explicitly_reverted = true;
|
||||
// Marks children of reverted block as non-viable
|
||||
propagate_viability_update(backend, block_entry)?;
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Finalize a block with the given number and hash.
///
/// This will prune all sub-trees not descending from the given block,
/// all block entries at or before the given height,
/// and will update the viability of all sub-trees descending from the given
/// block if the finalized block was not viable.
///
/// This is assumed to start with a fresh backend, and will produce
/// an overlay over the backend with all the changes applied.
pub(super) fn finalize_block<'a, B: Backend + 'a>(
	backend: &'a B,
	finalized_hash: Hash,
	finalized_number: BlockNumber,
) -> Result<OverlayedBackend<'a, B>, Error> {
	let earliest_stored_number = backend.load_first_block_number()?;
	// Shadow `backend` with the overlay; all reads/writes below go through it.
	let mut backend = OverlayedBackend::new(backend);

	let earliest_stored_number = match earliest_stored_number {
		None => {
			// This implies that there are no unfinalized blocks and hence nothing
			// to update.
			return Ok(backend);
		},
		Some(e) => e,
	};

	let mut viable_leaves = backend.load_leaves()?;

	// Walk all numbers up to the finalized number and remove those entries.
	// Note the range is exclusive: the finalized height itself is handled
	// separately below, as the finalized block's children must be preserved.
	for number in earliest_stored_number..finalized_number {
		let blocks_at = backend.load_blocks_by_number(number)?;
		backend.delete_blocks_by_number(number);

		for block in blocks_at {
			viable_leaves.remove(&block);
			backend.delete_block_entry(&block);
		}
	}

	// Remove all blocks at the finalized height, with the exception of the finalized block,
	// and their descendants, recursively.
	{
		let blocks_at_finalized_height = backend.load_blocks_by_number(finalized_number)?;
		backend.delete_blocks_by_number(finalized_number);

		// Seed a DFS frontier with the abandoned siblings of the finalized block.
		let mut frontier: Vec<_> = blocks_at_finalized_height
			.into_iter()
			.filter(|h| h != &finalized_hash)
			.map(|h| (h, finalized_number))
			.collect();

		while let Some((dead_hash, dead_number)) = frontier.pop() {
			let entry = backend.load_block_entry(&dead_hash)?;
			backend.delete_block_entry(&dead_hash);
			viable_leaves.remove(&dead_hash);

			// This does a few extra `clone`s but is unlikely to be
			// a bottleneck. Code complexity is very low as a result.
			let mut blocks_at_height = backend.load_blocks_by_number(dead_number)?;
			blocks_at_height.retain(|h| h != &dead_hash);
			backend.write_blocks_by_number(dead_number, blocks_at_height);

			// Add all children to the frontier.
			let next_height = dead_number + 1;
			frontier.extend(entry.into_iter().flat_map(|e| e.children).map(|h| (h, next_height)));
		}
	}

	// Visit and remove the finalized block, fetching its children.
	let children_of_finalized = {
		let finalized_entry = backend.load_block_entry(&finalized_hash)?;
		backend.delete_block_entry(&finalized_hash);
		viable_leaves.remove(&finalized_hash);

		finalized_entry.into_iter().flat_map(|e| e.children)
	};

	backend.write_leaves(viable_leaves);

	// Update the viability of each child. Each child is now the root of its
	// own tree rooted at the (implicitly viable) finalized block.
	for child in children_of_finalized {
		if let Some(mut child) = backend.load_block_entry(&child)? {
			// Finalized blocks are always viable.
			child.viability.earliest_unviable_ancestor = None;

			propagate_viability_update(&mut backend, child)?;
		} else {
			gum::debug!(
				target: LOG_TARGET,
				?finalized_hash,
				finalized_number,
				child_hash = ?child,
				"Missing child of finalized block",
			);

			// No need to do anything, but this is an inconsistent state.
		}
	}

	Ok(backend)
}
|
||||
|
||||
/// Mark a block as approved and update the viability of itself and its
|
||||
/// descendants accordingly.
|
||||
pub(super) fn approve_block(
|
||||
backend: &mut OverlayedBackend<impl Backend>,
|
||||
approved_hash: Hash,
|
||||
) -> Result<(), Error> {
|
||||
if let Some(mut entry) = backend.load_block_entry(&approved_hash)? {
|
||||
let was_viable = entry.viability.is_viable();
|
||||
entry.viability.approval = Approval::Approved;
|
||||
let is_viable = entry.viability.is_viable();
|
||||
|
||||
// Approval can change the viability in only one direction.
|
||||
// If the viability has changed, then we propagate that to children
|
||||
// and recalculate the viable leaf set.
|
||||
if !was_viable && is_viable {
|
||||
propagate_viability_update(backend, entry)?;
|
||||
} else {
|
||||
backend.write_block_entry(entry);
|
||||
}
|
||||
} else {
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
block_hash = ?approved_hash,
|
||||
"Missing entry for freshly-approved block. Ignoring"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check whether any blocks up to the given timestamp are stagnant and update
/// accordingly.
///
/// This accepts a fresh backend and returns an overlay on top of it representing
/// all changes made.
pub(super) fn detect_stagnant<'a, B: 'a + Backend>(
	backend: &'a B,
	up_to: Timestamp,
	max_elements: usize,
) -> Result<OverlayedBackend<'a, B>, Error> {
	// Entries come back in ascending timestamp order, capped at `max_elements`.
	let stagnant_up_to = backend.load_stagnant_at_up_to(up_to, max_elements)?;
	let mut backend = OverlayedBackend::new(backend);

	// Timestamp range of the batch — computed only for the debug log below.
	let (min_ts, max_ts) = match stagnant_up_to.len() {
		0 => (0 as Timestamp, 0 as Timestamp),
		1 => (stagnant_up_to[0].0, stagnant_up_to[0].0),
		n => (stagnant_up_to[0].0, stagnant_up_to[n - 1].0),
	};

	// As this is in ascending order, only the earliest stagnant
	// blocks will involve heavy viability propagations.
	gum::debug!(
		target: LOG_TARGET,
		?up_to,
		?min_ts,
		?max_ts,
		"Prepared {} stagnant entries for checking/pruning",
		stagnant_up_to.len()
	);

	for (timestamp, maybe_stagnant) in stagnant_up_to {
		// The timeout entry is consumed regardless of the blocks' state.
		backend.delete_stagnant_at(timestamp);

		for block_hash in maybe_stagnant {
			if let Some(mut entry) = backend.load_block_entry(&block_hash)? {
				let was_viable = entry.viability.is_viable();
				// Only still-unapproved blocks become stagnant; an approved
				// block that hit its timeout is left untouched.
				if let Approval::Unapproved = entry.viability.approval {
					entry.viability.approval = Approval::Stagnant;
				}
				let is_viable = entry.viability.is_viable();
				gum::trace!(
					target: LOG_TARGET,
					?block_hash,
					?timestamp,
					?was_viable,
					?is_viable,
					"Found existing stagnant entry"
				);

				// Stagnancy can only flip viability from viable to unviable;
				// propagate that to descendants, otherwise just persist.
				if was_viable && !is_viable {
					propagate_viability_update(&mut backend, entry)?;
				} else {
					backend.write_block_entry(entry);
				}
			} else {
				// Block was pruned (e.g. finalized) before its timeout fired.
				gum::trace!(
					target: LOG_TARGET,
					?block_hash,
					?timestamp,
					"Found non-existing stagnant entry"
				);
			}
		}
	}

	Ok(backend)
}
|
||||
|
||||
/// Prune stagnant entries at some timestamp without other checks
|
||||
/// This function is intended just to clean leftover entries when the real
|
||||
/// stagnant checks are disabled
|
||||
pub(super) fn prune_only_stagnant<'a, B: 'a + Backend>(
|
||||
backend: &'a B,
|
||||
up_to: Timestamp,
|
||||
max_elements: usize,
|
||||
) -> Result<OverlayedBackend<'a, B>, Error> {
|
||||
let stagnant_up_to = backend.load_stagnant_at_up_to(up_to, max_elements)?;
|
||||
let mut backend = OverlayedBackend::new(backend);
|
||||
|
||||
let (min_ts, max_ts) = match stagnant_up_to.len() {
|
||||
0 => (0 as Timestamp, 0 as Timestamp),
|
||||
1 => (stagnant_up_to[0].0, stagnant_up_to[0].0),
|
||||
n => (stagnant_up_to[0].0, stagnant_up_to[n - 1].0),
|
||||
};
|
||||
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
?up_to,
|
||||
?min_ts,
|
||||
?max_ts,
|
||||
"Prepared {} stagnant entries for pruning",
|
||||
stagnant_up_to.len()
|
||||
);
|
||||
|
||||
for (timestamp, _) in stagnant_up_to {
|
||||
backend.delete_stagnant_at(timestamp);
|
||||
}
|
||||
|
||||
Ok(backend)
|
||||
}
|
||||
|
||||
/// Revert the tree to the block relative to `hash`.
///
/// This accepts a fresh backend and returns an overlay on top of it representing
/// all changes made.
pub(super) fn revert_to<'a, B: Backend + 'a>(
	backend: &'a B,
	hash: Hash,
) -> Result<OverlayedBackend<'a, B>, Error> {
	let first_number = backend.load_first_block_number()?.unwrap_or_default();

	let mut backend = OverlayedBackend::new(backend);

	let mut entry = match backend.load_block_entry(&hash)? {
		Some(entry) => entry,
		None => {
			// May be a revert to the last finalized block. If this is the case,
			// then revert to this block should be handled specially since no
			// information about finalized blocks is persisted within the tree.
			//
			// We use part of the information contained in the finalized block
			// children (that are expected to be in the tree) to construct a
			// dummy block entry for the last finalized block. This will be
			// wiped as soon as the next block is finalized.

			let blocks = backend.load_blocks_by_number(first_number)?;

			// Use the first stored block at the lowest height to validate the
			// revert target and to borrow a weight value from.
			let block = blocks
				.first()
				.and_then(|hash| backend.load_block_entry(hash).ok())
				.flatten()
				.ok_or_else(|| {
					ChainApiError::from(format!(
						"Lookup failure for block at height {}",
						first_number
					))
				})?;

			// The parent is expected to be the last finalized block.
			if block.parent_hash != hash {
				return Err(ChainApiError::from("Can't revert below last finalized block").into());
			}

			// The weight is set to the one of the first child. Even though this is
			// not accurate, it does the job. The reason is that the revert point is
			// the last finalized block, i.e. this is the best and only choice.
			let block_number = first_number.saturating_sub(1);
			// The dummy finalized entry is viable by construction: approved,
			// not reverted, no unviable ancestors.
			let viability = ViabilityCriteria {
				explicitly_reverted: false,
				approval: Approval::Approved,
				earliest_unviable_ancestor: None,
			};
			let entry = BlockEntry {
				block_hash: hash,
				block_number,
				parent_hash: Hash::default(),
				children: blocks,
				viability,
				weight: block.weight,
			};
			// This becomes the first entry according to the block number.
			backend.write_blocks_by_number(block_number, vec![hash]);
			entry
		},
	};

	// Seed the removal stack with the revert point's children, taking them
	// out of `entry` so it can be re-written childless below.
	let mut stack: Vec<_> = std::mem::take(&mut entry.children)
		.into_iter()
		.map(|h| (h, entry.block_number + 1))
		.collect();

	// Write revert point block entry without the children.
	backend.write_block_entry(entry.clone());

	let mut viable_leaves = backend.load_leaves()?;

	// The revert point becomes a (the) viable leaf, as its whole subtree is
	// being removed.
	viable_leaves.insert(LeafEntry {
		block_hash: hash,
		block_number: entry.block_number,
		weight: entry.weight,
	});

	// Depth-first removal of every descendant of the revert point.
	while let Some((hash, number)) = stack.pop() {
		let entry = backend.load_block_entry(&hash)?;
		backend.delete_block_entry(&hash);

		viable_leaves.remove(&hash);

		let mut blocks_at_height = backend.load_blocks_by_number(number)?;
		blocks_at_height.retain(|h| h != &hash);
		backend.write_blocks_by_number(number, blocks_at_height);

		stack.extend(entry.into_iter().flat_map(|e| e.children).map(|h| (h, number + 1)));
	}

	backend.write_leaves(viable_leaves);

	Ok(backend)
}
|
||||
Reference in New Issue
Block a user